From 7598b3498bfdad49d2b19bf0cba829922a9689a9 Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Thu, 14 Sep 2017 23:01:51 -0400 Subject: forcedeth: replace pci_map_single with dma_map_single functions pci_map_single functions are obsolete. So replace them with dma_map_single functions. Signed-off-by: Zhu Yanjun Signed-off-by: David S. Miller --- drivers/net/ethernet/nvidia/forcedeth.c | 70 +++++++++++++++++---------------- 1 file changed, 36 insertions(+), 34 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 994a83a1f0a5..b605b94f4567 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -1813,12 +1813,12 @@ static int nv_alloc_rx(struct net_device *dev) struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD); if (skb) { np->put_rx_ctx->skb = skb; - np->put_rx_ctx->dma = pci_map_single(np->pci_dev, + np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev, skb->data, skb_tailroom(skb), - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(np->pci_dev, - np->put_rx_ctx->dma)) { + DMA_FROM_DEVICE); + if (dma_mapping_error(&np->pci_dev->dev, + np->put_rx_ctx->dma)) { kfree_skb(skb); goto packet_dropped; } @@ -1854,12 +1854,12 @@ static int nv_alloc_rx_optimized(struct net_device *dev) struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD); if (skb) { np->put_rx_ctx->skb = skb; - np->put_rx_ctx->dma = pci_map_single(np->pci_dev, + np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev, skb->data, skb_tailroom(skb), - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(np->pci_dev, - np->put_rx_ctx->dma)) { + DMA_FROM_DEVICE); + if (dma_mapping_error(&np->pci_dev->dev, + np->put_rx_ctx->dma)) { kfree_skb(skb); goto packet_dropped; } @@ -1977,9 +1977,9 @@ static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb) { if (tx_skb->dma) { if (tx_skb->dma_single) - pci_unmap_single(np->pci_dev, tx_skb->dma, + dma_unmap_single(&np->pci_dev->dev, tx_skb->dma, tx_skb->dma_len, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); else pci_unmap_page(np->pci_dev, tx_skb->dma, tx_skb->dma_len, @@ -2047,10 +2047,10 @@ static void nv_drain_rx(struct net_device *dev) } wmb(); if (np->rx_skb[i].skb) { - pci_unmap_single(np->pci_dev, np->rx_skb[i].dma, + dma_unmap_single(&np->pci_dev->dev, np->rx_skb[i].dma, (skb_end_pointer(np->rx_skb[i].skb) - - np->rx_skb[i].skb->data), - PCI_DMA_FROMDEVICE); + np->rx_skb[i].skb->data), + DMA_FROM_DEVICE); dev_kfree_skb(np->rx_skb[i].skb); np->rx_skb[i].skb = NULL; } @@ -2224,10 +2224,11 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) prev_tx = put_tx; prev_tx_ctx = np->put_tx_ctx; bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; - np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(np->pci_dev, - np->put_tx_ctx->dma)) { + np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev, + skb->data + offset, bcnt, + DMA_TO_DEVICE); + if (dma_mapping_error(&np->pci_dev->dev, + np->put_tx_ctx->dma)) { /* on DMA mapping error - drop the packet */ dev_kfree_skb_any(skb); u64_stats_update_begin(&np->swstats_tx_syncp); @@ -2373,10 +2374,11 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, prev_tx = put_tx; prev_tx_ctx = np->put_tx_ctx; bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? 
NV_TX2_TSO_MAX_SIZE : size; - np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt, - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(np->pci_dev, - np->put_tx_ctx->dma)) { + np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev, + skb->data + offset, bcnt, + DMA_TO_DEVICE); + if (dma_mapping_error(&np->pci_dev->dev, + np->put_tx_ctx->dma)) { /* on DMA mapping error - drop the packet */ dev_kfree_skb_any(skb); u64_stats_update_begin(&np->swstats_tx_syncp); @@ -2810,9 +2812,9 @@ static int nv_rx_process(struct net_device *dev, int limit) * TODO: check if a prefetch of the first cacheline improves * the performance. */ - pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, - np->get_rx_ctx->dma_len, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma, + np->get_rx_ctx->dma_len, + DMA_FROM_DEVICE); skb = np->get_rx_ctx->skb; np->get_rx_ctx->skb = NULL; @@ -2916,9 +2918,9 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit) * TODO: check if a prefetch of the first cacheline improves * the performance. */ - pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma, - np->get_rx_ctx->dma_len, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&np->pci_dev->dev, np->get_rx_ctx->dma, + np->get_rx_ctx->dma_len, + DMA_FROM_DEVICE); skb = np->get_rx_ctx->skb; np->get_rx_ctx->skb = NULL; @@ -5070,11 +5072,11 @@ static int nv_loopback_test(struct net_device *dev) ret = 0; goto out; } - test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data, + test_dma_addr = dma_map_single(&np->pci_dev->dev, tx_skb->data, skb_tailroom(tx_skb), - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(np->pci_dev, - test_dma_addr)) { + DMA_FROM_DEVICE); + if (dma_mapping_error(&np->pci_dev->dev, + test_dma_addr)) { dev_kfree_skb_any(tx_skb); goto out; } @@ -5129,9 +5131,9 @@ static int nv_loopback_test(struct net_device *dev) } } - pci_unmap_single(np->pci_dev, test_dma_addr, - (skb_end_pointer(tx_skb) - tx_skb->data), - PCI_DMA_TODEVICE); + dma_unmap_single(&np->pci_dev->dev, test_dma_addr, + (skb_end_pointer(tx_skb) - tx_skb->data), + DMA_TO_DEVICE); dev_kfree_skb_any(tx_skb); out: /* stop engines */ -- cgit v1.2.3
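The conversion pattern applied throughout the forcedeth patch above, reduced to a minimal before/after sketch (illustrative only, not lines taken from the diff; pdev stands for the driver's struct pci_dev pointer):

	dma_addr_t dma;

	/* before: legacy PCI DMA wrappers, now obsolete */
	dma = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma))
		goto drop;
	/* ... hardware uses the mapping ... */
	pci_unmap_single(pdev, dma, len, PCI_DMA_FROMDEVICE);

	/* after: generic DMA API, operating on the embedded struct device */
	dma = dma_map_single(&pdev->dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma))
		goto drop;
	/* ... hardware uses the mapping ... */
	dma_unmap_single(&pdev->dev, dma, len, DMA_FROM_DEVICE);

The mapping is mechanical: pci_*() becomes dma_*(), the struct pci_dev argument becomes &pdev->dev, and the PCI_DMA_* direction flags become the corresponding enum dma_data_direction values (DMA_TO_DEVICE, DMA_FROM_DEVICE).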
From 2df9d6730215db85f7306f0bda03b7391e392837 Mon Sep 17 00:00:00 2001 From: Valentin Longchamp Date: Fri, 15 Sep 2017 07:58:47 +0200 Subject: net/ethernet/freescale: fix warning for ucc_geth uf_info.regs is resource_size_t, i.e. phys_addr_t, which can be either u32 or u64 according to CONFIG_PHYS_ADDR_T_64BIT. The printk format is thus adapted to u64 and the regs value cast to u64 to take both u32 and u64 into account. Signed-off-by: Valentin Longchamp Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/ucc_geth.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index f77ba9fa257b..a96b838cffce 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3857,8 +3857,9 @@ static int ucc_geth_probe(struct platform_device* ofdev) } if (netif_msg_probe(&debug)) - pr_info("UCC%1d at 0x%8x (irq = %d)\n", - ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs, + pr_info("UCC%1d at 0x%8llx (irq = %d)\n", + ug_info->uf_info.ucc_num + 1, + (u64)ug_info->uf_info.regs, ug_info->uf_info.irq); /* Create an ethernet device instance */ -- cgit v1.2.3
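The warning fix above boils down to one pattern: resource_size_t (a typedef of phys_addr_t) is 32 or 64 bits wide depending on CONFIG_PHYS_ADDR_T_64BIT, so no fixed printk format matches both builds. A short sketch of the two portable options (illustrative, not part of the patch):

	resource_size_t regs = ug_info->uf_info.regs;

	/* cast to the widest type so one format specifier fits both builds */
	pr_info("regs = 0x%08llx\n", (u64)regs);

	/* alternatively, %pa prints a phys_addr_t given its address */
	pr_info("regs = %pa\n", &regs);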
From 7ce103b4cbb20babf19b881e28228b7fd40ce0b3 Mon Sep 17 00:00:00 2001 From: Roman Yeryomin Date: Sun, 17 Sep 2017 20:24:15 +0300 Subject: net: korina: don't use overflow and underflow interrupts When such interrupts occur there is not much we can do. Dropping the whole ring doesn't help and only produces high packet loss. If we just ignore the interrupt, the MAC will drop one or a few packets instead of the whole ring. This also lowers the IRQ handling load and increases performance. Signed-off-by: Roman Yeryomin Signed-off-by: David S. Miller --- drivers/net/ethernet/korina.c | 83 +------------------------------------------ 1 file changed, 1 insertion(+), 82 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 3c0a6451273d..98d686ed69a9 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -122,8 +122,6 @@ struct korina_private { int rx_irq; int tx_irq; - int ovr_irq; - int und_irq; spinlock_t lock; /* NIC xmit lock */ @@ -891,8 +889,6 @@ static void korina_restart_task(struct work_struct *work) */ disable_irq(lp->rx_irq); disable_irq(lp->tx_irq); - disable_irq(lp->ovr_irq); - disable_irq(lp->und_irq); writel(readl(&lp->tx_dma_regs->dmasm) | DMA_STAT_FINI | DMA_STAT_ERR, @@ -911,40 +907,10 @@ static void korina_restart_task(struct work_struct *work) } korina_multicast_list(dev); - enable_irq(lp->und_irq); - enable_irq(lp->ovr_irq); enable_irq(lp->tx_irq); enable_irq(lp->rx_irq); } -static void korina_clear_and_restart(struct net_device *dev, u32 value) -{ - struct korina_private *lp = netdev_priv(dev); - - netif_stop_queue(dev); - writel(value, &lp->eth_regs->ethintfc); - schedule_work(&lp->restart_task); -} - -/* Ethernet Tx Underflow interrupt */ -static irqreturn_t korina_und_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct korina_private *lp = netdev_priv(dev); - unsigned int und; - - spin_lock(&lp->lock); - - und = readl(&lp->eth_regs->ethintfc); - - if (und & ETH_INT_FC_UND) - korina_clear_and_restart(dev, und & ~ETH_INT_FC_UND); - - spin_unlock(&lp->lock); - - return IRQ_HANDLED; -} - static void korina_tx_timeout(struct net_device *dev) { struct korina_private *lp = netdev_priv(dev); @@ -952,25 +918,6 @@ static void korina_tx_timeout(struct net_device *dev) schedule_work(&lp->restart_task); } -/* Ethernet Rx Overflow interrupt */ -static irqreturn_t -korina_ovr_interrupt(int irq, void *dev_id) -{ - struct net_device *dev = dev_id; - struct korina_private *lp = netdev_priv(dev); - unsigned int ovr; - - spin_lock(&lp->lock); - ovr = readl(&lp->eth_regs->ethintfc); - - if (ovr & ETH_INT_FC_OVR) - korina_clear_and_restart(dev, ovr & ~ETH_INT_FC_OVR); - - spin_unlock(&lp->lock); - - return IRQ_HANDLED; -} - #ifdef CONFIG_NET_POLL_CONTROLLER static void korina_poll_controller(struct net_device *dev) { @@ -993,8 +940,7 @@ static int korina_open(struct net_device *dev) } /* Install the interrupt handler - * that handles the Done Finished - * Ovr and Und Events */ + * that handles the Done Finished */ ret = request_irq(lp->rx_irq, korina_rx_dma_interrupt, 0, "Korina ethernet Rx", dev); if (ret < 0) { @@ -1010,31 +956,10 @@ static int korina_open(struct net_device *dev) goto err_free_rx_irq; } - /* Install handler for overrun error. */ - ret = request_irq(lp->ovr_irq, korina_ovr_interrupt, - 0, "Ethernet Overflow", dev); - if (ret < 0) { - printk(KERN_ERR "%s: unable to get OVR IRQ %d\n", - dev->name, lp->ovr_irq); - goto err_free_tx_irq; - } - - /* Install handler for underflow error. */ - ret = request_irq(lp->und_irq, korina_und_interrupt, - 0, "Ethernet Underflow", dev); - if (ret < 0) { - printk(KERN_ERR "%s: unable to get UND IRQ %d\n", - dev->name, lp->und_irq); - goto err_free_ovr_irq; - } mod_timer(&lp->media_check_timer, jiffies + 1); out: return ret; -err_free_ovr_irq: - free_irq(lp->ovr_irq, dev); -err_free_tx_irq: - free_irq(lp->tx_irq, dev); err_free_rx_irq: free_irq(lp->rx_irq, dev); err_release: @@ -1052,8 +977,6 @@ static int korina_close(struct net_device *dev) /* Disable interrupts */ disable_irq(lp->rx_irq); disable_irq(lp->tx_irq); - disable_irq(lp->ovr_irq); - disable_irq(lp->und_irq); korina_abort_tx(dev); tmp = readl(&lp->tx_dma_regs->dmasm); @@ -1073,8 +996,6 @@ static int korina_close(struct net_device *dev) free_irq(lp->rx_irq, dev); free_irq(lp->tx_irq, dev); - free_irq(lp->ovr_irq, dev); - free_irq(lp->und_irq, dev); return 0; } @@ -1113,8 +1034,6 @@ static int korina_probe(struct platform_device *pdev) lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx"); lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx"); - lp->ovr_irq = platform_get_irq_byname(pdev, "korina_ovr"); - lp->und_irq = platform_get_irq_byname(pdev, "korina_und"); r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs"); dev->base_addr = r->start; -- cgit v1.2.3 From 364a97f5d1ae3102d53a3ad1efea3fa546781f78 Mon Sep 17 00:00:00 2001 From: Roman Yeryomin Date: Sun, 17 Sep 2017 20:24:26 +0300 Subject: net: korina: optimize rx descriptor flags processing Signed-off-by: Roman Yeryomin Signed-off-by: David S.
Miller --- drivers/net/ethernet/korina.c | 87 ++++++++++++++++++++++--------------------- 1 file changed, 44 insertions(+), 43 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 98d686ed69a9..e5466e19994a 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -363,59 +363,60 @@ static int korina_rx(struct net_device *dev, int limit) if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0) break; - /* Update statistics counters */ - if (devcs & ETH_RX_CRC) - dev->stats.rx_crc_errors++; - if (devcs & ETH_RX_LOR) - dev->stats.rx_length_errors++; - if (devcs & ETH_RX_LE) - dev->stats.rx_length_errors++; - if (devcs & ETH_RX_OVR) - dev->stats.rx_fifo_errors++; - if (devcs & ETH_RX_CV) - dev->stats.rx_frame_errors++; - if (devcs & ETH_RX_CES) - dev->stats.rx_length_errors++; - if (devcs & ETH_RX_MP) - dev->stats.multicast++; - - if ((devcs & ETH_RX_LD) != ETH_RX_LD) { - /* check that this is a whole packet - * WARNING: DMA_FD bit incorrectly set - * in Rc32434 (errata ref #077) */ + /* check that this is a whole packet + * WARNING: DMA_FD bit incorrectly set + * in Rc32434 (errata ref #077) */ + if (!(devcs & ETH_RX_LD)) + goto next; + + if (!(devcs & ETH_RX_ROK)) { + /* Update statistics counters */ dev->stats.rx_errors++; dev->stats.rx_dropped++; - } else if ((devcs & ETH_RX_ROK)) { - pkt_len = RCVPKT_LENGTH(devcs); + if (devcs & ETH_RX_CRC) + dev->stats.rx_crc_errors++; + if (devcs & ETH_RX_LE) + dev->stats.rx_length_errors++; + if (devcs & ETH_RX_OVR) + dev->stats.rx_fifo_errors++; + if (devcs & ETH_RX_CV) + dev->stats.rx_frame_errors++; + if (devcs & ETH_RX_CES) + dev->stats.rx_frame_errors++; + + goto next; + } - /* must be the (first and) last - * descriptor then */ - pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data; + pkt_len = RCVPKT_LENGTH(devcs); - /* invalidate the cache */ - dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4); + /* must be the (first and) last + * descriptor then */ + pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data; - /* Malloc up new buffer. */ - skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE); + /* invalidate the cache */ + dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4); - if (!skb_new) - break; - /* Do not count the CRC */ - skb_put(skb, pkt_len - 4); - skb->protocol = eth_type_trans(skb, dev); + /* Malloc up new buffer. */ + skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE); - /* Pass the packet to upper layers */ - netif_receive_skb(skb); - dev->stats.rx_packets++; - dev->stats.rx_bytes += pkt_len; + if (!skb_new) + break; + /* Do not count the CRC */ + skb_put(skb, pkt_len - 4); + skb->protocol = eth_type_trans(skb, dev); - /* Update the mcast stats */ - if (devcs & ETH_RX_MP) - dev->stats.multicast++; + /* Pass the packet to upper layers */ + netif_receive_skb(skb); + dev->stats.rx_packets++; + dev->stats.rx_bytes += pkt_len; - lp->rx_skb[lp->rx_next_done] = skb_new; - } + /* Update the mcast stats */ + if (devcs & ETH_RX_MP) + dev->stats.multicast++; + + lp->rx_skb[lp->rx_next_done] = skb_new; +next: rd->devcs = 0; /* Restore descriptor's curr_addr */ -- cgit v1.2.3 From d609d2893c25a3336422d06e1aff50d13ba5b7f2 Mon Sep 17 00:00:00 2001 From: Roman Yeryomin Date: Sun, 17 Sep 2017 20:24:38 +0300 Subject: net: korina: use NAPI_POLL_WEIGHT Signed-off-by: Roman Yeryomin Signed-off-by: David S. 
Miller --- drivers/net/ethernet/korina.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index e5466e19994a..c210add9b654 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -1082,7 +1082,7 @@ static int korina_probe(struct platform_device *pdev) dev->netdev_ops = &korina_netdev_ops; dev->ethtool_ops = &netdev_ethtool_ops; dev->watchdog_timeo = TX_TIMEOUT; - netif_napi_add(dev, &lp->napi, korina_poll, 64); + netif_napi_add(dev, &lp->napi, korina_poll, NAPI_POLL_WEIGHT); lp->phy_addr = (((lp->rx_irq == 0x2c? 1:0) << 8) | 0x05); lp->mii_if.dev = dev; -- cgit v1.2.3 From 247c78f2bed0c4d72c381c9caf429173513dcc51 Mon Sep 17 00:00:00 2001 From: Roman Yeryomin Date: Sun, 17 Sep 2017 20:24:50 +0300 Subject: net: korina: use GRO Performance gain when receiving locally is 55->95Mbps and 50->65Mbps for NAT. Signed-off-by: Roman Yeryomin Signed-off-by: David S. Miller --- drivers/net/ethernet/korina.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index c210add9b654..5f36e1703378 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -406,7 +406,7 @@ static int korina_rx(struct net_device *dev, int limit) skb->protocol = eth_type_trans(skb, dev); /* Pass the packet to upper layers */ - netif_receive_skb(skb); + napi_gro_receive(&lp->napi, skb); dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len; -- cgit v1.2.3
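Switching from netif_receive_skb() to napi_gro_receive() lets the stack coalesce consecutive TCP segments into one large skb before it climbs the IP layer, which is where the 55->95Mbps local and 50->65Mbps NAT gains quoted above come from. The call sits in the rx path invoked from the NAPI poll handler; a simplified sketch of that shape (not the driver's actual korina_poll(), and the struct layout is assumed):

	static int korina_poll(struct napi_struct *napi, int budget)
	{
		struct korina_private *lp =
			container_of(napi, struct korina_private, napi);
		int work_done;

		/* korina_rx() hands each completed skb to napi_gro_receive() */
		work_done = korina_rx(lp->dev, budget);
		if (work_done < budget) {
			napi_complete(napi);
			/* re-enable rx interrupts here */
		}
		return work_done;
	}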
From 2e5396b14db3c885c5b9de698ecf38652a0f3c15 Mon Sep 17 00:00:00 2001 From: Roman Yeryomin Date: Sun, 17 Sep 2017 20:25:02 +0300 Subject: net: korina: whitespace cleanup Signed-off-by: Roman Yeryomin Signed-off-by: David S. Miller --- drivers/net/ethernet/korina.c | 58 +++++++++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 27 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 5f36e1703378..c26f0d84ba6b 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -64,9 +64,9 @@ #include #include -#define DRV_NAME "korina" -#define DRV_VERSION "0.10" -#define DRV_RELDATE "04Mar2008" +#define DRV_NAME "korina" +#define DRV_VERSION "0.10" +#define DRV_RELDATE "04Mar2008" #define STATION_ADDRESS_HIGH(dev) (((dev)->dev_addr[0] << 8) | \ ((dev)->dev_addr[1])) @@ -75,7 +75,7 @@ ((dev)->dev_addr[4] << 8) | \ ((dev)->dev_addr[5])) -#define MII_CLOCK 1250000 /* no more than 2.5MHz */ +#define MII_CLOCK 1250000 /* no more than 2.5MHz */ /* the following must be powers of two */ #define KORINA_NUM_RDS 64 /* number of receive descriptors */ @@ -87,15 +87,19 @@ #define KORINA_RBSIZE 1536 /* size of one resource buffer = Ether MTU */ #define KORINA_RDS_MASK (KORINA_NUM_RDS - 1) #define KORINA_TDS_MASK (KORINA_NUM_TDS - 1) -#define RD_RING_SIZE (KORINA_NUM_RDS * sizeof(struct dma_desc)) +#define RD_RING_SIZE (KORINA_NUM_RDS * sizeof(struct dma_desc)) #define TD_RING_SIZE (KORINA_NUM_TDS * sizeof(struct dma_desc)) -#define TX_TIMEOUT (6000 * HZ / 1000) +#define TX_TIMEOUT (6000 * HZ / 1000) -enum chain_status { desc_filled, desc_empty }; -#define IS_DMA_FINISHED(X) (((X) & (DMA_DESC_FINI)) != 0) -#define IS_DMA_DONE(X) (((X) & (DMA_DESC_DONE)) != 0) -#define RCVPKT_LENGTH(X) (((X) & ETH_RX_LEN) >> ETH_RX_LEN_BIT) +enum chain_status { + desc_filled, + desc_empty +}; + +#define IS_DMA_FINISHED(X) (((X) & (DMA_DESC_FINI)) != 0) +#define IS_DMA_DONE(X) (((X) & (DMA_DESC_DONE)) != 0) +#define RCVPKT_LENGTH(X) (((X) & ETH_RX_LEN) >> ETH_RX_LEN_BIT) /* Information that need to be kept for each board.
*/ struct korina_private { @@ -123,7 +127,7 @@ struct korina_private { int rx_irq; int tx_irq; - spinlock_t lock; /* NIC xmit lock */ + spinlock_t lock; /* NIC xmit lock */ int dma_halt_cnt; int dma_run_cnt; @@ -146,17 +150,17 @@ static inline void korina_start_dma(struct dma_reg *ch, u32 dma_addr) static inline void korina_abort_dma(struct net_device *dev, struct dma_reg *ch) { - if (readl(&ch->dmac) & DMA_CHAN_RUN_BIT) { - writel(0x10, &ch->dmac); + if (readl(&ch->dmac) & DMA_CHAN_RUN_BIT) { + writel(0x10, &ch->dmac); - while (!(readl(&ch->dmas) & DMA_STAT_HALT)) - netif_trans_update(dev); + while (!(readl(&ch->dmas) & DMA_STAT_HALT)) + netif_trans_update(dev); - writel(0, &ch->dmas); - } + writel(0, &ch->dmas); + } - writel(0, &ch->dmadptr); - writel(0, &ch->dmandptr); + writel(0, &ch->dmadptr); + writel(0, &ch->dmandptr); } static inline void korina_chain_dma(struct dma_reg *ch, u32 dma_addr) @@ -685,7 +689,7 @@ static int korina_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) /* ethtool helpers */ static void netdev_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *info) + struct ethtool_drvinfo *info) { struct korina_private *lp = netdev_priv(dev); @@ -728,10 +732,10 @@ static u32 netdev_get_link(struct net_device *dev) } static const struct ethtool_ops netdev_ethtool_ops = { - .get_drvinfo = netdev_get_drvinfo, - .get_link = netdev_get_link, - .get_link_ksettings = netdev_get_link_ksettings, - .set_link_ksettings = netdev_set_link_ksettings, + .get_drvinfo = netdev_get_drvinfo, + .get_link = netdev_get_link, + .get_link_ksettings = netdev_get_link_ksettings, + .set_link_ksettings = netdev_set_link_ksettings, }; static int korina_alloc_ring(struct net_device *dev) @@ -863,7 +867,7 @@ static int korina_init(struct net_device *dev) /* Management Clock Prescaler Divisor * Clock independent setting */ writel(((idt_cpu_freq) / MII_CLOCK + 1) & ~1, - &lp->eth_regs->ethmcp); + &lp->eth_regs->ethmcp); /* don't transmit until fifo contains 48b */ writel(48, &lp->eth_regs->ethfifott); @@ -946,14 +950,14 @@ static int korina_open(struct net_device *dev) 0, "Korina ethernet Rx", dev); if (ret < 0) { printk(KERN_ERR "%s: unable to get Rx DMA IRQ %d\n", - dev->name, lp->rx_irq); + dev->name, lp->rx_irq); goto err_release; } ret = request_irq(lp->tx_irq, korina_tx_dma_interrupt, 0, "Korina ethernet Tx", dev); if (ret < 0) { printk(KERN_ERR "%s: unable to get Tx DMA IRQ %d\n", - dev->name, lp->tx_irq); + dev->name, lp->tx_irq); goto err_free_rx_irq; } -- cgit v1.2.3 From 87736fc6f75f23ae2583ee197a0e85515f246ba6 Mon Sep 17 00:00:00 2001 From: Roman Yeryomin Date: Sun, 17 Sep 2017 20:25:11 +0300 Subject: net: korina: update authors Signed-off-by: Roman Yeryomin Signed-off-by: David S. Miller --- drivers/net/ethernet/korina.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index c26f0d84ba6b..d58aa4bfcb58 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -4,6 +4,7 @@ * Copyright 2004 IDT Inc. 
(rischelp@idt.com) * Copyright 2006 Felix Fietkau * Copyright 2008 Florian Fainelli + * Copyright 2017 Roman Yeryomin * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -1150,5 +1151,6 @@ module_platform_driver(korina_driver); MODULE_AUTHOR("Philip Rischel "); MODULE_AUTHOR("Felix Fietkau "); MODULE_AUTHOR("Florian Fainelli "); +MODULE_AUTHOR("Roman Yeryomin "); MODULE_DESCRIPTION("IDT RC32434 (Korina) Ethernet driver"); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From da1d2def654dc5a9cae346a93f25bd2e8959b080 Mon Sep 17 00:00:00 2001 From: Roman Yeryomin Date: Sun, 17 Sep 2017 20:25:21 +0300 Subject: net: korina: bump version Signed-off-by: Roman Yeryomin Signed-off-by: David S. Miller --- drivers/net/ethernet/korina.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index d58aa4bfcb58..7cecd9dbc111 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -66,8 +66,8 @@ #include #define DRV_NAME "korina" -#define DRV_VERSION "0.10" -#define DRV_RELDATE "04Mar2008" +#define DRV_VERSION "0.20" +#define DRV_RELDATE "15Sep2017" #define STATION_ADDRESS_HIGH(dev) (((dev)->dev_addr[0] << 8) | \ ((dev)->dev_addr[1])) -- cgit v1.2.3 From 38c5eb93aca9dc1b21a2c96d583ce7f9886a44e6 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Mon, 18 Sep 2017 15:36:51 +0200 Subject: net: mvpp2: remove useless goto Remove a goto in the PPv2 tx function which jumps to the next line anyway. This is a cosmetic commit. Signed-off-by: Antoine Tenart Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index dd0ee2691c86..8041d692db3c 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -6452,7 +6452,6 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { tx_desc_unmap_put(port, txq, tx_desc); frags = 0; - goto out; } } -- cgit v1.2.3 From e2b2d35a052d9264a774715bc6aa3395a45dcfa2 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Tue, 19 Sep 2017 10:00:08 +0200 Subject: mlxsw: spectrum: Change init order The multicast router offloading code is going to require the counter_pools initialization to occur before the router initialization, thus, change the spectrum initialization order to fix it. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 696b99e65a5a..97284161ea35 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3693,6 +3693,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_switchdev_init; } + err = mlxsw_sp_counter_pool_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); + goto err_counter_pool_init; + } + err = mlxsw_sp_router_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); @@ -3711,12 +3717,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_acl_init; } - err = mlxsw_sp_counter_pool_init(mlxsw_sp); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n"); - goto err_counter_pool_init; - } - err = mlxsw_sp_dpipe_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); @@ -3734,14 +3734,14 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, err_ports_create: mlxsw_sp_dpipe_fini(mlxsw_sp); err_dpipe_init: - mlxsw_sp_counter_pool_fini(mlxsw_sp); -err_counter_pool_init: mlxsw_sp_acl_fini(mlxsw_sp); err_acl_init: mlxsw_sp_span_fini(mlxsw_sp); err_span_init: mlxsw_sp_router_fini(mlxsw_sp); err_router_init: + mlxsw_sp_counter_pool_fini(mlxsw_sp); +err_counter_pool_init: mlxsw_sp_switchdev_fini(mlxsw_sp); err_switchdev_init: mlxsw_sp_lag_fini(mlxsw_sp); @@ -3760,10 +3760,10 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_ports_remove(mlxsw_sp); mlxsw_sp_dpipe_fini(mlxsw_sp); - mlxsw_sp_counter_pool_fini(mlxsw_sp); mlxsw_sp_acl_fini(mlxsw_sp); mlxsw_sp_span_fini(mlxsw_sp); mlxsw_sp_router_fini(mlxsw_sp); + mlxsw_sp_counter_pool_fini(mlxsw_sp); mlxsw_sp_switchdev_fini(mlxsw_sp); mlxsw_sp_lag_fini(mlxsw_sp); mlxsw_sp_buffers_fini(mlxsw_sp); -- cgit v1.2.3 From d3b939b8f9a571da82359b6baa5506c9179770d1 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Tue, 19 Sep 2017 10:00:09 +0200 Subject: mlxsw: spectrum: Move ACL flexible actions instance to spectrum A flexible action instance allows, given a set of ops, creating, committing and sharing a set of ACL action blocks. The flexible action instance in question is using the spectrum KVD linear space to store the flexible action sets. Move this flexible action instance to the common spectrum struct to allow other users (such as multicast router) to get that functionality. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/Makefile | 2 +- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 10 ++ drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 1 + drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 93 +-------------- .../mellanox/mlxsw/spectrum_acl_flex_actions.c | 129 +++++++++++++++++++++ .../mellanox/mlxsw/spectrum_acl_flex_actions.h | 44 +++++++ 6 files changed, 186 insertions(+), 93 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 891ff418bb5e..4b88158173f3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -17,7 +17,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_kvdl.o spectrum_acl_tcam.o \ spectrum_acl.o spectrum_flower.o \ spectrum_cnt.o spectrum_fid.o \ - spectrum_ipip.o + spectrum_ipip.o spectrum_acl_flex_actions.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 97284161ea35..6ba6ff276b17 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -69,6 +69,7 @@ #include "txheader.h" #include "spectrum_cnt.h" #include "spectrum_dpipe.h" +#include "spectrum_acl_flex_actions.h" #include "../mlxfw/mlxfw.h" #define MLXSW_FWREV_MAJOR 13 @@ -3699,6 +3700,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_counter_pool_init; } + err = mlxsw_sp_afa_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n"); + goto err_afa_init; + } + err = mlxsw_sp_router_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); @@ -3740,6 +3747,8 @@ err_acl_init: err_span_init: mlxsw_sp_router_fini(mlxsw_sp); err_router_init: + mlxsw_sp_afa_fini(mlxsw_sp); +err_afa_init: mlxsw_sp_counter_pool_fini(mlxsw_sp); err_counter_pool_init: mlxsw_sp_switchdev_fini(mlxsw_sp); @@ -3763,6 +3772,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_acl_fini(mlxsw_sp); mlxsw_sp_span_fini(mlxsw_sp); mlxsw_sp_router_fini(mlxsw_sp); + mlxsw_sp_afa_fini(mlxsw_sp); mlxsw_sp_counter_pool_fini(mlxsw_sp); mlxsw_sp_switchdev_fini(mlxsw_sp); mlxsw_sp_lag_fini(mlxsw_sp); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 84ce83acdc19..7180d8f3de75 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -152,6 +152,7 @@ struct mlxsw_sp { struct mlxsw_sp_sb *sb; struct mlxsw_sp_bridge *bridge; struct mlxsw_sp_router *router; + struct mlxsw_afa *afa; struct mlxsw_sp_acl *acl; struct mlxsw_sp_fid_core *fid_core; struct { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 4b2455e3e079..2523785f1904 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -52,7 +52,6 @@ struct mlxsw_sp_acl { struct mlxsw_sp *mlxsw_sp; struct mlxsw_afk *afk; - struct mlxsw_afa *afa; struct mlxsw_sp_fid *dummy_fid; const 
struct mlxsw_sp_acl_ops *ops; struct rhashtable ruleset_ht; @@ -333,7 +332,7 @@ mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl) rulei = kzalloc(sizeof(*rulei), GFP_KERNEL); if (!rulei) return NULL; - rulei->act_block = mlxsw_afa_block_create(acl->afa); + rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa); if (IS_ERR(rulei->act_block)) { err = PTR_ERR(rulei->act_block); goto err_afa_block_create; @@ -653,85 +652,6 @@ int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp, return 0; } -#define MLXSW_SP_KDVL_ACT_EXT_SIZE 1 - -static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, - char *enc_actions, bool is_first) -{ - struct mlxsw_sp *mlxsw_sp = priv; - char pefa_pl[MLXSW_REG_PEFA_LEN]; - u32 kvdl_index; - int err; - - /* The first action set of a TCAM entry is stored directly in TCAM, - * not KVD linear area. - */ - if (is_first) - return 0; - - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KDVL_ACT_EXT_SIZE, - &kvdl_index); - if (err) - return err; - mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl); - if (err) - goto err_pefa_write; - *p_kvdl_index = kvdl_index; - return 0; - -err_pefa_write: - mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); - return err; -} - -static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index, - bool is_first) -{ - struct mlxsw_sp *mlxsw_sp = priv; - - if (is_first) - return; - mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); -} - -static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, - u8 local_port) -{ - struct mlxsw_sp *mlxsw_sp = priv; - char ppbs_pl[MLXSW_REG_PPBS_LEN]; - u32 kvdl_index; - int err; - - err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index); - if (err) - return err; - mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port); - err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl); - if (err) - goto err_ppbs_write; - *p_kvdl_index = kvdl_index; - return 0; - -err_ppbs_write: - mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); - return err; -} - -static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index) -{ - struct mlxsw_sp *mlxsw_sp = priv; - - mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); -} - -static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = { - .kvdl_set_add = mlxsw_sp_act_kvdl_set_add, - .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, - .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add, - .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del, -}; - int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) { const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops; @@ -753,14 +673,6 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) goto err_afk_create; } - acl->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core, - ACL_ACTIONS_PER_SET), - &mlxsw_sp_act_afa_ops, mlxsw_sp); - if (IS_ERR(acl->afa)) { - err = PTR_ERR(acl->afa); - goto err_afa_create; - } - err = rhashtable_init(&acl->ruleset_ht, &mlxsw_sp_acl_ruleset_ht_params); if (err) @@ -792,8 +704,6 @@ err_acl_ops_init: err_fid_get: rhashtable_destroy(&acl->ruleset_ht); err_rhashtable_init: - mlxsw_afa_destroy(acl->afa); -err_afa_create: mlxsw_afk_destroy(acl->afk); err_afk_create: kfree(acl); @@ -810,7 +720,6 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp) WARN_ON(!list_empty(&acl->rules)); mlxsw_sp_fid_put(acl->dummy_fid); rhashtable_destroy(&acl->ruleset_ht); - mlxsw_afa_destroy(acl->afa); mlxsw_afk_destroy(acl->afk); kfree(acl); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c new file mode 100644 index 000000000000..4d3340ed0291 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c @@ -0,0 +1,129 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko + * Copyright (c) 2017 Yotam Gigi + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "spectrum_acl_flex_actions.h" +#include "core_acl_flex_actions.h" + +#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1 + +static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index, + char *enc_actions, bool is_first) +{ + struct mlxsw_sp *mlxsw_sp = priv; + char pefa_pl[MLXSW_REG_PEFA_LEN]; + u32 kvdl_index; + int err; + + /* The first action set of a TCAM entry is stored directly in TCAM, + * not KVD linear area. 
+ */ + if (is_first) + return 0; + + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ACT_EXT_SIZE, + &kvdl_index); + if (err) + return err; + mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl); + if (err) + goto err_pefa_write; + *p_kvdl_index = kvdl_index; + return 0; + +err_pefa_write: + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); + return err; +} + +static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index, + bool is_first) +{ + struct mlxsw_sp *mlxsw_sp = priv; + + if (is_first) + return; + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); +} + +static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index, + u8 local_port) +{ + struct mlxsw_sp *mlxsw_sp = priv; + char ppbs_pl[MLXSW_REG_PPBS_LEN]; + u32 kvdl_index; + int err; + + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index); + if (err) + return err; + mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl); + if (err) + goto err_ppbs_write; + *p_kvdl_index = kvdl_index; + return 0; + +err_ppbs_write: + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); + return err; +} + +static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index) +{ + struct mlxsw_sp *mlxsw_sp = priv; + + mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index); +} + +static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = { + .kvdl_set_add = mlxsw_sp_act_kvdl_set_add, + .kvdl_set_del = mlxsw_sp_act_kvdl_set_del, + .kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add, + .kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del, +}; + +int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core, + ACL_ACTIONS_PER_SET), + &mlxsw_sp_act_afa_ops, mlxsw_sp); + return PTR_ERR_OR_ZERO(mlxsw_sp->afa); +} + +void mlxsw_sp_afa_fini(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_afa_destroy(mlxsw_sp->afa); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h new file mode 100644 index 000000000000..2726192836ad --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h @@ -0,0 +1,44 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.h + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Jiri Pirko + * Copyright (c) 2017 Yotam Gigi + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H +#define _MLXSW_SPECTRUM_ACL_FLEX_KEYS_H + +#include "spectrum.h" + +int mlxsw_sp_afa_init(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_afa_fini(struct mlxsw_sp *mlxsw_sp); + +#endif -- cgit v1.2.3
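With the instance now living in mlxsw_sp->afa, any spectrum user can drive it through the lifecycle that spectrum_acl.c already follows. A rough sketch of that API flow (error handling elided; mlxsw_afa_block_commit() and mlxsw_afa_block_destroy() are core helpers from core_acl_flex_actions.h that this diff does not show):

	struct mlxsw_afa_block *block;
	u32 kvdl_index;
	int err;

	block = mlxsw_afa_block_create(mlxsw_sp->afa);
	if (IS_ERR(block))
		return PTR_ERR(block);

	/* append one or more actions to the block (sketch) */
	err = mlxsw_afa_block_append_trap(block);

	/* committing resolves the encoded action sets into the KVD linear area */
	err = mlxsw_afa_block_commit(block);

	/* the first set's KVD index is what gets programmed into hardware */
	kvdl_index = mlxsw_afa_block_first_set_kvdl_index(block);

	/* ... write kvdl_index into the relevant table entry ... */

	mlxsw_afa_block_destroy(block);	/* at teardown; releases the KVD sets */

Note that append_trap() here still has its parameterless form; the commit two entries below changes it to take a trap_id.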
From 4b8a79ff27645c1201287c3b17091add748d1fb9 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Tue, 19 Sep 2017 10:00:10 +0200 Subject: mlxsw: acl: Introduce mcrouter ACL action The Spectrum multicast forwarding is done using an ACL action. Add the mcrouter ACL action that will be used to offload the multicast router logic. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../mellanox/mlxsw/core_acl_flex_actions.c | 71 ++++++++++++++++++++++ .../mellanox/mlxsw/core_acl_flex_actions.h | 3 + 2 files changed, 74 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 5ae110172c22..65a32d7b4350 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -891,3 +891,74 @@ int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid) return 0; } EXPORT_SYMBOL(mlxsw_afa_block_append_fid_set); + +/* MC Routing Action + * ----------------- + * The Multicast router action. Can be used by RMFT_V2 - Router Multicast + * Forwarding Table Version 2 Register. + */ + +#define MLXSW_AFA_MCROUTER_CODE 0x10 +#define MLXSW_AFA_MCROUTER_SIZE 2 + +enum mlxsw_afa_mcrouter_rpf_action { + MLXSW_AFA_MCROUTER_RPF_ACTION_NOP, + MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP, + MLXSW_AFA_MCROUTER_RPF_ACTION_DISCARD_ERROR, +}; + +/* afa_mcrouter_rpf_action */ +MLXSW_ITEM32(afa, mcrouter, rpf_action, 0x00, 28, 3); + +/* afa_mcrouter_expected_irif */ +MLXSW_ITEM32(afa, mcrouter, expected_irif, 0x00, 0, 16); + +/* afa_mcrouter_min_mtu */ +MLXSW_ITEM32(afa, mcrouter, min_mtu, 0x08, 0, 16); + +enum mlxsw_afa_mrouter_vrmid { + MLXSW_AFA_MCROUTER_VRMID_INVALID, + MLXSW_AFA_MCROUTER_VRMID_VALID +}; + +/* afa_mcrouter_vrmid + * Valid RMID: rigr_rmid_index is used as RMID + */ +MLXSW_ITEM32(afa, mcrouter, vrmid, 0x0C, 31, 1); + +/* afa_mcrouter_rigr_rmid_index + * When the vrmid field is set to invalid, the field is used as pointer to + * Router Interface Group (RIGR) Table in the KVD linear. + * When the vrmid is set to valid, the field is used as RMID index, ranged + * from 0 to max_mid - 1. The index is to the Port Group Table. + */ +MLXSW_ITEM32(afa, mcrouter, rigr_rmid_index, 0x0C, 0, 24); + +static inline void +mlxsw_afa_mcrouter_pack(char *payload, + enum mlxsw_afa_mcrouter_rpf_action rpf_action, + u16 expected_irif, u16 min_mtu, + enum mlxsw_afa_mrouter_vrmid vrmid, u32 rigr_rmid_index) + +{ + mlxsw_afa_mcrouter_rpf_action_set(payload, rpf_action); + mlxsw_afa_mcrouter_expected_irif_set(payload, expected_irif); + mlxsw_afa_mcrouter_min_mtu_set(payload, min_mtu); + mlxsw_afa_mcrouter_vrmid_set(payload, vrmid); + mlxsw_afa_mcrouter_rigr_rmid_index_set(payload, rigr_rmid_index); +} + +int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block, + u16 expected_irif, u16 min_mtu, + bool rmid_valid, u32 kvdl_index) +{ + char *act = mlxsw_afa_block_append_action(block, + MLXSW_AFA_MCROUTER_CODE, + MLXSW_AFA_MCROUTER_SIZE); + if (!act) + return -ENOBUFS; + mlxsw_afa_mcrouter_pack(act, MLXSW_AFA_MCROUTER_RPF_ACTION_TRAP, + expected_irif, min_mtu, rmid_valid, kvdl_index); + return 0; +} +EXPORT_SYMBOL(mlxsw_afa_block_append_mcrouter); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index f99c341b2497..5dbb31fa5a27 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -68,5 +68,8 @@ int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block, int mlxsw_afa_block_append_counter(struct mlxsw_afa_block *block, u32 counter_index); int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid); +int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block, + u16 expected_irif, u16 min_mtu, + bool rmid_valid, u32 kvdl_index); #endif -- cgit v1.2.3 From 9cb3fa940e2c1c62d35972ab8433531a4ba421a5 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Tue, 19 Sep 2017 10:00:11 +0200 Subject: mlxsw: acl: Change trap ACL action to get the trap_id as a parameter Allow the trap ACL action to be configured with different traps. This allows the multicast router offloading code to use that same ACL action with the multicast router traps. By using different traps, the multicast router can have different trap policies and can handle the packet differently. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S.
Miller --- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 4 ++-- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 2 +- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 3 ++- 3 files changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index 65a32d7b4350..ab3ffe7a8eda 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -712,7 +712,7 @@ int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block) } EXPORT_SYMBOL(mlxsw_afa_block_append_drop); -int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block) +int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id) { char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAPDISC_CODE, @@ -722,7 +722,7 @@ int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block) return -ENOBUFS; mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP, MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD, - MLXSW_TRAP_ID_ACL0); + trap_id); return 0; } EXPORT_SYMBOL(mlxsw_afa_block_append_trap); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index 5dbb31fa5a27..501819c790d6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -60,7 +60,7 @@ u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block); void mlxsw_afa_block_continue(struct mlxsw_afa_block *block); void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id); int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block); -int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block); +int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id); int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, u8 local_port, bool in_port); int mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 2523785f1904..eede75fbd585 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -396,7 +396,8 @@ int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei) int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei) { - return mlxsw_afa_block_append_trap(rulei->act_block); + return mlxsw_afa_block_append_trap(rulei->act_block, + MLXSW_TRAP_ID_ACL0); } int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, -- cgit v1.2.3 From 587265655159d73247a56236092917131183496e Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Tue, 19 Sep 2017 10:00:12 +0200 Subject: mlxsw: reg: Rename the flexible action set length field The MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN is relevant for the multicast router registers too, so rename it to have a general name which is not bound to a specific register. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index cc27c5de5a1d..fb8ab441b11e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -2142,15 +2142,14 @@ MLXSW_REG_DEFINE(pefa, MLXSW_REG_PEFA_ID, MLXSW_REG_PEFA_LEN); */ MLXSW_ITEM32(reg, pefa, index, 0x00, 0, 24); -#define MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN 0xA8 +#define MLXSW_REG_FLEX_ACTION_SET_LEN 0xA8 /* reg_pefa_flex_action_set * Action-set to perform when rule is matched. * Must be zero padded if action set is shorter. * Access: RW */ -MLXSW_ITEM_BUF(reg, pefa, flex_action_set, 0x08, - MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN); +MLXSW_ITEM_BUF(reg, pefa, flex_action_set, 0x08, MLXSW_REG_FLEX_ACTION_SET_LEN); static inline void mlxsw_reg_pefa_pack(char *payload, u32 index, const char *flex_action_set) @@ -2243,7 +2242,7 @@ MLXSW_ITEM_BUF(reg, ptce2, mask, 0x80, * Access: RW */ MLXSW_ITEM_BUF(reg, ptce2, flex_action_set, 0xE0, - MLXSW_REG_PXXX_FLEX_ACTION_SET_LEN); + MLXSW_REG_FLEX_ACTION_SET_LEN); static inline void mlxsw_reg_ptce2_pack(char *payload, bool valid, enum mlxsw_reg_ptce2_op op, -- cgit v1.2.3 From 46a7054ebace0fcd0d1826881aa5ab219faa6a77 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Tue, 19 Sep 2017 10:00:13 +0200 Subject: mlxsw: reg: Add The Router TCAM Allocation register This register is used for allocation of regions in the TCAM table and it will be used by the multicast router offloading logic. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 52 +++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index fb8ab441b11e..e9f37eac8788 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4310,6 +4310,57 @@ mlxsw_reg_ritr_loopback_ipip4_pack(char *payload, mlxsw_reg_ritr_loopback_ipip_usip4_set(payload, usip); } +/* RTAR - Router TCAM Allocation Register + * -------------------------------------- + * This register is used for allocation of regions in the TCAM table. + */ +#define MLXSW_REG_RTAR_ID 0x8004 +#define MLXSW_REG_RTAR_LEN 0x20 + +MLXSW_REG_DEFINE(rtar, MLXSW_REG_RTAR_ID, MLXSW_REG_RTAR_LEN); + +enum mlxsw_reg_rtar_op { + MLXSW_REG_RTAR_OP_ALLOCATE, + MLXSW_REG_RTAR_OP_RESIZE, + MLXSW_REG_RTAR_OP_DEALLOCATE, +}; + +/* reg_rtar_op + * Access: WO + */ +MLXSW_ITEM32(reg, rtar, op, 0x00, 28, 4); + +enum mlxsw_reg_rtar_key_type { + MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST = 1, + MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST = 3 +}; + +/* reg_rtar_key_type + * TCAM key type for the region. + * Access: WO + */ +MLXSW_ITEM32(reg, rtar, key_type, 0x00, 0, 8); + +/* reg_rtar_region_size + * TCAM region size. When allocating/resizing this is the requested + * size, the response is the actual size. + * Note: Actual size may be larger than requested. 
+ * Reserved for op = Deallocate + * Access: WO + */ +MLXSW_ITEM32(reg, rtar, region_size, 0x04, 0, 16); + +static inline void mlxsw_reg_rtar_pack(char *payload, + enum mlxsw_reg_rtar_op op, + enum mlxsw_reg_rtar_key_type key_type, + u16 region_size) +{ + MLXSW_REG_ZERO(rtar, payload); + mlxsw_reg_rtar_op_set(payload, op); + mlxsw_reg_rtar_key_type_set(payload, key_type); + mlxsw_reg_rtar_region_size_set(payload, region_size); +} + /* RATR - Router Adjacency Table Register * -------------------------------------- * The RATR register is used to configure the Router Adjacency (next-hop) @@ -6855,6 +6906,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(hpkt), MLXSW_REG(rgcr), MLXSW_REG(ritr), + MLXSW_REG(rtar), MLXSW_REG(ratr), MLXSW_REG(rtdp), MLXSW_REG(ricnt), -- cgit v1.2.3 From 5080c7e91701744ef1a5d7aab51f568f889bfddb Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Tue, 19 Sep 2017 10:00:14 +0200 Subject: mlxsw: reg: Add the Router Interface Group Version 2 register The RIGR-V2 register is used to add, remove and query egress interface list of a multicast forwarding entry and it will be used by the multicast router offloading logic. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 83 +++++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index e9f37eac8788..1778d7f5f843 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -5646,6 +5646,88 @@ mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif, mlxsw_reg_rtdp_ipip_expected_gre_key_set(payload, expected_gre_key); } +/* RIGR-V2 - Router Interface Group Register Version 2 + * --------------------------------------------------- + * The RIGR_V2 register is used to add, remove and query egress interface list + * of a multicast forwarding entry. + */ +#define MLXSW_REG_RIGR2_ID 0x8023 +#define MLXSW_REG_RIGR2_LEN 0xB0 + +#define MLXSW_REG_RIGR2_MAX_ERIFS 32 + +MLXSW_REG_DEFINE(rigr2, MLXSW_REG_RIGR2_ID, MLXSW_REG_RIGR2_LEN); + +/* reg_rigr2_rigr_index + * KVD Linear index. + * Access: Index + */ +MLXSW_ITEM32(reg, rigr2, rigr_index, 0x04, 0, 24); + +/* reg_rigr2_vnext + * Next RIGR Index is valid. + * Access: RW + */ +MLXSW_ITEM32(reg, rigr2, vnext, 0x08, 31, 1); + +/* reg_rigr2_next_rigr_index + * Next RIGR Index. The index is to the KVD linear. + * Reserved when vnxet = '0'. + * Access: RW + */ +MLXSW_ITEM32(reg, rigr2, next_rigr_index, 0x08, 0, 24); + +/* reg_rigr2_vrmid + * RMID Index is valid. + * Access: RW + */ +MLXSW_ITEM32(reg, rigr2, vrmid, 0x20, 31, 1); + +/* reg_rigr2_rmid_index + * RMID Index. + * Range 0 .. max_mid - 1 + * Reserved when vrmid = '0'. + * The index is to the Port Group Table (PGT) + * Access: RW + */ +MLXSW_ITEM32(reg, rigr2, rmid_index, 0x20, 0, 16); + +/* reg_rigr2_erif_entry_v + * Egress Router Interface is valid. + * Note that low-entries must be set if high-entries are set. For + * example: if erif_entry[2].v is set then erif_entry[1].v and + * erif_entry[0].v must be set. + * Index can be from 0 to cap_mc_erif_list_entries-1 + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, rigr2, erif_entry_v, 0x24, 31, 1, 4, 0, false); + +/* reg_rigr2_erif_entry_erif + * Egress Router Interface. 
+ * Valid range is from 0 to cap_max_router_interfaces - 1 + * Index can be from 0 to MLXSW_REG_RIGR2_MAX_ERIFS - 1 + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, rigr2, erif_entry_erif, 0x24, 0, 16, 4, 0, false); + +static inline void mlxsw_reg_rigr2_pack(char *payload, u32 rigr_index, + bool vnext, u32 next_rigr_index) +{ + MLXSW_REG_ZERO(rigr2, payload); + mlxsw_reg_rigr2_rigr_index_set(payload, rigr_index); + mlxsw_reg_rigr2_vnext_set(payload, vnext); + mlxsw_reg_rigr2_next_rigr_index_set(payload, next_rigr_index); + mlxsw_reg_rigr2_vrmid_set(payload, 0); + mlxsw_reg_rigr2_rmid_index_set(payload, 0); +} + +static inline void mlxsw_reg_rigr2_erif_entry_pack(char *payload, int index, + bool v, u16 erif) +{ + mlxsw_reg_rigr2_erif_entry_v_set(payload, index, v); + mlxsw_reg_rigr2_erif_entry_erif_set(payload, index, erif); +} + /* MFCR - Management Fan Control Register * -------------------------------------- * This register controls the settings of the Fan Speed PWM mechanism. @@ -6917,6 +6999,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(rauht), MLXSW_REG(raleu), MLXSW_REG(rauhtd), + MLXSW_REG(rigr2), MLXSW_REG(mfcr), MLXSW_REG(mfsc), MLXSW_REG(mfsm), -- cgit v1.2.3 From 771ced742a4f02ac248ad679325bd434843d78d0 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Tue, 19 Sep 2017 10:00:15 +0200 Subject: mlxsw: resources: Add multicast ERIF list entries resource The multicast ERIF list entries resource indicates the number of entries that can be put in one rigr2 register operation. While the register can hold up to MLXSW_REG_RIGR2_MAX_ERIFS ( = 32) ERIF entries, the actual number allowed by firmware is indicated with this resource. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/resources.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index 9556d934714b..087aad52c195 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -63,6 +63,7 @@ enum mlxsw_res_id { MLXSW_RES_ID_MAX_CPU_POLICERS, MLXSW_RES_ID_MAX_VRS, MLXSW_RES_ID_MAX_RIFS, + MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES, MLXSW_RES_ID_MAX_LPM_TREES, /* Internal resources. @@ -100,6 +101,7 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13, [MLXSW_RES_ID_MAX_VRS] = 0x2C01, [MLXSW_RES_ID_MAX_RIFS] = 0x2C02, + [MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES] = 0x2C10, [MLXSW_RES_ID_MAX_LPM_TREES] = 0x2C30, }; -- cgit v1.2.3 From 2e654e33c5791332d7abf759fd9d34a39082ffc7 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Tue, 19 Sep 2017 10:00:16 +0200 Subject: mlxsw: reg: Add the Router Multicast Forwarding Table Version 2 register The RMFT-V2 register is used to configure and query the multicast table and will be used by the multicast router offloading logic. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 142 ++++++++++++++++++++++++++++++ 1 file changed, 142 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 1778d7f5f843..046525ebe5ac 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -5728,6 +5728,147 @@ static inline void mlxsw_reg_rigr2_erif_entry_pack(char *payload, int index, mlxsw_reg_rigr2_erif_entry_erif_set(payload, index, erif); } +/* RMFT-V2 - Router Multicast Forwarding Table Version 2 Register + * -------------------------------------------------------------- + * The RMFT_V2 register is used to configure and query the multicast table. + */ +#define MLXSW_REG_RMFT2_ID 0x8027 +#define MLXSW_REG_RMFT2_LEN 0x174 + +MLXSW_REG_DEFINE(rmft2, MLXSW_REG_RMFT2_ID, MLXSW_REG_RMFT2_LEN); + +/* reg_rmft2_v + * Valid + * Access: RW + */ +MLXSW_ITEM32(reg, rmft2, v, 0x00, 31, 1); + +enum mlxsw_reg_rmft2_type { + MLXSW_REG_RMFT2_TYPE_IPV4, + MLXSW_REG_RMFT2_TYPE_IPV6 +}; + +/* reg_rmft2_type + * Access: Index + */ +MLXSW_ITEM32(reg, rmft2, type, 0x00, 28, 2); + +enum mlxsw_sp_reg_rmft2_op { + /* For Write: + * Write operation. Used to write a new entry to the table. All RW + * fields are relevant for new entry. Activity bit is set for new + * entries - Note write with v (Valid) 0 will delete the entry. + * For Query: + * Read operation + */ + MLXSW_REG_RMFT2_OP_READ_WRITE, +}; + +/* reg_rmft2_op + * Operation. + * Access: OP + */ +MLXSW_ITEM32(reg, rmft2, op, 0x00, 20, 2); + +/* reg_rmft2_a + * Activity. Set for new entries. Set if a packet lookup has hit on the specific + * entry. + * Access: RO + */ +MLXSW_ITEM32(reg, rmft2, a, 0x00, 16, 1); + +/* reg_rmft2_offset + * Offset within the multicast forwarding table to write to. + * Access: Index + */ +MLXSW_ITEM32(reg, rmft2, offset, 0x00, 0, 16); + +/* reg_rmft2_virtual_router + * Virtual Router ID. Range from 0..cap_max_virtual_routers-1 + * Access: RW + */ +MLXSW_ITEM32(reg, rmft2, virtual_router, 0x04, 0, 16); + +enum mlxsw_reg_rmft2_irif_mask { + MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, + MLXSW_REG_RMFT2_IRIF_MASK_COMPARE +}; + +/* reg_rmft2_irif_mask + * Ingress RIF mask. + * Access: RW + */ +MLXSW_ITEM32(reg, rmft2, irif_mask, 0x08, 24, 1); + +/* reg_rmft2_irif + * Ingress RIF index. + * Access: RW + */ +MLXSW_ITEM32(reg, rmft2, irif, 0x08, 0, 16); + +/* reg_rmft2_dip4 + * Destination IPv4 address + * Access: RW + */ +MLXSW_ITEM32(reg, rmft2, dip4, 0x1C, 0, 32); + +/* reg_rmft2_dip4_mask + * A bit that is set directs the TCAM to compare the corresponding bit in key. A + * bit that is clear directs the TCAM to ignore the corresponding bit in key. + * Access: RW + */ +MLXSW_ITEM32(reg, rmft2, dip4_mask, 0x2C, 0, 32); + +/* reg_rmft2_sip4 + * Source IPv4 address + * Access: RW + */ +MLXSW_ITEM32(reg, rmft2, sip4, 0x3C, 0, 32); + +/* reg_rmft2_sip4_mask + * A bit that is set directs the TCAM to compare the corresponding bit in key. A + * bit that is clear directs the TCAM to ignore the corresponding bit in key. + * Access: RW + */ +MLXSW_ITEM32(reg, rmft2, sip4_mask, 0x4C, 0, 32); + +/* reg_rmft2_flexible_action_set + * ACL action set. The only supported action types in this field and in any + * action-set pointed from here are as follows: + * 00h: ACTION_NULL + * 01h: ACTION_MAC_TTL, only TTL configuration is supported. 
+ * 03h: ACTION_TRAP + * 06h: ACTION_QOS + * 08h: ACTION_POLICING_MONITORING + * 10h: ACTION_ROUTER_MC + * Access: RW + */ +MLXSW_ITEM_BUF(reg, rmft2, flexible_action_set, 0x80, + MLXSW_REG_FLEX_ACTION_SET_LEN); + +static inline void +mlxsw_reg_rmft2_ipv4_pack(char *payload, bool v, u16 offset, u16 virtual_router, + enum mlxsw_reg_rmft2_irif_mask irif_mask, u16 irif, + u32 dip4, u32 dip4_mask, u32 sip4, u32 sip4_mask, + const char *flexible_action_set) +{ + MLXSW_REG_ZERO(rmft2, payload); + mlxsw_reg_rmft2_v_set(payload, v); + mlxsw_reg_rmft2_type_set(payload, MLXSW_REG_RMFT2_TYPE_IPV4); + mlxsw_reg_rmft2_op_set(payload, MLXSW_REG_RMFT2_OP_READ_WRITE); + mlxsw_reg_rmft2_offset_set(payload, offset); + mlxsw_reg_rmft2_virtual_router_set(payload, virtual_router); + mlxsw_reg_rmft2_irif_mask_set(payload, irif_mask); + mlxsw_reg_rmft2_irif_set(payload, irif); + mlxsw_reg_rmft2_dip4_set(payload, dip4); + mlxsw_reg_rmft2_dip4_mask_set(payload, dip4_mask); + mlxsw_reg_rmft2_sip4_set(payload, sip4); + mlxsw_reg_rmft2_sip4_mask_set(payload, sip4_mask); + if (flexible_action_set) + mlxsw_reg_rmft2_flexible_action_set_memcpy_to(payload, + flexible_action_set); +} + /* MFCR - Management Fan Control Register * -------------------------------------- * This register controls the settings of the Fan Speed PWM mechanism. @@ -7000,6 +7141,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(raleu), MLXSW_REG(rauhtd), MLXSW_REG(rigr2), + MLXSW_REG(rmft2), MLXSW_REG(mfcr), MLXSW_REG(mfsc), MLXSW_REG(mfsm), -- cgit v1.2.3 From 4fc92846f65b0a3470b433c54251a40feae7b2d5 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Tue, 19 Sep 2017 10:00:17 +0200 Subject: mlxsw: reg: Add Router Rules Copy Register The RRCR register is used for copying and moving TCAM multicast routes from different offsets. It will be used to allow routes relocation for parman ops as part of the multicast router offloading logic. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 60 +++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 046525ebe5ac..31d120ae8dc6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4680,6 +4680,65 @@ static inline void mlxsw_reg_ricnt_pack(char *payload, u32 index, MLXSW_REG_RICNT_COUNTER_SET_TYPE_BASIC); } +/* RRCR - Router Rules Copy Register Layout + * ---------------------------------------- + * This register is used for moving and copying route entry rules. + */ +#define MLXSW_REG_RRCR_ID 0x800F +#define MLXSW_REG_RRCR_LEN 0x24 + +MLXSW_REG_DEFINE(rrcr, MLXSW_REG_RRCR_ID, MLXSW_REG_RRCR_LEN); + +enum mlxsw_reg_rrcr_op { + /* Move rules */ + MLXSW_REG_RRCR_OP_MOVE, + /* Copy rules */ + MLXSW_REG_RRCR_OP_COPY, +}; + +/* reg_rrcr_op + * Access: WO + */ +MLXSW_ITEM32(reg, rrcr, op, 0x00, 28, 4); + +/* reg_rrcr_offset + * Offset within the region from which to copy/move. + * Access: Index + */ +MLXSW_ITEM32(reg, rrcr, offset, 0x00, 0, 16); + +/* reg_rrcr_size + * The number of rules to copy/move. + * Access: WO + */ +MLXSW_ITEM32(reg, rrcr, size, 0x04, 0, 16); + +/* reg_rrcr_table_id + * Identifier of the table on which to perform the operation. 
Encoding is the + * same as in RTAR.key_type + * Access: Index + */ +MLXSW_ITEM32(reg, rrcr, table_id, 0x10, 0, 4); + +/* reg_rrcr_dest_offset + * Offset within the region to which to copy/move + * Access: Index + */ +MLXSW_ITEM32(reg, rrcr, dest_offset, 0x20, 0, 16); + +static inline void mlxsw_reg_rrcr_pack(char *payload, enum mlxsw_reg_rrcr_op op, + u16 offset, u16 size, + enum mlxsw_reg_rtar_key_type table_id, + u16 dest_offset) +{ + MLXSW_REG_ZERO(rrcr, payload); + mlxsw_reg_rrcr_op_set(payload, op); + mlxsw_reg_rrcr_offset_set(payload, offset); + mlxsw_reg_rrcr_size_set(payload, size); + mlxsw_reg_rrcr_table_id_set(payload, table_id); + mlxsw_reg_rrcr_dest_offset_set(payload, dest_offset); +} + /* RALTA - Router Algorithmic LPM Tree Allocation Register * ------------------------------------------------------- * RALTA is used to allocate the LPM trees of the SHSPM method. @@ -7133,6 +7192,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(ratr), MLXSW_REG(rtdp), MLXSW_REG(ricnt), + MLXSW_REG(rrcr), MLXSW_REG(ralta), MLXSW_REG(ralst), MLXSW_REG(raltb), -- cgit v1.2.3 From 4af5964e58884855d28ae68ddf01279868e70853 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Tue, 19 Sep 2017 10:00:18 +0200 Subject: mlxsw: reg: Configure RIF to forward IPv4 multicast packets by default Turn on two bits on the Spectrum RIF configuration: - IPv4 multicast: when a multicast packet arrives on a RIF, send it to go through multicast routes lookup. - IPv4 multicast forwarding enable: when multicast packet arrives on a RIF, allow it to be forwarded by multicast routes. If this bit is not set, multicast packets will go through multicast routing lookup but will be dropped at the egress of the ports. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 31d120ae8dc6..c203e0dfa827 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3991,6 +3991,12 @@ MLXSW_ITEM32(reg, ritr, ipv4, 0x00, 29, 1); */ MLXSW_ITEM32(reg, ritr, ipv6, 0x00, 28, 1); +/* reg_ritr_ipv4_mc + * IPv4 multicast routing enable. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, ipv4_mc, 0x00, 27, 1); + enum mlxsw_reg_ritr_if_type { /* VLAN interface. */ MLXSW_REG_RITR_VLAN_IF, @@ -4048,6 +4054,14 @@ MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1); */ MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1); +/* reg_ritr_ipv4_mc_fe + * IPv4 Multicast Forwarding Enable. + * When disabled, forwarding is blocked but local traffic (traps and IP to me) + * will be enabled. + * Access: RW + */ +MLXSW_ITEM32(reg, ritr, ipv4_mc_fe, 0x04, 27, 1); + /* reg_ritr_lb_en * Loop-back filter enable for unicast packets. 
* If the flag is set then loop-back filter for unicast packets is @@ -4270,11 +4284,13 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable, mlxsw_reg_ritr_enable_set(payload, enable); mlxsw_reg_ritr_ipv4_set(payload, 1); mlxsw_reg_ritr_ipv6_set(payload, 1); + mlxsw_reg_ritr_ipv4_mc_set(payload, 1); mlxsw_reg_ritr_type_set(payload, type); mlxsw_reg_ritr_op_set(payload, op); mlxsw_reg_ritr_rif_set(payload, rif); mlxsw_reg_ritr_ipv4_fe_set(payload, 1); mlxsw_reg_ritr_ipv6_fe_set(payload, 1); + mlxsw_reg_ritr_ipv4_mc_fe_set(payload, 1); mlxsw_reg_ritr_lb_en_set(payload, 1); mlxsw_reg_ritr_virtual_router_set(payload, vr_id); mlxsw_reg_ritr_mtu_set(payload, mtu); -- cgit v1.2.3 From 91e4d59a4600afe64b44e013a7c1805bbfe61e59 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Tue, 19 Sep 2017 10:00:19 +0200 Subject: mlxsw: spectrum_router: Export RIF dev access function The mlxsw_sp_rif struct, defined as private struct in spectrum_router.c will be used in the multicast router source file. Due to the fact that the dev field will be needed by the multicast router logic, add an access function to it. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 5 +++++ drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 1 + 2 files changed, 6 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 2cfb3f5d092d..0bd93dc88ffa 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -5049,6 +5049,11 @@ int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif) return rif->dev->ifindex; } +const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif) +{ + return rif->dev; +} + static struct mlxsw_sp_rif * mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_rif_params *params) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 345fcc4f38e9..ae4c99b3f2fc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -69,6 +69,7 @@ u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif); u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif); u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif); int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif); +const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif); int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif, enum mlxsw_sp_rif_counter_dir dir, -- cgit v1.2.3 From b48cfc80ce9c27368e331d9aa742314487b0ee12 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Tue, 19 Sep 2017 10:00:20 +0200 Subject: mlxsw: spectrum: Add multicast router traps and trap groups Add three new traps needed for multicast routing: - PIM: Trap for PIM protocol control packets. - RPF: Trap for packets that fail the RPF check on a specific hardware route entry. - MULTICAST: Generic trap for multicast. It is used for routes that trap the packets to the CPU. The RPF and MULTICAST traps have rate limiters as these traps may have line-rate of packets trapped. The PIM trap has a rate limiter similarly to other L3 control protocols. 
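[Editor's note: an illustrative aside, not the driver's code. Conceptually, each of these rate limiters behaves like a token bucket in front of the CPU port; the hardware policer is merely configured by the driver with a rate and a burst size, roughly as modeled below.]

	/* Toy token-bucket model of a trap-group policer. */
	struct policer {
		u64 rate;	/* sustained rate, packets per second */
		u64 burst;	/* bucket depth, packets */
		u64 tokens;	/* current budget */
		u64 last_ns;	/* time of last refill */
	};

	static bool policer_admit(struct policer *p, u64 now_ns)
	{
		p->tokens = min(p->burst, p->tokens +
				(now_ns - p->last_ns) * p->rate / NSEC_PER_SEC);
		p->last_ns = now_ns;
		if (!p->tokens)
			return false;	/* over budget: drop instead of trap */
		p->tokens--;
		return true;		/* packet may be trapped to the CPU */
	}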
The rate limiters are implemented by adding three new trap groups for the newly introduced traps. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 3 +++ drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 10 ++++++++++ drivers/net/ethernet/mellanox/mlxsw/trap.h | 4 ++++ 3 files changed, 17 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index c203e0dfa827..17eba19100de 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3681,12 +3681,15 @@ enum mlxsw_reg_htgt_trap_group { MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP, MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP, MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF, + MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM, + MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST, MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP, MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS, MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP, MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE, MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME, MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP, + MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF, MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT, MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD, MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 6ba6ff276b17..e9b94430afed 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3421,6 +3421,10 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { false, SP_IP2ME, DISCARD), /* ACL trap */ MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false), + /* Multicast Router Traps */ + MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false), + MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false), + MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), }; static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) @@ -3446,6 +3450,8 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: rate = 128; burst_size = 7; break; @@ -3461,6 +3467,7 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: rate = 1024; burst_size = 7; break; @@ -3506,6 +3513,7 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: priority = 5; tc = 5; break; @@ -3522,12 +3530,14 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) break; case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: priority = 2; tc = 2; break; case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: priority = 1; tc = 1; break; diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index f396a1fef633..a98103539f6b 100644 --- 
a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -62,6 +62,8 @@ enum { MLXSW_TRAP_ID_TTLERROR = 0x53, MLXSW_TRAP_ID_LBERROR = 0x54, MLXSW_TRAP_ID_IPV4_OSPF = 0x55, + MLXSW_TRAP_ID_IPV4_PIM = 0x58, + MLXSW_TRAP_ID_RPF = 0x5C, MLXSW_TRAP_ID_IP2ME = 0x5F, MLXSW_TRAP_ID_IPV6_UNSPECIFIED_ADDRESS = 0x60, MLXSW_TRAP_ID_IPV6_LINK_LOCAL_DEST = 0x61, @@ -89,6 +91,8 @@ enum { MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6, MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7, MLXSW_TRAP_ID_ACL0 = 0x1C0, + /* Multicast trap used for routes with trap action */ + MLXSW_TRAP_ID_ACL1 = 0x1C1, MLXSW_TRAP_ID_MAX = 0x1FF }; -- cgit v1.2.3 From 34929cb4d691f7f9e217ba0e3f536978cd56aa6c Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Wed, 20 Sep 2017 11:32:07 +0530 Subject: cxgb4: add new T5 pci device id's Add 0x50a5, 0x50a6, 0x50a7, 0x50a8 and 0x50a9 T5 device id's. Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index aa28299aef5f..37d90d63e4a3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -176,6 +176,11 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x50a2), /* Custom T540-KR4 */ CH_PCI_ID_TABLE_FENTRY(0x50a3), /* Custom T580-KR4 */ CH_PCI_ID_TABLE_FENTRY(0x50a4), /* Custom 2x T540-CR */ + CH_PCI_ID_TABLE_FENTRY(0x50a5), /* Custom T522-BT */ + CH_PCI_ID_TABLE_FENTRY(0x50a6), /* Custom T522-BT-SO */ + CH_PCI_ID_TABLE_FENTRY(0x50a7), /* Custom T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x50a8), /* Custom T580-KR */ + CH_PCI_ID_TABLE_FENTRY(0x50a9), /* Custom T580-KR */ /* T6 adapters: */ -- cgit v1.2.3 From dff37b58ca53b1978e02edf6f8c1dd681799342b Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Wed, 20 Sep 2017 16:15:01 +0200 Subject: mlxsw: spectrum_switchdev: Change mc_router to mrouter Change the naming of mc_router to mrouter to keep consistency. Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index d39ffbfcc436..22f8d7428d96 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -699,10 +699,10 @@ static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, return -EINVAL; } -static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port, - struct switchdev_trans *trans, - struct net_device *orig_dev, - bool is_port_mc_router) +static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct switchdev_trans *trans, + struct net_device *orig_dev, + bool is_port_mrouter) { struct mlxsw_sp_bridge_port *bridge_port; int err; @@ -720,12 +720,12 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port, err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, MLXSW_SP_FLOOD_TYPE_MC, - is_port_mc_router); + is_port_mrouter); if (err) return err; out: - bridge_port->mrouter = is_port_mc_router; + bridge_port->mrouter = is_port_mrouter; return 0; } @@ -793,9 +793,9 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, attr->u.vlan_filtering); break; case SWITCHDEV_ATTR_ID_PORT_MROUTER: - err = mlxsw_sp_port_attr_mc_router_set(mlxsw_sp_port, trans, - attr->orig_dev, - attr->u.mrouter); + err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans, + attr->orig_dev, + attr->u.mrouter); break; case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans, -- cgit v1.2.3 From 4cdc35e4ebf2e6b1cf4fe028eb9e711723f9199a Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Wed, 20 Sep 2017 16:15:02 +0200 Subject: mlxsw: spectrum_switchdev: Add a ports bitmap to the mid db Add a bitmap of ports to the mid struct to hold the ports that are registered to this mid. Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 1 + .../net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 20 +++++++++++++++++--- 2 files changed, 18 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 7180d8f3de75..0424bee88407 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -95,6 +95,7 @@ struct mlxsw_sp_mid { u16 fid; u16 mid; unsigned int ref_count; + unsigned long *ports_in_mid; /* bits array */ }; enum mlxsw_sp_span_type { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 22f8d7428d96..0fde16a22b72 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1239,6 +1239,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, u16 fid) { struct mlxsw_sp_mid *mid; + size_t alloc_size; u16 mid_idx; mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap, @@ -1250,6 +1251,14 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, if (!mid) return NULL; + alloc_size = sizeof(unsigned long) * + BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core)); + mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL); + if (!mid->ports_in_mid) { + kfree(mid); + return NULL; + } + set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap); ether_addr_copy(mid->addr, addr); mid->fid = fid; @@ -1260,12 +1269,16 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, return mid; } -static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp *mlxsw_sp, +static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_mid *mid) { + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + + clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid); if (--mid->ref_count == 0) { list_del(&mid->list); clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap); + kfree(mid->ports_in_mid); kfree(mid); return 1; } @@ -1311,6 +1324,7 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, } } mid->ref_count++; + set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid); err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true, mid->ref_count == 1); @@ -1331,7 +1345,7 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, return 0; err_out: - __mlxsw_sp_mc_dec_ref(mlxsw_sp, mid); + __mlxsw_sp_mc_dec_ref(mlxsw_sp_port, mid); return err; } @@ -1437,7 +1451,7 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, netdev_err(dev, "Unable to remove port from SMID\n"); mid_idx = mid->mid; - if (__mlxsw_sp_mc_dec_ref(mlxsw_sp, mid)) { + if (__mlxsw_sp_mc_dec_ref(mlxsw_sp_port, mid)) { err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid_index, mid_idx, false); if (err) -- cgit v1.2.3 From 0161b9505ab59d4bfc0de66073c9240d1b05040d Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Wed, 20 Sep 2017 16:15:03 +0200 Subject: mlxsw: spectrum_switchdev: Remove reference count from mid Since there is a bitmap for the ports registered to each mid, there is no need for a ref count, since it will always be the number of set bits in this bitmap. Any check of the ref count was replaced with checking if the bitmap is empty. Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
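[Editor's note: a minimal sketch of the invariant this change relies on; the helper name is hypothetical. Each registered port owns exactly one bit in ports_in_mid, so the old ref_count is always recoverable as bitmap_weight(mid->ports_in_mid, max_ports), and "ref count dropped to zero" is exactly "bitmap is empty".]

	static bool mlxsw_sp_mid_unused(const struct mlxsw_sp_mid *mid,
					unsigned int max_ports)
	{
		/* Implicit reference count: no bits set means no port
		 * is registered to this mid any longer. */
		return bitmap_empty(mid->ports_in_mid, max_ports);
	}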
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 1 - .../net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 20 ++++++++++---------- 2 files changed, 10 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 0424bee88407..6fd0afe4b7a3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -94,7 +94,6 @@ struct mlxsw_sp_mid { unsigned char addr[ETH_ALEN]; u16 fid; u16 mid; - unsigned int ref_count; unsigned long *ports_in_mid; /* bits array */ }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 0fde16a22b72..cb2275ed42ab 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1263,19 +1263,19 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, ether_addr_copy(mid->addr, addr); mid->fid = fid; mid->mid = mid_idx; - mid->ref_count = 0; list_add_tail(&mid->list, &mlxsw_sp->bridge->mids_list); return mid; } -static int __mlxsw_sp_mc_dec_ref(struct mlxsw_sp_port *mlxsw_sp_port, - struct mlxsw_sp_mid *mid) +static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_mid *mid) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid); - if (--mid->ref_count == 0) { + if (bitmap_empty(mid->ports_in_mid, + mlxsw_core_max_ports(mlxsw_sp->core))) { list_del(&mid->list); clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap); kfree(mid->ports_in_mid); @@ -1296,6 +1296,7 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_device *bridge_device; struct mlxsw_sp_bridge_port *bridge_port; struct mlxsw_sp_mid *mid; + bool is_new_mid = false; u16 fid_index; int err = 0; @@ -1322,18 +1323,17 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, netdev_err(dev, "Unable to allocate MC group\n"); return -ENOMEM; } + is_new_mid = true; } - mid->ref_count++; set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid); - err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true, - mid->ref_count == 1); + err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true, is_new_mid); if (err) { netdev_err(dev, "Unable to set SMID\n"); goto err_out; } - if (mid->ref_count == 1) { + if (is_new_mid) { err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid_index, mid->mid, true); if (err) { @@ -1345,7 +1345,7 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, return 0; err_out: - __mlxsw_sp_mc_dec_ref(mlxsw_sp_port, mid); + mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid); return err; } @@ -1451,7 +1451,7 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, netdev_err(dev, "Unable to remove port from SMID\n"); mid_idx = mid->mid; - if (__mlxsw_sp_mc_dec_ref(mlxsw_sp_port, mid)) { + if (mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid)) { err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid_index, mid_idx, false); if (err) -- cgit v1.2.3 From b80888a9194f3aecd5edf6a7ede5c23d77bade8b Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Wed, 20 Sep 2017 16:15:04 +0200 Subject: mlxsw: spectrum_switchdev: Save mids list per bridge device Instead of saving all the mids in the same list, save them per vlan device. This change allows a more efficient mid find. 
Also, in the next patches, there will be added a lot of loops over all the mids in bridge device for multicast disable, mrouter change and ndb flush. Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../ethernet/mellanox/mlxsw/spectrum_switchdev.c | 49 +++++++++++----------- 1 file changed, 24 insertions(+), 25 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index cb2275ed42ab..2ba8a44e1933 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -67,7 +67,6 @@ struct mlxsw_sp_bridge { u32 ageing_time; bool vlan_enabled_exists; struct list_head bridges_list; - struct list_head mids_list; DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX); const struct mlxsw_sp_bridge_ops *bridge_8021q_ops; const struct mlxsw_sp_bridge_ops *bridge_8021d_ops; @@ -77,6 +76,7 @@ struct mlxsw_sp_bridge_device { struct net_device *dev; struct list_head list; struct list_head ports_list; + struct list_head mids_list; u8 vlan_enabled:1, multicast_enabled:1; const struct mlxsw_sp_bridge_ops *ops; @@ -161,6 +161,7 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, } else { bridge_device->ops = bridge->bridge_8021d_ops; } + INIT_LIST_HEAD(&bridge_device->mids_list); list_add(&bridge_device->list, &bridge->bridges_list); return bridge_device; @@ -170,10 +171,17 @@ static void mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge, struct mlxsw_sp_bridge_device *bridge_device) { + struct mlxsw_sp_mid *mid, *tmp; + list_del(&bridge_device->list); if (bridge_device->vlan_enabled) bridge->vlan_enabled_exists = false; WARN_ON(!list_empty(&bridge_device->ports_list)); + list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) { + list_del(&mid->list); + clear_bit(mid->mid, bridge->mids_bitmap); + kfree(mid); + } kfree(bridge_device); } @@ -1221,22 +1229,25 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid, return err; } -static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp, - const unsigned char *addr, - u16 fid) +static struct +mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device, + const unsigned char *addr, + u16 fid) { struct mlxsw_sp_mid *mid; - list_for_each_entry(mid, &mlxsw_sp->bridge->mids_list, list) { + list_for_each_entry(mid, &bridge_device->mids_list, list) { if (ether_addr_equal(mid->addr, addr) && mid->fid == fid) return mid; } return NULL; } -static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, - const unsigned char *addr, - u16 fid) +static struct +mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_bridge_device *bridge_device, + const unsigned char *addr, + u16 fid) { struct mlxsw_sp_mid *mid; size_t alloc_size; @@ -1263,7 +1274,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, ether_addr_copy(mid->addr, addr); mid->fid = fid; mid->mid = mid_idx; - list_add_tail(&mid->list, &mlxsw_sp->bridge->mids_list); + list_add_tail(&mid->list, &bridge_device->mids_list); return mid; } @@ -1316,9 +1327,10 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); - mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid_index); + mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index); if (!mid) { - mid = 
__mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid_index); + mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr, + fid_index); if (!mid) { netdev_err(dev, "Unable to allocate MC group\n"); return -ENOMEM; @@ -1440,7 +1452,7 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid); - mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid_index); + mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index); if (!mid) { netdev_err(dev, "Unable to remove port from MC DB\n"); return -EINVAL; @@ -1995,17 +2007,6 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp) } -static void mlxsw_sp_mids_fini(struct mlxsw_sp *mlxsw_sp) -{ - struct mlxsw_sp_mid *mid, *tmp; - - list_for_each_entry_safe(mid, tmp, &mlxsw_sp->bridge->mids_list, list) { - list_del(&mid->list); - clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap); - kfree(mid); - } -} - int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) { struct mlxsw_sp_bridge *bridge; @@ -2017,7 +2018,6 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) bridge->mlxsw_sp = mlxsw_sp; INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list); - INIT_LIST_HEAD(&mlxsw_sp->bridge->mids_list); bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops; bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops; @@ -2028,7 +2028,6 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp) { mlxsw_sp_fdb_fini(mlxsw_sp); - mlxsw_sp_mids_fini(mlxsw_sp); WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list)); kfree(mlxsw_sp->bridge); } -- cgit v1.2.3 From 5f9abc597cdd7a63433b7ebfcf26ee2746a76638 Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Wed, 20 Sep 2017 16:15:05 +0200 Subject: mlxsw: spectrum_switchdev: Break smid write function Break the smid write function into two: one that cleans out the ports that might still be written there, and one that changes an existing mid entry. Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S.
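[Editor's note: a hedged sketch of the call order that results from the split, using the two helpers introduced in the diff below; error handling is elided. A caller installing a brand-new mid would first scrub the whole SMID entry, then register individual ports in it.]

	mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx);	/* clear stale ports */
	mlxsw_sp_port_smid_set(mlxsw_sp_port, mid_idx, true);	/* add this port */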
Miller --- .../ethernet/mellanox/mlxsw/spectrum_switchdev.c | 42 +++++++++++++++------- 1 file changed, 30 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 2ba8a44e1933..09ead97d9442 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1190,7 +1190,7 @@ mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port, } static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, - u16 fid, u16 mid, bool adding) + u16 fid, u16 mid_idx, bool adding) { char *sfd_pl; int err; @@ -1201,16 +1201,16 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0); mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid, - MLXSW_REG_SFD_REC_ACTION_NOP, mid); + MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl); kfree(sfd_pl); return err; } -static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid, - bool add, bool clear_all_ports) +/* clean the an entry from the HW and write there a full new entry */ +static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, + u16 mid_idx) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char *smid_pl; int err, i; @@ -1218,12 +1218,29 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid, if (!smid_pl) return -ENOMEM; - mlxsw_reg_smid_pack(smid_pl, mid, mlxsw_sp_port->local_port, add); - if (clear_all_ports) { - for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) - if (mlxsw_sp->ports[i]) - mlxsw_reg_smid_port_mask_set(smid_pl, i, 1); + mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false); + for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) { + if (mlxsw_sp->ports[i]) + mlxsw_reg_smid_port_mask_set(smid_pl, i, 1); } + + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); + kfree(smid_pl); + return err; +} + +static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, + u16 mid_idx, bool add) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char *smid_pl; + int err; + + smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); + if (!smid_pl) + return -ENOMEM; + + mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); kfree(smid_pl); return err; @@ -1336,10 +1353,11 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, return -ENOMEM; } is_new_mid = true; + mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid->mid); } set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid); - err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true, is_new_mid); + err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true); if (err) { netdev_err(dev, "Unable to set SMID\n"); goto err_out; @@ -1458,7 +1476,7 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, return -EINVAL; } - err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false, false); + err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false); if (err) netdev_err(dev, "Unable to remove port from SMID\n"); -- cgit v1.2.3 From 73b433e803d2a3547ee38d1fb2a0bc6f3b03a6d9 Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Wed, 20 Sep 2017 16:15:06 +0200 Subject: mlxsw: spectrum_switchdev: Attach mid id allocation to HW write Attach mid getting and releasing mid 
id to the HW write / remove, and add a flag to indicate whether the mid is in the HW. It is done because mid id is also HW index to this mid. This change allows adding in the following patches the ability to have a mid in the mdb cache but not in the HW. It will be useful for being able to disable the multicast. It means that the mdb is being written / delete to the HW in the mid allocation / removing function, not after them. Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 1 + .../ethernet/mellanox/mlxsw/spectrum_switchdev.c | 88 ++++++++++++++-------- 2 files changed, 56 insertions(+), 33 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 6fd0afe4b7a3..e907ec446a73 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -94,6 +94,7 @@ struct mlxsw_sp_mid { unsigned char addr[ETH_ALEN]; u16 fid; u16 mid; + bool in_hw; unsigned long *ports_in_mid; /* bits array */ }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 09ead97d9442..9dd05d87b662 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1260,6 +1260,42 @@ mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device, return NULL; } +static bool +mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mid *mid) +{ + u16 mid_idx; + int err; + + mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap, + MLXSW_SP_MID_MAX); + if (mid_idx == MLXSW_SP_MID_MAX) + return false; + + mid->mid = mid_idx; + err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx); + if (err) + return false; + + err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx, + true); + if (err) + return false; + + set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap); + mid->in_hw = true; + return true; +} + +static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mid *mid) +{ + clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap); + mid->in_hw = false; + return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid, + false); +} + static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_bridge_device *bridge_device, @@ -1268,12 +1304,6 @@ mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_mid *mid; size_t alloc_size; - u16 mid_idx; - - mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap, - MLXSW_SP_MID_MAX); - if (mid_idx == MLXSW_SP_MID_MAX) - return NULL; mid = kzalloc(sizeof(*mid), GFP_KERNEL); if (!mid) @@ -1281,36 +1311,43 @@ mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, alloc_size = sizeof(unsigned long) * BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core)); + mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL); - if (!mid->ports_in_mid) { - kfree(mid); - return NULL; - } + if (!mid->ports_in_mid) + goto err_ports_in_mid_alloc; - set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap); ether_addr_copy(mid->addr, addr); mid->fid = fid; - mid->mid = mid_idx; + mid->in_hw = false; + if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid)) + goto err_write_mdb_entry; + list_add_tail(&mid->list, &bridge_device->mids_list); return mid; + +err_write_mdb_entry: + kfree(mid->ports_in_mid); 
+err_ports_in_mid_alloc: + kfree(mid); + return NULL; +} + static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_mid *mid) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + int err = 0; clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid); if (bitmap_empty(mid->ports_in_mid, mlxsw_core_max_ports(mlxsw_sp->core))) { + err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid); list_del(&mid->list); - clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap); kfree(mid->ports_in_mid); kfree(mid); - return 1; } - return 0; + return err; } static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, @@ -1324,7 +1361,6 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_device *bridge_device; struct mlxsw_sp_bridge_port *bridge_port; struct mlxsw_sp_mid *mid; - bool is_new_mid = false; u16 fid_index; int err = 0; @@ -1352,8 +1388,6 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, netdev_err(dev, "Unable to allocate MC group\n"); return -ENOMEM; } - is_new_mid = true; - mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid->mid); } set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid); @@ -1363,15 +1397,6 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, goto err_out; } - if (is_new_mid) { - err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid_index, - mid->mid, true); - if (err) { - netdev_err(dev, "Unable to set MC SFD\n"); - goto err_out; - } - } - return 0; err_out: @@ -1481,12 +1506,9 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, netdev_err(dev, "Unable to remove port from SMID\n"); mid_idx = mid->mid; - if (mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid)) { - err = mlxsw_sp_port_mdb_op(mlxsw_sp, mdb->addr, fid_index, - mid_idx, false); - if (err) - netdev_err(dev, "Unable to remove MC SFD\n"); - } + err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid); + if (err) + netdev_err(dev, "Unable to remove MC SFD\n"); return err; } -- cgit v1.2.3 From 061e55bfb83e632afcd34130bb19fe7a32325b02 Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Wed, 20 Sep 2017 16:15:07 +0200 Subject: mlxsw: spectrum_switchdev: Break mid deletion into two functions Break mid deletion into two functions, so it will be possible in the future to delete a mid entry for other reasons than a switchdev command (like port deletion). Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S.
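[Editor's note: a sketch of the kind of caller this split enables, using the __mlxsw_sp_port_mdb_del() helper introduced below; declarations are elided and the loop is hypothetical here, though an equivalent flush is added for real a few patches later when a port leaves a bridge.]

	/* Delete every MDB entry a departing port is still registered in. */
	list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list)
		if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
			__mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);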
Miller --- .../ethernet/mellanox/mlxsw/spectrum_switchdev.c | 32 ++++++++++++++-------- 1 file changed, 20 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 9dd05d87b662..7f622de6331c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1468,6 +1468,25 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, return 0; } +static int +__mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_bridge_port *bridge_port, + struct mlxsw_sp_mid *mid) +{ + struct net_device *dev = mlxsw_sp_port->dev; + int err; + + err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false); + if (err) + netdev_err(dev, "Unable to remove port from SMID\n"); + + err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid); + if (err) + netdev_err(dev, "Unable to remove MC SFD\n"); + + return err; +} + static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_mdb *mdb) { @@ -1479,8 +1498,6 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_port *bridge_port; struct mlxsw_sp_mid *mid; u16 fid_index; - u16 mid_idx; - int err = 0; bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); if (!bridge_port) @@ -1501,16 +1518,7 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, return -EINVAL; } - err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false); - if (err) - netdev_err(dev, "Unable to remove port from SMID\n"); - - mid_idx = mid->mid; - err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid); - if (err) - netdev_err(dev, "Unable to remove MC SFD\n"); - - return err; + return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid); } static int mlxsw_sp_port_obj_del(struct net_device *dev, const struct switchdev_obj *obj) -- cgit v1.2.3 From 846fd8a0e7dcd9f455a86dc17ddf0a51c124f9c0 Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Wed, 20 Sep 2017 16:15:08 +0200 Subject: mlxsw: spectrum_switchdev: Don't write mids to the HW when mc is disabled Don't write multicast-related data to the HW when mc is disabled. Also, don't allocate a mid id for new mids (so the remove function can know that they weren't written to the HW). Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S.
Miller --- .../ethernet/mellanox/mlxsw/spectrum_switchdev.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 7f622de6331c..cea257a77d09 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1290,6 +1290,9 @@ mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_mid *mid) { + if (!mid->in_hw) + return 0; + clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap); mid->in_hw = false; return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid, @@ -1319,11 +1322,15 @@ mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, ether_addr_copy(mid->addr, addr); mid->fid = fid; mid->in_hw = false; + + if (!bridge_device->multicast_enabled) + goto out; + if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid)) goto err_write_mdb_entry; +out: list_add_tail(&mid->list, &bridge_device->mids_list); - return mid; err_write_mdb_entry: @@ -1391,6 +1398,9 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, } set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid); + if (!bridge_device->multicast_enabled) + return 0; + err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true); if (err) { netdev_err(dev, "Unable to set SMID\n"); @@ -1476,9 +1486,12 @@ __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *dev = mlxsw_sp_port->dev; int err; - err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false); - if (err) - netdev_err(dev, "Unable to remove port from SMID\n"); + if (bridge_port->bridge_device->multicast_enabled) { + err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false); + + if (err) + netdev_err(dev, "Unable to remove port from SMID\n"); + } err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid); if (err) -- cgit v1.2.3 From 2e3496cd3488729200ff0f1c6381b7016ecd41bd Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Wed, 20 Sep 2017 16:15:09 +0200 Subject: mlxsw: spectrum_switchdev: Disable mdb when mc is disabled Remove all the mdb entries from the HW when mc is being disabled and re-write them when it is being enabled. Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
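[Editor's note: a brief usage observation. From userspace this path is exercised by toggling IGMP snooping on the bridge, e.g. "ip link set dev br0 type bridge mcast_snooping 0" (the device name is arbitrary); the bridge reports the change as the SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED attribute, which ends up in the mc-enable sync added below.]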
Miller --- .../ethernet/mellanox/mlxsw/spectrum_switchdev.c | 41 +++++++++++++++++++--- 1 file changed, 37 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index cea257a77d09..79806af87b93 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -121,6 +121,11 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_bridge_port *bridge_port, u16 fid_index); +static void +mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_bridge_device + *bridge_device); + static struct mlxsw_sp_bridge_device * mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge, const struct net_device *br_dev) @@ -757,6 +762,12 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port, if (!bridge_device) return 0; + if (bridge_device->multicast_enabled != !mc_disabled) { + bridge_device->multicast_enabled = !mc_disabled; + mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port, + bridge_device); + } + list_for_each_entry(bridge_port, &bridge_device->ports_list, list) { enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC; bool member = mc_disabled ? true : bridge_port->mrouter; @@ -1207,9 +1218,8 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, return err; } -/* clean the an entry from the HW and write there a full new entry */ -static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, - u16 mid_idx) +static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx, + long *ports_bitmap) { char *smid_pl; int err, i; @@ -1224,6 +1234,9 @@ static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, mlxsw_reg_smid_port_mask_set(smid_pl, i, 1); } + for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core)) + mlxsw_reg_smid_port_set(smid_pl, i, 1); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); kfree(smid_pl); return err; @@ -1273,7 +1286,8 @@ mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp, return false; mid->mid = mid_idx; - err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx); + err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, + mid->ports_in_mid); if (err) return false; @@ -1414,6 +1428,25 @@ err_out: return err; } +static void +mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_bridge_device + *bridge_device) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_mid *mid; + bool mc_enabled; + + mc_enabled = bridge_device->multicast_enabled; + + list_for_each_entry(mid, &bridge_device->mids_list, list) { + if (mc_enabled) + mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid); + else + mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid); + } +} + static int mlxsw_sp_port_obj_add(struct net_device *dev, const struct switchdev_obj *obj, struct switchdev_trans *trans) -- cgit v1.2.3 From 218a8f8a6379bfd359e58f369b7b7660cd12e865 Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Wed, 20 Sep 2017 16:15:10 +0200 Subject: mlxsw: spectrum_switchdev: Use generic mc flood function Use the generic mc flood function to decide whether to flood mc to a port when mc is being enabled / disabled. Move this function in the file to avoid forward declaration. Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 79806af87b93..19ac206879ff 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -742,6 +742,14 @@ out: return 0; } +static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port) +{ + const struct mlxsw_sp_bridge_device *bridge_device; + + bridge_device = bridge_port->bridge_device; + return !bridge_device->multicast_enabled ? true : bridge_port->mrouter; +} + static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port, struct switchdev_trans *trans, struct net_device *orig_dev, @@ -770,7 +778,7 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port, list_for_each_entry(bridge_port, &bridge_device->ports_list, list) { enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC; - bool member = mc_disabled ? true : bridge_port->mrouter; + bool member = mlxsw_sp_mc_flood(bridge_port); err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, @@ -829,14 +837,6 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, return err; } -static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port) -{ - const struct mlxsw_sp_bridge_device *bridge_device; - - bridge_device = bridge_port->bridge_device; - return !bridge_device->multicast_enabled ? true : bridge_port->mrouter; -} - static int mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, struct mlxsw_sp_bridge_port *bridge_port) -- cgit v1.2.3 From 9dad51bdaa4b1846bd9d5307b54dca74efa555ea Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Wed, 20 Sep 2017 16:15:11 +0200 Subject: mlxsw: spectrum_switchdev: Flood mc when mc is disabled by user flag When multicast is disabled, flood mc packets only to ports that are marked BR_MCAST_FLOOD (instead of to all). Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S.
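[Editor's note: a brief usage observation. The per-port flag is set from userspace with iproute2, e.g. "bridge link set dev swp1 mcast_flood off" (the port name is arbitrary), so with multicast snooping disabled on the bridge, flooded multicast now reaches only the ports whose flag is left on, as the diff below implements.]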
Miller --- .../ethernet/mellanox/mlxsw/spectrum_switchdev.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 19ac206879ff..50c4d7c735df 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -262,7 +262,8 @@ mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device, bridge_port->dev = brport_dev; bridge_port->bridge_device = bridge_device; bridge_port->stp_state = BR_STATE_DISABLED; - bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC; + bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC | + BR_MCAST_FLOOD; INIT_LIST_HEAD(&bridge_port->vlans_list); list_add(&bridge_port->list, &bridge_device->ports_list); bridge_port->ref_count = 1; @@ -468,7 +469,8 @@ static int mlxsw_sp_port_attr_get(struct net_device *dev, &attr->u.brport_flags); break; case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT: - attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD; + attr->u.brport_flags_support = BR_LEARNING | BR_FLOOD | + BR_MCAST_FLOOD; break; default: return -EOPNOTSUPP; @@ -653,8 +655,18 @@ static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port, if (err) return err; - memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags)); + if (bridge_port->bridge_device->multicast_enabled) + goto out; + err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port, + MLXSW_SP_FLOOD_TYPE_MC, + brport_flags & + BR_MCAST_FLOOD); + if (err) + return err; + +out: + memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags)); return 0; } @@ -747,7 +759,8 @@ static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port) const struct mlxsw_sp_bridge_device *bridge_device; bridge_device = bridge_port->bridge_device; - return !bridge_device->multicast_enabled ? true : bridge_port->mrouter; + return bridge_device->multicast_enabled ? bridge_port->mrouter : + bridge_port->flags & BR_MCAST_FLOOD; } static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port, -- cgit v1.2.3 From bb5355b27c9da3786a2b5e1583c9d64f492ac7ad Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Wed, 20 Sep 2017 16:15:12 +0200 Subject: mlxsw: spectrum_switchdev: Flush the mdb when a port is being removed When a port is being removed from a bridge, flush the bridge mdb to remove the mids of that port. Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../ethernet/mellanox/mlxsw/spectrum_switchdev.c | 39 ++++++++++++++++------ 1 file changed, 29 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 50c4d7c735df..bc0787312a06 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -121,6 +121,10 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_bridge_port *bridge_port, u16 fid_index); +static void +mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_bridge_port *bridge_port); + static void mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_device @@ -176,17 +180,11 @@ static void mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge, struct mlxsw_sp_bridge_device *bridge_device) { - struct mlxsw_sp_mid *mid, *tmp; - list_del(&bridge_device->list); if (bridge_device->vlan_enabled) bridge->vlan_enabled_exists = false; WARN_ON(!list_empty(&bridge_device->ports_list)); - list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) { - list_del(&mid->list); - clear_bit(mid->mid, bridge->mids_bitmap); - kfree(mid); - } + WARN_ON(!list_empty(&bridge_device->mids_list)); kfree(bridge_device); } @@ -987,24 +985,28 @@ mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) struct mlxsw_sp_bridge_vlan *bridge_vlan; struct mlxsw_sp_bridge_port *bridge_port; u16 vid = mlxsw_sp_port_vlan->vid; - bool last; + bool last_port, last_vlan; if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q && mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D)) return; bridge_port = mlxsw_sp_port_vlan->bridge_port; + last_vlan = list_is_singular(&bridge_port->vlans_list); bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid); - last = list_is_singular(&bridge_vlan->port_vlan_list); + last_port = list_is_singular(&bridge_vlan->port_vlan_list); list_del(&mlxsw_sp_port_vlan->bridge_vlan_node); mlxsw_sp_bridge_vlan_put(bridge_vlan); mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED); mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false); - if (last) + if (last_port) mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp, bridge_port, mlxsw_sp_fid_index(fid)); + if (last_vlan) + mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port); + mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan); mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port); @@ -1580,6 +1582,23 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid); } +static void +mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_bridge_port *bridge_port) +{ + struct mlxsw_sp_bridge_device *bridge_device; + struct mlxsw_sp_mid *mid, *tmp; + + bridge_device = bridge_port->bridge_device; + + list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) { + if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) { + __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, + mid); + } + } +} + static int mlxsw_sp_port_obj_del(struct net_device *dev, const struct switchdev_obj *obj) { -- cgit v1.2.3 From 3fba877cb68cfbc1826dd4abc7b1a8fe862adb2a Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Wed, 20 Sep 2017 16:15:13 +0200 Subject: mlxsw: spectrum_switchdev: Flood all mc packets to 
mrouter ports When mc is enabled, whenever an mc packet doesn't hit any mdb entry it is flooded to the ports marked as mrouters. However, all mc packets should be flooded to them even if they match an entry in the mdb. This patch adds the mrouter ports to every mdb entry that is being written to the HW. Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../ethernet/mellanox/mlxsw/spectrum_switchdev.c | 65 ++++++++++++++++++++-- 1 file changed, 60 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index bc0787312a06..146beaa6b2da 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1288,10 +1288,55 @@ mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device, return NULL; } +static void +mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_bridge_port *bridge_port, + unsigned long *ports_bitmap) +{ + struct mlxsw_sp_port *mlxsw_sp_port; + u64 max_lag_members, i; + int lag_id; + + if (!bridge_port->lagged) { + set_bit(bridge_port->system_port, ports_bitmap); + } else { + max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, + MAX_LAG_MEMBERS); + lag_id = bridge_port->lag_id; + for (i = 0; i < max_lag_members; i++) { + mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, + lag_id, i); + if (mlxsw_sp_port) + set_bit(mlxsw_sp_port->local_port, + ports_bitmap); + } + } +} + +static void +mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap, + struct mlxsw_sp_bridge_device *bridge_device, + struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_bridge_port *bridge_port; + + list_for_each_entry(bridge_port, &bridge_device->ports_list, list) { + if (bridge_port->mrouter) { + mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp, + bridge_port, + flood_bitmap); + } + } +} + static bool mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_mid *mid) + struct mlxsw_sp_mid *mid, + struct mlxsw_sp_bridge_device *bridge_device) { + long *flood_bitmap; + int num_of_ports; + int alloc_size; u16 mid_idx; int err; @@ -1300,9 +1345,18 @@ mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp, if (mid_idx == MLXSW_SP_MID_MAX) return false; + num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core); + alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports); + flood_bitmap = kzalloc(alloc_size, GFP_KERNEL); + if (!flood_bitmap) + return false; + + bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports); + mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp); + mid->mid = mid_idx; - err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, - mid->ports_in_mid); + err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap); + kfree(flood_bitmap); if (err) return false; @@ -1355,7 +1409,7 @@ mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, if (!bridge_device->multicast_enabled) goto out; - if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid)) + if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device)) goto err_write_mdb_entry; out: @@ -1456,7 +1510,8 @@ mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port, list_for_each_entry(mid, &bridge_device->mids_list, list) { if (mc_enabled) - mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid); + mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, + bridge_device); else mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid); } -- cgit v1.2.3
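The vector handed to mlxsw_sp_port_smid_full_entry() above is thus the union of the MDB entry's registered ports and the bridge's current mrouter ports. A minimal sketch of that union with the kernel bitmap API (build_flood_vector() and its arguments are illustrative, not the driver's own helpers):

```c
#include <linux/bitmap.h>
#include <linux/slab.h>

/* Sketch: compute the flood vector for one MDB entry. ports_in_mid is
 * the bitmap of ports registered to the group; mrouter_ports is the
 * bitmap of current mrouter ports; both are max_ports bits wide.
 * The caller owns (and kfree()s) the returned bitmap.
 */
static unsigned long *build_flood_vector(const unsigned long *ports_in_mid,
					 const unsigned long *mrouter_ports,
					 unsigned int max_ports)
{
	unsigned long *flood;

	flood = kcalloc(BITS_TO_LONGS(max_ports), sizeof(unsigned long),
			GFP_KERNEL);
	if (!flood)
		return NULL;

	/* Ports that joined the group flood... */
	bitmap_copy(flood, ports_in_mid, max_ports);
	/* ...and so do mrouter ports, whether or not they joined. */
	bitmap_or(flood, flood, mrouter_ports, max_ports);
	return flood;
}
```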
From 3ddda1178e41bbe26bb5c6ebf66ae3d0a87ac410 Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Wed, 20 Sep 2017 16:15:14 +0200 Subject: mlxsw: spectrum_switchdev: Update the mdb of mrouter port change Whenever a port starts / stops being a mrouter, update all the mdb entries in the HW to flood / stop flooding mc packets there. The change should happen only if the port is not in the mid. (If it is, the mid should flood mc packets to this port anyway.) Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../ethernet/mellanox/mlxsw/spectrum_switchdev.c | 23 ++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 146beaa6b2da..bf1a17557fb3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -130,6 +130,11 @@ mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_device *bridge_device); +static void +mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_bridge_port *bridge_port, + bool add); + static struct mlxsw_sp_bridge_device * mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge, const struct net_device *br_dev) @@ -747,6 +752,8 @@ static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port, if (err) return err; + mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port, + is_port_mrouter); out: bridge_port->mrouter = is_port_mrouter; return 0; @@ -1517,6 +1524,22 @@ mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port, } } +static void +mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_bridge_port *bridge_port, + bool add) +{ + struct mlxsw_sp_bridge_device *bridge_device; + struct mlxsw_sp_mid *mid; + + bridge_device = bridge_port->bridge_device; + + list_for_each_entry(mid, &bridge_device->mids_list, list) { + if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) + mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add); + } +} + static int mlxsw_sp_port_obj_add(struct net_device *dev, const struct switchdev_obj *obj, struct switchdev_trans *trans) -- cgit v1.2.3 From 0166277706e57779f06b741d25a9e86d99726e2a Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Wed, 20 Sep 2017 16:15:15 +0200 Subject: mlxsw: spectrum_switchdev: Remove mrouter flood in mdb flush In mdb flush the port is being removed from all the mids it is registered to. But if the port is a mrouter, all the mids flood to it. This patch removes mrouter ports from mids they are not registered to in the mdb flush. Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller
--- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index bf1a17557fb3..459cedc23c47 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1673,6 +1673,9 @@ mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) { __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid); + } else if (bridge_device->multicast_enabled && + bridge_port->mrouter) { + mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false); } } } -- cgit v1.2.3 From ded711c87a0411a7f3f56f8c575d7b642ee0110e Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Wed, 20 Sep 2017 16:15:16 +0200 Subject: mlxsw: spectrum_switchdev: Consider mrouter status for mdb changes When a mrouter is registered or leaves a mid, don't update the HW. Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 459cedc23c47..0f9eac5f4ebf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -1491,6 +1491,9 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, if (!bridge_device->multicast_enabled) return 0; + if (bridge_port->mrouter) + return 0; + err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true); if (err) { netdev_err(dev, "Unable to set SMID\n"); @@ -1613,10 +1616,12 @@ __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, int err; if (bridge_port->bridge_device->multicast_enabled) { - err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false); - - if (err) - netdev_err(dev, "Unable to remove port from SMID\n"); + if (!bridge_port->mrouter) { + err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, + false); + if (err) + netdev_err(dev, "Unable to remove port from SMID\n"); + } } err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid); -- cgit v1.2.3
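What follows is a long run of mechanical conversions by Allen Pais, all with the same shape: open-coded init_timer() plus manual assignment of the timer's function and data fields becomes a single setup_timer() call. A minimal before/after sketch of the pattern against the pre-4.15 timer API (struct my_priv, my_timer_fn() and my_open() are illustrative names, not taken from any of the drivers below):

```c
#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_priv {
	struct timer_list timer;
};

/* Pre-4.15 timer callbacks receive the timer's data field as argument. */
static void my_timer_fn(unsigned long data)
{
	struct my_priv *priv = (struct my_priv *)data;

	/* ... service the device ... */
	mod_timer(&priv->timer, jiffies + HZ);	/* re-arm */
}

static void my_open(struct my_priv *priv)
{
	/*
	 * Open-coded form being replaced:
	 *
	 *	init_timer(&priv->timer);
	 *	priv->timer.function = my_timer_fn;
	 *	priv->timer.data = (unsigned long)priv;
	 */
	setup_timer(&priv->timer, my_timer_fn, (unsigned long)priv);

	/* expires is not covered by setup_timer() and is still set by hand. */
	priv->timer.expires = jiffies + HZ;
	add_timer(&priv->timer);
}
```

Several of the conversions below keep a pre-existing cast such as (void (*)(unsigned long)) or (void *) around the callback; setup_timer() only changes how the fields are filled in, it does not alter the callback signature the timer core expects.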
From 4f88836d4f806d212361eb426bc8a6ce897dbef9 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:22 +0530 Subject: drivers: net: de4x: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/dec/tulip/de4x5.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 0affee9c8aa2..299812e92db7 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -1147,9 +1147,8 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) lp->timeout = -1; lp->gendev = gendev; spin_lock_init(&lp->lock); - init_timer(&lp->timer); - lp->timer.function = (void (*)(unsigned long))de4x5_ast; - lp->timer.data = (unsigned long)dev; + setup_timer(&lp->timer, (void (*)(unsigned long))de4x5_ast, + (unsigned long)dev); de4x5_parse_params(dev); /* -- cgit v1.2.3 From cdc91b31b81abaa2cf491fd5e9007f4bcd45bc68 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:23 +0530 Subject: drivers: net: b44: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/b44.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index a1125d10c825..42e44fc03a18 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -1474,10 +1474,8 @@ static int b44_open(struct net_device *dev) goto out; } - init_timer(&bp->timer); + setup_timer(&bp->timer, b44_timer, (unsigned long)bp); bp->timer.expires = jiffies + HZ; - bp->timer.data = (unsigned long) bp; - bp->timer.function = b44_timer; add_timer(&bp->timer); b44_enable_ints(bp); -- cgit v1.2.3 From 334e4a7d5505f59a741b0549f41082e29a914439 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:24 +0530 Subject: drivers: net: pcnet32: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/pcnet32.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 7f60d17819ce..e46153654016 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1970,9 +1970,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) lp->options |= PCNET32_PORT_MII; } - init_timer(&lp->watchdog_timer); - lp->watchdog_timer.data = (unsigned long)dev; - lp->watchdog_timer.function = (void *)&pcnet32_watchdog; + setup_timer(&lp->watchdog_timer, (void *)&pcnet32_watchdog, + (unsigned long)dev); /* The PCNET32-specific entries in the device structure. */ dev->netdev_ops = &pcnet32_netdev_ops; -- cgit v1.2.3 From ba4cc08793a58a97cffc2769acaa53fff4433332 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:26 +0530 Subject: drivers: net: niu: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S.
Miller --- drivers/net/ethernet/sun/niu.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 6a4e8e1bbd90..bde19b307d0d 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -6123,10 +6123,8 @@ static int niu_open(struct net_device *dev) err = niu_init_hw(np); if (!err) { - init_timer(&np->timer); + setup_timer(&np->timer, niu_timer, (unsigned long)np); np->timer.expires = jiffies + HZ; - np->timer.data = (unsigned long) np; - np->timer.function = niu_timer; err = niu_enable_interrupts(np, 1); if (err) -- cgit v1.2.3 From c3bd81cccbaa89da70047f1dcc73443f889735a8 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:27 +0530 Subject: drivers: net: bcm63xx: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 4f3845a58126..f8bbbbfca06e 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1857,9 +1857,8 @@ static int bcm_enet_probe(struct platform_device *pdev) spin_lock_init(&priv->rx_lock); /* init rx timeout (used for oom) */ - init_timer(&priv->rx_timeout); - priv->rx_timeout.function = bcm_enet_refill_rx_timer; - priv->rx_timeout.data = (unsigned long)dev; + setup_timer(&priv->rx_timeout, bcm_enet_refill_rx_timer, + (unsigned long)dev); /* init the mib update lock&work */ mutex_init(&priv->mib_update_lock); -- cgit v1.2.3 From b0b404bd9ba28edb795b999e3c35596e236cc5dd Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:28 +0530 Subject: drivers: net: declance: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/declance.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c index 82cc81385033..9bdf81c2cd00 100644 --- a/drivers/net/ethernet/amd/declance.c +++ b/drivers/net/ethernet/amd/declance.c @@ -1246,9 +1246,9 @@ static int dec_lance_probe(struct device *bdev, const int type) * can occur from interrupts (ex. IPv6). So we * use a timer to try again later when necessary. -DaveM */ - init_timer(&lp->multicast_timer); - lp->multicast_timer.data = (unsigned long) dev; - lp->multicast_timer.function = lance_set_multicast_retry; + setup_timer(&lp->multicast_timer, lance_set_multicast_retry, + (unsigned long)dev); + ret = register_netdev(dev); if (ret) { -- cgit v1.2.3 From aa0c72859972f209d7d07654037bd974c11f93ed Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:29 +0530 Subject: drivers: net: am79c961: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/am79c961a.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c index b11e910850f7..0612dbee00d2 100644 --- a/drivers/net/ethernet/amd/am79c961a.c +++ b/drivers/net/ethernet/amd/am79c961a.c @@ -728,9 +728,7 @@ static int am79c961_probe(struct platform_device *pdev) am79c961_banner(); spin_lock_init(&priv->chip_lock); - init_timer(&priv->timer); - priv->timer.data = (unsigned long)dev; - priv->timer.function = am79c961_timer; + setup_timer(&priv->timer, am79c961_timer, (unsigned long)dev); if (am79c961_hw_init(dev)) goto release; -- cgit v1.2.3 From 07b6901f61813aa547c5a25e118f977022fec9eb Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:30 +0530 Subject: drivers: net: et131x: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/agere/et131x.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index 54eff90e2f02..658e92f79d36 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -3624,11 +3624,10 @@ static int et131x_open(struct net_device *netdev) int result; /* Start the timer to track NIC errors */ - init_timer(&adapter->error_timer); + setup_timer(&adapter->error_timer, et131x_error_timer_handler, + (unsigned long)adapter); adapter->error_timer.expires = jiffies + msecs_to_jiffies(TX_ERROR_PERIOD); - adapter->error_timer.function = et131x_error_timer_handler; - adapter->error_timer.data = (unsigned long)adapter; add_timer(&adapter->error_timer); result = request_irq(irq, et131x_isr, -- cgit v1.2.3 From e7bbad4487ae4005904d00a0a04622f07fadbc5b Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:33 +0530 Subject: drivers: net: atp: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/atp.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c index bed34684994f..bdc3833fab7e 100644 --- a/drivers/net/ethernet/realtek/atp.c +++ b/drivers/net/ethernet/realtek/atp.c @@ -438,10 +438,8 @@ static int net_open(struct net_device *dev) hardware_init(dev); - init_timer(&lp->timer); + setup_timer(&lp->timer, atp_timed_checker, (unsigned long)dev); lp->timer.expires = jiffies + TIMED_CHECKER; - lp->timer.data = (unsigned long)dev; - lp->timer.function = atp_timed_checker; /* timer handler */ add_timer(&lp->timer); netif_start_queue(dev); -- cgit v1.2.3 From f40c9d5aea0fdf5454464bc477898cc981ff9715 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:34 +0530 Subject: drivers: net: ns83820: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. 
Miller --- drivers/net/ethernet/natsemi/ns83820.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c index 729095db3e08..99d3c7884a4a 100644 --- a/drivers/net/ethernet/natsemi/ns83820.c +++ b/drivers/net/ethernet/natsemi/ns83820.c @@ -1652,9 +1652,7 @@ static int ns83820_open(struct net_device *ndev) writel(0, dev->base + TXDP_HI); writel(desc, dev->base + TXDP); - init_timer(&dev->tx_watchdog); - dev->tx_watchdog.data = (unsigned long)ndev; - dev->tx_watchdog.function = ns83820_tx_watch; + setup_timer(&dev->tx_watchdog, ns83820_tx_watch, (unsigned long)ndev); mod_timer(&dev->tx_watchdog, jiffies + 2*HZ); netif_start_queue(ndev); /* FIXME: wait for phy to come up */ -- cgit v1.2.3 From 82a8c6745169ec932473658c28679069a7ded95a Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:35 +0530 Subject: drivers: net: ixgb: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 5a713199653c..1e6ec2277d54 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -508,9 +508,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw); - init_timer(&adapter->watchdog_timer); - adapter->watchdog_timer.function = ixgb_watchdog; - adapter->watchdog_timer.data = (unsigned long)adapter; + setup_timer(&adapter->watchdog_timer, ixgb_watchdog, + (unsigned long)adapter); INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task); -- cgit v1.2.3 From 88e8aa172596d2eda971d3553d52a8e877805e90 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:36 +0530 Subject: drivers: net: sundance: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/dlink/sundance.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index 2704bcf023be..6ca9e981ad57 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -913,10 +913,8 @@ static int netdev_open(struct net_device *dev) ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0)); /* Set the timer to check for link beat. */ - init_timer(&np->timer); + setup_timer(&np->timer, netdev_timer, (unsigned long)dev); np->timer.expires = jiffies + 3*HZ; - np->timer.data = (unsigned long)dev; - np->timer.function = netdev_timer; /* timer handler */ add_timer(&np->timer); /* Enable interrupts by setting the interrupt mask. */ -- cgit v1.2.3 From 4896ad68ec3803d01a0d9fead64451377ea1ec5f Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:37 +0530 Subject: drivers: net: tg3: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/tg3.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 656e6af70f0a..d8d5f207c759 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -11087,9 +11087,7 @@ static void tg3_timer_init(struct tg3 *tp) tp->asf_multiplier = (HZ / tp->timer_offset) * TG3_FW_UPDATE_FREQ_SEC; - init_timer(&tp->timer); - tp->timer.data = (unsigned long) tp; - tp->timer.function = tg3_timer; + setup_timer(&tp->timer, tg3_timer, (unsigned long)tp); } static void tg3_timer_start(struct tg3 *tp) -- cgit v1.2.3 From e998092f7b7c2ce3199db91177aa43397a3f76d7 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:41 +0530 Subject: drivers: net: spider_net: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/toshiba/spider_net.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index cec9e70ab995..a913538d3213 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -2256,16 +2256,14 @@ spider_net_setup_netdev(struct spider_net_card *card) pci_set_drvdata(card->pdev, netdev); - init_timer(&card->tx_timer); - card->tx_timer.function = - (void (*)(unsigned long)) spider_net_cleanup_tx_ring; - card->tx_timer.data = (unsigned long) card; + setup_timer(&card->tx_timer, + (void(*)(unsigned long))spider_net_cleanup_tx_ring, + (unsigned long)card); netdev->irq = card->pdev->irq; card->aneg_count = 0; - init_timer(&card->aneg_timer); - card->aneg_timer.function = spider_net_link_phy; - card->aneg_timer.data = (unsigned long) card; + setup_timer(&card->aneg_timer, spider_net_link_phy, + (unsigned long)card); netif_napi_add(netdev, &card->napi, spider_net_poll, SPIDER_NET_NAPI_WEIGHT); -- cgit v1.2.3 From f891f36603dff38a15390e5c950e8ac66f73352b Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:42 +0530 Subject: drivers: net: sun: cassini: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/sun/cassini.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 382993c1561c..a74d78f64af9 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -5039,10 +5039,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&cp->stat_lock[N_TX_RINGS]); mutex_init(&cp->pm_mutex); - init_timer(&cp->link_timer); - cp->link_timer.function = cas_link_timer; - cp->link_timer.data = (unsigned long) cp; - + setup_timer(&cp->link_timer, cas_link_timer, (unsigned long)cp); #if 1 /* Just in case the implementation of atomic operations * change so that an explicit initialization is necessary. -- cgit v1.2.3 From ba98e9e2eb32ba8f604f947fa656ef2071d22fa0 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:43 +0530 Subject: drivers: net: natsemi: use setup_timer() helper. 
Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/natsemi/natsemi.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index 18af2a23a933..dedeacd0bbca 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -1571,10 +1571,8 @@ static int netdev_open(struct net_device *dev) dev->name, (int)readl(ioaddr + ChipCmd)); /* Set the timer to check for link beat. */ - init_timer(&np->timer); + setup_timer(&np->timer, netdev_timer, (unsigned long)dev); np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ); - np->timer.data = (unsigned long)dev; - np->timer.function = netdev_timer; /* timer handler */ add_timer(&np->timer); return 0; -- cgit v1.2.3 From 3e436a25fcca449cbb044c7483116772873f3e28 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:44 +0530 Subject: drivers: net: winbond-840: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/dec/tulip/winbond-840.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 32d7229544fa..6f88d687b6d2 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -655,10 +655,8 @@ static int netdev_open(struct net_device *dev) netdev_dbg(dev, "Done netdev_open()\n"); /* Set the timer to check for link beat. */ - init_timer(&np->timer); + setup_timer(&np->timer, netdev_timer, (unsigned long)dev); np->timer.expires = jiffies + 1*HZ; - np->timer.data = (unsigned long)dev; - np->timer.function = netdev_timer; /* timer handler */ add_timer(&np->timer); return 0; out_err: -- cgit v1.2.3 From 7afd516ff75e967873d7bdcb8f9b1180c2400b57 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:45 +0530 Subject: drivers: net: enic: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/cisco/enic/enic_main.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index d24ee1ad3be1..4a11baffe02d 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2846,9 +2846,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Setup notification timer, HW reset task, and wq locks */ - init_timer(&enic->notify_timer); - enic->notify_timer.function = enic_notify_timer; - enic->notify_timer.data = (unsigned long)enic; + setup_timer(&enic->notify_timer, enic_notify_timer, + (unsigned long)enic); enic_set_rx_coal_setting(enic); INIT_WORK(&enic->reset, enic_reset); -- cgit v1.2.3 From c41326fbb3a7d64c329267d20c58dd9cc8f22a47 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:46 +0530 Subject: drivers: net: bnx2: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. 
Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index e3af1f3cb61f..b3055a76dfbf 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -8462,10 +8462,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) bnx2_set_default_link(bp); bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX; - init_timer(&bp->timer); + setup_timer(&bp->timer, bnx2_timer, (unsigned long)bp); bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL); - bp->timer.data = (unsigned long) bp; - bp->timer.function = bnx2_timer; #ifdef BCM_CNIC if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN) -- cgit v1.2.3 From 32db034501d37c60c433d24e9faa41c1fa3136e5 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:50 +0530 Subject: drivers: net: smsc: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/smsc/epic100.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 6a0e1d4b597c..2a9724898fcf 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -739,10 +739,8 @@ static int epic_open(struct net_device *dev) /* Set the timer to switch to check for link beat and perhaps switch to an alternate media type. */ - init_timer(&ep->timer); + setup_timer(&ep->timer, epic_timer, (unsigned long)dev); ep->timer.expires = jiffies + 3*HZ; - ep->timer.data = (unsigned long)dev; - ep->timer.function = epic_timer; /* timer handler */ add_timer(&ep->timer); return rc; -- cgit v1.2.3 From d4d8db71db1bf602623e859e6c3e700b604c2072 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:51 +0530 Subject: drivers: net: qlogic: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qla3xxx.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index 2991179c2fd0..05479d435469 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -3891,10 +3891,8 @@ static int ql3xxx_probe(struct pci_dev *pdev, INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work); INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work); - init_timer(&qdev->adapter_timer); - qdev->adapter_timer.function = ql3xxx_timer; + setup_timer(&qdev->adapter_timer, ql3xxx_timer, (unsigned long)qdev); qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */ - qdev->adapter_timer.data = (unsigned long)qdev; if (!cards_found) { pr_alert("%s\n", DRV_STRING); -- cgit v1.2.3 From 4a9c07ed71c2b8d755ee585264f80dd2d82a8066 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:52 +0530 Subject: drivers: net: e1000e: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/e1000e/netdev.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 327dfe5bedc0..8436c5f2c3e8 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -7252,13 +7252,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_eeprom; } - init_timer(&adapter->watchdog_timer); - adapter->watchdog_timer.function = e1000_watchdog; - adapter->watchdog_timer.data = (unsigned long)adapter; - - init_timer(&adapter->phy_info_timer); - adapter->phy_info_timer.function = e1000_update_phy_info; - adapter->phy_info_timer.data = (unsigned long)adapter; + setup_timer(&adapter->watchdog_timer, e1000_watchdog, + (unsigned long)adapter); + setup_timer(&adapter->phy_info_timer, e1000_update_phy_info, + (unsigned long)adapter); INIT_WORK(&adapter->reset_task, e1000_reset_task); INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); -- cgit v1.2.3 From af25c31d4bf4bc7d4dea4c093af8b7704ef24f81 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:53 +0530 Subject: drivers: net: amd: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/sunlance.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c index 291ca5187f12..0183ffb9d3ba 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ b/drivers/net/ethernet/amd/sunlance.c @@ -1459,9 +1459,8 @@ no_link_test: * can occur from interrupts (ex. IPv6). So we * use a timer to try again later when necessary. -DaveM */ - init_timer(&lp->multicast_timer); - lp->multicast_timer.data = (unsigned long) dev; - lp->multicast_timer.function = lance_set_multicast_retry; + setup_timer(&lp->multicast_timer, lance_set_multicast_retry, + (unsigned long)dev); if (register_netdev(dev)) { printk(KERN_ERR "SunLance: Cannot register device.\n"); -- cgit v1.2.3 From cec55a92a98fb3205cc4f127d9b70b5a965f3bd7 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:34:54 +0530 Subject: drivers: net: amd8111e: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/amd8111e.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 7b5df562f30f..7f22af6e37e0 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1883,9 +1883,8 @@ static int amd8111e_probe_one(struct pci_dev *pdev, /* Initialize software ipg timer */ if(lp->options & OPTION_DYN_IPG_ENABLE){ - init_timer(&lp->ipg_data.ipg_timer); - lp->ipg_data.ipg_timer.data = (unsigned long) dev; - lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg; + setup_timer(&lp->ipg_data.ipg_timer, + (void *)&amd8111e_config_ipg, (unsigned long)dev); lp->ipg_data.ipg_timer.expires = jiffies + IPG_CONVERGE_JIFFIES; lp->ipg_data.ipg = DEFAULT_IPG; -- cgit v1.2.3 From 6d2bcc14f5731e9357f15d41f7c5677a72e354f9 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:35:00 +0530 Subject: drivers: net: sun: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/sun/sungem.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index fa607d062cb3..b75ab8f44968 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -2910,9 +2910,7 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) gp->msg_enable = DEFAULT_MSG; - init_timer(&gp->link_timer); - gp->link_timer.function = gem_link_timer; - gp->link_timer.data = (unsigned long) gp; + setup_timer(&gp->link_timer, gem_link_timer, (unsigned long)gp); INIT_WORK(&gp->reset_task, gem_reset_task); -- cgit v1.2.3 From ac803d1c5f62937dc142a35dafd180f09b9f9c83 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:35:01 +0530 Subject: drivers: net: sis900: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/sis/sis900.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index 40bd88362e3d..cb61247b0526 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -1065,10 +1065,8 @@ sis900_open(struct net_device *net_dev) /* Set the timer to switch to check for link beat and perhaps switch to an alternate media type. */ - init_timer(&sis_priv->timer); + setup_timer(&sis_priv->timer, sis900_timer, (unsigned long)net_dev); sis_priv->timer.expires = jiffies + HZ; - sis_priv->timer.data = (unsigned long)net_dev; - sis_priv->timer.function = sis900_timer; add_timer(&sis_priv->timer); return 0; -- cgit v1.2.3 From f1ce56ce5d2a18f5d61ec335aaf5aad978fa9cd6 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:35:02 +0530 Subject: drivers: net: packetengines: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. 
Miller --- drivers/net/ethernet/packetengines/yellowfin.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c index fa7770da6ef8..33c241f52a71 100644 --- a/drivers/net/ethernet/packetengines/yellowfin.c +++ b/drivers/net/ethernet/packetengines/yellowfin.c @@ -632,10 +632,8 @@ static int yellowfin_open(struct net_device *dev) } /* Set the timer to check for link beat. */ - init_timer(&yp->timer); + setup_timer(&yp->timer, yellowfin_timer, (unsigned long)dev); yp->timer.expires = jiffies + 3*HZ; - yp->timer.data = (unsigned long)dev; - yp->timer.function = yellowfin_timer; /* timer handler */ add_timer(&yp->timer); out: return rc; -- cgit v1.2.3 From 590deff6e7a898fadf3f1fd6425937d481913fb1 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:35:03 +0530 Subject: drivers: net: mlx5: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/health.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 8aea0a065e56..a89a68ce53ad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -320,15 +320,13 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; - init_timer(&health->timer); + setup_timer(&health->timer, poll_health, (unsigned long)dev); health->sick = 0; clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags); health->health = &dev->iseg->health; health->health_counter = &dev->iseg->health_counter; - health->timer.data = (unsigned long)dev; - health->timer.function = poll_health; health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL); add_timer(&health->timer); } -- cgit v1.2.3 From d2a0012e7632a588683ad6320529659c4cd27131 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:35:04 +0530 Subject: drivers: net: mlx4: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/catas.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c index 53daa6ca5d83..de0f9e5e42ec 100644 --- a/drivers/net/ethernet/mellanox/mlx4/catas.c +++ b/drivers/net/ethernet/mellanox/mlx4/catas.c @@ -277,7 +277,7 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev) phys_addr_t addr; INIT_LIST_HEAD(&priv->catas_err.list); - init_timer(&priv->catas_err.timer); + setup_timer(&priv->catas_err.timer, poll_catas, (unsigned long)dev); priv->catas_err.map = NULL; if (!mlx4_is_slave(dev)) { @@ -293,8 +293,6 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev) } } - priv->catas_err.timer.data = (unsigned long) dev; - priv->catas_err.timer.function = poll_catas; priv->catas_err.timer.expires = round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL); add_timer(&priv->catas_err.timer); -- cgit v1.2.3 From 636873890c63c892fca5ccab8af3a9f3607eb1fc Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:35:05 +0530 Subject: drivers: net: pxa168: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/pxa168_eth.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 993724959a7c..91b1c154fd29 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1496,9 +1496,8 @@ static int pxa168_eth_probe(struct platform_device *pdev) netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size); memset(&pep->timeout, 0, sizeof(struct timer_list)); - init_timer(&pep->timeout); - pep->timeout.function = rxq_refill_timer_wrapper; - pep->timeout.data = (unsigned long)pep; + setup_timer(&pep->timeout, rxq_refill_timer_wrapper, + (unsigned long)pep); pep->smi_bus = mdiobus_alloc(); if (!pep->smi_bus) { -- cgit v1.2.3 From 34b0cf069d174b2615769102d75b4ce687addeb9 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:35:06 +0530 Subject: drivers: net: fealnx: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/fealnx.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index e92859dab7ae..c8982313d850 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -909,17 +909,13 @@ static int netdev_open(struct net_device *dev) printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name); /* Set the timer to check for link beat. 
*/ - init_timer(&np->timer); + setup_timer(&np->timer, netdev_timer, (unsigned long)dev); np->timer.expires = RUN_AT(3 * HZ); - np->timer.data = (unsigned long) dev; - np->timer.function = netdev_timer; /* timer handler */ add_timer(&np->timer); - init_timer(&np->reset_timer); - np->reset_timer.data = (unsigned long) dev; - np->reset_timer.function = reset_timer; + setup_timer(&np->reset_timer, reset_timer, (unsigned long)dev); np->reset_timer_armed = 0; return rc; } -- cgit v1.2.3 From a76aec2ac51f8a74659cff9b19f712e8fb984393 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:35:07 +0530 Subject: drivers: net: dmfe: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/dec/tulip/dmfe.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c index 07e10a45beaa..6585f737d08b 100644 --- a/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -596,10 +596,8 @@ static int dmfe_open(struct net_device *dev) netif_wake_queue(dev); /* set and active a timer process */ - init_timer(&db->timer); + setup_timer(&db->timer, dmfe_timer, (unsigned long)dev); db->timer.expires = DMFE_TIMER_WUT + HZ * 2; - db->timer.data = (unsigned long)dev; - db->timer.function = dmfe_timer; add_timer(&db->timer); return 0; -- cgit v1.2.3 From 6c43824477c2ac722325ba460c2ce683c48fb76b Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:35:08 +0530 Subject: drivers: net: bnxt: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index aacec8bc19d5..c25f5b555adf 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7190,9 +7190,7 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; - init_timer(&bp->timer); - bp->timer.data = (unsigned long)bp; - bp->timer.function = bnxt_timer; + setup_timer(&bp->timer, bnxt_timer, (unsigned long)bp); bp->current_interval = BNXT_TIMER_INTERVAL; clear_bit(BNXT_STATE_OPEN, &bp->state); -- cgit v1.2.3 From cac40a458ae68b41955cc55a80fc9e0166e429b1 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:35:09 +0530 Subject: drivers: net: amd: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/a2065.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c index e22f976a0d18..998d30e050a6 100644 --- a/drivers/net/ethernet/amd/a2065.c +++ b/drivers/net/ethernet/amd/a2065.c @@ -733,10 +733,9 @@ static int a2065_init_one(struct zorro_dev *z, dev->watchdog_timeo = 5*HZ; dev->dma = 0; - init_timer(&priv->multicast_timer); - priv->multicast_timer.data = (unsigned long) dev; - priv->multicast_timer.function = - (void (*)(unsigned long))lance_set_multicast; + setup_timer(&priv->multicast_timer, + (void(*)(unsigned long))lance_set_multicast, + (unsigned long)dev); err = register_netdev(dev); if (err) { -- cgit v1.2.3 From 7c214194de36217c06b0dc4ed64d9cf251b261df Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:35:10 +0530 Subject: drivers: net: adi: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/adi/bfin_mac.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index a251de8d9a91..0658cde1586a 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -1650,9 +1650,8 @@ static int bfin_mac_probe(struct platform_device *pdev) ndev->netdev_ops = &bfin_mac_netdev_ops; ndev->ethtool_ops = &bfin_mac_ethtool_ops; - init_timer(&lp->tx_reclaim_timer); - lp->tx_reclaim_timer.data = (unsigned long)lp; - lp->tx_reclaim_timer.function = tx_reclaim_skb_timeout; + setup_timer(&lp->tx_reclaim_timer, tx_reclaim_skb_timeout, + (unsigned long)lp); lp->flags = 0; netif_napi_add(ndev, &lp->napi, bfin_mac_poll, CONFIG_BFIN_RX_DESC_NUM); -- cgit v1.2.3 From 9d90725f33ebd0f30790c26eb5e9e0a098567895 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:35:17 +0530 Subject: drivers: net: cpsw_ale: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw_ale.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index ddd43e09111e..cd1185e66133 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -859,9 +859,7 @@ void cpsw_ale_start(struct cpsw_ale *ale) cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1); cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1); - init_timer(&ale->timer); - ale->timer.data = (unsigned long)ale; - ale->timer.function = cpsw_ale_timer; + setup_timer(&ale->timer, cpsw_ale_timer, (unsigned long)ale); if (ale->ageout) { ale->timer.expires = jiffies + ale->ageout; add_timer(&ale->timer); -- cgit v1.2.3 From 997decfb6aeaa9be41ff557741845bb9fb4bf5bc Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:35:18 +0530 Subject: drivers: net: stmmac: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 1763e48c84e2..f41661a04f23 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2217,10 +2217,8 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) { priv->tx_coal_frames = STMMAC_TX_FRAMES; priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; - init_timer(&priv->txtimer); + setup_timer(&priv->txtimer, stmmac_tx_timer, (unsigned long)priv); priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer); - priv->txtimer.data = (unsigned long)priv; - priv->txtimer.function = stmmac_tx_timer; add_timer(&priv->txtimer); } -- cgit v1.2.3 From 9be5813a29e5b3379db30d00319682fe965febe5 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:35:19 +0530 Subject: drivers: net: packetengines: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/packetengines/hamachi.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c index 482b85e4d665..77bc7cca8980 100644 --- a/drivers/net/ethernet/packetengines/hamachi.c +++ b/drivers/net/ethernet/packetengines/hamachi.c @@ -979,10 +979,8 @@ static int hamachi_open(struct net_device *dev) dev->name, readw(ioaddr + RxStatus), readw(ioaddr + TxStatus)); } /* Set the timer to check for link beat. */ - init_timer(&hmp->timer); + setup_timer(&hmp->timer, hamachi_timer, (unsigned long)dev); hmp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */ - hmp->timer.data = (unsigned long)dev; - hmp->timer.function = hamachi_timer; /* timer handler */ add_timer(&hmp->timer); return 0; -- cgit v1.2.3 From 7d8fb3a7742513bb5434be704e0b0bf785032f45 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:35:20 +0530 Subject: drivers: net: i40evf: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 1825d956bb00..c243f9da95ae 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2686,9 +2686,8 @@ static void i40evf_init_task(struct work_struct *work) ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } - init_timer(&adapter->watchdog_timer); - adapter->watchdog_timer.function = &i40evf_watchdog_timer; - adapter->watchdog_timer.data = (unsigned long)adapter; + setup_timer(&adapter->watchdog_timer, &i40evf_watchdog_timer, + (unsigned long)adapter); mod_timer(&adapter->watchdog_timer, jiffies + 1); adapter->tx_desc_count = I40EVF_DEFAULT_TXD; -- cgit v1.2.3 From 99e3aa1ea47d0804a69ea7948ddd1251bcd1a635 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:35:21 +0530 Subject: drivers: net: uli526x: use setup_timer() helper. 
Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/dec/tulip/uli526x.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c index 7fc248efc4ba..5fbbc0caba99 100644 --- a/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -491,10 +491,8 @@ static int uli526x_open(struct net_device *dev) netif_wake_queue(dev); /* set and active a timer process */ - init_timer(&db->timer); + setup_timer(&db->timer, uli526x_timer, (unsigned long)dev); db->timer.expires = ULI526X_TIMER_WUT + HZ * 2; - db->timer.data = (unsigned long)dev; - db->timer.function = uli526x_timer; add_timer(&db->timer); return 0; -- cgit v1.2.3 From 570ba3e82befbba7649e459fedc4aab27510ef44 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:35:22 +0530 Subject: drivers: net: enic: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/cisco/enic/enic_clsf.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.h b/drivers/net/ethernet/cisco/enic/enic_clsf.h index 6aa9f89d073b..4bfbf25f9ddc 100644 --- a/drivers/net/ethernet/cisco/enic/enic_clsf.h +++ b/drivers/net/ethernet/cisco/enic/enic_clsf.h @@ -19,9 +19,8 @@ void enic_flow_may_expire(unsigned long data); static inline void enic_rfs_timer_start(struct enic *enic) { - init_timer(&enic->rfs_h.rfs_may_expire); - enic->rfs_h.rfs_may_expire.function = enic_flow_may_expire; - enic->rfs_h.rfs_may_expire.data = (unsigned long)enic; + setup_timer(&enic->rfs_h.rfs_may_expire, enic_flow_may_expire, + (unsigned long)enic); mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4); } -- cgit v1.2.3 From 66f06890305eb2c8200cefbc3d6405ff6baef47e Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:35:23 +0530 Subject: drivers: net: cxgb: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb/sge.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c index 0f13a7f7c1d3..75e439918700 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -2075,9 +2075,8 @@ struct sge *t1_sge_create(struct adapter *adapter, struct sge_params *p) goto nomem_port; } - init_timer(&sge->tx_reclaim_timer); - sge->tx_reclaim_timer.data = (unsigned long)sge; - sge->tx_reclaim_timer.function = sge_tx_reclaim_cb; + setup_timer(&sge->tx_reclaim_timer, sge_tx_reclaim_cb, + (unsigned long)sge); if (is_T2(sge->adapter)) { init_timer(&sge->espibug_timer); -- cgit v1.2.3 From 804dea920b66fa5813278fc55eaa5b2ae39ab110 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 22:35:24 +0530 Subject: drivers: net: bnx2x: use setup_timer() helper. Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index c12b4d3e946e..54d1571384a0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12414,10 +12414,8 @@ static int bnx2x_init_bp(struct bnx2x *bp) bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ; - init_timer(&bp->timer); + setup_timer(&bp->timer, bnx2x_timer, (unsigned long)bp); bp->timer.expires = jiffies + bp->current_interval; - bp->timer.data = (unsigned long) bp; - bp->timer.function = bnx2x_timer; if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) && SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) && -- cgit v1.2.3 From b6cd4b5895848968e8fee93fc5e3dc8babc40b9e Mon Sep 17 00:00:00 2001 From: Thomas Meyer Date: Thu, 21 Sep 2017 08:15:26 +0200 Subject: e100: Cocci spatch "pool_zalloc-simple" Use *_pool_zalloc rather than *_pool_alloc followed by memset with 0. Found by coccinelle spatch "api/alloc/pool_zalloc-simple.cocci" Signed-off-by: Thomas Meyer Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/e100.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 4d10270ddf8f..184f11242f56 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -1910,11 +1910,10 @@ static int e100_alloc_cbs(struct nic *nic) nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL; nic->cbs_avail = 0; - nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL, - &nic->cbs_dma_addr); + nic->cbs = pci_pool_zalloc(nic->cbs_pool, GFP_KERNEL, + &nic->cbs_dma_addr); if (!nic->cbs) return -ENOMEM; - memset(nic->cbs, 0, count * sizeof(struct cb)); for (cb = nic->cbs, i = 0; i < count; cb++, i++) { cb->next = (i + 1 < count) ? cb + 1 : nic->cbs; -- cgit v1.2.3 From e1f6198e221f472c03b88e352432a01076ec8647 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Thu, 21 Sep 2017 12:50:47 +0530 Subject: cxgb4: avoid stall while shutting down the adapter do not wait for completion while deleting the filters when the adapter is shutting down because we may not get the response as interrupts will be disabled. Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1 + drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 7 ++++++- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 4 ++++ 3 files changed, 11 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index ea72d2d2e1b4..c4e997fdff64 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -549,6 +549,7 @@ enum { /* adapter flags */ MASTER_PF = (1 << 7), FW_OFLD_CONN = (1 << 9), ROOT_NO_RELAXED_ORDERING = (1 << 10), + SHUTTING_DOWN = (1 << 11), }; enum { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 45b5853ca2f1..97ead2c66751 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -191,7 +191,8 @@ static int del_filter_wr(struct adapter *adapter, int fidx) return -ENOMEM; fwr = __skb_put(skb, len); - t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id); + t4_mk_filtdelwr(f->tid, fwr, (adapter->flags & SHUTTING_DOWN) ? -1 + : adapter->sge.fw_evtq.abs_id); /* Mark the filter as "pending" and ship off the Filter Work Request. * When we get the Work Request Reply we'll clear the pending status. @@ -636,6 +637,10 @@ int cxgb4_del_filter(struct net_device *dev, int filter_id) struct filter_ctx ctx; int ret; + /* If we are shutting down the adapter do not wait for completion */ + if (netdev2adap(dev)->flags & SHUTTING_DOWN) + return __cxgb4_del_filter(dev, filter_id, NULL); + init_completion(&ctx.completion); ret = __cxgb4_del_filter(dev, filter_id, &ctx); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 92d9d795d874..5fe81a4e26a6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -5254,6 +5254,8 @@ static void remove_one(struct pci_dev *pdev) return; } + adapter->flags |= SHUTTING_DOWN; + if (adapter->pf == 4) { int i; @@ -5339,6 +5341,8 @@ static void shutdown_one(struct pci_dev *pdev) return; } + adapter->flags |= SHUTTING_DOWN; + if (adapter->pf == 4) { int i; -- cgit v1.2.3 From 8447779637172809060e5064afdf52f16a09aa13 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Thu, 21 Sep 2017 18:32:58 +0530 Subject: net: ti: netcp: use setup_timer Use setup_timer function instead of initializing timer with the function and data fields. Signed-off-by: Allen Pais Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/netcp_ethss.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 28cb38af1a34..4ad821655e51 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -3616,9 +3616,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, } spin_unlock_bh(&gbe_dev->hw_stats_lock); - init_timer(&gbe_dev->timer); - gbe_dev->timer.data = (unsigned long)gbe_dev; - gbe_dev->timer.function = netcp_ethss_timer; + setup_timer(&gbe_dev->timer, netcp_ethss_timer, + (unsigned long)gbe_dev); gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL; add_timer(&gbe_dev->timer); *inst_priv = gbe_dev; -- cgit v1.2.3 From 6a345b3dbd1ed83a7877993c6e23c977a84bb483 Mon Sep 17 00:00:00 2001 From: Kumar Sanghvi Date: Thu, 21 Sep 2017 23:41:13 +0530 Subject: cxgb4: add tc flower offload skeleton Add basic skeleton to prepare for offloading tc-flower flows. Signed-off-by: Kumar Sanghvi Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/Makefile | 4 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 22 +++++++++ .../net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 57 ++++++++++++++++++++++ .../net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h | 46 +++++++++++++++++ 4 files changed, 128 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c create mode 100644 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile index 817212702f0a..fecd7aab673b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/Makefile +++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile @@ -4,7 +4,9 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o -cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o cxgb4_ptp.o +cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \ + cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \ + cxgb4_ptp.o cxgb4_tc_flower.o cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 5fe81a4e26a6..5079246aaf2c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -79,6 +79,7 @@ #include "l2t.h" #include "sched.h" #include "cxgb4_tc_u32.h" +#include "cxgb4_tc_flower.h" #include "cxgb4_ptp.h" char cxgb4_driver_name[] = KBUILD_MODNAME; @@ -2873,6 +2874,25 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) return err; } +static int cxgb_setup_tc_flower(struct net_device *dev, + struct tc_cls_flower_offload *cls_flower) +{ + if (!is_classid_clsact_ingress(cls_flower->common.classid) || + cls_flower->common.chain_index) + return -EOPNOTSUPP; + + switch (cls_flower->command) { + case TC_CLSFLOWER_REPLACE: + return cxgb4_tc_flower_replace(dev, cls_flower); + case TC_CLSFLOWER_DESTROY: + return cxgb4_tc_flower_destroy(dev, cls_flower); + case TC_CLSFLOWER_STATS: + return cxgb4_tc_flower_stats(dev, cls_flower); + default: + return -EOPNOTSUPP; + } +} + static int cxgb_setup_tc_cls_u32(struct net_device 
*dev, struct tc_cls_u32_offload *cls_u32) { @@ -2907,6 +2927,8 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, switch (type) { case TC_SETUP_CLSU32: return cxgb_setup_tc_cls_u32(dev, type_data); + case TC_SETUP_CLSFLOWER: + return cxgb_setup_tc_flower(dev, type_data); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c new file mode 100644 index 000000000000..16dff71e4d02 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -0,0 +1,57 @@ +/* + * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux. + * + * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include + +#include "cxgb4.h" +#include "cxgb4_tc_flower.h" + +int cxgb4_tc_flower_replace(struct net_device *dev, + struct tc_cls_flower_offload *cls) +{ + return -EOPNOTSUPP; +} + +int cxgb4_tc_flower_destroy(struct net_device *dev, + struct tc_cls_flower_offload *cls) +{ + return -EOPNOTSUPP; +} + +int cxgb4_tc_flower_stats(struct net_device *dev, + struct tc_cls_flower_offload *cls) +{ + return -EOPNOTSUPP; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h new file mode 100644 index 000000000000..b321fc205b5a --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h @@ -0,0 +1,46 @@ +/* + * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux. + * + * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __CXGB4_TC_FLOWER_H +#define __CXGB4_TC_FLOWER_H + +#include + +int cxgb4_tc_flower_replace(struct net_device *dev, + struct tc_cls_flower_offload *cls); +int cxgb4_tc_flower_destroy(struct net_device *dev, + struct tc_cls_flower_offload *cls); +int cxgb4_tc_flower_stats(struct net_device *dev, + struct tc_cls_flower_offload *cls); +#endif /* __CXGB4_TC_FLOWER_H */ -- cgit v1.2.3 From 62488e4b53ae02d82ac000f91ec82b5cfb41d6f2 Mon Sep 17 00:00:00 2001 From: Kumar Sanghvi Date: Thu, 21 Sep 2017 23:41:14 +0530 Subject: cxgb4: add basic tc flower offload support Add support to add/remove flows for offload. Following match and action are supported for offloading a flow: Match: ether-protocol, IPv4/IPv6 addresses, L4 ports (TCP/UDP) Action: drop, redirect to another port on the device. The qualifying flows can have accompanying mask information. Signed-off-by: Kumar Sanghvi Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 3 + drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 24 ++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 + .../net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 280 ++++++++++++++++++++- .../net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h | 17 ++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 1 + 6 files changed, 325 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index c4e997fdff64..d05721b06178 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -905,6 +905,9 @@ struct adapter { /* TC u32 offload */ struct cxgb4_tc_u32_table *tc_u32; struct chcr_stats_debug chcr_stats; + + /* TC flower offload */ + DECLARE_HASHTABLE(flower_anymatch_tbl, 9); }; /* Support for "sched-class" command to allow a TX Scheduling Class to be diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 97ead2c66751..f3de9cdd4181 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -148,6 +148,30 @@ static int get_filter_steerq(struct net_device *dev, return iq; } +int cxgb4_get_free_ftid(struct net_device *dev, int family) +{ + struct adapter *adap = netdev2adap(dev); + struct tid_info *t = &adap->tids; + int ftid; + + spin_lock_bh(&t->ftid_lock); + if (family == PF_INET) { + ftid = find_first_zero_bit(t->ftid_bmap, t->nftids); + if (ftid >= t->nftids) + ftid = -1; + } else { + ftid = bitmap_find_free_region(t->ftid_bmap, t->nftids, 2); + if (ftid < 0) + goto out_unlock; + + /* this is only a lookup, keep the found region unallocated */ + bitmap_release_region(t->ftid_bmap, ftid, 2); + 
} +out_unlock: + spin_unlock_bh(&t->ftid_lock); + return ftid; +} + static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family) { spin_lock_bh(&t->ftid_lock); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 5079246aaf2c..ce33c3addc2b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -5105,6 +5105,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (!adapter->tc_u32) dev_warn(&pdev->dev, "could not offload tc u32, continuing\n"); + + cxgb4_init_tc_flower(adapter); } if (is_offload(adapter)) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 16dff71e4d02..dda34d5a52fb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -38,16 +38,287 @@ #include "cxgb4.h" #include "cxgb4_tc_flower.h" +static struct ch_tc_flower_entry *allocate_flower_entry(void) +{ + struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL); + return new; +} + +/* Must be called with either RTNL or rcu_read_lock */ +static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap, + unsigned long flower_cookie) +{ + struct ch_tc_flower_entry *flower_entry; + + hash_for_each_possible_rcu(adap->flower_anymatch_tbl, flower_entry, + link, flower_cookie) + if (flower_entry->tc_flower_cookie == flower_cookie) + return flower_entry; + return NULL; +} + +static void cxgb4_process_flow_match(struct net_device *dev, + struct tc_cls_flower_offload *cls, + struct ch_filter_specification *fs) +{ + u16 addr_type = 0; + + if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_dissector_key_control *key = + skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_CONTROL, + cls->key); + + addr_type = key->addr_type; + } + + if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *key = + skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_BASIC, + cls->key); + struct flow_dissector_key_basic *mask = + skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_BASIC, + cls->mask); + u16 ethtype_key = ntohs(key->n_proto); + u16 ethtype_mask = ntohs(mask->n_proto); + + if (ethtype_key == ETH_P_ALL) { + ethtype_key = 0; + ethtype_mask = 0; + } + + fs->val.ethtype = ethtype_key; + fs->mask.ethtype = ethtype_mask; + fs->val.proto = key->ip_proto; + fs->mask.proto = mask->ip_proto; + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_dissector_key_ipv4_addrs *key = + skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + cls->key); + struct flow_dissector_key_ipv4_addrs *mask = + skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + cls->mask); + fs->type = 0; + memcpy(&fs->val.lip[0], &key->dst, sizeof(key->dst)); + memcpy(&fs->val.fip[0], &key->src, sizeof(key->src)); + memcpy(&fs->mask.lip[0], &mask->dst, sizeof(mask->dst)); + memcpy(&fs->mask.fip[0], &mask->src, sizeof(mask->src)); + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_dissector_key_ipv6_addrs *key = + skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + cls->key); + struct flow_dissector_key_ipv6_addrs *mask = + skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + cls->mask); + + fs->type = 1; + 
memcpy(&fs->val.lip[0], key->dst.s6_addr, sizeof(key->dst)); + memcpy(&fs->val.fip[0], key->src.s6_addr, sizeof(key->src)); + memcpy(&fs->mask.lip[0], mask->dst.s6_addr, sizeof(mask->dst)); + memcpy(&fs->mask.fip[0], mask->src.s6_addr, sizeof(mask->src)); + } + + if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_dissector_key_ports *key, *mask; + + key = skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_PORTS, + cls->key); + mask = skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_PORTS, + cls->mask); + fs->val.lport = cpu_to_be16(key->dst); + fs->mask.lport = cpu_to_be16(mask->dst); + fs->val.fport = cpu_to_be16(key->src); + fs->mask.fport = cpu_to_be16(mask->src); + } + + /* Match only packets coming from the ingress port where this + * filter will be created. + */ + fs->val.iport = netdev2pinfo(dev)->port_id; + fs->mask.iport = ~0; +} + +static int cxgb4_validate_flow_match(struct net_device *dev, + struct tc_cls_flower_offload *cls) +{ + if (cls->dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_PORTS))) { + netdev_warn(dev, "Unsupported key used: 0x%x\n", + cls->dissector->used_keys); + return -EOPNOTSUPP; + } + return 0; +} + +static void cxgb4_process_flow_actions(struct net_device *in, + struct tc_cls_flower_offload *cls, + struct ch_filter_specification *fs) +{ + const struct tc_action *a; + LIST_HEAD(actions); + + tcf_exts_to_list(cls->exts, &actions); + list_for_each_entry(a, &actions, list) { + if (is_tcf_gact_shot(a)) { + fs->action = FILTER_DROP; + } else if (is_tcf_mirred_egress_redirect(a)) { + int ifindex = tcf_mirred_ifindex(a); + struct net_device *out = __dev_get_by_index(dev_net(in), + ifindex); + struct port_info *pi = netdev_priv(out); + + fs->action = FILTER_SWITCH; + fs->eport = pi->port_id; + } + } +} + +static int cxgb4_validate_flow_actions(struct net_device *dev, + struct tc_cls_flower_offload *cls) +{ + const struct tc_action *a; + LIST_HEAD(actions); + + tcf_exts_to_list(cls->exts, &actions); + list_for_each_entry(a, &actions, list) { + if (is_tcf_gact_shot(a)) { + /* Do nothing */ + } else if (is_tcf_mirred_egress_redirect(a)) { + struct adapter *adap = netdev2adap(dev); + struct net_device *n_dev; + unsigned int i, ifindex; + bool found = false; + + ifindex = tcf_mirred_ifindex(a); + for_each_port(adap, i) { + n_dev = adap->port[i]; + if (ifindex == n_dev->ifindex) { + found = true; + break; + } + } + + /* If interface doesn't belong to our hw, then + * the provided output port is not valid + */ + if (!found) { + netdev_err(dev, "%s: Out port invalid\n", + __func__); + return -EINVAL; + } + } else { + netdev_err(dev, "%s: Unsupported action\n", __func__); + return -EOPNOTSUPP; + } + } + return 0; +} + int cxgb4_tc_flower_replace(struct net_device *dev, struct tc_cls_flower_offload *cls) { - return -EOPNOTSUPP; + struct adapter *adap = netdev2adap(dev); + struct ch_tc_flower_entry *ch_flower; + struct ch_filter_specification *fs; + struct filter_ctx ctx; + int fidx; + int ret; + + if (cxgb4_validate_flow_actions(dev, cls)) + return -EOPNOTSUPP; + + if (cxgb4_validate_flow_match(dev, cls)) + return -EOPNOTSUPP; + + ch_flower = allocate_flower_entry(); + if (!ch_flower) { + netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__); + return -ENOMEM; + } + + fs = &ch_flower->fs; + fs->hitcnts = 1; + cxgb4_process_flow_actions(dev, cls, fs); + 
cxgb4_process_flow_match(dev, cls, fs); + + fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET); + if (fidx < 0) { + netdev_err(dev, "%s: No fidx for offload.\n", __func__); + ret = -ENOMEM; + goto free_entry; + } + + init_completion(&ctx.completion); + ret = __cxgb4_set_filter(dev, fidx, fs, &ctx); + if (ret) { + netdev_err(dev, "%s: filter creation err %d\n", + __func__, ret); + goto free_entry; + } + + /* Wait for reply */ + ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ); + if (!ret) { + ret = -ETIMEDOUT; + goto free_entry; + } + + ret = ctx.result; + /* Check if hw returned error for filter creation */ + if (ret) { + netdev_err(dev, "%s: filter creation err %d\n", + __func__, ret); + goto free_entry; + } + + INIT_HLIST_NODE(&ch_flower->link); + ch_flower->tc_flower_cookie = cls->cookie; + ch_flower->filter_id = ctx.tid; + hash_add_rcu(adap->flower_anymatch_tbl, &ch_flower->link, cls->cookie); + + return ret; + +free_entry: + kfree(ch_flower); + return ret; } int cxgb4_tc_flower_destroy(struct net_device *dev, struct tc_cls_flower_offload *cls) { - return -EOPNOTSUPP; + struct adapter *adap = netdev2adap(dev); + struct ch_tc_flower_entry *ch_flower; + int ret; + + ch_flower = ch_flower_lookup(adap, cls->cookie); + if (!ch_flower) + return -ENOENT; + + ret = cxgb4_del_filter(dev, ch_flower->filter_id); + if (ret) + goto err; + + hash_del_rcu(&ch_flower->link); + kfree_rcu(ch_flower, rcu); + +err: + return ret; } int cxgb4_tc_flower_stats(struct net_device *dev, @@ -55,3 +326,8 @@ int cxgb4_tc_flower_stats(struct net_device *dev, { return -EOPNOTSUPP; } + +void cxgb4_init_tc_flower(struct adapter *adap) +{ + hash_init(adap->flower_anymatch_tbl); +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h index b321fc205b5a..6145a9e056eb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h @@ -37,10 +37,27 @@ #include +struct ch_tc_flower_stats { + u64 packet_count; + u64 byte_count; + u64 last_used; +}; + +struct ch_tc_flower_entry { + struct ch_filter_specification fs; + struct ch_tc_flower_stats stats; + unsigned long tc_flower_cookie; + struct hlist_node link; + struct rcu_head rcu; + u32 filter_id; +}; + int cxgb4_tc_flower_replace(struct net_device *dev, struct tc_cls_flower_offload *cls); int cxgb4_tc_flower_destroy(struct net_device *dev, struct tc_cls_flower_offload *cls); int cxgb4_tc_flower_stats(struct net_device *dev, struct tc_cls_flower_offload *cls); + +void cxgb4_init_tc_flower(struct adapter *adap); #endif /* __CXGB4_TC_FLOWER_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 84541fce94c5..88487095d14f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -212,6 +212,7 @@ struct filter_ctx { struct ch_filter_specification; +int cxgb4_get_free_ftid(struct net_device *dev, int family); int __cxgb4_set_filter(struct net_device *dev, int filter_id, struct ch_filter_specification *fs, struct filter_ctx *ctx); -- cgit v1.2.3 From cf2885a70fc71d5f6b434b86eedfc18ad66ba6f6 Mon Sep 17 00:00:00 2001 From: Kumar Sanghvi Date: Thu, 21 Sep 2017 23:41:15 +0530 Subject: cxgb4: add support to offload action vlan Add support for offloading tc-flower flows having vlan actions: pop, push and modify. 
Signed-off-by: Kumar Sanghvi Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- .../net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 42 ++++++++++++++++++++++ 1 file changed, 42 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index dda34d5a52fb..e42d2efc9ea2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -34,6 +34,7 @@ #include #include +#include #include "cxgb4.h" #include "cxgb4_tc_flower.h" @@ -185,6 +186,27 @@ static void cxgb4_process_flow_actions(struct net_device *in, fs->action = FILTER_SWITCH; fs->eport = pi->port_id; + } else if (is_tcf_vlan(a)) { + u32 vlan_action = tcf_vlan_action(a); + u8 prio = tcf_vlan_push_prio(a); + u16 vid = tcf_vlan_push_vid(a); + u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid; + + switch (vlan_action) { + case TCA_VLAN_ACT_POP: + fs->newvlan |= VLAN_REMOVE; + break; + case TCA_VLAN_ACT_PUSH: + fs->newvlan |= VLAN_INSERT; + fs->vlan = vlan_tci; + break; + case TCA_VLAN_ACT_MODIFY: + fs->newvlan |= VLAN_REWRITE; + fs->vlan = vlan_tci; + break; + default: + break; + } } } } @@ -222,6 +244,26 @@ static int cxgb4_validate_flow_actions(struct net_device *dev, __func__); return -EINVAL; } + } else if (is_tcf_vlan(a)) { + u16 proto = be16_to_cpu(tcf_vlan_push_proto(a)); + u32 vlan_action = tcf_vlan_action(a); + + switch (vlan_action) { + case TCA_VLAN_ACT_POP: + break; + case TCA_VLAN_ACT_PUSH: + case TCA_VLAN_ACT_MODIFY: + if (proto != ETH_P_8021Q) { + netdev_err(dev, "%s: Unsupported vlan proto\n", + __func__); + return -EOPNOTSUPP; + } + break; + default: + netdev_err(dev, "%s: Unsupported vlan action\n", + __func__); + return -EOPNOTSUPP; + } } else { netdev_err(dev, "%s: Unsupported action\n", __func__); return -EOPNOTSUPP; -- cgit v1.2.3 From e0f911c81e93fc23fe1a4fb0318ff1c3b1c9027f Mon Sep 17 00:00:00 2001 From: Kumar Sanghvi Date: Thu, 21 Sep 2017 23:41:16 +0530 Subject: cxgb4: fetch stats for offloaded tc flower flows Add support to retrieve stats from hardware for offloaded tc flower flows. Also, poll for the stats of offloaded flows via timer callback. Signed-off-by: Kumar Sanghvi Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1 + drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 76 +++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 1 + .../net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 79 +++++++++++++++++++++- .../net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h | 3 + drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 2 + 6 files changed, 161 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index d05721b06178..0db3ab6ad094 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -908,6 +908,7 @@ struct adapter { /* TC flower offload */ DECLARE_HASHTABLE(flower_anymatch_tbl, 9); + struct timer_list flower_stats_timer; }; /* Support for "sched-class" command to allow a TX Scheduling Class to be diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index f3de9cdd4181..15361ca2857c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -148,6 +148,82 @@ static int get_filter_steerq(struct net_device *dev, return iq; } +static int get_filter_count(struct adapter *adapter, unsigned int fidx, + u64 *pkts, u64 *bytes) +{ + unsigned int tcb_base, tcbaddr; + unsigned int word_offset; + struct filter_entry *f; + __be64 be64_byte_count; + int ret; + + tcb_base = t4_read_reg(adapter, TP_CMM_TCB_BASE_A); + if ((fidx != (adapter->tids.nftids + adapter->tids.nsftids - 1)) && + fidx >= adapter->tids.nftids) + return -E2BIG; + + f = &adapter->tids.ftid_tab[fidx]; + if (!f->valid) + return -EINVAL; + + tcbaddr = tcb_base + f->tid * TCB_SIZE; + + spin_lock(&adapter->win0_lock); + if (is_t4(adapter->params.chip)) { + __be64 be64_count; + + /* T4 doesn't maintain byte counts in hw */ + *bytes = 0; + + /* Get pkts */ + word_offset = 4; + ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0, + tcbaddr + (word_offset * sizeof(__be32)), + sizeof(be64_count), + (__be32 *)&be64_count, + T4_MEMORY_READ); + if (ret < 0) + goto out; + *pkts = be64_to_cpu(be64_count); + } else { + __be32 be32_count; + + /* Get bytes */ + word_offset = 4; + ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0, + tcbaddr + (word_offset * sizeof(__be32)), + sizeof(be64_byte_count), + &be64_byte_count, + T4_MEMORY_READ); + if (ret < 0) + goto out; + *bytes = be64_to_cpu(be64_byte_count); + + /* Get pkts */ + word_offset = 6; + ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0, + tcbaddr + (word_offset * sizeof(__be32)), + sizeof(be32_count), + &be32_count, + T4_MEMORY_READ); + if (ret < 0) + goto out; + *pkts = (u64)be32_to_cpu(be32_count); + } + +out: + spin_unlock(&adapter->win0_lock); + return ret; +} + +int cxgb4_get_filter_counters(struct net_device *dev, unsigned int fidx, + u64 *hitcnt, u64 *bytecnt) +{ + struct adapter *adapter = netdev2adap(dev); + + return get_filter_count(adapter, fidx, hitcnt, bytecnt); +} + int cxgb4_get_free_ftid(struct net_device *dev, int family) { struct adapter *adap = netdev2adap(dev); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index ce33c3addc2b..aa93ae95d3b9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -4637,6 +4637,7 @@ static void free_some_resources(struct adapter *adapter) kvfree(adapter->l2t); t4_cleanup_sched(adapter); 
kvfree(adapter->tids.tid_tab); + cxgb4_cleanup_tc_flower(adapter); cxgb4_cleanup_tc_u32(adapter); kfree(adapter->sge.egr_map); kfree(adapter->sge.ingr_map); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index e42d2efc9ea2..a36bd66d2834 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -39,9 +39,12 @@ #include "cxgb4.h" #include "cxgb4_tc_flower.h" +#define STATS_CHECK_PERIOD (HZ / 2) + static struct ch_tc_flower_entry *allocate_flower_entry(void) { struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL); + spin_lock_init(&new->lock); return new; } @@ -363,13 +366,87 @@ err: return ret; } +void ch_flower_stats_cb(unsigned long data) +{ + struct adapter *adap = (struct adapter *)data; + struct ch_tc_flower_entry *flower_entry; + struct ch_tc_flower_stats *ofld_stats; + unsigned int i; + u64 packets; + u64 bytes; + int ret; + + rcu_read_lock(); + hash_for_each_rcu(adap->flower_anymatch_tbl, i, flower_entry, link) { + ret = cxgb4_get_filter_counters(adap->port[0], + flower_entry->filter_id, + &packets, &bytes); + if (!ret) { + spin_lock(&flower_entry->lock); + ofld_stats = &flower_entry->stats; + + if (ofld_stats->prev_packet_count != packets) { + ofld_stats->prev_packet_count = packets; + ofld_stats->last_used = jiffies; + } + spin_unlock(&flower_entry->lock); + } + } + rcu_read_unlock(); + mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD); +} + int cxgb4_tc_flower_stats(struct net_device *dev, struct tc_cls_flower_offload *cls) { - return -EOPNOTSUPP; + struct adapter *adap = netdev2adap(dev); + struct ch_tc_flower_stats *ofld_stats; + struct ch_tc_flower_entry *ch_flower; + u64 packets; + u64 bytes; + int ret; + + ch_flower = ch_flower_lookup(adap, cls->cookie); + if (!ch_flower) { + ret = -ENOENT; + goto err; + } + + ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id, + &packets, &bytes); + if (ret < 0) + goto err; + + spin_lock_bh(&ch_flower->lock); + ofld_stats = &ch_flower->stats; + if (ofld_stats->packet_count != packets) { + if (ofld_stats->prev_packet_count != packets) + ofld_stats->last_used = jiffies; + tcf_exts_stats_update(cls->exts, bytes - ofld_stats->byte_count, + packets - ofld_stats->packet_count, + ofld_stats->last_used); + + ofld_stats->packet_count = packets; + ofld_stats->byte_count = bytes; + ofld_stats->prev_packet_count = packets; + } + spin_unlock_bh(&ch_flower->lock); + return 0; + +err: + return ret; } void cxgb4_init_tc_flower(struct adapter *adap) { hash_init(adap->flower_anymatch_tbl); + setup_timer(&adap->flower_stats_timer, ch_flower_stats_cb, + (unsigned long)adap); + mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD); +} + +void cxgb4_cleanup_tc_flower(struct adapter *adap) +{ + if (adap->flower_stats_timer.function) + del_timer_sync(&adap->flower_stats_timer); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h index 6145a9e056eb..604feffc752e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h @@ -38,6 +38,7 @@ #include struct ch_tc_flower_stats { + u64 prev_packet_count; u64 packet_count; u64 byte_count; u64 last_used; @@ -49,6 +50,7 @@ struct ch_tc_flower_entry { unsigned long tc_flower_cookie; struct hlist_node link; struct rcu_head rcu; + spinlock_t lock; /* lock for stats */ u32 filter_id; }; @@ -60,4 +62,5 @@ 
int cxgb4_tc_flower_stats(struct net_device *dev, struct tc_cls_flower_offload *cls); void cxgb4_init_tc_flower(struct adapter *adap); +void cxgb4_cleanup_tc_flower(struct adapter *adap); #endif /* __CXGB4_TC_FLOWER_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 88487095d14f..52324c77a4fe 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -221,6 +221,8 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id, int cxgb4_set_filter(struct net_device *dev, int filter_id, struct ch_filter_specification *fs); int cxgb4_del_filter(struct net_device *dev, int filter_id); +int cxgb4_get_filter_counters(struct net_device *dev, unsigned int fidx, + u64 *hitcnt, u64 *bytecnt); static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue) { -- cgit v1.2.3 From 39e50d9637f9a31967ac9e956b829ee8b50a750f Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Fri, 22 Sep 2017 10:20:21 -0400 Subject: forcedeth: optimize the xmit/rx with unlikely In the xmit/rx fastpath, the function dma_map_single rarely fails. Therefore, add an unlikely() optimization to this error check conditional. Signed-off-by: Zhu Yanjun Signed-off-by: David S. Miller --- drivers/net/ethernet/nvidia/forcedeth.c | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index b605b94f4567..a235e8881af9 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -1817,8 +1817,8 @@ static int nv_alloc_rx(struct net_device *dev) skb->data, skb_tailroom(skb), DMA_FROM_DEVICE); - if (dma_mapping_error(&np->pci_dev->dev, - np->put_rx_ctx->dma)) { + if (unlikely(dma_mapping_error(&np->pci_dev->dev, + np->put_rx_ctx->dma))) { kfree_skb(skb); goto packet_dropped; } @@ -1858,8 +1858,8 @@ static int nv_alloc_rx_optimized(struct net_device *dev) skb->data, skb_tailroom(skb), DMA_FROM_DEVICE); - if (dma_mapping_error(&np->pci_dev->dev, - np->put_rx_ctx->dma)) { + if (unlikely(dma_mapping_error(&np->pci_dev->dev, + np->put_rx_ctx->dma))) { kfree_skb(skb); goto packet_dropped; } @@ -2227,8 +2227,8 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev, skb->data + offset, bcnt, DMA_TO_DEVICE); - if (dma_mapping_error(&np->pci_dev->dev, - np->put_tx_ctx->dma)) { + if (unlikely(dma_mapping_error(&np->pci_dev->dev, + np->put_tx_ctx->dma))) { /* on DMA mapping error - drop the packet */ dev_kfree_skb_any(skb); u64_stats_update_begin(&np->swstats_tx_syncp); @@ -2268,7 +2268,8 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) frag, offset, bcnt, DMA_TO_DEVICE); - if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) { + if (unlikely(dma_mapping_error(&np->pci_dev->dev, + np->put_tx_ctx->dma))) { /* Unwind the mapped fragments */ do { @@ -2377,8 +2378,8 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev, skb->data + offset, bcnt, DMA_TO_DEVICE); - if (dma_mapping_error(&np->pci_dev->dev, - np->put_tx_ctx->dma)) { + if (unlikely(dma_mapping_error(&np->pci_dev->dev, + np->put_tx_ctx->dma))) { /* on DMA mapping error - drop the packet */ dev_kfree_skb_any(skb); u64_stats_update_begin(&np->swstats_tx_syncp); @@ -2419,7 +2420,8 @@ 
static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, bcnt, DMA_TO_DEVICE); - if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) { + if (unlikely(dma_mapping_error(&np->pci_dev->dev, + np->put_tx_ctx->dma))) { /* Unwind the mapped fragments */ do { @@ -5075,8 +5077,8 @@ static int nv_loopback_test(struct net_device *dev) test_dma_addr = dma_map_single(&np->pci_dev->dev, tx_skb->data, skb_tailroom(tx_skb), DMA_FROM_DEVICE); - if (dma_mapping_error(&np->pci_dev->dev, - test_dma_addr)) { + if (unlikely(dma_mapping_error(&np->pci_dev->dev, + test_dma_addr))) { dev_kfree_skb_any(tx_skb); goto out; } -- cgit v1.2.3 From 088b8749da1e35b0dd9cb0e6500ca1c94c9bf547 Mon Sep 17 00:00:00 2001 From: Rick Farrington Date: Fri, 22 Sep 2017 17:12:43 -0700 Subject: liquidio: allow override of firmware present in flash Signed-off-by: Rick Farrington Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 68 ++++++++++++++-------- .../net/ethernet/cavium/liquidio/liquidio_image.h | 1 + .../net/ethernet/cavium/liquidio/octeon_device.c | 11 +++- .../net/ethernet/cavium/liquidio/octeon_device.h | 10 ++++ 4 files changed, 64 insertions(+), 26 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index e7f54948173f..ce08f710de0b 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -59,9 +59,9 @@ static int debug = -1; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); -static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_NIC; +static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO; module_param_string(fw_type, fw_type, sizeof(fw_type), 0444); -MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\". Use \"none\" to load firmware from flash."); +MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\"."); static u32 console_bitmask; module_param(console_bitmask, int, 0644); @@ -1115,10 +1115,10 @@ liquidio_probe(struct pci_dev *pdev, return 0; } -static bool fw_type_is_none(void) +static bool fw_type_is_auto(void) { - return strncmp(fw_type, LIO_FW_NAME_TYPE_NONE, - sizeof(LIO_FW_NAME_TYPE_NONE)) == 0; + return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO, + sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0; } /** @@ -1302,7 +1302,7 @@ static void octeon_destroy_resources(struct octeon_device *oct) * Implementation note: only soft-reset the device * if it is a CN6XXX OR the LAST CN23XX device. 
*/ - if (fw_type_is_none()) + if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED) octeon_pci_flr(oct); else if (OCTEON_CN6XXX(oct) || !refcount) oct->fn_list.soft_reset(oct); @@ -1934,7 +1934,7 @@ static int load_firmware(struct octeon_device *oct) char fw_name[LIO_MAX_FW_FILENAME_LEN]; char *tmp_fw_type; - if (fw_type[0] == '\0') + if (fw_type_is_auto()) tmp_fw_type = LIO_FW_NAME_TYPE_NIC; else tmp_fw_type = fw_type; @@ -3882,9 +3882,9 @@ octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf) static int octeon_device_init(struct octeon_device *octeon_dev) { int j, ret; - int fw_loaded = 0; char bootcmd[] = "\n"; char *dbg_enb = NULL; + enum lio_fw_state fw_state; struct octeon_device_priv *oct_priv = (struct octeon_device_priv *)octeon_dev->priv; atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE); @@ -3916,24 +3916,40 @@ static int octeon_device_init(struct octeon_device *octeon_dev) octeon_dev->app_mode = CVM_DRV_INVALID_APP; - if (OCTEON_CN23XX_PF(octeon_dev)) { - if (!cn23xx_fw_loaded(octeon_dev) && !fw_type_is_none()) { - fw_loaded = 0; - /* Do a soft reset of the Octeon device. */ - if (octeon_dev->fn_list.soft_reset(octeon_dev)) - return 1; - /* things might have changed */ - if (!cn23xx_fw_loaded(octeon_dev)) - fw_loaded = 0; - else - fw_loaded = 1; - } else { - fw_loaded = 1; - } - } else if (octeon_dev->fn_list.soft_reset(octeon_dev)) { - return 1; + /* CN23XX supports preloaded firmware if the following is true: + * + * The adapter indicates that firmware is currently running AND + * 'fw_type' is 'auto'. + * + * (default state is NEEDS_TO_BE_LOADED, override it if appropriate). + */ + if (OCTEON_CN23XX_PF(octeon_dev) && + cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) { + atomic_cmpxchg(octeon_dev->adapter_fw_state, + FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED); } + /* If loading firmware, only first device of adapter needs to do so. */ + fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state, + FW_NEEDS_TO_BE_LOADED, + FW_IS_BEING_LOADED); + + /* Here, [local variable] 'fw_state' is set to one of: + * + * FW_IS_PRELOADED: No firmware is to be loaded (see above) + * FW_NEEDS_TO_BE_LOADED: The driver's first instance will load + * firmware to the adapter. + * FW_IS_BEING_LOADED: The driver's second instance will not load + * firmware to the adapter. + */ + + /* Prior to f/w load, perform a soft reset of the Octeon device; + * if error resetting, return w/error. + */ + if (fw_state == FW_NEEDS_TO_BE_LOADED) + if (octeon_dev->fn_list.soft_reset(octeon_dev)) + return 1; + /* Initialize the dispatch mechanism used to push packets arriving on * Octeon Output queues. 
*/ @@ -4063,7 +4079,7 @@ static int octeon_device_init(struct octeon_device *octeon_dev) atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE); - if ((!OCTEON_CN23XX_PF(octeon_dev)) || !fw_loaded) { + if (fw_state == FW_NEEDS_TO_BE_LOADED) { dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n"); if (!ddr_timeout) { dev_info(&octeon_dev->pci_dev->dev, @@ -4125,6 +4141,8 @@ static int octeon_device_init(struct octeon_device *octeon_dev) dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n"); return 1; } + + atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED); } handshake[octeon_dev->octeon_id].init_ok = 1; diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_image.h b/drivers/net/ethernet/cavium/liquidio/liquidio_image.h index 78a3685f6fe0..5bf5e8791dfb 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_image.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_image.h @@ -24,6 +24,7 @@ #define LIO_FW_BASE_NAME "lio_" #define LIO_FW_NAME_SUFFIX ".bin" #define LIO_FW_NAME_TYPE_NIC "nic" +#define LIO_FW_NAME_TYPE_AUTO "auto" #define LIO_FW_NAME_TYPE_NONE "none" #define LIO_MAX_FIRMWARE_VERSION_LEN 16 diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index 29d53b1763a7..e4aa3395a578 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -541,6 +541,7 @@ static char oct_dev_app_str[CVM_DRV_APP_COUNT + 1][32] = { static struct octeon_device *octeon_device[MAX_OCTEON_DEVICES]; static atomic_t adapter_refcounts[MAX_OCTEON_DEVICES]; +static atomic_t adapter_fw_states[MAX_OCTEON_DEVICES]; static u32 octeon_device_count; /* locks device array (i.e. octeon_device[]) */ @@ -770,6 +771,10 @@ int octeon_register_device(struct octeon_device *oct, oct->adapter_refcount = &adapter_refcounts[oct->octeon_id]; atomic_set(oct->adapter_refcount, 0); + /* Like the reference count, the f/w state is shared 'per-adapter' */ + oct->adapter_fw_state = &adapter_fw_states[oct->octeon_id]; + atomic_set(oct->adapter_fw_state, FW_NEEDS_TO_BE_LOADED); + spin_lock(&octeon_devices_lock); for (idx = (int)oct->octeon_id - 1; idx >= 0; idx--) { if (!octeon_device[idx]) { @@ -780,11 +785,15 @@ int octeon_register_device(struct octeon_device *oct, atomic_inc(oct->adapter_refcount); return 1; /* here, refcount is guaranteed to be 1 */ } - /* if another device is at same bus/dev, use its refcounter */ + /* If another device is at same bus/dev, use its refcounter + * (and f/w state variable). 
+ */ if ((octeon_device[idx]->loc.bus == bus) && (octeon_device[idx]->loc.dev == dev)) { oct->adapter_refcount = octeon_device[idx]->adapter_refcount; + oct->adapter_fw_state = + octeon_device[idx]->adapter_fw_state; break; } } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index 894af199ddef..33d19c4509bc 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -50,6 +50,13 @@ enum octeon_pci_swap_mode { OCTEON_PCI_32BIT_LW_SWAP = 3 }; +enum lio_fw_state { + FW_IS_PRELOADED = 0, + FW_NEEDS_TO_BE_LOADED = 1, + FW_IS_BEING_LOADED = 2, + FW_HAS_BEEN_LOADED = 3, +}; + enum { OCTEON_CONFIG_TYPE_DEFAULT = 0, NUM_OCTEON_CONFS, @@ -557,6 +564,9 @@ struct octeon_device { } loc; atomic_t *adapter_refcount; /* reference count of adapter */ + + atomic_t *adapter_fw_state; /* per-adapter, lio_fw_state */ + bool ptp_enable; }; -- cgit v1.2.3 From b36e48209157fdd98a5589a3dd60ff3fbf51e16d Mon Sep 17 00:00:00 2001 From: Rick Farrington Date: Fri, 22 Sep 2017 17:12:47 -0700 Subject: liquidio: verify firmware version when auto-loaded from flash. Signed-off-by: Rick Farrington Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index ce08f710de0b..a3c9867c0340 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -3303,7 +3303,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) { struct lio *lio = NULL; struct net_device *netdev; - u8 mac[6], i, j; + u8 mac[6], i, j, *fw_ver; struct octeon_soft_command *sc; struct liquidio_if_cfg_context *ctx; struct liquidio_if_cfg_resp *resp; @@ -3414,6 +3414,22 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) goto setup_nic_dev_fail; } + /* Verify f/w version (in case of 'auto' loading from flash) */ + fw_ver = octeon_dev->fw_info.liquidio_firmware_version; + if (memcmp(LIQUIDIO_BASE_VERSION, + fw_ver, + strlen(LIQUIDIO_BASE_VERSION))) { + dev_err(&octeon_dev->pci_dev->dev, + "Unmatched firmware version. Expected %s.x, got %s.\n", + LIQUIDIO_BASE_VERSION, fw_ver); + goto setup_nic_dev_fail; + } else if (atomic_read(octeon_dev->adapter_fw_state) == + FW_IS_PRELOADED) { + dev_info(&octeon_dev->pci_dev->dev, + "Using auto-loaded firmware version %s.\n", + fw_ver); + } + octeon_swap_8B_data((u64 *)(&resp->cfg_info), (sizeof(struct liquidio_if_cfg_info)) >> 3); -- cgit v1.2.3 From 429cbf6bde1adff108171ad4b2387e62f851d609 Mon Sep 17 00:00:00 2001 From: Rick Farrington Date: Fri, 22 Sep 2017 17:12:51 -0700 Subject: liquidio: update module parameter fw_type to reflect firmware type loaded Signed-off-by: Rick Farrington Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index a3c9867c0340..963803bc6633 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1934,10 +1934,12 @@ static int load_firmware(struct octeon_device *oct) char fw_name[LIO_MAX_FW_FILENAME_LEN]; char *tmp_fw_type; - if (fw_type_is_auto()) + if (fw_type_is_auto()) { tmp_fw_type = LIO_FW_NAME_TYPE_NIC; - strncpy(fw_type, tmp_fw_type, sizeof(fw_type)); + } else { tmp_fw_type = fw_type; + } sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME, octeon_get_conf(oct)->card_name, tmp_fw_type, -- cgit v1.2.3
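The strncpy() added above is what makes the parameter report the resolved type: fw_type is registered with module_param_string() (see the earlier liquidio patch in this series), so writing the chosen value back into that buffer is what the file under /sys/module/<module>/parameters/ will show. A generic sketch of the pattern follows; all names here are invented for illustration, this is not the liquidio code:

#include <linux/module.h>
#include <linux/string.h>

static char fw_kind[16] = "auto";
module_param_string(fw_kind, fw_kind, sizeof(fw_kind), 0444);
MODULE_PARM_DESC(fw_kind, "Firmware flavour to load (default \"auto\")");

static void resolve_fw_kind(void)
{
	/* Decided at probe time; write the choice back so the parameter
	 * file reports the effective value rather than "auto".
	 */
	if (!strcmp(fw_kind, "auto"))
		strlcpy(fw_kind, "nic", sizeof(fw_kind));
}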
From 429cbf6bde1adff108171ad4b2387e62f851d609 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Sat, 23 Sep 2017 16:07:28 +0530 Subject: cxgb4: do DCB state reset in a couple of places Reset the driver's DCB state in a couple of places where it was missing. Signed-off-by: Casey Leedom Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 15 +++++++++++---- drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h | 1 + drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 10 ++++++++-- 3 files changed, 20 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c index 6ee2ed30626b..4e7f72b17e82 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c @@ -40,8 +40,7 @@ static inline bool cxgb4_dcb_state_synced(enum cxgb4_dcb_state state) return false; } -/* Initialize a port's Data Center Bridging state. Typically used after a - * Link Down event. +/* Initialize a port's Data Center Bridging state. */ void cxgb4_dcb_state_init(struct net_device *dev) { @@ -106,6 +105,15 @@ static void cxgb4_dcb_cleanup_apps(struct net_device *dev) } } +/* Reset a port's Data Center Bridging state. Typically used after a + * Link Down event. + */ +void cxgb4_dcb_reset(struct net_device *dev) +{ + cxgb4_dcb_cleanup_apps(dev); + cxgb4_dcb_state_init(dev); +} + /* Finite State machine for Data Center Bridging. */ void cxgb4_dcb_state_fsm(struct net_device *dev, @@ -194,8 +202,7 @@ void cxgb4_dcb_state_fsm(struct net_device *dev, * state. We need to reset back to a ground state * of incomplete. */ - cxgb4_dcb_cleanup_apps(dev); - cxgb4_dcb_state_init(dev); + cxgb4_dcb_reset(dev); dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE; dcb->supported = CXGB4_DCBX_FW_SUPPORT; linkwatch_fire_event(dev); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h index ccf24d3dc982..02040b99c78a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h @@ -131,6 +131,7 @@ struct port_dcb_info { void cxgb4_dcb_state_init(struct net_device *); void cxgb4_dcb_version_init(struct net_device *); +void cxgb4_dcb_reset(struct net_device *dev); void cxgb4_dcb_state_fsm(struct net_device *, enum cxgb4_dcb_state_input); void cxgb4_dcb_handle_fw_update(struct adapter *, const struct fw_port_cmd *); void cxgb4_dcb_set_caps(struct adapter *, const struct fw_port_cmd *); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index aa93ae95d3b9..13b636b0af5f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -281,7 +281,7 @@ void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat) else { #ifdef CONFIG_CHELSIO_T4_DCB if (cxgb4_dcb_enabled(dev)) { - cxgb4_dcb_state_init(dev); + cxgb4_dcb_reset(dev); dcb_tx_queue_prio_enable(dev, false); } #endif /* CONFIG_CHELSIO_T4_DCB */ @@ -2304,10 +2304,16 @@ static int cxgb_close(struct net_device *dev) { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; + int ret; netif_tx_stop_all_queues(dev); netif_carrier_off(dev); - return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false); + ret = t4_enable_vi(adapter, adapter->pf, pi->viid, false, false); +#ifdef CONFIG_CHELSIO_T4_DCB + cxgb4_dcb_reset(dev); + dcb_tx_queue_prio_enable(dev, false); +#endif + return ret; } int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid, -- cgit v1.2.3

From 01ccdf126ca5f9d4fe0889f65ee67afac910f19c Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Sat, 23 Sep 2017 23:03:04 +0300 Subject: neigh: make struct neigh_table::key_len unsigned int Key length can't be negative. Leave comparisons against nla_len() signed just in case a truncated attribute can sneak in there. Space savings:

add/remove: 0/0 grow/shrink: 0/7 up/down: 0/-7 (-7)
function                        old  new delta
pneigh_delete                   273  272    -1
mlx5e_rep_netevent_event       1415 1414    -1
mlx5e_create_encap_header_ipv6 1194 1193    -1
mlx5e_create_encap_header_ipv4 1071 1070    -1
cxgb4_l2t_get                  1104 1103    -1
__pneigh_lookup                  69   68    -1
__neigh_create                 2452 2451    -1

Signed-off-by: Alexey Dobriyan Signed-off-by: David S.
Miller --- drivers/net/ethernet/chelsio/cxgb4/l2t.c | 4 ++-- include/net/neighbour.h | 2 +- net/core/neighbour.c | 18 +++++++++--------- 3 files changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index f7ef8871dd0b..1817a0307d26 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -422,7 +422,7 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh, u8 lport; u16 vlan; struct l2t_entry *e; - int addr_len = neigh->tbl->key_len; + unsigned int addr_len = neigh->tbl->key_len; u32 *addr = (u32 *)neigh->primary_key; int ifidx = neigh->dev->ifindex; int hash = addr_hash(d, addr, addr_len, ifidx); @@ -536,7 +536,7 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh) struct l2t_entry *e; struct sk_buff_head *arpq = NULL; struct l2t_data *d = adap->l2t; - int addr_len = neigh->tbl->key_len; + unsigned int addr_len = neigh->tbl->key_len; u32 *addr = (u32 *) neigh->primary_key; int ifidx = neigh->dev->ifindex; int hash = addr_hash(d, addr, addr_len, ifidx); diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 9a25512e0a6e..2492000e1035 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -191,7 +191,7 @@ struct neigh_hash_table { struct neigh_table { int family; unsigned int entry_size; - int key_len; + unsigned int key_len; __be16 protocol; __u32 (*hash)(const void *pkey, const struct net_device *dev, diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 16a1a4c4eb57..6ea3a1a7f36a 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -457,7 +457,7 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net, const void *pkey) { struct neighbour *n; - int key_len = tbl->key_len; + unsigned int key_len = tbl->key_len; u32 hash_val; struct neigh_hash_table *nht; @@ -488,7 +488,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey, struct net_device *dev, bool want_ref) { u32 hash_val; - int key_len = tbl->key_len; + unsigned int key_len = tbl->key_len; int error; struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev); struct neigh_hash_table *nht; @@ -572,7 +572,7 @@ out_neigh_release: } EXPORT_SYMBOL(__neigh_create); -static u32 pneigh_hash(const void *pkey, int key_len) +static u32 pneigh_hash(const void *pkey, unsigned int key_len) { u32 hash_val = *(u32 *)(pkey + key_len - 4); hash_val ^= (hash_val >> 16); @@ -585,7 +585,7 @@ static u32 pneigh_hash(const void *pkey, int key_len) static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n, struct net *net, const void *pkey, - int key_len, + unsigned int key_len, struct net_device *dev) { while (n) { @@ -601,7 +601,7 @@ static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n, struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net, const void *pkey, struct net_device *dev) { - int key_len = tbl->key_len; + unsigned int key_len = tbl->key_len; u32 hash_val = pneigh_hash(pkey, key_len); return __pneigh_lookup_1(tbl->phash_buckets[hash_val], @@ -614,7 +614,7 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, struct net_device *dev, int creat) { struct pneigh_entry *n; - int key_len = tbl->key_len; + unsigned int key_len = tbl->key_len; u32 hash_val = pneigh_hash(pkey, key_len); read_lock_bh(&tbl->lock); @@ -659,7 +659,7 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const 
void *pkey, struct net_device *dev) { struct pneigh_entry *n, **np; - int key_len = tbl->key_len; + unsigned int key_len = tbl->key_len; u32 hash_val = pneigh_hash(pkey, key_len); write_lock_bh(&tbl->lock); @@ -1662,7 +1662,7 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, if (tbl == NULL) return -EAFNOSUPPORT; - if (nla_len(dst_attr) < tbl->key_len) + if (nla_len(dst_attr) < (int)tbl->key_len) goto out; if (ndm->ndm_flags & NTF_PROXY) { @@ -1730,7 +1730,7 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, if (tbl == NULL) return -EAFNOSUPPORT; - if (nla_len(tb[NDA_DST]) < tbl->key_len) + if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) goto out; dst = nla_data(tb[NDA_DST]); lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL; -- cgit v1.2.3 From 1f4cf93b133ba6596ae69cfd09b48aa7b47cca41 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Tue, 26 Sep 2017 11:04:23 +0200 Subject: net: ena: Remove redundant unlikely() IS_ERR() already implies unlikely(), so it can be omitted. Signed-off-by: Tobias Klauser Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_com.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 52beba8c7a39..ded29af648c9 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -315,7 +315,7 @@ static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue cmd_size_in_bytes, comp, comp_size_in_bytes); - if (unlikely(IS_ERR(comp_ctx))) + if (IS_ERR(comp_ctx)) admin_queue->running_state = false; spin_unlock_irqrestore(&admin_queue->q_lock, flags); @@ -1130,7 +1130,7 @@ int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue, comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size, comp, comp_size); - if (unlikely(IS_ERR(comp_ctx))) { + if (IS_ERR(comp_ctx)) { if (comp_ctx == ERR_PTR(-ENODEV)) pr_debug("Failed to submit command [%ld]\n", PTR_ERR(comp_ctx)); -- cgit v1.2.3 From 1fac4b2fdbccab69cb781aae68f540be94d5549e Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Tue, 26 Sep 2017 15:12:26 +0200 Subject: bnxt_en: Remove redundant unlikely() IS_ERR() already implies unlikely(), so it can be omitted. Signed-off-by: Tobias Klauser Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index c25f5b555adf..5ba49938ba55 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1491,7 +1491,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, (struct rx_tpa_end_cmp *)rxcmp, (struct rx_tpa_end_cmp_ext *)rxcmp1, event); - if (unlikely(IS_ERR(skb))) + if (IS_ERR(skb)) return -EBUSY; rc = -ENOMEM; -- cgit v1.2.3 From 92978ee801844b16180f2168ffffd05647da551a Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Tue, 26 Sep 2017 15:13:23 +0200 Subject: net/mlx5: Remove redundant unlikely() IS_ERR() already implies unlikely(), so it can be omitted. Signed-off-by: Tobias Klauser Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index 4614ddfa91eb..6a7c8b04447e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -256,7 +256,7 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev, goto drop; } mdata = mlx5e_ipsec_add_metadata(skb); - if (unlikely(IS_ERR(mdata))) { + if (IS_ERR(mdata)) { atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata); goto drop; } -- cgit v1.2.3 From 2091c227fa855776aafffad7ecd25ac0734df1a0 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Tue, 26 Sep 2017 15:14:15 +0200 Subject: ldmvsw: Remove redundant unlikely() IS_ERR() already implies unlikely(), so it can be omitted. Signed-off-by: Tobias Klauser Signed-off-by: David S. Miller --- drivers/net/ethernet/sun/ldmvsw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index 5b56c24b6ed2..5feeaa9f0a9e 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -307,7 +307,7 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) /* Get (or create) the vnet associated with this port */ vp = vsw_get_vnet(hp, vdev->mp, &handle); - if (unlikely(IS_ERR(vp))) { + if (IS_ERR(vp)) { err = PTR_ERR(vp); pr_err("Failed to get vnet for vsw-port\n"); mdesc_release(hp); -- cgit v1.2.3 From b92af5a72ca982f0aa3df22f57a178aa5b0f4357 Mon Sep 17 00:00:00 2001 From: Matan Barak Date: Thu, 10 Aug 2017 15:29:12 +0300 Subject: net/mlx5: Fix creating a new FTE when an existing but full FTE exists Currently, when a flow steering rule is added, we look for an FTE with an identical value. If we find a match, we try to merge the required destinations with the existing ones. In a case where the existing destination list is full, the code should return an error to its consumer. However, the current code just tries to create another FTE. Fix that by returning an error in this special scenario.
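In ERR_PTR() terms, the contract this fix establishes looks roughly like the sketch below (condensed from the hunks that follow; the surrounding driver context is omitted): try_add_to_existing_fg() may now surface any error, and only -ENOENT, meaning no matching group or FTE was found at all, lets the caller fall through to creating a new autogroup.

        rule = try_add_to_existing_fg(ft, spec, flow_act, dest, dest_num);
        if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOENT)
                goto unlock;    /* success, or a real error such as a full destination list */

        /* only -ENOENT means "nothing matched", so build a new group and FTE */
        g = create_autogroup(ft, spec->match_criteria_enable,
                             spec->match_criteria);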
Fixes: f478be79a22e ("net/mlx5: Add hash table for flow groups in flow table") Signed-off-by: Matan Barak Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 5a7bea688ec8..6ffe9251bf62 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1449,7 +1449,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, int dest_num) { struct mlx5_flow_group *g; - struct mlx5_flow_handle *rule = ERR_PTR(-ENOENT); + struct mlx5_flow_handle *rule; struct rhlist_head *tmp, *list; struct match_list { struct list_head list; @@ -1513,6 +1513,8 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, unlock_ref_node(&g->node); } + rule = ERR_PTR(-ENOENT); + free_list: if (!list_empty(&match_head)) { struct match_list *match_tmp; @@ -1553,7 +1555,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft, nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT); rule = try_add_to_existing_fg(ft, spec, flow_act, dest, dest_num); - if (!IS_ERR(rule)) + if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOENT) goto unlock; g = create_autogroup(ft, spec->match_criteria_enable, -- cgit v1.2.3 From 800350a3f145010c353bd7425428c05ac5cfc26a Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Wed, 23 Aug 2017 17:50:03 +0300 Subject: net/mlx5: Avoid NULL pointer dereference on steering cleanup On cleanup, when a node is the last child of its parent, it calls tree_put_node on the parent; if the parent's reference count is decremented to 0 (e.g. when deleting the last destination of an FTE) then the parent is freed as well, and we then try to free the parent node again. Incrementing the parent reference count before cleaning its children prevents this implicit release of the parent object. Fixes: 0da2d66666d3 ('net/mlx5: Properly remove all steering objects') Signed-off-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 6ffe9251bf62..f390828d8728 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -2068,8 +2068,10 @@ static void clean_tree(struct fs_node *node) struct fs_node *iter; struct fs_node *temp; + tree_get_node(node); list_for_each_entry_safe(iter, temp, &node->children, list) clean_tree(iter); + tree_put_node(node); tree_remove_node(node); } } -- cgit v1.2.3 From 75d1d187b2ac86d1af2f1fd125ec21f104ca34b0 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Sun, 16 Jul 2017 15:18:45 +0300 Subject: net/mlx5: Move the entry index allocator to flow group When a new flow table entry is added, we search for a free index in the flow group and not in the flow table; therefore we can move the allocator from the flow table to the flow group. In downstream patches this will enable us to lock a smaller part of the steering tree.
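Condensed from the hunks that follow (a sketch, not the complete functions), the allocation scheme after the move: each flow group owns an IDA that hands out group-relative slots, and these are rebased to table-wide FTE indexes using the group's start index.

        /* allocate: the IDA slot is group-relative, the FTE index is table-wide */
        index = ida_simple_get(&fg->fte_allocator, 0, fg->max_ftes, GFP_KERNEL);
        if (index < 0)
                return ERR_PTR(index);
        index += fg->start_index;

        /* free: convert the table-wide index back to the group-relative slot */
        ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);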
Signed-off-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 18 +++++++++--------- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index f390828d8728..2a0b5560a8b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -384,7 +384,6 @@ static void del_flow_table(struct fs_node *node) err = mlx5_cmd_destroy_flow_table(dev, ft); if (err) mlx5_core_warn(dev, "flow steering can't destroy ft\n"); - ida_destroy(&ft->fte_allocator); rhltable_destroy(&ft->fgs_hash); fs_get_obj(prio, ft->node.parent); prio->num_ft--; @@ -445,7 +444,7 @@ static void destroy_fte(struct fs_fte *fte, struct mlx5_flow_group *fg) WARN_ON(ret); fte->status = 0; fs_get_obj(ft, fg->node.parent); - ida_simple_remove(&ft->fte_allocator, fte->index); + ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index); } static void del_fte(struct fs_node *node) @@ -488,6 +487,7 @@ static void del_flow_group(struct fs_node *node) ft->autogroup.num_groups--; rhashtable_destroy(&fg->ftes_hash); + ida_destroy(&fg->fte_allocator); err = rhltable_remove(&ft->fgs_hash, &fg->hash, rhash_fg); @@ -537,6 +537,7 @@ static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in) kfree(fg); return ERR_PTR(ret); } + ida_init(&fg->fte_allocator); fg->mask.match_criteria_enable = match_criteria_enable; memcpy(&fg->mask.match_criteria, match_criteria, sizeof(fg->mask.match_criteria)); @@ -575,7 +576,6 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_ft ft->flags = flags; INIT_LIST_HEAD(&ft->fwd_rules); mutex_init(&ft->lock); - ida_init(&ft->fte_allocator); return ft; } @@ -892,7 +892,6 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa destroy_ft: mlx5_cmd_destroy_flow_table(root->dev, ft); free_ft: - ida_destroy(&ft->fte_allocator); kfree(ft); unlock_root: mutex_unlock(&root->chain_lock); @@ -1003,6 +1002,7 @@ err_remove_fg: rhash_fg)); err_free_fg: rhashtable_destroy(&fg->ftes_hash); + ida_destroy(&fg->fte_allocator); kfree(fg); return ERR_PTR(err); @@ -1181,18 +1181,18 @@ static struct fs_fte *create_fte(struct mlx5_flow_group *fg, u32 *match_value, struct mlx5_flow_act *flow_act) { - struct mlx5_flow_table *ft; struct fs_fte *fte; int index; int ret; - fs_get_obj(ft, fg->node.parent); - index = ida_simple_get(&ft->fte_allocator, fg->start_index, - fg->start_index + fg->max_ftes, + index = ida_simple_get(&fg->fte_allocator, 0, + fg->max_ftes, GFP_KERNEL); if (index < 0) return ERR_PTR(index); + index += fg->start_index; + fte = alloc_fte(flow_act, match_value, index); if (IS_ERR(fte)) { ret = PTR_ERR(fte); @@ -1207,7 +1207,7 @@ static struct fs_fte *create_fte(struct mlx5_flow_group *fg, err_hash: kfree(fte); err_alloc: - ida_simple_remove(&ft->fte_allocator, index); + ida_simple_remove(&fg->fte_allocator, index - fg->start_index); return ERR_PTR(ret); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 5509a752f98e..02c969c3d333 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -119,7 +119,6 @@ struct mlx5_flow_table { /* FWD rules that point on this flow table */ struct list_head fwd_rules; u32 flags; - 
struct ida fte_allocator; struct rhltable fgs_hash; }; @@ -199,6 +198,7 @@ struct mlx5_flow_group { struct mlx5_flow_group_mask mask; u32 start_index; u32 max_ftes; + struct ida fte_allocator; u32 id; struct rhashtable ftes_hash; struct rhlist_head hash; -- cgit v1.2.3 From 46719d77d5f38b8ef04aa5a5cd91263b11d741d7 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Tue, 25 Jul 2017 19:20:49 +0300 Subject: net/mlx5: Export building of matched flow groups list Refactor the code and export the build of the matched flow groups list to separate function. Signed-off-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 100 ++++++++++++++-------- 1 file changed, 64 insertions(+), 36 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 2a0b5560a8b5..33bcaca70a69 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1441,47 +1441,87 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest, return true; } -static struct mlx5_flow_handle * -try_add_to_existing_fg(struct mlx5_flow_table *ft, - struct mlx5_flow_spec *spec, - struct mlx5_flow_act *flow_act, - struct mlx5_flow_destination *dest, - int dest_num) -{ +struct match_list { + struct list_head list; struct mlx5_flow_group *g; - struct mlx5_flow_handle *rule; +}; + +struct match_list_head { + struct list_head list; + struct match_list first; +}; + +static void free_match_list(struct match_list_head *head) +{ + if (!list_empty(&head->list)) { + struct match_list *iter, *match_tmp; + + list_del(&head->first.list); + list_for_each_entry_safe(iter, match_tmp, &head->list, + list) { + list_del(&iter->list); + kfree(iter); + } + } +} + +static int build_match_list(struct match_list_head *match_head, + struct mlx5_flow_table *ft, + struct mlx5_flow_spec *spec) +{ struct rhlist_head *tmp, *list; - struct match_list { - struct list_head list; - struct mlx5_flow_group *g; - } match_list, *iter; - LIST_HEAD(match_head); + struct mlx5_flow_group *g; + int err = 0; rcu_read_lock(); + INIT_LIST_HEAD(&match_head->list); /* Collect all fgs which has a matching match_criteria */ list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg); + /* RCU is atomic, we can't execute FW commands here */ rhl_for_each_entry_rcu(g, tmp, list, hash) { struct match_list *curr_match; - if (likely(list_empty(&match_head))) { - match_list.g = g; - list_add_tail(&match_list.list, &match_head); + if (likely(list_empty(&match_head->list))) { + match_head->first.g = g; + list_add_tail(&match_head->first.list, + &match_head->list); continue; } - curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC); + curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC); if (!curr_match) { - rcu_read_unlock(); - rule = ERR_PTR(-ENOMEM); - goto free_list; + free_match_list(match_head); + err = -ENOMEM; + goto out; } curr_match->g = g; - list_add_tail(&curr_match->list, &match_head); + list_add_tail(&curr_match->list, &match_head->list); } +out: rcu_read_unlock(); + return err; +} + +static struct mlx5_flow_handle * +try_add_to_existing_fg(struct mlx5_flow_table *ft, + struct mlx5_flow_spec *spec, + struct mlx5_flow_act *flow_act, + struct mlx5_flow_destination *dest, + int dest_num) +{ + struct mlx5_flow_group *g; + struct mlx5_flow_handle *rule; + struct match_list_head match_head; + struct match_list *iter; + int err; + + /* Collect all fgs which has a matching 
match_criteria */ + err = build_match_list(&match_head, ft, spec); + if (err) + return ERR_PTR(err); /* Try to find a fg that already contains a matching fte */ - list_for_each_entry(iter, &match_head, list) { + list_for_each_entry(iter, &match_head.list, list) { struct fs_fte *fte; g = iter->g; @@ -1500,7 +1540,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, /* No group with matching fte found. Try to add a new fte to any * matching fg. */ - list_for_each_entry(iter, &match_head, list) { + list_for_each_entry(iter, &match_head.list, list) { g = iter->g; nested_lock_ref_node(&g->node, FS_MUTEX_PARENT); @@ -1516,19 +1556,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, rule = ERR_PTR(-ENOENT); free_list: - if (!list_empty(&match_head)) { - struct match_list *match_tmp; - - /* The most common case is having one FG. Since we want to - * optimize this case, we save the first on the stack. - * Therefore, no need to free it. - */ - list_del(&list_first_entry(&match_head, typeof(*iter), list)->list); - list_for_each_entry_safe(iter, match_tmp, &match_head, list) { - list_del(&iter->list); - kfree(iter); - } - } + free_match_list(&match_head); return rule; } -- cgit v1.2.3 From 19f100fef4ad46f21cfdfb1eeeb63fc38c2e57f1 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Wed, 26 Jul 2017 16:28:26 +0300 Subject: net/mlx5: Refactor FTE and FG creation code Split the creation code into two parts: 1) Object allocation - allocate the steering node and initialize its resources. 2) The firmware command execution. Add an active flag to each node - this flag indicates whether the object exists in the hardware or not; if not, we don't free the hardware resource in the error flow. This change will give us the ability to take the write lock on the parent node (e.g. the FG for FTE creation) only for the first part.
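The resulting pattern, sketched here from the mlx5_create_flow_group() hunks that follow (locking and some error paths trimmed): software state is set up first, the firmware command runs second, and node.active records whether the hardware object really exists so that teardown only destroys what was actually created.

        /* part 1: allocate and insert the software object; node.active starts false */
        fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
                                     start_index, end_index, ft->node.children.prev);
        if (IS_ERR(fg))
                return fg;

        /* part 2: execute the firmware command and mark the object live */
        err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id);
        if (err) {
                tree_put_node(&fg->node);       /* frees only the software state */
                return ERR_PTR(err);
        }
        fg->node.active = true;

        /* on deletion, the hardware object is destroyed only if it was created */
        if (fg->node.active && mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
                mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
                               fg->id, ft->id);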
Signed-off-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 356 ++++++++++++---------- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 1 + 2 files changed, 190 insertions(+), 167 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 33bcaca70a69..41f26f440099 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -179,14 +179,14 @@ find_flow_rule(struct fs_fte *fte, struct mlx5_flow_destination *dest); static void tree_init_node(struct fs_node *node, - unsigned int refcount, void (*remove_func)(struct fs_node *)) { - atomic_set(&node->refcount, refcount); + atomic_set(&node->refcount, 1); INIT_LIST_HEAD(&node->list); INIT_LIST_HEAD(&node->children); mutex_init(&node->lock); node->remove_func = remove_func; + node->active = false; } static void tree_add_node(struct fs_node *node, struct fs_node *parent) @@ -381,9 +381,11 @@ static void del_flow_table(struct fs_node *node) fs_get_obj(ft, node); dev = get_dev(&ft->node); - err = mlx5_cmd_destroy_flow_table(dev, ft); - if (err) - mlx5_core_warn(dev, "flow steering can't destroy ft\n"); + if (node->active) { + err = mlx5_cmd_destroy_flow_table(dev, ft); + if (err) + mlx5_core_warn(dev, "flow steering can't destroy ft\n"); + } rhltable_destroy(&ft->fgs_hash); fs_get_obj(prio, ft->node.parent); prio->num_ft--; @@ -435,18 +437,6 @@ out: } } -static void destroy_fte(struct fs_fte *fte, struct mlx5_flow_group *fg) -{ - struct mlx5_flow_table *ft; - int ret; - - ret = rhashtable_remove_fast(&fg->ftes_hash, &fte->hash, rhash_fte); - WARN_ON(ret); - fte->status = 0; - fs_get_obj(ft, fg->node.parent); - ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index); -} - static void del_fte(struct fs_node *node) { struct mlx5_flow_table *ft; @@ -461,14 +451,20 @@ static void del_fte(struct fs_node *node) trace_mlx5_fs_del_fte(fte); dev = get_dev(&ft->node); - err = mlx5_cmd_delete_fte(dev, ft, - fte->index); - if (err) - mlx5_core_warn(dev, - "flow steering can't delete fte in index %d of flow group id %d\n", - fte->index, fg->id); + if (node->active) { + err = mlx5_cmd_delete_fte(dev, ft, + fte->index); + if (err) + mlx5_core_warn(dev, + "flow steering can't delete fte in index %d of flow group id %d\n", + fte->index, fg->id); + } - destroy_fte(fte, fg); + err = rhashtable_remove_fast(&fg->ftes_hash, + &fte->hash, + rhash_fte); + WARN_ON(err); + ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index); } static void del_flow_group(struct fs_node *node) @@ -492,7 +488,7 @@ static void del_flow_group(struct fs_node *node) &fg->hash, rhash_fg); WARN_ON(err); - if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id)) + if (fg->node.active && mlx5_cmd_destroy_flow_group(dev, ft, fg->id)) mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n", fg->id, ft->id); } @@ -518,14 +514,57 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_act *flow_act, return fte; } -static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in) +static struct fs_fte *alloc_insert_fte(struct mlx5_flow_group *fg, + u32 *match_value, + struct mlx5_flow_act *flow_act) +{ + struct fs_fte *fte; + int index; + int ret; + + index = ida_simple_get(&fg->fte_allocator, 0, + fg->max_ftes, + GFP_KERNEL); + if (index < 0) + return ERR_PTR(index); + + fte = alloc_fte(flow_act, match_value, index + fg->start_index); + if 
(IS_ERR(fte)) { + ret = PTR_ERR(fte); + goto err_ida_remove; + } + + ret = rhashtable_insert_fast(&fg->ftes_hash, + &fte->hash, + rhash_fte); + if (ret) + goto err_free; + + tree_init_node(&fte->node, del_fte); + tree_add_node(&fte->node, &fg->node); + list_add_tail(&fte->node.list, &fg->node.children); + + return fte; + +err_free: + kfree(fte); +err_ida_remove: + ida_simple_remove(&fg->fte_allocator, index); + return ERR_PTR(ret); +} + +static void dealloc_flow_group(struct mlx5_flow_group *fg) +{ + rhashtable_destroy(&fg->ftes_hash); + kfree(fg); +} + +static struct mlx5_flow_group *alloc_flow_group(u8 match_criteria_enable, + void *match_criteria, + int start_index, + int end_index) { struct mlx5_flow_group *fg; - void *match_criteria = MLX5_ADDR_OF(create_flow_group_in, - create_fg_in, match_criteria); - u8 match_criteria_enable = MLX5_GET(create_flow_group_in, - create_fg_in, - match_criteria_enable); int ret; fg = kzalloc(sizeof(*fg), GFP_KERNEL); @@ -536,16 +575,47 @@ static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in) if (ret) { kfree(fg); return ERR_PTR(ret); - } +} ida_init(&fg->fte_allocator); fg->mask.match_criteria_enable = match_criteria_enable; memcpy(&fg->mask.match_criteria, match_criteria, sizeof(fg->mask.match_criteria)); fg->node.type = FS_TYPE_FLOW_GROUP; - fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in, - start_flow_index); - fg->max_ftes = MLX5_GET(create_flow_group_in, create_fg_in, - end_flow_index) - fg->start_index + 1; + fg->start_index = start_index; + fg->max_ftes = end_index - start_index + 1; + + return fg; +} + +static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft, + u8 match_criteria_enable, + void *match_criteria, + int start_index, + int end_index, + struct list_head *prev) +{ + struct mlx5_flow_group *fg; + int ret; + + fg = alloc_flow_group(match_criteria_enable, match_criteria, + start_index, end_index); + if (IS_ERR(fg)) + return fg; + + /* initialize refcnt, add to parent list */ + ret = rhltable_insert(&ft->fgs_hash, + &fg->hash, + rhash_fg); + if (ret) { + dealloc_flow_group(fg); + return ERR_PTR(ret); + } + + tree_init_node(&fg->node, del_flow_group); + tree_add_node(&fg->node, &ft->node); + /* Add node to group list */ + list_add(&fg->node.list, prev); + return fg; } @@ -870,7 +940,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa goto unlock_root; } - tree_init_node(&ft->node, 1, del_flow_table); + tree_init_node(&ft->node, del_flow_table); log_table_sz = ft->max_fte ? 
ilog2(ft->max_fte) : 0; next_ft = find_next_chained_ft(fs_prio); err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type, @@ -882,6 +952,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa err = connect_flow_table(root->dev, ft, fs_prio); if (err) goto destroy_ft; + ft->node.active = true; lock_ref_node(&fs_prio->node); tree_add_node(&ft->node, &fs_prio->node); list_add_flow_table(ft, fs_prio); @@ -959,55 +1030,6 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, } EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table); -/* Flow table should be locked */ -static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table *ft, - u32 *fg_in, - struct list_head - *prev_fg, - bool is_auto_fg) -{ - struct mlx5_flow_group *fg; - struct mlx5_core_dev *dev = get_dev(&ft->node); - int err; - - if (!dev) - return ERR_PTR(-ENODEV); - - fg = alloc_flow_group(fg_in); - if (IS_ERR(fg)) - return fg; - - err = rhltable_insert(&ft->fgs_hash, &fg->hash, rhash_fg); - if (err) - goto err_free_fg; - - err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id); - if (err) - goto err_remove_fg; - - if (ft->autogroup.active) - ft->autogroup.num_groups++; - /* Add node to tree */ - tree_init_node(&fg->node, !is_auto_fg, del_flow_group); - tree_add_node(&fg->node, &ft->node); - /* Add node to group list */ - list_add(&fg->node.list, prev_fg); - - trace_mlx5_fs_add_fg(fg); - return fg; - -err_remove_fg: - WARN_ON(rhltable_remove(&ft->fgs_hash, - &fg->hash, - rhash_fg)); -err_free_fg: - rhashtable_destroy(&fg->ftes_hash); - ida_destroy(&fg->fte_allocator); - kfree(fg); - - return ERR_PTR(err); -} - struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *fg_in) { @@ -1016,7 +1038,13 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, u8 match_criteria_enable = MLX5_GET(create_flow_group_in, fg_in, match_criteria_enable); + int start_index = MLX5_GET(create_flow_group_in, fg_in, + start_flow_index); + int end_index = MLX5_GET(create_flow_group_in, fg_in, + end_flow_index); + struct mlx5_core_dev *dev = get_dev(&ft->node); struct mlx5_flow_group *fg; + int err; if (!check_valid_mask(match_criteria_enable, match_criteria)) return ERR_PTR(-EINVAL); @@ -1025,8 +1053,20 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, return ERR_PTR(-EPERM); lock_ref_node(&ft->node); - fg = create_flow_group_common(ft, fg_in, ft->node.children.prev, false); + fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria, + start_index, end_index, + ft->node.children.prev); unlock_ref_node(&ft->node); + if (IS_ERR(fg)) + return fg; + + err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id); + if (err) { + tree_put_node(&fg->node); + return ERR_PTR(err); + } + trace_mlx5_fs_add_fg(fg); + fg->node.active = true; return fg; } @@ -1111,7 +1151,7 @@ create_flow_handle(struct fs_fte *fte, /* Add dest to dests list- we need flow tables to be in the * end of the list for forward to next prio rules. 
*/ - tree_init_node(&rule->node, 1, del_rule); + tree_init_node(&rule->node, del_rule); if (dest && dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) list_add(&rule->node.list, &fte->node.children); @@ -1167,6 +1207,7 @@ add_rule_fte(struct fs_fte *fte, if (err) goto free_handle; + fte->node.active = true; fte->status |= FS_FTE_STATUS_EXISTING; out: @@ -1177,59 +1218,17 @@ free_handle: return ERR_PTR(err); } -static struct fs_fte *create_fte(struct mlx5_flow_group *fg, - u32 *match_value, - struct mlx5_flow_act *flow_act) +static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft, + struct mlx5_flow_spec *spec) { - struct fs_fte *fte; - int index; - int ret; - - index = ida_simple_get(&fg->fte_allocator, 0, - fg->max_ftes, - GFP_KERNEL); - if (index < 0) - return ERR_PTR(index); - - index += fg->start_index; - - fte = alloc_fte(flow_act, match_value, index); - if (IS_ERR(fte)) { - ret = PTR_ERR(fte); - goto err_alloc; - } - ret = rhashtable_insert_fast(&fg->ftes_hash, &fte->hash, rhash_fte); - if (ret) - goto err_hash; - - return fte; - -err_hash: - kfree(fte); -err_alloc: - ida_simple_remove(&fg->fte_allocator, index - fg->start_index); - return ERR_PTR(ret); -} - -static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft, - u8 match_criteria_enable, - u32 *match_criteria) -{ - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct list_head *prev = &ft->node.children; - unsigned int candidate_index = 0; struct mlx5_flow_group *fg; - void *match_criteria_addr; + unsigned int candidate_index = 0; unsigned int group_size = 0; - u32 *in; if (!ft->autogroup.active) return ERR_PTR(-ENOENT); - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) - return ERR_PTR(-ENOMEM); - if (ft->autogroup.num_groups < ft->autogroup.required_groups) /* We save place for flow groups in addition to max types */ group_size = ft->max_fte / (ft->autogroup.required_groups + 1); @@ -1247,25 +1246,55 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft, prev = &fg->node.list; } - if (candidate_index + group_size > ft->max_fte) { - fg = ERR_PTR(-ENOSPC); + if (candidate_index + group_size > ft->max_fte) + return ERR_PTR(-ENOSPC); + + fg = alloc_insert_flow_group(ft, + spec->match_criteria_enable, + spec->match_criteria, + candidate_index, + candidate_index + group_size - 1, + prev); + if (IS_ERR(fg)) goto out; - } + + ft->autogroup.num_groups++; + +out: + return fg; +} + +static int create_auto_flow_group(struct mlx5_flow_table *ft, + struct mlx5_flow_group *fg) +{ + struct mlx5_core_dev *dev = get_dev(&ft->node); + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + void *match_criteria_addr; + int err; + u32 *in; + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; MLX5_SET(create_flow_group_in, in, match_criteria_enable, - match_criteria_enable); - MLX5_SET(create_flow_group_in, in, start_flow_index, candidate_index); - MLX5_SET(create_flow_group_in, in, end_flow_index, candidate_index + - group_size - 1); + fg->mask.match_criteria_enable); + MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index); + MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index + + fg->max_ftes - 1); match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); - memcpy(match_criteria_addr, match_criteria, - MLX5_ST_SZ_BYTES(fte_match_param)); + memcpy(match_criteria_addr, fg->mask.match_criteria, + sizeof(fg->mask.match_criteria)); + + err = mlx5_cmd_create_flow_group(dev, ft, in, &fg->id); + if (!err) { + 
fg->node.active = true; + trace_mlx5_fs_add_fg(fg); + } - fg = create_flow_group_common(ft, in, prev, true); -out: kvfree(in); - return fg; + return err; } static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, @@ -1368,23 +1397,17 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg, } fs_get_obj(ft, fg->node.parent); - fte = create_fte(fg, match_value, flow_act); + fte = alloc_insert_fte(fg, match_value, flow_act); if (IS_ERR(fte)) return (void *)fte; - tree_init_node(&fte->node, 0, del_fte); nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD); handle = add_rule_fte(fte, fg, dest, dest_num, false); if (IS_ERR(handle)) { unlock_ref_node(&fte->node); - destroy_fte(fte, fg); - kfree(fte); + tree_put_node(&fte->node); return handle; } - tree_add_node(&fte->node, &fg->node); - /* fte list isn't sorted */ - list_add_tail(&fte->node.list, &fg->node.children); - trace_mlx5_fs_set_fte(fte, true); add_rules: for (i = 0; i < handle->num_rules; i++) { if (atomic_read(&handle->rule[i]->node.refcount) == 1) { @@ -1571,6 +1594,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft, { struct mlx5_flow_group *g; struct mlx5_flow_handle *rule; + int err; int i; if (!check_valid_spec(spec)) @@ -1586,24 +1610,22 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft, if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOENT) goto unlock; - g = create_autogroup(ft, spec->match_criteria_enable, - spec->match_criteria); + g = alloc_auto_flow_group(ft, spec); if (IS_ERR(g)) { rule = (void *)g; goto unlock; } + err = create_auto_flow_group(ft, g); + if (err) { + rule = ERR_PTR(err); + goto put_fg; + } + rule = add_rule_fg(g, spec->match_value, flow_act, dest, dest_num, NULL); - if (IS_ERR(rule)) { - /* Remove assumes refcount > 0 and autogroup creates a group - * with a refcount = 0. 
- */ - unlock_ref_node(&ft->node); - tree_get_node(&g->node); - tree_remove_node(&g->node); - return rule; - } +put_fg: + tree_put_node(&g->node); unlock: unlock_ref_node(&ft->node); return rule; @@ -1847,7 +1869,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns, return ERR_PTR(-ENOMEM); fs_prio->node.type = FS_TYPE_PRIO; - tree_init_node(&fs_prio->node, 1, NULL); + tree_init_node(&fs_prio->node, NULL); tree_add_node(&fs_prio->node, &ns->node); fs_prio->num_levels = num_levels; fs_prio->prio = prio; @@ -1873,7 +1895,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio) return ERR_PTR(-ENOMEM); fs_init_namespace(ns); - tree_init_node(&ns->node, 1, NULL); + tree_init_node(&ns->node, NULL); tree_add_node(&ns->node, &prio->node); list_add_tail(&ns->node.list, &prio->node.children); @@ -1998,7 +2020,7 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering ns = &root_ns->ns; fs_init_namespace(ns); mutex_init(&root_ns->chain_lock); - tree_init_node(&ns->node, 1, NULL); + tree_init_node(&ns->node, NULL); tree_add_node(&ns->node, NULL); return root_ns; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 02c969c3d333..6e5d25b4f8de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -82,6 +82,7 @@ struct fs_node { /* lock the node for writing and traversing */ struct mutex lock; atomic_t refcount; + bool active; void (*remove_func)(struct fs_node *); }; -- cgit v1.2.3 From c7784b1c8ab3f44dc2e643a8feb77584792c9108 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Sun, 27 Aug 2017 09:07:44 +0300 Subject: net/mlx5: Replace fs_node mutex with reader/writer semaphore Currently, the steering object is protected by a mutex lock; replace the mutex lock with a reader/writer semaphore. In this patch we still use only the write semaphore. In downstream patches we will switch part of the write locks to read locks.
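The shape of the conversion, condensed from the hunks that follow: the per-node lock changes type, and the lock helpers switch from mutex_lock_nested() to down_write_nested() while keeping the refcount handling, still write-only at this stage.

        struct fs_node {
                ...
                struct rw_semaphore lock;       /* was: struct mutex lock */
                atomic_t refcount;
        };

        static void nested_lock_ref_node(struct fs_node *node,
                                         enum fs_i_lock_class class)
        {
                if (node) {
                        down_write_nested(&node->lock, class);  /* was mutex_lock_nested() */
                        atomic_inc(&node->refcount);
                }
        }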
Signed-off-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 28 +++++++++++------------ drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 2 +- 2 files changed, 15 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 41f26f440099..9406e7272807 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -145,10 +145,10 @@ static struct init_tree_node { } }; -enum fs_i_mutex_lock_class { - FS_MUTEX_GRANDPARENT, - FS_MUTEX_PARENT, - FS_MUTEX_CHILD +enum fs_i_lock_class { + FS_LOCK_GRANDPARENT, + FS_LOCK_PARENT, + FS_LOCK_CHILD }; static const struct rhashtable_params rhash_fte = { @@ -184,7 +184,7 @@ static void tree_init_node(struct fs_node *node, atomic_set(&node->refcount, 1); INIT_LIST_HEAD(&node->list); INIT_LIST_HEAD(&node->children); - mutex_init(&node->lock); + init_rwsem(&node->lock); node->remove_func = remove_func; node->active = false; } @@ -208,10 +208,10 @@ static void tree_get_node(struct fs_node *node) } static void nested_lock_ref_node(struct fs_node *node, - enum fs_i_mutex_lock_class class) + enum fs_i_lock_class class) { if (node) { - mutex_lock_nested(&node->lock, class); + down_write_nested(&node->lock, class); atomic_inc(&node->refcount); } } @@ -219,7 +219,7 @@ static void nested_lock_ref_node(struct fs_node *node, static void lock_ref_node(struct fs_node *node) { if (node) { - mutex_lock(&node->lock); + down_write(&node->lock); atomic_inc(&node->refcount); } } @@ -228,7 +228,7 @@ static void unlock_ref_node(struct fs_node *node) { if (node) { atomic_dec(&node->refcount); - mutex_unlock(&node->lock); + up_write(&node->lock); } } @@ -1376,7 +1376,7 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg, int old_action; int ret; - nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD); + nested_lock_ref_node(&fte->node, FS_LOCK_CHILD); ret = check_conflicting_ftes(fte, flow_act); if (ret) { handle = ERR_PTR(ret); @@ -1400,7 +1400,7 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg, fte = alloc_insert_fte(fg, match_value, flow_act); if (IS_ERR(fte)) return (void *)fte; - nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD); + nested_lock_ref_node(&fte->node, FS_LOCK_CHILD); handle = add_rule_fte(fte, fg, dest, dest_num, false); if (IS_ERR(handle)) { unlock_ref_node(&fte->node); @@ -1548,7 +1548,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, struct fs_fte *fte; g = iter->g; - nested_lock_ref_node(&g->node, FS_MUTEX_PARENT); + nested_lock_ref_node(&g->node, FS_LOCK_PARENT); fte = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value, rhash_fte); if (fte) { @@ -1566,7 +1566,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, list_for_each_entry(iter, &match_head.list, list) { g = iter->g; - nested_lock_ref_node(&g->node, FS_MUTEX_PARENT); + nested_lock_ref_node(&g->node, FS_LOCK_PARENT); rule = add_rule_fg(g, spec->match_value, flow_act, dest, dest_num, NULL); if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC) { @@ -1605,7 +1605,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft, return ERR_PTR(-EINVAL); } - nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT); + nested_lock_ref_node(&ft->node, FS_LOCK_GRANDPARENT); rule = try_add_to_existing_fg(ft, spec, flow_act, dest, dest_num); if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOENT) goto unlock; diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 6e5d25b4f8de..b5c079f35051 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -80,7 +80,7 @@ struct fs_node { struct fs_node *parent; struct fs_node *root; /* lock the node for writing and traversing */ - struct mutex lock; + struct rw_semaphore lock; atomic_t refcount; bool active; void (*remove_func)(struct fs_node *); -- cgit v1.2.3 From bd71b08ec2ee4504bcc3b37a9283ce15e93dfacd Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Sun, 27 Aug 2017 09:19:11 +0300 Subject: net/mlx5: Support multiple updates of steering rules in parallel Most of the time spent on adding a new flow steering rule goes to executing the firmware command. The most common action is adding a new flow steering entry. In order to enhance the update rate we parallelize the commands by doing the following: 1) Replace the mutex lock with a readers-writer semaphore and take the write lock only when necessary (e.g. allocating a new flow table entry index or adding a node to the parent's children list). When we try to find a suitable child in the parent's children list (e.g. searching for a flow group with the same match_criteria as the rule) we only take the read lock. 2) Add a versioning mechanism - each steering entity (FT, FG, FTE, DST) has an incremental version. The version is increased when the entity is changed (e.g. when a new FTE is added to an FG, the FG's version is increased). Versioning is used to determine whether the last traversal of an entity's children is still valid or a rescan under the write lock is required. This support improves the insertion rate of steering rules from ~5k/sec to ~40k/sec. Signed-off-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 386 +++++++++++++++------- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 4 +- 2 files changed, 264 insertions(+), 126 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 9406e7272807..e7301cf747c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -168,10 +168,16 @@ static const struct rhashtable_params rhash_fg = { }; -static void del_rule(struct fs_node *node); -static void del_flow_table(struct fs_node *node); -static void del_flow_group(struct fs_node *node); -static void del_fte(struct fs_node *node); +static void del_hw_flow_table(struct fs_node *node); +static void del_hw_flow_group(struct fs_node *node); +static void del_hw_fte(struct fs_node *node); +static void del_sw_flow_table(struct fs_node *node); +static void del_sw_flow_group(struct fs_node *node); +static void del_sw_fte(struct fs_node *node); +/* Delete rule (destination) is special case that + * requires to lock the FTE for all the deletion process.
+ */ +static void del_sw_hw_rule(struct fs_node *node); static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, struct mlx5_flow_destination *d2); static struct mlx5_flow_rule * @@ -179,13 +185,15 @@ find_flow_rule(struct fs_fte *fte, struct mlx5_flow_destination *dest); static void tree_init_node(struct fs_node *node, - void (*remove_func)(struct fs_node *)) + void (*del_hw_func)(struct fs_node *), + void (*del_sw_func)(struct fs_node *)) { atomic_set(&node->refcount, 1); INIT_LIST_HEAD(&node->list); INIT_LIST_HEAD(&node->children); init_rwsem(&node->lock); - node->remove_func = remove_func; + node->del_hw_func = del_hw_func; + node->del_sw_func = del_sw_func; node->active = false; } @@ -202,50 +210,69 @@ static void tree_add_node(struct fs_node *node, struct fs_node *parent) node->root = parent->root; } -static void tree_get_node(struct fs_node *node) +static int tree_get_node(struct fs_node *node) { - atomic_inc(&node->refcount); + return atomic_add_unless(&node->refcount, 1, 0); } -static void nested_lock_ref_node(struct fs_node *node, - enum fs_i_lock_class class) +static void nested_down_read_ref_node(struct fs_node *node, + enum fs_i_lock_class class) { if (node) { - down_write_nested(&node->lock, class); + down_read_nested(&node->lock, class); atomic_inc(&node->refcount); } } -static void lock_ref_node(struct fs_node *node) +static void nested_down_write_ref_node(struct fs_node *node, + enum fs_i_lock_class class) { if (node) { - down_write(&node->lock); + down_write_nested(&node->lock, class); atomic_inc(&node->refcount); } } -static void unlock_ref_node(struct fs_node *node) +static void down_write_ref_node(struct fs_node *node) { if (node) { - atomic_dec(&node->refcount); - up_write(&node->lock); + down_write(&node->lock); + atomic_inc(&node->refcount); } } +static void up_read_ref_node(struct fs_node *node) +{ + atomic_dec(&node->refcount); + up_read(&node->lock); +} + +static void up_write_ref_node(struct fs_node *node) +{ + atomic_dec(&node->refcount); + up_write(&node->lock); +} + static void tree_put_node(struct fs_node *node) { struct fs_node *parent_node = node->parent; - lock_ref_node(parent_node); if (atomic_dec_and_test(&node->refcount)) { - if (parent_node) + if (node->del_hw_func) + node->del_hw_func(node); + if (parent_node) { + /* Only root namespace doesn't have parent and we just + * need to free its node. 
+ */ + down_write_ref_node(parent_node); list_del_init(&node->list); - if (node->remove_func) - node->remove_func(node); + if (node->del_sw_func) + node->del_sw_func(node); + up_write_ref_node(parent_node); + } kfree(node); node = NULL; } - unlock_ref_node(parent_node); if (!node && parent_node) tree_put_node(parent_node); } @@ -371,11 +398,10 @@ static inline struct mlx5_core_dev *get_dev(struct fs_node *node) return NULL; } -static void del_flow_table(struct fs_node *node) +static void del_hw_flow_table(struct fs_node *node) { struct mlx5_flow_table *ft; struct mlx5_core_dev *dev; - struct fs_prio *prio; int err; fs_get_obj(ft, node); @@ -386,12 +412,21 @@ static void del_flow_table(struct fs_node *node) if (err) mlx5_core_warn(dev, "flow steering can't destroy ft\n"); } +} + +static void del_sw_flow_table(struct fs_node *node) +{ + struct mlx5_flow_table *ft; + struct fs_prio *prio; + + fs_get_obj(ft, node); + rhltable_destroy(&ft->fgs_hash); fs_get_obj(prio, ft->node.parent); prio->num_ft--; } -static void del_rule(struct fs_node *node) +static void del_sw_hw_rule(struct fs_node *node) { struct mlx5_flow_rule *rule; struct mlx5_flow_table *ft; @@ -407,7 +442,6 @@ static void del_rule(struct fs_node *node) fs_get_obj(fg, fte->node.parent); fs_get_obj(ft, fg->node.parent); trace_mlx5_fs_del_rule(rule); - list_del(&rule->node.list); if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { mutex_lock(&rule->dest_attr.ft->lock); list_del(&rule->next_ft); @@ -437,7 +471,7 @@ out: } } -static void del_fte(struct fs_node *node) +static void del_hw_fte(struct fs_node *node) { struct mlx5_flow_table *ft; struct mlx5_flow_group *fg; @@ -448,8 +482,8 @@ static void del_fte(struct fs_node *node) fs_get_obj(fte, node); fs_get_obj(fg, fte->node.parent); fs_get_obj(ft, fg->node.parent); - trace_mlx5_fs_del_fte(fte); + trace_mlx5_fs_del_fte(fte); dev = get_dev(&ft->node); if (node->active) { err = mlx5_cmd_delete_fte(dev, ft, @@ -459,6 +493,16 @@ static void del_fte(struct fs_node *node) "flow steering can't delete fte in index %d of flow group id %d\n", fte->index, fg->id); } +} + +static void del_sw_fte(struct fs_node *node) +{ + struct mlx5_flow_group *fg; + struct fs_fte *fte; + int err; + + fs_get_obj(fte, node); + fs_get_obj(fg, fte->node.parent); err = rhashtable_remove_fast(&fg->ftes_hash, &fte->hash, @@ -467,30 +511,39 @@ static void del_fte(struct fs_node *node) ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index); } -static void del_flow_group(struct fs_node *node) +static void del_hw_flow_group(struct fs_node *node) { struct mlx5_flow_group *fg; struct mlx5_flow_table *ft; struct mlx5_core_dev *dev; - int err; fs_get_obj(fg, node); fs_get_obj(ft, fg->node.parent); dev = get_dev(&ft->node); trace_mlx5_fs_del_fg(fg); - if (ft->autogroup.active) - ft->autogroup.num_groups--; + if (fg->node.active && mlx5_cmd_destroy_flow_group(dev, ft, fg->id)) + mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n", + fg->id, ft->id); +} + +static void del_sw_flow_group(struct fs_node *node) +{ + struct mlx5_flow_group *fg; + struct mlx5_flow_table *ft; + int err; + + fs_get_obj(fg, node); + fs_get_obj(ft, fg->node.parent); rhashtable_destroy(&fg->ftes_hash); ida_destroy(&fg->fte_allocator); + if (ft->autogroup.active) + ft->autogroup.num_groups--; err = rhltable_remove(&ft->fgs_hash, &fg->hash, rhash_fg); WARN_ON(err); - if (fg->node.active && mlx5_cmd_destroy_flow_group(dev, ft, fg->id)) - mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n", - fg->id, 
ft->id); } static struct fs_fte *alloc_fte(struct mlx5_flow_act *flow_act, @@ -540,7 +593,7 @@ static struct fs_fte *alloc_insert_fte(struct mlx5_flow_group *fg, if (ret) goto err_free; - tree_init_node(&fte->node, del_fte); + tree_init_node(&fte->node, del_hw_fte, del_sw_fte); tree_add_node(&fte->node, &fg->node); list_add_tail(&fte->node.list, &fg->node.children); @@ -611,10 +664,11 @@ static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *f return ERR_PTR(ret); } - tree_init_node(&fg->node, del_flow_group); + tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group); tree_add_node(&fg->node, &ft->node); /* Add node to group list */ list_add(&fg->node.list, prev); + atomic_inc(&ft->node.version); return fg; } @@ -794,7 +848,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, fs_get_obj(fte, rule->node.parent); if (!(fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) return -EINVAL; - lock_ref_node(&fte->node); + down_write_ref_node(&fte->node); fs_get_obj(fg, fte->node.parent); fs_get_obj(ft, fg->node.parent); @@ -803,7 +857,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, ft, fg->id, modify_mask, fte); - unlock_ref_node(&fte->node); + up_write_ref_node(&fte->node); return err; } @@ -940,7 +994,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa goto unlock_root; } - tree_init_node(&ft->node, del_flow_table); + tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table); log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0; next_ft = find_next_chained_ft(fs_prio); err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type, @@ -953,11 +1007,11 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa if (err) goto destroy_ft; ft->node.active = true; - lock_ref_node(&fs_prio->node); + down_write_ref_node(&fs_prio->node); tree_add_node(&ft->node, &fs_prio->node); list_add_flow_table(ft, fs_prio); fs_prio->num_ft++; - unlock_ref_node(&fs_prio->node); + up_write_ref_node(&fs_prio->node); mutex_unlock(&root->chain_lock); return ft; destroy_ft: @@ -1052,11 +1106,11 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, if (ft->autogroup.active) return ERR_PTR(-EPERM); - lock_ref_node(&ft->node); + down_write_ref_node(&ft->node); fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria, start_index, end_index, ft->node.children.prev); - unlock_ref_node(&ft->node); + up_write_ref_node(&ft->node); if (IS_ERR(fg)) return fg; @@ -1151,7 +1205,7 @@ create_flow_handle(struct fs_fte *fte, /* Add dest to dests list- we need flow tables to be in the * end of the list for forward to next prio rules. 
*/ - tree_init_node(&rule->node, del_rule); + tree_init_node(&rule->node, NULL, del_sw_hw_rule); if (dest && dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) list_add(&rule->node.list, &fte->node.children); @@ -1209,6 +1263,7 @@ add_rule_fte(struct fs_fte *fte, fte->node.active = true; fte->status |= FS_FTE_STATUS_EXISTING; + atomic_inc(&fte->node.version); out: return handle; @@ -1369,54 +1424,30 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg, struct fs_fte *fte) { struct mlx5_flow_handle *handle; - struct mlx5_flow_table *ft; + int old_action; int i; + int ret; - if (fte) { - int old_action; - int ret; - - nested_lock_ref_node(&fte->node, FS_LOCK_CHILD); - ret = check_conflicting_ftes(fte, flow_act); - if (ret) { - handle = ERR_PTR(ret); - goto unlock_fte; - } - - old_action = fte->action; - fte->action |= flow_act->action; - handle = add_rule_fte(fte, fg, dest, dest_num, - old_action != flow_act->action); - if (IS_ERR(handle)) { - fte->action = old_action; - goto unlock_fte; - } else { - trace_mlx5_fs_set_fte(fte, false); - goto add_rules; - } - } - fs_get_obj(ft, fg->node.parent); + ret = check_conflicting_ftes(fte, flow_act); + if (ret) + return ERR_PTR(ret); - fte = alloc_insert_fte(fg, match_value, flow_act); - if (IS_ERR(fte)) - return (void *)fte; - nested_lock_ref_node(&fte->node, FS_LOCK_CHILD); - handle = add_rule_fte(fte, fg, dest, dest_num, false); + old_action = fte->action; + fte->action |= flow_act->action; + handle = add_rule_fte(fte, fg, dest, dest_num, + old_action != flow_act->action); if (IS_ERR(handle)) { - unlock_ref_node(&fte->node); - tree_put_node(&fte->node); + fte->action = old_action; return handle; } + trace_mlx5_fs_set_fte(fte, false); -add_rules: for (i = 0; i < handle->num_rules; i++) { if (atomic_read(&handle->rule[i]->node.refcount) == 1) { tree_add_node(&handle->rule[i]->node, &fte->node); trace_mlx5_fs_add_rule(handle->rule[i]); } } -unlock_fte: - unlock_ref_node(&fte->node); return handle; } @@ -1480,8 +1511,10 @@ static void free_match_list(struct match_list_head *head) struct match_list *iter, *match_tmp; list_del(&head->first.list); + tree_put_node(&head->first.g->node); list_for_each_entry_safe(iter, match_tmp, &head->list, list) { + tree_put_node(&iter->g->node); list_del(&iter->list); kfree(iter); } @@ -1505,6 +1538,8 @@ static int build_match_list(struct match_list_head *match_head, struct match_list *curr_match; if (likely(list_empty(&match_head->list))) { + if (!tree_get_node(&g->node)) + continue; match_head->first.g = g; list_add_tail(&match_head->first.list, &match_head->list); @@ -1517,6 +1552,10 @@ static int build_match_list(struct match_list_head *match_head, err = -ENOMEM; goto out; } + if (!tree_get_node(&g->node)) { + kfree(curr_match); + continue; + } curr_match->g = g; list_add_tail(&curr_match->list, &match_head->list); } @@ -1525,62 +1564,119 @@ out: return err; } +static u64 matched_fgs_get_version(struct list_head *match_head) +{ + struct match_list *iter; + u64 version = 0; + + list_for_each_entry(iter, match_head, list) + version += (u64)atomic_read(&iter->g->node.version); + return version; +} + static struct mlx5_flow_handle * try_add_to_existing_fg(struct mlx5_flow_table *ft, + struct list_head *match_head, struct mlx5_flow_spec *spec, struct mlx5_flow_act *flow_act, struct mlx5_flow_destination *dest, - int dest_num) + int dest_num, + int ft_version) { struct mlx5_flow_group *g; struct mlx5_flow_handle *rule; - struct match_list_head match_head; struct match_list *iter; - int err; + bool 
take_write = false; + struct fs_fte *fte; + u64 version; - /* Collect all fgs which has a matching match_criteria */ - err = build_match_list(&match_head, ft, spec); - if (err) - return ERR_PTR(err); + list_for_each_entry(iter, match_head, list) { + nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT); + ida_pre_get(&iter->g->fte_allocator, GFP_KERNEL); + } +search_again_locked: + version = matched_fgs_get_version(match_head); /* Try to find a fg that already contains a matching fte */ - list_for_each_entry(iter, &match_head.list, list) { - struct fs_fte *fte; + list_for_each_entry(iter, match_head, list) { + struct fs_fte *fte_tmp; g = iter->g; - nested_lock_ref_node(&g->node, FS_LOCK_PARENT); - fte = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value, - rhash_fte); - if (fte) { - rule = add_rule_fg(g, spec->match_value, - flow_act, dest, dest_num, fte); - unlock_ref_node(&g->node); - goto free_list; + fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value, + rhash_fte); + if (!fte_tmp || !tree_get_node(&fte_tmp->node)) + continue; + + nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); + if (!take_write) { + list_for_each_entry(iter, match_head, list) + up_read_ref_node(&iter->g->node); + } else { + list_for_each_entry(iter, match_head, list) + up_write_ref_node(&iter->g->node); } - unlock_ref_node(&g->node); + + rule = add_rule_fg(g, spec->match_value, + flow_act, dest, dest_num, fte_tmp); + up_write_ref_node(&fte_tmp->node); + tree_put_node(&fte_tmp->node); + return rule; } /* No group with matching fte found. Try to add a new fte to any * matching fg. */ - list_for_each_entry(iter, &match_head.list, list) { - g = iter->g; - nested_lock_ref_node(&g->node, FS_LOCK_PARENT); - rule = add_rule_fg(g, spec->match_value, - flow_act, dest, dest_num, NULL); - if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC) { - unlock_ref_node(&g->node); - goto free_list; - } - unlock_ref_node(&g->node); + if (!take_write) { + list_for_each_entry(iter, match_head, list) + up_read_ref_node(&iter->g->node); + list_for_each_entry(iter, match_head, list) + nested_down_write_ref_node(&iter->g->node, + FS_LOCK_PARENT); + take_write = true; } - rule = ERR_PTR(-ENOENT); + /* Check the ft version, for case that new flow group + * was added while the fgs weren't locked + */ + if (atomic_read(&ft->node.version) != ft_version) { + rule = ERR_PTR(-EAGAIN); + goto out; + } -free_list: - free_match_list(&match_head); + /* Check the fgs version, for case the new FTE with the + * same values was added while the fgs weren't locked + */ + if (version != matched_fgs_get_version(match_head)) + goto search_again_locked; + + list_for_each_entry(iter, match_head, list) { + g = iter->g; + + if (!g->node.active) + continue; + fte = alloc_insert_fte(g, spec->match_value, flow_act); + if (IS_ERR(fte)) { + if (PTR_ERR(fte) == -ENOSPC) + continue; + list_for_each_entry(iter, match_head, list) + up_write_ref_node(&iter->g->node); + return (void *)fte; + } + nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD); + list_for_each_entry(iter, match_head, list) + up_write_ref_node(&iter->g->node); + rule = add_rule_fg(g, spec->match_value, + flow_act, dest, dest_num, fte); + up_write_ref_node(&fte->node); + tree_put_node(&fte->node); + return rule; + } + rule = ERR_PTR(-ENOENT); +out: + list_for_each_entry(iter, match_head, list) + up_write_ref_node(&iter->g->node); return rule; } @@ -1594,6 +1690,10 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft, { struct mlx5_flow_group *g; struct mlx5_flow_handle *rule; + struct 
match_list_head match_head; + bool take_write = false; + struct fs_fte *fte; + int version; int err; int i; @@ -1604,31 +1704,67 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft, if (!dest_is_valid(&dest[i], flow_act->action, ft)) return ERR_PTR(-EINVAL); } + nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT); +search_again_locked: + version = atomic_read(&ft->node.version); - nested_lock_ref_node(&ft->node, FS_LOCK_GRANDPARENT); - rule = try_add_to_existing_fg(ft, spec, flow_act, dest, dest_num); - if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOENT) - goto unlock; + /* Collect all fgs which has a matching match_criteria */ + err = build_match_list(&match_head, ft, spec); + if (err) + return ERR_PTR(err); + + if (!take_write) + up_read_ref_node(&ft->node); + + rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest, + dest_num, version); + free_match_list(&match_head); + if (!IS_ERR(rule) || + (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) + return rule; + + if (!take_write) { + nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT); + take_write = true; + } + + if (PTR_ERR(rule) == -EAGAIN || + version != atomic_read(&ft->node.version)) + goto search_again_locked; g = alloc_auto_flow_group(ft, spec); if (IS_ERR(g)) { rule = (void *)g; - goto unlock; + up_write_ref_node(&ft->node); + return rule; } + nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); + up_write_ref_node(&ft->node); + err = create_auto_flow_group(ft, g); - if (err) { - rule = ERR_PTR(err); - goto put_fg; + if (err) + goto err_release_fg; + + fte = alloc_insert_fte(g, spec->match_value, flow_act); + if (IS_ERR(fte)) { + err = PTR_ERR(fte); + goto err_release_fg; } + nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD); + up_write_ref_node(&g->node); rule = add_rule_fg(g, spec->match_value, flow_act, dest, - dest_num, NULL); -put_fg: + dest_num, fte); + up_write_ref_node(&fte->node); + tree_put_node(&fte->node); tree_put_node(&g->node); -unlock: - unlock_ref_node(&ft->node); return rule; + +err_release_fg: + up_write_ref_node(&g->node); + tree_put_node(&g->node); + return ERR_PTR(err); } static bool fwd_next_prio_supported(struct mlx5_flow_table *ft) @@ -1869,7 +2005,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns, return ERR_PTR(-ENOMEM); fs_prio->node.type = FS_TYPE_PRIO; - tree_init_node(&fs_prio->node, NULL); + tree_init_node(&fs_prio->node, NULL, NULL); tree_add_node(&fs_prio->node, &ns->node); fs_prio->num_levels = num_levels; fs_prio->prio = prio; @@ -1895,7 +2031,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio) return ERR_PTR(-ENOMEM); fs_init_namespace(ns); - tree_init_node(&ns->node, NULL); + tree_init_node(&ns->node, NULL, NULL); tree_add_node(&ns->node, &prio->node); list_add_tail(&ns->node.list, &prio->node.children); @@ -2020,7 +2156,7 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering ns = &root_ns->ns; fs_init_namespace(ns); mutex_init(&root_ns->chain_lock); - tree_init_node(&ns->node, NULL); + tree_init_node(&ns->node, NULL, NULL); tree_add_node(&ns->node, NULL); return root_ns; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index b5c079f35051..875b753862b0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -83,7 +83,9 @@ struct fs_node { struct rw_semaphore lock; atomic_t refcount; bool active; - void (*remove_func)(struct fs_node *); + void 
(*del_hw_func)(struct fs_node *); + void (*del_sw_func)(struct fs_node *); + atomic_t version; }; struct mlx5_flow_rule { -- cgit v1.2.3 From f5c2ff179f51101893e42e78683b23a487929d6c Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Tue, 29 Aug 2017 19:17:12 +0300 Subject: net/mlx5: Allocate FTE object without lock Allocation of a new FTE is a massive operation; part of it can be done without taking the flow group write lock. Split the FTE allocation into two functions: the actions that need to run under the lock, and the actions that don't. Signed-off-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 92 +++++++++++------------ 1 file changed, 46 insertions(+), 46 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index e7301cf747c5..bc4bbb72fa86 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -546,9 +546,33 @@ static void del_sw_flow_group(struct fs_node *node) WARN_ON(err); } -static struct fs_fte *alloc_fte(struct mlx5_flow_act *flow_act, - u32 *match_value, - unsigned int index) +static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte) +{ + int index; + int ret; + + index = ida_simple_get(&fg->fte_allocator, 0, fg->max_ftes, GFP_KERNEL); + if (index < 0) + return index; + + fte->index = index + fg->start_index; + ret = rhashtable_insert_fast(&fg->ftes_hash, + &fte->hash, + rhash_fte); + if (ret) + goto err_ida_remove; + + tree_add_node(&fte->node, &fg->node); + list_add_tail(&fte->node.list, &fg->node.children); + return 0; + +err_ida_remove: + ida_simple_remove(&fg->fte_allocator, index); + return ret; +} + +static struct fs_fte *alloc_fte(u32 *match_value, + struct mlx5_flow_act *flow_act) { struct fs_fte *fte; @@ -559,51 +583,13 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_act *flow_act, memcpy(fte->val, match_value, sizeof(fte->val)); fte->node.type = FS_TYPE_FLOW_ENTRY; fte->flow_tag = flow_act->flow_tag; - fte->index = index; fte->action = flow_act->action; fte->encap_id = flow_act->encap_id; fte->modify_id = flow_act->modify_id; - return fte; -} - -static struct fs_fte *alloc_insert_fte(struct mlx5_flow_group *fg, - u32 *match_value, - struct mlx5_flow_act *flow_act) -{ - struct fs_fte *fte; - int index; - int ret; - - index = ida_simple_get(&fg->fte_allocator, 0, - fg->max_ftes, - GFP_KERNEL); - if (index < 0) - return ERR_PTR(index); - - fte = alloc_fte(flow_act, match_value, index + fg->start_index); - if (IS_ERR(fte)) { - ret = PTR_ERR(fte); - goto err_ida_remove; - } - - ret = rhashtable_insert_fast(&fg->ftes_hash, - &fte->hash, - rhash_fte); - if (ret) - goto err_free; - tree_init_node(&fte->node, del_hw_fte, del_sw_fte); - tree_add_node(&fte->node, &fg->node); - list_add_tail(&fte->node.list, &fg->node.children); return fte; - -err_free: - kfree(fte); -err_ida_remove: - ida_simple_remove(&fg->fte_allocator, index); - return ERR_PTR(ret); } static void dealloc_flow_group(struct mlx5_flow_group *fg) @@ -1589,6 +1575,11 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, bool take_write = false; struct fs_fte *fte; u64 version; + int err; + + fte = alloc_fte(spec->match_value, flow_act); + if (IS_ERR(fte)) + return ERR_PTR(-ENOMEM); list_for_each_entry(iter, match_head, list) { nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT); @@ -1620,6 +1611,7 @@ search_again_locked: flow_act, dest, dest_num, fte_tmp);
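/* The fast path above searches for an existing FTE under per-group read
 * locks and a version snapshot, and only falls back to group write locks
 * when nothing is found. A minimal sketch of the pattern, with
 * illustrative names rather than the mlx5 helpers:
 *
 *	version = snapshot_version();
 *	if ((obj = lookup_under_read_lock()))
 *		return reuse(obj);
 *	upgrade_to_write_locks();
 *	if (version != snapshot_version())
 *		goto search_again;	// a racing insert won; retry lookup
 *	insert_new_entry();
 */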
up_write_ref_node(&fte_tmp->node); tree_put_node(&fte_tmp->node); + kfree(fte); return rule; } @@ -1655,13 +1647,14 @@ search_again_locked: if (!g->node.active) continue; - fte = alloc_insert_fte(g, spec->match_value, flow_act); - if (IS_ERR(fte)) { - if (PTR_ERR(fte) == -ENOSPC) + err = insert_fte(g, fte); + if (err) { + if (err == -ENOSPC) continue; list_for_each_entry(iter, match_head, list) up_write_ref_node(&iter->g->node); - return (void *)fte; + kfree(fte); + return ERR_PTR(err); } nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD); @@ -1677,6 +1670,7 @@ search_again_locked: out: list_for_each_entry(iter, match_head, list) up_write_ref_node(&iter->g->node); + kfree(fte); return rule; } @@ -1746,12 +1740,18 @@ search_again_locked: if (err) goto err_release_fg; - fte = alloc_insert_fte(g, spec->match_value, flow_act); + fte = alloc_fte(spec->match_value, flow_act); if (IS_ERR(fte)) { err = PTR_ERR(fte); goto err_release_fg; } + err = insert_fte(g, fte); + if (err) { + kfree(fte); + goto err_release_fg; + } + nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD); up_write_ref_node(&g->node); rule = add_rule_fg(g, spec->match_value, flow_act, dest, -- cgit v1.2.3 From a369d4ac4dff92129ea0dfa3d66f45a830e29098 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Sun, 27 Aug 2017 13:18:40 +0300 Subject: net/mlx5: Add FGs and FTEs memory pool Add memory pool allocation for flow groups and flow table entry. It is useful because these objects are not small and could be allocated/deallocated many times. Signed-off-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 67 +++++++++++++++++------ drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 2 + 2 files changed, 53 insertions(+), 16 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index bc4bbb72fa86..7a136ae2547a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -269,8 +269,9 @@ static void tree_put_node(struct fs_node *node) if (node->del_sw_func) node->del_sw_func(node); up_write_ref_node(parent_node); + } else { + kfree(node); } - kfree(node); node = NULL; } if (!node && parent_node) @@ -389,6 +390,15 @@ static struct mlx5_flow_root_namespace *find_root(struct fs_node *node) return container_of(ns, struct mlx5_flow_root_namespace, ns); } +static inline struct mlx5_flow_steering *get_steering(struct fs_node *node) +{ + struct mlx5_flow_root_namespace *root = find_root(node); + + if (root) + return root->dev->priv.steering; + return NULL; +} + static inline struct mlx5_core_dev *get_dev(struct fs_node *node) { struct mlx5_flow_root_namespace *root = find_root(node); @@ -424,6 +434,7 @@ static void del_sw_flow_table(struct fs_node *node) rhltable_destroy(&ft->fgs_hash); fs_get_obj(prio, ft->node.parent); prio->num_ft--; + kfree(ft); } static void del_sw_hw_rule(struct fs_node *node) @@ -469,6 +480,7 @@ out: "%s can't del rule fg id=%d fte_index=%d\n", __func__, fg->id, fte->index); } + kfree(rule); } static void del_hw_fte(struct fs_node *node) @@ -497,6 +509,7 @@ static void del_hw_fte(struct fs_node *node) static void del_sw_fte(struct fs_node *node) { + struct mlx5_flow_steering *steering = get_steering(node); struct mlx5_flow_group *fg; struct fs_fte *fte; int err; @@ -509,6 +522,7 @@ static void del_sw_fte(struct fs_node *node) rhash_fte); WARN_ON(err); ida_simple_remove(&fg->fte_allocator, fte->index 
- fg->start_index); + kmem_cache_free(steering->ftes_cache, fte); } static void del_hw_flow_group(struct fs_node *node) @@ -529,6 +543,7 @@ static void del_hw_flow_group(struct fs_node *node) static void del_sw_flow_group(struct fs_node *node) { + struct mlx5_flow_steering *steering = get_steering(node); struct mlx5_flow_group *fg; struct mlx5_flow_table *ft; int err; @@ -544,6 +559,7 @@ static void del_sw_flow_group(struct fs_node *node) &fg->hash, rhash_fg); WARN_ON(err); + kmem_cache_free(steering->fgs_cache, fg); } static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte) @@ -571,12 +587,14 @@ err_ida_remove: return ret; } -static struct fs_fte *alloc_fte(u32 *match_value, +static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft, + u32 *match_value, struct mlx5_flow_act *flow_act) { + struct mlx5_flow_steering *steering = get_steering(&ft->node); struct fs_fte *fte; - fte = kzalloc(sizeof(*fte), GFP_KERNEL); + fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL); if (!fte) return ERR_PTR(-ENOMEM); @@ -592,13 +610,15 @@ static struct fs_fte *alloc_fte(u32 *match_value, return fte; } -static void dealloc_flow_group(struct mlx5_flow_group *fg) +static void dealloc_flow_group(struct mlx5_flow_steering *steering, + struct mlx5_flow_group *fg) { rhashtable_destroy(&fg->ftes_hash); - kfree(fg); + kmem_cache_free(steering->fgs_cache, fg); } -static struct mlx5_flow_group *alloc_flow_group(u8 match_criteria_enable, +static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering, + u8 match_criteria_enable, void *match_criteria, int start_index, int end_index) @@ -606,13 +626,13 @@ static struct mlx5_flow_group *alloc_flow_group(u8 match_criteria_enable, struct mlx5_flow_group *fg; int ret; - fg = kzalloc(sizeof(*fg), GFP_KERNEL); + fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL); if (!fg) return ERR_PTR(-ENOMEM); ret = rhashtable_init(&fg->ftes_hash, &rhash_fte); if (ret) { - kfree(fg); + kmem_cache_free(steering->fgs_cache, fg); return ERR_PTR(ret); } ida_init(&fg->fte_allocator); @@ -633,10 +653,11 @@ static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *f int end_index, struct list_head *prev) { + struct mlx5_flow_steering *steering = get_steering(&ft->node); struct mlx5_flow_group *fg; int ret; - fg = alloc_flow_group(match_criteria_enable, match_criteria, + fg = alloc_flow_group(steering, match_criteria_enable, match_criteria, start_index, end_index); if (IS_ERR(fg)) return fg; @@ -646,7 +667,7 @@ static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *f &fg->hash, rhash_fg); if (ret) { - dealloc_flow_group(fg); + dealloc_flow_group(steering, fg); return ERR_PTR(ret); } @@ -1569,6 +1590,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, int dest_num, int ft_version) { + struct mlx5_flow_steering *steering = get_steering(&ft->node); struct mlx5_flow_group *g; struct mlx5_flow_handle *rule; struct match_list *iter; @@ -1577,7 +1599,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft, u64 version; int err; - fte = alloc_fte(spec->match_value, flow_act); + fte = alloc_fte(ft, spec->match_value, flow_act); if (IS_ERR(fte)) return ERR_PTR(-ENOMEM); @@ -1611,7 +1633,7 @@ search_again_locked: flow_act, dest, dest_num, fte_tmp); up_write_ref_node(&fte_tmp->node); tree_put_node(&fte_tmp->node); - kfree(fte); + kmem_cache_free(steering->ftes_cache, fte); return rule; } @@ -1653,7 +1675,7 @@ search_again_locked: continue; list_for_each_entry(iter, match_head, list) 
up_write_ref_node(&iter->g->node); - kfree(fte); + kmem_cache_free(steering->ftes_cache, fte); return ERR_PTR(err); } @@ -1670,7 +1692,7 @@ search_again_locked: out: list_for_each_entry(iter, match_head, list) up_write_ref_node(&iter->g->node); - kfree(fte); + kmem_cache_free(steering->ftes_cache, fte); return rule; } @@ -1682,6 +1704,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft, int dest_num) { + struct mlx5_flow_steering *steering = get_steering(&ft->node); struct mlx5_flow_group *g; struct mlx5_flow_handle *rule; struct match_list_head match_head; @@ -1740,7 +1763,7 @@ search_again_locked: if (err) goto err_release_fg; - fte = alloc_fte(spec->match_value, flow_act); + fte = alloc_fte(ft, spec->match_value, flow_act); if (IS_ERR(fte)) { err = PTR_ERR(fte); goto err_release_fg; @@ -1748,7 +1771,7 @@ search_again_locked: err = insert_fte(g, fte); if (err) { - kfree(fte); + kmem_cache_free(steering->ftes_cache, fte); goto err_release_fg; } @@ -2281,6 +2304,8 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev) cleanup_root_ns(steering->sniffer_rx_root_ns); cleanup_root_ns(steering->sniffer_tx_root_ns); mlx5_cleanup_fc_stats(dev); + kmem_cache_destroy(steering->ftes_cache); + kmem_cache_destroy(steering->fgs_cache); kfree(steering); } @@ -2386,6 +2411,16 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) steering->dev = dev; dev->priv.steering = steering; + steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs", + sizeof(struct mlx5_flow_group), 0, + 0, NULL); + steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0, + 0, NULL); + if (!steering->ftes_cache || !steering->fgs_cache) { + err = -ENOMEM; + goto err; + } + if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && (MLX5_CAP_GEN(dev, nic_flow_table))) || ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) && diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 875b753862b0..ebe184515433 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -65,6 +65,8 @@ enum fs_fte_status { struct mlx5_flow_steering { struct mlx5_core_dev *dev; + struct kmem_cache *fgs_cache; + struct kmem_cache *ftes_cache; struct mlx5_flow_root_namespace *root_ns; struct mlx5_flow_root_namespace *fdb_root_ns; struct mlx5_flow_root_namespace *esw_egress_root_ns; -- cgit v1.2.3 From e0a8f9de16fce34fc2957eca4c71d3ff2ac286d5 Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Sun, 24 Sep 2017 12:09:42 +0300 Subject: qed: Add iWARP enablement support This patch is the last of the initial iWARP patch series. It adds the possibility to actually detect iWARP from the device and enable it in the critical locations that make iWARP available. It wasn't submitted until now as iWARP hadn't been accepted into the rdma tree. Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: David S. Miller
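For orientation, the enablement boils down to three spots; the sketch below just stitches the driver's own lines together, condensed from the hunks that follow:

	/* 1) qed_mcp: map the MFW answer to a protocol */
	case FW_MB_PARAM_GET_PF_RDMA_IWARP:
		*p_proto = QED_PCI_ETH_IWARP;
		break;

	/* 2) qed_rdma: pick the FW protocol ID from the personality */
	if (QED_IS_IWARP_PERSONALITY(p_hwfn))
		p_rdma_info->proto = PROTOCOLID_IWARP;
	else
		p_rdma_info->proto = PROTOCOLID_ROCE;

	/* 3) qed_sp_commands: iWARP shares the RDMA+ETH ramrod personality */
	case QED_PCI_ETH_IWARP:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;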
--- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 6 ++++++ drivers/net/ethernet/qlogic/qed/qed_mcp.c | 8 ++++---- drivers/net/ethernet/qlogic/qed/qed_rdma.c | 5 ++++- drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 1 + 4 files changed, 15 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index af106be8cc08..afd07ad91631 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -2069,6 +2069,12 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs); + if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) { + DP_NOTICE(p_hwfn, + "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n"); + p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE; + } + switch (p_hwfn->hw_info.personality) { case QED_PCI_ETH_IWARP: /* Each QP requires one connection */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 376485d99357..8b99c7d26f34 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1691,12 +1691,12 @@ qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn, case FW_MB_PARAM_GET_PF_RDMA_ROCE: *p_proto = QED_PCI_ETH_ROCE; break; + case FW_MB_PARAM_GET_PF_RDMA_IWARP: + *p_proto = QED_PCI_ETH_IWARP; + break; case FW_MB_PARAM_GET_PF_RDMA_BOTH: - DP_NOTICE(p_hwfn, - "Current day drivers don't support RoCE & iWARP. Default to RoCE-only\n"); - *p_proto = QED_PCI_ETH_ROCE; + *p_proto = QED_PCI_ETH_RDMA; break; - case FW_MB_PARAM_GET_PF_RDMA_IWARP: default: DP_NOTICE(p_hwfn, "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n", diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 6fb99518a61f..06715f7403ef 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -156,7 +156,10 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, return rc; p_hwfn->p_rdma_info = p_rdma_info; - p_rdma_info->proto = PROTOCOLID_ROCE; + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + p_rdma_info->proto = PROTOCOLID_IWARP; + else + p_rdma_info->proto = PROTOCOLID_ROCE; num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, NULL); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 46d0c3cb83a5..a1d33f35aad3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -377,6 +377,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, p_ramrod->personality = PERSONALITY_ISCSI; break; case QED_PCI_ETH_ROCE: + case QED_PCI_ETH_IWARP: p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; break; default: -- cgit v1.2.3 From d1abfd0b4ee2b83af88098a0c7105622c3d66e73 Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Sun, 24 Sep 2017 12:09:43 +0300 Subject: qed: Add iWARP out of order support iWARP requires OOO support, which is already provided by the ll2 interface (until now it was used only for iSCSI offload). The changes mostly consist of opening a dedicated ll2 connection for OOO and notifying the FW about the handle id. Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: David S.
Miller --- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 44 +++++++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_iwarp.h | 11 +++++++- drivers/net/ethernet/qlogic/qed/qed_rdma.c | 7 +++-- 3 files changed, 59 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 9d989c96278c..568e9853cc8d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -41,6 +41,7 @@ #include "qed_rdma.h" #include "qed_reg_addr.h" #include "qed_sp.h" +#include "qed_ooo.h" #define QED_IWARP_ORD_DEFAULT 32 #define QED_IWARP_IRD_DEFAULT 32 @@ -119,6 +120,13 @@ static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid) spin_unlock_bh(&p_hwfn->p_rdma_info->lock); } +void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn, + struct iwarp_init_func_params *p_ramrod) +{ + p_ramrod->ll2_ooo_q_index = RESC_START(p_hwfn, QED_LL2_QUEUE) + + p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle; +} + static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid) { int rc; @@ -1876,6 +1884,16 @@ static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL; } + if (iwarp_info->ll2_ooo_handle != QED_IWARP_HANDLE_INVAL) { + rc = qed_ll2_terminate_connection(p_hwfn, + iwarp_info->ll2_ooo_handle); + if (rc) + DP_INFO(p_hwfn, "Failed to terminate ooo connection\n"); + + qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_ooo_handle); + iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL; + } + qed_llh_remove_mac_filter(p_hwfn, p_ptt, p_hwfn->p_rdma_info->iwarp.mac_addr); return rc; @@ -1927,10 +1945,12 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, struct qed_iwarp_info *iwarp_info; struct qed_ll2_acquire_data data; struct qed_ll2_cbs cbs; + u16 n_ooo_bufs; int rc = 0; iwarp_info = &p_hwfn->p_rdma_info->iwarp; iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL; + iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL; iwarp_info->max_mtu = params->max_mtu; @@ -1978,6 +1998,29 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, if (rc) goto err; + /* Start OOO connection */ + data.input.conn_type = QED_LL2_TYPE_OOO; + data.input.mtu = params->max_mtu; + + n_ooo_bufs = (QED_IWARP_MAX_OOO * QED_IWARP_RCV_WND_SIZE_DEF) / + iwarp_info->max_mtu; + n_ooo_bufs = min_t(u32, n_ooo_bufs, QED_IWARP_LL2_OOO_MAX_RX_SIZE); + + data.input.rx_num_desc = n_ooo_bufs; + data.input.rx_num_ooo_buffers = n_ooo_bufs; + + data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ + data.input.tx_num_desc = QED_IWARP_LL2_OOO_DEF_TX_SIZE; + data.p_connection_handle = &iwarp_info->ll2_ooo_handle; + + rc = qed_ll2_acquire_connection(p_hwfn, &data); + if (rc) + goto err; + + rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_ooo_handle); + if (rc) + goto err; + return rc; err: qed_iwarp_ll2_stop(p_hwfn, p_ptt); @@ -2014,6 +2057,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP, qed_iwarp_async_event); + qed_ooo_setup(p_hwfn); return qed_iwarp_ll2_start(p_hwfn, params, p_ptt); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index 148ef3c33a5d..9e2bfde894df 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -47,7 +47,12 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state); #define 
QED_IWARP_LL2_SYN_TX_SIZE (128) #define QED_IWARP_LL2_SYN_RX_SIZE (256) #define QED_IWARP_MAX_SYN_PKT_SIZE (128) -#define QED_IWARP_HANDLE_INVAL (0xff) + +#define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256) +#define QED_IWARP_MAX_OOO (16) +#define QED_IWARP_LL2_OOO_MAX_RX_SIZE (16384) + +#define QED_IWARP_HANDLE_INVAL (0xff) struct qed_iwarp_ll2_buff { void *data; @@ -67,6 +72,7 @@ struct qed_iwarp_info { u8 crc_needed; u8 tcp_flags; u8 ll2_syn_handle; + u8 ll2_ooo_handle; u8 peer2peer; enum mpa_negotiation_mode mpa_rev; enum mpa_rtr_type rtr_type; @@ -147,6 +153,9 @@ int qed_iwarp_alloc(struct qed_hwfn *p_hwfn); int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_rdma_start_in_params *params); +void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn, + struct iwarp_init_func_params *p_ramrod); + int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn); diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 06715f7403ef..4f46f2851780 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -551,10 +551,13 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn, if (rc) return rc; - if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { + qed_iwarp_init_fw_ramrod(p_hwfn, + &p_ent->ramrod.iwarp_init_func.iwarp); p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma; - else + } else { p_ramrod = &p_ent->ramrod.roce_init_func.rdma; + } p_params_header = &p_ramrod->params_header; p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn, -- cgit v1.2.3 From 471115ab9804f45cb8e091e426c9c67fe75e41b0 Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Sun, 24 Sep 2017 12:09:44 +0300 Subject: qed: Fix maximum number of CQs for iWARP The maximum number of CQs supported is bound to the number of connections supported, which differs between RoCE and iWARP. This fixes a crash that occurred in iWARP when running 1000 sessions using perftest. Fixes: 67b40dccc45 ("qed: Implement iWARP initialization, teardown and qp operations") Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Reviewed-by: Leon Romanovsky Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_rdma.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 4f46f2851780..c8c4b3940564 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -209,11 +209,11 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, goto free_pd_map; } - /* Allocate bitmap for cq's. The maximum number of CQs is bounded to - * twice the number of QPs. + /* Allocate bitmap for cq's. The maximum number of CQs is bound to + * the number of connections we support. (num_qps in iWARP or + * num_qps/2 in RoCE). */ - rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map, - p_rdma_info->num_qps * 2, "CQ"); + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map, num_cons, "CQ"); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to allocate cq bitmap, rc = %d\n", rc); @@ -222,10 +222,10 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn, /* Allocate bitmap for toggle bit for cq icids * We toggle the bit every time we create or resize cq for a given icid. - * The maximum number of CQs is bounded to twice the number of QPs. 
+ * Size needs to equal the size of the cq bmap. */ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits, - p_rdma_info->num_qps * 2, "Toggle"); + num_cons, "Toggle"); if (rc) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to allocate toogle bits, rc = %d\n", rc); -- cgit v1.2.3 From 1e99c497012cd8647972876f1bd18545bc907aea Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Sun, 24 Sep 2017 12:09:45 +0300 Subject: qed: iWARP - Add check for errors on a SYN packet A SYN packet which arrives with errors from FW should be dropped. This required adding an additional field to the ll2 rx completion data. Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 8 ++++++++ drivers/net/ethernet/qlogic/qed/qed_ll2.c | 1 + include/linux/qed/qed_ll2_if.h | 1 + 3 files changed, 10 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 568e9853cc8d..8fc9c811f6e3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1733,6 +1733,14 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) memset(&cm_info, 0, sizeof(cm_info)); ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle; + + /* Check if packet was received with errors... */ + if (data->err_flags) { + DP_NOTICE(p_hwfn, "Error received on SYN packet: 0x%x\n", + data->err_flags); + goto err; + } + if (GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) && GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index c06ad4f0758e..250afa5486cf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -413,6 +413,7 @@ static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn, struct qed_ll2_comp_rx_data *data) { data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags); + data->err_flags = le16_to_cpu(p_cqe->rx_cqe_fp.err_flags.flags); data->length.packet_length = le16_to_cpu(p_cqe->rx_cqe_fp.packet_length); data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan); diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index dd7a3b86bb9e..89fa0bbd54f3 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -101,6 +101,7 @@ struct qed_ll2_comp_rx_data { void *cookie; dma_addr_t rx_buf_addr; u16 parse_flags; + u16 err_flags; u16 vlan; bool b_last_packet; u8 connection_handle; -- cgit v1.2.3 From 3bd3b9ed1b602c065aa0b1ba109b9622afa6ff98 Mon Sep 17 00:00:00 2001 From: Himanshu Jha Date: Sun, 24 Sep 2017 17:41:24 +0530 Subject: net: bcm63xx_enet: Use setup_timer and mod_timer Use the setup_timer and mod_timer APIs instead of structure assignments. This is done using Coccinelle; the semantic patch used for this is as follows: @@ expression x,y,z,a,b; @@ -init_timer (&x); +setup_timer (&x, y, z); +mod_timer (&a, b); -x.function = y; -x.data = z; -x.expires = b; -add_timer(&a); Signed-off-by: Himanshu Jha Signed-off-by: David S. Miller
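Applied to ordinary driver code, the transformation looks like this (a generic sketch of the pattern; t, my_timer_fn and arg are placeholder names, and the bcm63xx-specific instance follows in the diff below):

	/* before: open-coded timer setup */
	init_timer(&t);
	t.function = my_timer_fn;
	t.data = (unsigned long)arg;
	t.expires = jiffies;
	add_timer(&t);

	/* after: equivalent behavior via the helper APIs */
	setup_timer(&t, my_timer_fn, (unsigned long)arg);
	mod_timer(&t, jiffies);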
--- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index f8bbbbfca06e..c6221f04a748 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -2331,11 +2331,8 @@ static int bcm_enetsw_open(struct net_device *dev) } /* start phy polling timer */ - init_timer(&priv->swphy_poll); - priv->swphy_poll.function = swphy_poll_timer; - priv->swphy_poll.data = (unsigned long)priv; - priv->swphy_poll.expires = jiffies; - add_timer(&priv->swphy_poll); + setup_timer(&priv->swphy_poll, swphy_poll_timer, (unsigned long)priv); + mod_timer(&priv->swphy_poll, jiffies); return 0; out: -- cgit v1.2.3 From de8f3a83b0a0fddb2cf56e7a718127e9619ea3da Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 25 Sep 2017 02:25:51 +0200 Subject: bpf: add meta pointer for direct access This work enables generic transfer of metadata from XDP into skb. The basic idea is that we can make use of the fact that the resulting skb must be linear and already comes with a larger headroom for supporting bpf_xdp_adjust_head(), which mangles xdp->data. Here, we base our work on a similar principle and introduce a small helper bpf_xdp_adjust_meta() for adjusting a new pointer called xdp->data_meta. Thus, the packet has a flexible and programmable room for meta data, followed by the actual packet data. struct xdp_buff is therefore laid out such that we first point to data_hard_start, then data_meta directly prepended to data followed by data_end marking the end of the packet. bpf_xdp_adjust_head() takes into account whether we have meta data already prepended and if so, memmove()s this along with the given offset provided there's enough room. xdp->data_meta is optional and programs are not required to use it. The rationale is that when we process the packet in XDP (e.g. as a DoS filter), we can push further meta data along with it for the XDP_PASS case, and give the guarantee that a clsact ingress BPF program on the same device can pick this up for further post-processing. Since we work with skb there, we can also set skb->mark, skb->priority or other skb meta data out of BPF. Having this scratch space generic and programmable thus allows for more flexibility than defining a direct 1:1 transfer of potentially new XDP members into skb (it's also more efficient as we don't need to initialize/handle each such new member). The facility also works together with GRO aggregation. The scratch space at the head of the packet can be a multiple of 4 bytes, up to 32 bytes in size. Drivers not yet supporting xdp->data_meta can simply be set up with xdp->data_meta as xdp->data + 1 as bpf_xdp_adjust_meta() will detect this and bail out, such that the subsequent match against xdp->data for later access is guaranteed to fail. The verifier treats xdp->data_meta/xdp->data the same way as we treat xdp->data/xdp->data_end pointer comparisons. The requirement for doing the compare against xdp->data is that it hasn't been modified from its original address we got from ctx access. It may have a range marking already from prior successful xdp->data/xdp->data_end pointer comparisons though. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Acked-by: John Fastabend Signed-off-by: David S.
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 1 + drivers/net/ethernet/cavium/thunder/nicvf_main.c | 1 + drivers/net/ethernet/intel/i40e/i40e_txrx.c | 1 + drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 1 + drivers/net/ethernet/mellanox/mlx4/en_rx.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 1 + .../net/ethernet/netronome/nfp/nfp_net_common.c | 1 + drivers/net/ethernet/qlogic/qede/qede_fp.c | 1 + drivers/net/tun.c | 1 + drivers/net/virtio_net.c | 2 + include/linux/bpf.h | 1 + include/linux/filter.h | 21 +++- include/linux/skbuff.h | 68 +++++++++++- include/uapi/linux/bpf.h | 13 ++- kernel/bpf/verifier.c | 114 ++++++++++++++++----- net/bpf/test_run.c | 1 + net/core/dev.c | 31 +++++- net/core/filter.c | 77 +++++++++++++- net/core/skbuff.c | 2 + 19 files changed, 297 insertions(+), 42 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index d8f0c837b72c..06ce63c00821 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -94,6 +94,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, xdp.data_hard_start = *data_ptr - offset; xdp.data = *data_ptr; + xdp_set_data_meta_invalid(&xdp); xdp.data_end = *data_ptr + *len; orig_data = xdp.data; mapping = rx_buf->mapping - bp->rx_dma_offset; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 49b80da51ba7..d68478afccbf 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -523,6 +523,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, xdp.data_hard_start = page_address(page); xdp.data = (void *)cpu_addr; + xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + len; orig_data = xdp.data; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 1519dfb851d0..f426762bd83a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2107,6 +2107,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) if (!skb) { xdp.data = page_address(rx_buffer->page) + rx_buffer->page_offset; + xdp_set_data_meta_invalid(&xdp); xdp.data_hard_start = xdp.data - i40e_rx_offset(rx_ring); xdp.data_end = xdp.data + size; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index d962368d08d0..04bb03bda1cd 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2326,6 +2326,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, if (!skb) { xdp.data = page_address(rx_buffer->page) + rx_buffer->page_offset; + xdp_set_data_meta_invalid(&xdp); xdp.data_hard_start = xdp.data - ixgbe_rx_offset(rx_ring); xdp.data_end = xdp.data + size; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index b97a55c827eb..8f9cb8abc497 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -762,6 +762,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud xdp.data_hard_start = va - frags[0].page_offset; xdp.data = va; + xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + length; orig_data = xdp.data; diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index f1dd638384d3..30b3f3fbd719 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -794,6 +794,7 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq, return false; xdp.data = va + *rx_headroom; + xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + *len; xdp.data_hard_start = va; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 1c0187f0af51..e3a38be3600a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1583,6 +1583,7 @@ static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, void *hard_start, xdp.data_hard_start = hard_start; xdp.data = data + *off; + xdp_set_data_meta_invalid(&xdp); xdp.data_end = data + *off + *len; orig_data = xdp.data; diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 6fc854b120b0..48ec4c56cddf 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -1004,6 +1004,7 @@ static bool qede_rx_xdp(struct qede_dev *edev, xdp.data_hard_start = page_address(bd->data); xdp.data = xdp.data_hard_start + *data_offset; + xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + *len; /* Queues always have a full reset currently, so for the time diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 2c36f6ebad79..a6e0bffe3d29 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1468,6 +1468,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, xdp.data_hard_start = buf; xdp.data = buf + pad; + xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + len; orig_data = xdp.data; act = bpf_prog_run_xdp(xdp_prog, &xdp); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index dd14a4547932..fc059f193e7d 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -554,6 +554,7 @@ static struct sk_buff *receive_small(struct net_device *dev, xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len; xdp.data = xdp.data_hard_start + xdp_headroom; + xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + len; orig_data = xdp.data; act = bpf_prog_run_xdp(xdp_prog, &xdp); @@ -686,6 +687,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, data = page_address(xdp_page) + offset; xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len; xdp.data = data + vi->hdr_len; + xdp_set_data_meta_invalid(&xdp); xdp.data_end = xdp.data + (len - vi->hdr_len); act = bpf_prog_run_xdp(xdp_prog, &xdp); diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 8390859e79e7..2b672c50f160 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -137,6 +137,7 @@ enum bpf_reg_type { PTR_TO_MAP_VALUE, /* reg points to map element value */ PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */ PTR_TO_STACK, /* reg == frame_pointer + offset */ + PTR_TO_PACKET_META, /* skb->data - meta_len */ PTR_TO_PACKET, /* reg points to skb->data */ PTR_TO_PACKET_END, /* skb->data + headlen */ }; diff --git a/include/linux/filter.h b/include/linux/filter.h index 052bab3d62e7..911d454af107 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -487,12 +487,14 @@ struct sk_filter { struct bpf_skb_data_end { struct qdisc_skb_cb qdisc_cb; + void *data_meta; void *data_end; }; struct xdp_buff { 
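	/* With data_meta (added below), the valid layout is:
	 *   data_hard_start <= data_meta <= data <= data_end
	 * where [data_meta, data) holds the program-defined meta data.
	 * Drivers without meta data support mark the pointer invalid by
	 * setting data_meta = data + 1 (see xdp_set_data_meta_invalid()).
	 */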
void *data; void *data_end; + void *data_meta; void *data_hard_start; }; @@ -507,7 +509,8 @@ static inline void bpf_compute_data_pointers(struct sk_buff *skb) struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb)); - cb->data_end = skb->data + skb_headlen(skb); + cb->data_meta = skb->data - skb_metadata_len(skb); + cb->data_end = skb->data + skb_headlen(skb); } static inline u8 *bpf_skb_cb(struct sk_buff *skb) @@ -728,8 +731,22 @@ int xdp_do_redirect(struct net_device *dev, struct bpf_prog *prog); void xdp_do_flush_map(void); +/* Drivers not supporting XDP metadata can use this helper, which + * rejects any room expansion for metadata as a result. + */ +static __always_inline void +xdp_set_data_meta_invalid(struct xdp_buff *xdp) +{ + xdp->data_meta = xdp->data + 1; +} + +static __always_inline bool +xdp_data_meta_unsupported(const struct xdp_buff *xdp) +{ + return unlikely(xdp->data_meta > xdp->data); +} + void bpf_warn_invalid_xdp_action(u32 act); -void bpf_warn_invalid_xdp_redirect(u32 ifindex); struct sock *do_sk_redirect_map(void); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index f9db5539a6fb..19e64bfb1a66 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -489,8 +489,9 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, * the end of the header data, ie. at skb->end. */ struct skb_shared_info { - unsigned short _unused; - unsigned char nr_frags; + __u8 __unused; + __u8 meta_len; + __u8 nr_frags; __u8 tx_flags; unsigned short gso_size; /* Warning: this field is not always filled in (UFO)! */ @@ -3400,6 +3401,69 @@ static inline ktime_t net_invalid_timestamp(void) return 0; } +static inline u8 skb_metadata_len(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->meta_len; +} + +static inline void *skb_metadata_end(const struct sk_buff *skb) +{ + return skb_mac_header(skb); +} + +static inline bool __skb_metadata_differs(const struct sk_buff *skb_a, + const struct sk_buff *skb_b, + u8 meta_len) +{ + const void *a = skb_metadata_end(skb_a); + const void *b = skb_metadata_end(skb_b); + /* Using more efficient varaiant than plain call to memcmp(). */ +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 + u64 diffs = 0; + + switch (meta_len) { +#define __it(x, op) (x -= sizeof(u##op)) +#define __it_diff(a, b, op) (*(u##op *)__it(a, op)) ^ (*(u##op *)__it(b, op)) + case 32: diffs |= __it_diff(a, b, 64); + case 24: diffs |= __it_diff(a, b, 64); + case 16: diffs |= __it_diff(a, b, 64); + case 8: diffs |= __it_diff(a, b, 64); + break; + case 28: diffs |= __it_diff(a, b, 64); + case 20: diffs |= __it_diff(a, b, 64); + case 12: diffs |= __it_diff(a, b, 64); + case 4: diffs |= __it_diff(a, b, 32); + break; + } + return diffs; +#else + return memcmp(a - meta_len, b - meta_len, meta_len); +#endif +} + +static inline bool skb_metadata_differs(const struct sk_buff *skb_a, + const struct sk_buff *skb_b) +{ + u8 len_a = skb_metadata_len(skb_a); + u8 len_b = skb_metadata_len(skb_b); + + if (!(len_a | len_b)) + return false; + + return len_a != len_b ? 
+ true : __skb_metadata_differs(skb_a, skb_b, len_a); +} + +static inline void skb_metadata_set(struct sk_buff *skb, u8 meta_len) +{ + skb_shinfo(skb)->meta_len = meta_len; +} + +static inline void skb_metadata_clear(struct sk_buff *skb) +{ + skb_metadata_set(skb, 0); +} + struct sk_buff *skb_clone_sk(struct sk_buff *skb); #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 43ab5c402f98..e43491ac4823 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -582,6 +582,12 @@ union bpf_attr { * @map: pointer to sockmap to update * @key: key to insert/update sock in map * @flags: same flags as map update elem + * + * int bpf_xdp_adjust_meta(xdp_md, delta) + * Adjust the xdp_md.data_meta by delta + * @xdp_md: pointer to xdp_md + * @delta: An positive/negative integer to be added to xdp_md.data_meta + * Return: 0 on success or negative on error */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -638,6 +644,7 @@ union bpf_attr { FN(redirect_map), \ FN(sk_redirect_map), \ FN(sock_map_update), \ + FN(xdp_adjust_meta), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -715,7 +722,7 @@ struct __sk_buff { __u32 data_end; __u32 napi_id; - /* accessed by BPF_PROG_TYPE_sk_skb types */ + /* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */ __u32 family; __u32 remote_ip4; /* Stored in network byte order */ __u32 local_ip4; /* Stored in network byte order */ @@ -723,6 +730,9 @@ struct __sk_buff { __u32 local_ip6[4]; /* Stored in network byte order */ __u32 remote_port; /* Stored in network byte order */ __u32 local_port; /* stored in host byte order */ + /* ... here. */ + + __u32 data_meta; }; struct bpf_tunnel_key { @@ -783,6 +793,7 @@ enum xdp_action { struct xdp_md { __u32 data; __u32 data_end; + __u32 data_meta; }; enum sk_action { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index b914fbe1383e..f849eca36052 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -177,6 +177,12 @@ static __printf(1, 2) void verbose(const char *fmt, ...) va_end(args); } +static bool type_is_pkt_pointer(enum bpf_reg_type type) +{ + return type == PTR_TO_PACKET || + type == PTR_TO_PACKET_META; +} + /* string representation of 'enum bpf_reg_type' */ static const char * const reg_type_str[] = { [NOT_INIT] = "?", @@ -187,6 +193,7 @@ static const char * const reg_type_str[] = { [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", [PTR_TO_STACK] = "fp", [PTR_TO_PACKET] = "pkt", + [PTR_TO_PACKET_META] = "pkt_meta", [PTR_TO_PACKET_END] = "pkt_end", }; @@ -226,7 +233,7 @@ static void print_verifier_state(struct bpf_verifier_state *state) verbose("(id=%d", reg->id); if (t != SCALAR_VALUE) verbose(",off=%d", reg->off); - if (t == PTR_TO_PACKET) + if (type_is_pkt_pointer(t)) verbose(",r=%d", reg->range); else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || @@ -519,6 +526,31 @@ static void mark_reg_known_zero(struct bpf_reg_state *regs, u32 regno) __mark_reg_known_zero(regs + regno); } +static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg) +{ + return type_is_pkt_pointer(reg->type); +} + +static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg) +{ + return reg_is_pkt_pointer(reg) || + reg->type == PTR_TO_PACKET_END; +} + +/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. 
*/ +static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg, + enum bpf_reg_type which) +{ + /* The register can already have a range from prior markings. + * This is fine as long as it hasn't been advanced from its + * origin. + */ + return reg->type == which && + reg->id == 0 && + reg->off == 0 && + tnum_equals_const(reg->var_off, 0); +} + /* Attempts to improve min/max values based on var_off information */ static void __update_reg_bounds(struct bpf_reg_state *reg) { @@ -702,6 +734,7 @@ static bool is_spillable_regtype(enum bpf_reg_type type) case PTR_TO_STACK: case PTR_TO_CTX: case PTR_TO_PACKET: + case PTR_TO_PACKET_META: case PTR_TO_PACKET_END: case CONST_PTR_TO_MAP: return true; @@ -1047,7 +1080,10 @@ static int check_ptr_alignment(struct bpf_verifier_env *env, switch (reg->type) { case PTR_TO_PACKET: - /* special case, because of NET_IP_ALIGN */ + case PTR_TO_PACKET_META: + /* Special case, because of NET_IP_ALIGN. Given metadata sits + * right in front, treat it the very same way. + */ return check_pkt_ptr_alignment(reg, off, size, strict); case PTR_TO_MAP_VALUE: pointer_desc = "value "; @@ -1124,8 +1160,8 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn err = check_ctx_access(env, insn_idx, off, size, t, ®_type); if (!err && t == BPF_READ && value_regno >= 0) { /* ctx access returns either a scalar, or a - * PTR_TO_PACKET[_END]. In the latter case, we know - * the offset is zero. + * PTR_TO_PACKET[_META,_END]. In the latter + * case, we know the offset is zero. */ if (reg_type == SCALAR_VALUE) mark_reg_unknown(state->regs, value_regno); @@ -1170,7 +1206,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn } else { err = check_stack_read(state, off, size, value_regno); } - } else if (reg->type == PTR_TO_PACKET) { + } else if (reg_is_pkt_pointer(reg)) { if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { verbose("cannot write into packet\n"); return -EACCES; @@ -1310,6 +1346,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, switch (reg->type) { case PTR_TO_PACKET: + case PTR_TO_PACKET_META: return check_packet_access(env, regno, reg->off, access_size); case PTR_TO_MAP_VALUE: return check_map_access(env, regno, reg->off, access_size); @@ -1342,7 +1379,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, return 0; } - if (type == PTR_TO_PACKET && + if (type_is_pkt_pointer(type) && !may_access_direct_pkt_data(env, meta, BPF_READ)) { verbose("helper access to the packet is not allowed\n"); return -EACCES; @@ -1351,7 +1388,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, if (arg_type == ARG_PTR_TO_MAP_KEY || arg_type == ARG_PTR_TO_MAP_VALUE) { expected_type = PTR_TO_STACK; - if (type != PTR_TO_PACKET && type != expected_type) + if (!type_is_pkt_pointer(type) && + type != expected_type) goto err_type; } else if (arg_type == ARG_CONST_SIZE || arg_type == ARG_CONST_SIZE_OR_ZERO) { @@ -1375,7 +1413,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, */ if (register_is_null(*reg)) /* final test in check_stack_boundary() */; - else if (type != PTR_TO_PACKET && type != PTR_TO_MAP_VALUE && + else if (!type_is_pkt_pointer(type) && + type != PTR_TO_MAP_VALUE && type != expected_type) goto err_type; meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; @@ -1401,7 +1440,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, verbose("invalid map_ptr to access map->key\n"); return -EACCES; } - if 
(type == PTR_TO_PACKET) + if (type_is_pkt_pointer(type)) err = check_packet_access(env, regno, reg->off, meta->map_ptr->key_size); else @@ -1417,7 +1456,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, verbose("invalid map_ptr to access map->value\n"); return -EACCES; } - if (type == PTR_TO_PACKET) + if (type_is_pkt_pointer(type)) err = check_packet_access(env, regno, reg->off, meta->map_ptr->value_size); else @@ -1590,8 +1629,8 @@ static int check_raw_mode(const struct bpf_func_proto *fn) return count > 1 ? -EINVAL : 0; } -/* Packet data might have moved, any old PTR_TO_PACKET[_END] are now invalid, - * so turn them into unknown SCALAR_VALUE. +/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] + * are now invalid, so turn them into unknown SCALAR_VALUE. */ static void clear_all_pkt_pointers(struct bpf_verifier_env *env) { @@ -1600,18 +1639,15 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env) int i; for (i = 0; i < MAX_BPF_REG; i++) - if (regs[i].type == PTR_TO_PACKET || - regs[i].type == PTR_TO_PACKET_END) + if (reg_is_pkt_pointer_any(®s[i])) mark_reg_unknown(regs, i); for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { if (state->stack_slot_type[i] != STACK_SPILL) continue; reg = &state->spilled_regs[i / BPF_REG_SIZE]; - if (reg->type != PTR_TO_PACKET && - reg->type != PTR_TO_PACKET_END) - continue; - __mark_reg_unknown(reg); + if (reg_is_pkt_pointer_any(reg)) + __mark_reg_unknown(reg); } } @@ -1871,7 +1907,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, } dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; - if (ptr_reg->type == PTR_TO_PACKET) { + if (reg_is_pkt_pointer(ptr_reg)) { dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range to zero */ dst_reg->range = 0; @@ -1931,7 +1967,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, } dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; - if (ptr_reg->type == PTR_TO_PACKET) { + if (reg_is_pkt_pointer(ptr_reg)) { dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range to zero */ if (smin_val < 0) @@ -2421,7 +2457,8 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) } static void find_good_pkt_pointers(struct bpf_verifier_state *state, - struct bpf_reg_state *dst_reg) + struct bpf_reg_state *dst_reg, + enum bpf_reg_type type) { struct bpf_reg_state *regs = state->regs, *reg; int i; @@ -2483,7 +2520,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state, * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. 
*/ for (i = 0; i < MAX_BPF_REG; i++) - if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id) + if (regs[i].type == type && regs[i].id == dst_reg->id) /* keep the maximum range already checked */ regs[i].range = max_t(u16, regs[i].range, dst_reg->off); @@ -2491,7 +2528,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state, if (state->stack_slot_type[i] != STACK_SPILL) continue; reg = &state->spilled_regs[i / BPF_REG_SIZE]; - if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id) + if (reg->type == type && reg->id == dst_reg->id) reg->range = max_t(u16, reg->range, dst_reg->off); } } @@ -2856,19 +2893,39 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && dst_reg->type == PTR_TO_PACKET && regs[insn->src_reg].type == PTR_TO_PACKET_END) { - find_good_pkt_pointers(this_branch, dst_reg); + find_good_pkt_pointers(this_branch, dst_reg, PTR_TO_PACKET); } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT && dst_reg->type == PTR_TO_PACKET && regs[insn->src_reg].type == PTR_TO_PACKET_END) { - find_good_pkt_pointers(other_branch, dst_reg); + find_good_pkt_pointers(other_branch, dst_reg, PTR_TO_PACKET); } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE && dst_reg->type == PTR_TO_PACKET_END && regs[insn->src_reg].type == PTR_TO_PACKET) { - find_good_pkt_pointers(other_branch, ®s[insn->src_reg]); + find_good_pkt_pointers(other_branch, ®s[insn->src_reg], + PTR_TO_PACKET); } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE && dst_reg->type == PTR_TO_PACKET_END && regs[insn->src_reg].type == PTR_TO_PACKET) { - find_good_pkt_pointers(this_branch, ®s[insn->src_reg]); + find_good_pkt_pointers(this_branch, ®s[insn->src_reg], + PTR_TO_PACKET); + } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && + dst_reg->type == PTR_TO_PACKET_META && + reg_is_init_pkt_pointer(®s[insn->src_reg], PTR_TO_PACKET)) { + find_good_pkt_pointers(this_branch, dst_reg, PTR_TO_PACKET_META); + } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT && + dst_reg->type == PTR_TO_PACKET_META && + reg_is_init_pkt_pointer(®s[insn->src_reg], PTR_TO_PACKET)) { + find_good_pkt_pointers(other_branch, dst_reg, PTR_TO_PACKET_META); + } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE && + reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && + regs[insn->src_reg].type == PTR_TO_PACKET_META) { + find_good_pkt_pointers(other_branch, ®s[insn->src_reg], + PTR_TO_PACKET_META); + } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE && + reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && + regs[insn->src_reg].type == PTR_TO_PACKET_META) { + find_good_pkt_pointers(this_branch, ®s[insn->src_reg], + PTR_TO_PACKET_META); } else if (is_pointer_value(env, insn->dst_reg)) { verbose("R%d pointer comparison prohibited\n", insn->dst_reg); return -EACCES; @@ -3298,8 +3355,9 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, return false; /* Check our ids match any regs they're supposed to */ return check_ids(rold->id, rcur->id, idmap); + case PTR_TO_PACKET_META: case PTR_TO_PACKET: - if (rcur->type != PTR_TO_PACKET) + if (rcur->type != rold->type) return false; /* We must have at least as much range as the old ptr * did, so that any accesses which were safe before are diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index df672517b4fd..a86e6687026e 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -162,6 +162,7 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, 
const union bpf_attr *kattr, xdp.data_hard_start = data; xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN; + xdp.data_meta = xdp.data; xdp.data_end = xdp.data + size; retval = bpf_test_run(prog, &xdp, repeat, &duration); diff --git a/net/core/dev.c b/net/core/dev.c index 97abddd9039a..e350c768d4b5 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3864,8 +3864,8 @@ drop: static u32 netif_receive_generic_xdp(struct sk_buff *skb, struct bpf_prog *xdp_prog) { + u32 metalen, act = XDP_DROP; struct xdp_buff xdp; - u32 act = XDP_DROP; void *orig_data; int hlen, off; u32 mac_len; @@ -3876,8 +3876,25 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb, if (skb_cloned(skb)) return XDP_PASS; - if (skb_linearize(skb)) - goto do_drop; + /* XDP packets must be linear and must have sufficient headroom + * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also + * native XDP provides, thus we need to do it here as well. + */ + if (skb_is_nonlinear(skb) || + skb_headroom(skb) < XDP_PACKET_HEADROOM) { + int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb); + int troom = skb->tail + skb->data_len - skb->end; + + /* In case we have to go down the path and also linearize, + * then lets do the pskb_expand_head() work just once here. + */ + if (pskb_expand_head(skb, + hroom > 0 ? ALIGN(hroom, NET_SKB_PAD) : 0, + troom > 0 ? troom + 128 : 0, GFP_ATOMIC)) + goto do_drop; + if (troom > 0 && __skb_linearize(skb)) + goto do_drop; + } /* The XDP program wants to see the packet starting at the MAC * header. @@ -3885,6 +3902,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb, mac_len = skb->data - skb_mac_header(skb); hlen = skb_headlen(skb) + mac_len; xdp.data = skb->data - mac_len; + xdp.data_meta = xdp.data; xdp.data_end = xdp.data + hlen; xdp.data_hard_start = skb->data - skb_headroom(skb); orig_data = xdp.data; @@ -3902,10 +3920,12 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb, case XDP_REDIRECT: case XDP_TX: __skb_push(skb, mac_len); - /* fall through */ + break; case XDP_PASS: + metalen = xdp.data - xdp.data_meta; + if (metalen) + skb_metadata_set(skb, metalen); break; - default: bpf_warn_invalid_xdp_action(act); /* fall through */ @@ -4695,6 +4715,7 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb) diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; diffs |= p->vlan_tci ^ skb->vlan_tci; diffs |= skb_metadata_dst_cmp(p, skb); + diffs |= skb_metadata_differs(p, skb); if (maclen == ETH_HLEN) diffs |= compare_ether_header(skb_mac_header(p), skb_mac_header(skb)); diff --git a/net/core/filter.c b/net/core/filter.c index c468e7cfad19..9b6e7e84aafd 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2447,14 +2447,26 @@ static const struct bpf_func_proto bpf_skb_change_head_proto = { .arg3_type = ARG_ANYTHING, }; +static unsigned long xdp_get_metalen(const struct xdp_buff *xdp) +{ + return xdp_data_meta_unsupported(xdp) ? 
0 : + xdp->data - xdp->data_meta; +} + BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset) { + unsigned long metalen = xdp_get_metalen(xdp); + void *data_start = xdp->data_hard_start + metalen; void *data = xdp->data + offset; - if (unlikely(data < xdp->data_hard_start || + if (unlikely(data < data_start || data > xdp->data_end - ETH_HLEN)) return -EINVAL; + if (metalen) + memmove(xdp->data_meta + offset, + xdp->data_meta, metalen); + xdp->data_meta += offset; xdp->data = data; return 0; @@ -2468,6 +2480,33 @@ static const struct bpf_func_proto bpf_xdp_adjust_head_proto = { .arg2_type = ARG_ANYTHING, }; +BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset) +{ + void *meta = xdp->data_meta + offset; + unsigned long metalen = xdp->data - meta; + + if (xdp_data_meta_unsupported(xdp)) + return -ENOTSUPP; + if (unlikely(meta < xdp->data_hard_start || + meta > xdp->data)) + return -EINVAL; + if (unlikely((metalen & (sizeof(__u32) - 1)) || + (metalen > 32))) + return -EACCES; + + xdp->data_meta = meta; + + return 0; +} + +static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = { + .func = bpf_xdp_adjust_meta, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, +}; + static int __bpf_tx_xdp(struct net_device *dev, struct bpf_map *map, struct xdp_buff *xdp, @@ -2692,7 +2731,8 @@ bool bpf_helper_changes_pkt_data(void *func) func == bpf_clone_redirect || func == bpf_l3_csum_replace || func == bpf_l4_csum_replace || - func == bpf_xdp_adjust_head) + func == bpf_xdp_adjust_head || + func == bpf_xdp_adjust_meta) return true; return false; @@ -3288,6 +3328,8 @@ xdp_func_proto(enum bpf_func_id func_id) return &bpf_get_smp_processor_id_proto; case BPF_FUNC_xdp_adjust_head: return &bpf_xdp_adjust_head_proto; + case BPF_FUNC_xdp_adjust_meta: + return &bpf_xdp_adjust_meta_proto; case BPF_FUNC_redirect: return &bpf_xdp_redirect_proto; case BPF_FUNC_redirect_map: @@ -3418,6 +3460,7 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4): case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4): case bpf_ctx_range(struct __sk_buff, data): + case bpf_ctx_range(struct __sk_buff, data_meta): case bpf_ctx_range(struct __sk_buff, data_end): if (size != size_default) return false; @@ -3444,6 +3487,7 @@ static bool sk_filter_is_valid_access(int off, int size, switch (off) { case bpf_ctx_range(struct __sk_buff, tc_classid): case bpf_ctx_range(struct __sk_buff, data): + case bpf_ctx_range(struct __sk_buff, data_meta): case bpf_ctx_range(struct __sk_buff, data_end): case bpf_ctx_range_till(struct __sk_buff, family, local_port): return false; @@ -3468,6 +3512,7 @@ static bool lwt_is_valid_access(int off, int size, switch (off) { case bpf_ctx_range(struct __sk_buff, tc_classid): case bpf_ctx_range_till(struct __sk_buff, family, local_port): + case bpf_ctx_range(struct __sk_buff, data_meta): return false; } @@ -3586,6 +3631,9 @@ static bool tc_cls_act_is_valid_access(int off, int size, case bpf_ctx_range(struct __sk_buff, data): info->reg_type = PTR_TO_PACKET; break; + case bpf_ctx_range(struct __sk_buff, data_meta): + info->reg_type = PTR_TO_PACKET_META; + break; case bpf_ctx_range(struct __sk_buff, data_end): info->reg_type = PTR_TO_PACKET_END; break; @@ -3619,6 +3667,9 @@ static bool xdp_is_valid_access(int off, int size, case offsetof(struct xdp_md, data): info->reg_type = PTR_TO_PACKET; break; + case offsetof(struct xdp_md, 
data_meta): + info->reg_type = PTR_TO_PACKET_META; + break; case offsetof(struct xdp_md, data_end): info->reg_type = PTR_TO_PACKET_END; break; @@ -3677,6 +3728,12 @@ static bool sk_skb_is_valid_access(int off, int size, enum bpf_access_type type, struct bpf_insn_access_aux *info) { + switch (off) { + case bpf_ctx_range(struct __sk_buff, tc_classid): + case bpf_ctx_range(struct __sk_buff, data_meta): + return false; + } + if (type == BPF_WRITE) { switch (off) { case bpf_ctx_range(struct __sk_buff, mark): @@ -3689,8 +3746,6 @@ static bool sk_skb_is_valid_access(int off, int size, } switch (off) { - case bpf_ctx_range(struct __sk_buff, tc_classid): - return false; case bpf_ctx_range(struct __sk_buff, data): info->reg_type = PTR_TO_PACKET; break; @@ -3847,6 +3902,15 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, offsetof(struct sk_buff, data)); break; + case offsetof(struct __sk_buff, data_meta): + off = si->off; + off -= offsetof(struct __sk_buff, data_meta); + off += offsetof(struct sk_buff, cb); + off += offsetof(struct bpf_skb_data_end, data_meta); + *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, + si->src_reg, off); + break; + case offsetof(struct __sk_buff, data_end): off = si->off; off -= offsetof(struct __sk_buff, data_end); @@ -4095,6 +4159,11 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type, si->dst_reg, si->src_reg, offsetof(struct xdp_buff, data)); break; + case offsetof(struct xdp_md, data_meta): + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_meta), + si->dst_reg, si->src_reg, + offsetof(struct xdp_buff, data_meta)); + break; case offsetof(struct xdp_md, data_end): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end), si->dst_reg, si->src_reg, diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 000ce735fa8d..d98c2e3ce2bf 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1509,6 +1509,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, skb->nohdr = 0; atomic_set(&skb_shinfo(skb)->dataref, 1); + skb_metadata_clear(skb); + /* It is not generally safe to change skb->truesize. * For the moment, we really care of rx path, or * when skb is orphaned (not attached to a socket). -- cgit v1.2.3 From 65d88fd0baaa5c9def9383ac696097911d4ceb73 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 25 Sep 2017 02:25:54 +0200 Subject: bpf, nfp: add meta data support Implement support for transferring XDP meta data into skb for nfp driver; before calling into the program, xdp.data_meta points to xdp.data, and on program return with a pass verdict, we call skb_metadata_set(). Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Acked-by: John Fastabend Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller
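To illustrate the consumer side of the data_meta infrastructure added above: the sketch below (not part of this series; the section layout and the stored u32 are made up for the example, assuming current clang/libbpf conventions) grows the meta data area with bpf_xdp_adjust_meta(), bounds-checks the resulting PTR_TO_PACKET_META pointer against ctx->data exactly as the verifier changes above require, stores a word, and passes the packet so that the stack transfers the area via skb_metadata_set().

    /* xdp_meta_sketch.c - illustrative only; build with clang -O2 -target bpf */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int xdp_store_meta(struct xdp_md *ctx)
    {
        __u32 *meta;

        /* A negative offset grows the meta data area in front of the packet. */
        if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
            return XDP_PASS;    /* no headroom, or driver without support */

        meta = (void *)(long)ctx->data_meta;
        /* The verifier demands this bounds check of data_meta against data. */
        if ((void *)(meta + 1) > (void *)(long)ctx->data)
            return XDP_PASS;

        *meta = 0x42;           /* arbitrary example value for a tc consumer */
        return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";

A tc/BPF classifier attached later can then read the same bytes back through __sk_buff's data_meta range, which the filter.c hunks above wire up.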
--- .../net/ethernet/netronome/nfp/nfp_net_common.c | 40 ++++++++-------------- 1 file changed, 15 insertions(+), 25 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index e3a38be3600a..d2f73feb8497 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1574,27 +1574,6 @@ nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring, return true; } -static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, void *hard_start, - unsigned int *off, unsigned int *len) -{ - struct xdp_buff xdp; - void *orig_data; - int ret; - - xdp.data_hard_start = hard_start; - xdp.data = data + *off; - xdp_set_data_meta_invalid(&xdp); - xdp.data_end = data + *off + *len; - - orig_data = xdp.data; - ret = bpf_prog_run_xdp(prog, &xdp); - - *len -= xdp.data - orig_data; - *off += xdp.data - orig_data; - - return ret; -} - /** * nfp_net_rx() - receive up to @budget packets on @rx_ring * @rx_ring: RX ring to receive from @@ -1630,6 +1609,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) struct nfp_meta_parsed meta; struct net_device *netdev; dma_addr_t new_dma_addr; + u32 meta_len_xdp = 0; void *new_frag; idx = D_IDX(rx_ring, rx_ring->rd_p); @@ -1708,16 +1688,24 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF && dp->bpf_offload_xdp) && !meta.portid) { + void *orig_data = rxbuf->frag + pkt_off; unsigned int dma_off; - void *hard_start; + struct xdp_buff xdp; int act; - hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM; + xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM; + xdp.data = orig_data; + xdp.data_meta = orig_data; + xdp.data_end = orig_data + pkt_len; + + act = bpf_prog_run_xdp(xdp_prog, &xdp); + + pkt_len -= xdp.data - orig_data; + pkt_off += xdp.data - orig_data; - act = nfp_net_run_xdp(xdp_prog, rxbuf->frag, hard_start, - &pkt_off, &pkt_len); switch (act) { case XDP_PASS: + meta_len_xdp = xdp.data - xdp.data_meta; break; case XDP_TX: dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM; @@ -1785,6 +1773,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget) if (rxd->rxd.flags & PCIE_DESC_RX_VLAN) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), le16_to_cpu(rxd->rxd.vlan)); + if (meta_len_xdp) + skb_metadata_set(skb, meta_len_xdp); napi_gro_receive(&rx_ring->r_vec->napi, skb); } -- cgit v1.2.3 From 366a88fe2f40d6772985ec78cdd34df7f109bb88 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 25 Sep 2017 02:25:55 +0200 Subject: bpf, ixgbe: add meta data support Implement support for transferring XDP meta data into skb for ixgbe driver; before calling into the program, xdp.data_meta points to xdp.data, and on program return with a pass verdict, we call skb_metadata_set(). We implement this for the default ixgbe_build_skb() variant. For the ixgbe_construct_skb() that is used when legacy-rx buffer management mode is turned on via ethtool, I found that XDP gets 0 headroom, so neither xdp_adjust_head() nor xdp_adjust_meta() can be used with this. Just add a comment with explanation for this operating mode. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Acked-by: John Fastabend Signed-off-by: David S. Miller
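Because drivers such as ixgbe in legacy-rx mode expose zero headroom, and the helper shown in the earlier filter.c hunk additionally rejects meta areas that are not a multiple of 4 bytes or exceed 32 bytes, a program cannot assume the reservation succeeds. Extending the previous sketch, a defensive pattern might look like the following (illustrative only; the helper name reserve_meta is made up, and callers are expected to pass a compile-time constant size):

    /* Sketch only: returns NULL when meta data cannot be reserved. */
    static __always_inline void *reserve_meta(struct xdp_md *ctx, const int bytes)
    {
        void *meta;

        if (bytes <= 0 || bytes > 32 || (bytes & 3))
            return NULL;    /* bpf_xdp_adjust_meta() would return -EACCES */
        if (bpf_xdp_adjust_meta(ctx, -bytes))
            return NULL;    /* -ENOTSUPP (driver) or -EINVAL (no headroom) */

        meta = (void *)(long)ctx->data_meta;
        if (meta + bytes > (void *)(long)ctx->data)
            return NULL;    /* bounds check to satisfy the verifier */
        return meta;
    }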
--- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 30 +++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 04bb03bda1cd..3942c6208745 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2133,6 +2133,21 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring, #if L1_CACHE_BYTES < 128 prefetch(xdp->data + L1_CACHE_BYTES); #endif + /* Note, we get here by enabling legacy-rx via: + * + * ethtool --set-priv-flags legacy-rx on + * + * In this mode, we currently get 0 extra XDP headroom as + * opposed to having legacy-rx off, where we process XDP + * packets going to stack via ixgbe_build_skb(). The latter + * provides us currently with 192 bytes of headroom. + * + * For ixgbe_construct_skb() mode it means that the + * xdp->data_meta will always point to xdp->data, since + * the helper cannot expand the head. Should this ever + * change in future for legacy-rx mode on, then let's also + * add xdp->data_meta handling here. + */ /* allocate a skb to store the frags */ skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE); @@ -2165,6 +2180,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, struct xdp_buff *xdp, union ixgbe_adv_rx_desc *rx_desc) { + unsigned int metasize = xdp->data - xdp->data_meta; #if (PAGE_SIZE < 8192) unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; #else @@ -2174,10 +2190,14 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, #endif struct sk_buff *skb; - /* prefetch first cache line of first page */ - prefetch(xdp->data); + /* Prefetch first cache line of first page. If xdp->data_meta + * is unused, this points exactly at xdp->data, otherwise we + * likely have a consumer accessing first few bytes of meta + * data, and then actual data. + */ + prefetch(xdp->data_meta); #if L1_CACHE_BYTES < 128 - prefetch(xdp->data + L1_CACHE_BYTES); + prefetch(xdp->data_meta + L1_CACHE_BYTES); #endif /* build an skb around the page buffer */ @@ -2188,6 +2208,8 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, /* update pointers within the skb to store the data */ skb_reserve(skb, xdp->data - xdp->data_hard_start); __skb_put(skb, xdp->data_end - xdp->data); + if (metasize) + skb_metadata_set(skb, metasize); /* record DMA address if this is the start of a chain of buffers */ if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) @@ -2326,7 +2348,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, if (!skb) { xdp.data = page_address(rx_buffer->page) + rx_buffer->page_offset; - xdp_set_data_meta_invalid(&xdp); + xdp.data_meta = xdp.data; xdp.data_hard_start = xdp.data - ixgbe_rx_offset(rx_ring); xdp.data_end = xdp.data + size; -- cgit v1.2.3 From be2336ebfd7a1aec597a26f086fc4235ab87dd2c Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Mon, 25 Sep 2017 10:32:21 +0200 Subject: mlxsw: spectrum_dpipe: Fix indentation in header description Fix indentation in mlxsw_meta header's description. Signed-off-by: Arkadi Sharshevsky Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller
--- .../net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 23 ++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c index 51e6846da72b..91648094ab4c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c @@ -46,18 +46,21 @@ enum mlxsw_sp_field_metadata_id { }; static struct devlink_dpipe_field mlxsw_sp_dpipe_fields_metadata[] = { - { .name = "erif_port", - .id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT, - .bitwidth = 32, - .mapping_type = DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX, + { + .name = "erif_port", + .id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT, + .bitwidth = 32, + .mapping_type = DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX, }, - { .name = "l3_forward", - .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD, - .bitwidth = 1, + { + .name = "l3_forward", + .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD, + .bitwidth = 1, }, - { .name = "l3_drop", - .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP, - .bitwidth = 1, + { + .name = "l3_drop", + .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP, + .bitwidth = 1, }, }; -- cgit v1.2.3 From c0859d697c258f7c864e81bc1f83d1c274e7cf4c Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Mon, 25 Sep 2017 10:32:22 +0200 Subject: mlxsw: Add fields for mlxsw's meta header for adjacency table This patch adds fields for mlxsw's meta header, which will be used to describe the match/action behavior of the adjacency table. The fields are: 1. Adj_index - The global index of the nexthop group in the adjacency table. 2. Adj_hash_index - Local index offset which is based on the packet's hash mod the nexthop group size. Signed-off-by: Arkadi Sharshevsky Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c index 91648094ab4c..9253273a5c03 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c @@ -43,6 +43,8 @@ enum mlxsw_sp_field_metadata_id { MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT, MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD, MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP, + MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_INDEX, + MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_HASH_INDEX, }; static struct devlink_dpipe_field mlxsw_sp_dpipe_fields_metadata[] = { @@ -62,6 +64,16 @@ static struct devlink_dpipe_field mlxsw_sp_dpipe_fields_metadata[] = { .id = MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP, .bitwidth = 1, }, + { + .name = "adj_index", + .id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_INDEX, + .bitwidth = 32, + }, + { + .name = "adj_hash_index", + .id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_HASH_INDEX, + .bitwidth = 32, + }, }; enum mlxsw_sp_dpipe_header_id { -- cgit v1.2.3 From dbe4598c1e929a24dc352a7dc523a3cc22a093f2 Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Mon, 25 Sep 2017 10:32:23 +0200 Subject: mlxsw: spectrum_router: Keep nexthops in a linked list Keep nexthops in a linked list for easy access. Signed-off-by: Arkadi Sharshevsky Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller
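The diff below threads a list_head through each nexthop: entries are linked at init time, unlinked at fini time, and a cursor-style iterator can then walk them without touching the rhashtable. In outline, the pattern looks like this kernel-style sketch (names are made up; it needs <linux/list.h> and is not the driver's actual private struct):

    /* Sketch of the intrusive-list pattern introduced below. */
    struct example_router {
        struct list_head nexthop_list;      /* INIT_LIST_HEAD() at router init */
    };

    struct example_nexthop {
        struct list_head router_list_node;  /* member of router->nexthop_list */
    };

    /* init: list_add_tail(&nh->router_list_node, &router->nexthop_list);
     * fini: list_del(&nh->router_list_node);
     */
    static struct example_nexthop *
    example_nexthop_next(struct example_router *router, struct example_nexthop *nh)
    {
        if (!nh)    /* passing NULL starts the walk */
            return list_first_entry_or_null(&router->nexthop_list,
                                            struct example_nexthop,
                                            router_list_node);
        if (list_is_last(&nh->router_list_node, &router->nexthop_list))
            return NULL;
        return list_next_entry(nh, router_list_node);
    }

The nexthop access helpers added two patches later in this series follow the same shape.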
--- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 0bd93dc88ffa..0cd4b2a7d9d0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -78,6 +78,7 @@ struct mlxsw_sp_router { struct rhashtable neigh_ht; struct rhashtable nexthop_group_ht; struct rhashtable nexthop_ht; + struct list_head nexthop_list; struct { struct mlxsw_sp_lpm_tree *trees; unsigned int tree_count; @@ -2028,6 +2029,7 @@ struct mlxsw_sp_nexthop_key { struct mlxsw_sp_nexthop { struct list_head neigh_list_node; /* member of neigh entry list */ struct list_head rif_list_node; + struct list_head router_list_node; struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group * this belongs to */ @@ -2784,6 +2786,8 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp, if (err) return err; + list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list); + if (!dev) return 0; @@ -2807,6 +2811,7 @@ static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh) { mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh); + list_del(&nh->router_list_node); mlxsw_sp_nexthop_remove(mlxsw_sp, nh); } @@ -4045,6 +4050,8 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp, nh->nh_grp = nh_grp; memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr)); + list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list); + if (!dev) return 0; nh->ifindex = dev->ifindex; @@ -4056,6 +4063,7 @@ static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh) { mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh); + list_del(&nh->router_list_node); } static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp, @@ -5990,6 +5998,7 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) if (err) goto err_nexthop_group_ht_init; + INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list); err = mlxsw_sp_lpm_init(mlxsw_sp); if (err) goto err_lpm_init; -- cgit v1.2.3 From ec2437f42b44edc84054feb943d49e8030154c38 Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Mon, 25 Sep 2017 10:32:24 +0200 Subject: mlxsw: spectrum_router: Use helper to check for last neighbor Use list_is_last helper to check for last neighbor. Signed-off-by: Arkadi Sharshevsky Signed-off-by: Jiri Pirko Signed-off-by: David S.
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 0cd4b2a7d9d0..65e59a989084 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1317,7 +1317,7 @@ mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif, typeof(*neigh_entry), rif_list_node); } - if (neigh_entry->rif_list_node.next == &rif->neigh_list) + if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list)) return NULL; return list_next_entry(neigh_entry, rif_list_node); } -- cgit v1.2.3 From c556cd28930661f337d7989fe74ac31871fd3888 Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Mon, 25 Sep 2017 10:32:25 +0200 Subject: mlxsw: spectrum_router: Add helpers for nexthop access This is done as a preparation before introducing the ability to dump the adjacency table via dpipe, and to count the table size. The current table implementation avoids tunnel entries, thus a helper for checking if the nexthop group contains tunnel entries is also provided. The mlxsw's nexthop representative struct stays private to the router module. Signed-off-by: Arkadi Sharshevsky Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 71 ++++++++++++++++++++++ .../net/ethernet/mellanox/mlxsw/spectrum_router.h | 12 ++++ 2 files changed, 83 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 65e59a989084..c062b4f666e3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2068,6 +2068,77 @@ struct mlxsw_sp_nexthop_group { #define nh_rif nexthops[0].rif }; +struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router, + struct mlxsw_sp_nexthop *nh) +{ + if (!nh) { + if (list_empty(&router->nexthop_list)) + return NULL; + else + return list_first_entry(&router->nexthop_list, + typeof(*nh), router_list_node); + } + if (list_is_last(&nh->router_list_node, &router->nexthop_list)) + return NULL; + return list_next_entry(nh, router_list_node); +} + +bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh) +{ + return nh->offloaded; +} + +unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh) +{ + if (!nh->offloaded) + return NULL; + return nh->neigh_entry->ha; +} + +int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index, + u32 *p_adj_hash_index) +{ + struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp; + u32 adj_hash_index = 0; + int i; + + if (!nh->offloaded || !nh_grp->adj_index_valid) + return -EINVAL; + + *p_adj_index = nh_grp->adj_index; + + for (i = 0; i < nh_grp->count; i++) { + struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i]; + + if (nh_iter == nh) + break; + if (nh_iter->offloaded) + adj_hash_index++; + } + + *p_adj_hash_index = adj_hash_index; + return 0; +} + +struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh) +{ + return nh->rif; +} + +bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh) +{ + struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp; + int i; + + for (i = 0; i < nh_grp->count; i++) { + struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i]; + + if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP) + 
return true; + } + return false; +} + static struct fib_info * mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index ae4c99b3f2fc..d6951d516cf4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -62,6 +62,7 @@ enum mlxsw_sp_rif_counter_dir { }; struct mlxsw_sp_neigh_entry; +struct mlxsw_sp_nexthop; struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp, u16 rif_index); @@ -108,5 +109,16 @@ union mlxsw_sp_l3addr mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto, const struct net_device *ol_dev); __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev); +struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router, + struct mlxsw_sp_nexthop *nh); +bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh); +unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh); +int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index, + u32 *p_adj_hash_index); +struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh); +bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh); +#define mlxsw_sp_nexthop_for_each(nh, router) \ + for (nh = mlxsw_sp_nexthop_next(router, NULL); nh; \ + nh = mlxsw_sp_nexthop_next(router, nh)) #endif /* _MLXSW_ROUTER_H_*/ -- cgit v1.2.3 From c538adb3c6e77e0f6563b71923a81de182b5132c Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Mon, 25 Sep 2017 10:32:26 +0200 Subject: mlxsw: spectrum_dpipe: Add initial support for the router adjacency table Add initial support for router adjacency table. The table does lookup based on the nexthop-group index and the local nexthop offset. After locating the nexthop entry it sets the destination MAC address and the egress RIF. Signed-off-by: Arkadi Sharshevsky Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 100 ++++++++++++++++++++- .../net/ethernet/mellanox/mlxsw/spectrum_dpipe.h | 1 + 2 files changed, 100 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c index 9253273a5c03..ca16f8924c0a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c @@ -841,6 +841,97 @@ static void mlxsw_sp_dpipe_host6_table_fini(struct mlxsw_sp *mlxsw_sp) MLXSW_SP_DPIPE_TABLE_NAME_HOST6); } +static int mlxsw_sp_dpipe_table_adj_matches_dump(void *priv, + struct sk_buff *skb) +{ + struct devlink_dpipe_match match = {0}; + int err; + + match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT; + match.header = &mlxsw_sp_dpipe_header_metadata; + match.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_INDEX; + + err = devlink_dpipe_match_put(skb, &match); + if (err) + return err; + + match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT; + match.header = &mlxsw_sp_dpipe_header_metadata; + match.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_HASH_INDEX; + + return devlink_dpipe_match_put(skb, &match); +} + +static int mlxsw_sp_dpipe_table_adj_actions_dump(void *priv, + struct sk_buff *skb) +{ + struct devlink_dpipe_action action = {0}; + int err; + + action.type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY; + action.header = &devlink_dpipe_header_ethernet; + action.field_id = DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC; + + err = devlink_dpipe_action_put(skb, &action); + if (err) + return err; + + action.type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY; + action.header = &mlxsw_sp_dpipe_header_metadata; + action.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT; + + return devlink_dpipe_action_put(skb, &action); +} + +static u64 mlxsw_sp_dpipe_table_adj_size(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_nexthop *nh; + u64 size = 0; + + mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) + if (mlxsw_sp_nexthop_offload(nh) && + !mlxsw_sp_nexthop_group_has_ipip(nh)) + size++; + return size; +} + +static u64 +mlxsw_sp_dpipe_table_adj_size_get(void *priv) +{ + struct mlxsw_sp *mlxsw_sp = priv; + u64 size; + + rtnl_lock(); + size = mlxsw_sp_dpipe_table_adj_size(mlxsw_sp); + rtnl_unlock(); + + return size; +} + +static struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = { + .matches_dump = mlxsw_sp_dpipe_table_adj_matches_dump, + .actions_dump = mlxsw_sp_dpipe_table_adj_actions_dump, + .size_get = mlxsw_sp_dpipe_table_adj_size_get, +}; + +static int mlxsw_sp_dpipe_adj_table_init(struct mlxsw_sp *mlxsw_sp) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + + return devlink_dpipe_table_register(devlink, + MLXSW_SP_DPIPE_TABLE_NAME_ADJ, + &mlxsw_sp_dpipe_table_adj_ops, + mlxsw_sp, false); +} + +static void mlxsw_sp_dpipe_adj_table_fini(struct mlxsw_sp *mlxsw_sp) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + + devlink_dpipe_table_unregister(devlink, + MLXSW_SP_DPIPE_TABLE_NAME_ADJ); +} + int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp) { struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); @@ -861,8 +952,14 @@ int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp) err = mlxsw_sp_dpipe_host6_table_init(mlxsw_sp); if (err) goto err_host6_table_init; - return 0; + err = mlxsw_sp_dpipe_adj_table_init(mlxsw_sp); + if (err) + goto err_adj_table_init; + + return 0; +err_adj_table_init: + mlxsw_sp_dpipe_host6_table_fini(mlxsw_sp); err_host6_table_init: 
mlxsw_sp_dpipe_host4_table_fini(mlxsw_sp); err_host4_table_init: @@ -876,6 +973,7 @@ void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp) { struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); + mlxsw_sp_dpipe_adj_table_fini(mlxsw_sp); mlxsw_sp_dpipe_host6_table_fini(mlxsw_sp); mlxsw_sp_dpipe_host4_table_fini(mlxsw_sp); mlxsw_sp_dpipe_erif_table_fini(mlxsw_sp); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h index 283fde4e6783..815d543cf114 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h @@ -56,5 +56,6 @@ static inline void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp) #define MLXSW_SP_DPIPE_TABLE_NAME_ERIF "mlxsw_erif" #define MLXSW_SP_DPIPE_TABLE_NAME_HOST4 "mlxsw_host4" #define MLXSW_SP_DPIPE_TABLE_NAME_HOST6 "mlxsw_host6" +#define MLXSW_SP_DPIPE_TABLE_NAME_ADJ "mlxsw_adj" #endif /* _MLXSW_PIPELINE_H_*/ -- cgit v1.2.3 From f4de25fb530c936af7c3d9a158a7dde86adb2848 Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Mon, 25 Sep 2017 10:32:27 +0200 Subject: mlxsw: reg: Add support for counters on RATR In order to add the ability for setting counters on nexthops the RATR register should be extended. Signed-off-by: Arkadi Sharshevsky Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 44 ++++++++++++++++++++++++------- 1 file changed, 35 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 17eba19100de..d44e673a4c4e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4549,6 +4549,27 @@ MLXSW_ITEM32(reg, ratr, ipip_ipv4_udip, 0x18, 0, 32); */ MLXSW_ITEM32(reg, ratr, ipip_ipv6_ptr, 0x1C, 0, 24); +enum mlxsw_reg_flow_counter_set_type { + /* No count */ + MLXSW_REG_FLOW_COUNTER_SET_TYPE_NO_COUNT = 0x00, + /* Count packets and bytes */ + MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03, + /* Count only packets */ + MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS = 0x05, +}; + +/* reg_ratr_counter_set_type + * Counter set type for flow counters + * Access: RW + */ +MLXSW_ITEM32(reg, ratr, counter_set_type, 0x28, 24, 8); + +/* reg_ratr_counter_index + * Counter index for flow counters + * Access: RW + */ +MLXSW_ITEM32(reg, ratr, counter_index, 0x28, 0, 24); + static inline void mlxsw_reg_ratr_pack(char *payload, enum mlxsw_reg_ratr_op op, bool valid, @@ -4576,6 +4597,20 @@ static inline void mlxsw_reg_ratr_ipip4_entry_pack(char *payload, u32 ipv4_udip) mlxsw_reg_ratr_ipip_ipv4_udip_set(payload, ipv4_udip); } +static inline void mlxsw_reg_ratr_counter_pack(char *payload, u64 counter_index, + bool counter_enable) +{ + enum mlxsw_reg_flow_counter_set_type set_type; + + if (counter_enable) + set_type = MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES; + else + set_type = MLXSW_REG_FLOW_COUNTER_SET_TYPE_NO_COUNT; + + mlxsw_reg_ratr_counter_index_set(payload, counter_index); + mlxsw_reg_ratr_counter_set_type_set(payload, set_type); +} + /* RICNT - Router Interface Counter Register * ----------------------------------------- * The RICNT register retrieves per port performance counters @@ -5297,15 +5332,6 @@ enum mlxsw_reg_rauht_trap_id { */ MLXSW_ITEM32(reg, rauht, trap_id, 0x60, 0, 9); -enum mlxsw_reg_flow_counter_set_type { - /* No count */ - MLXSW_REG_FLOW_COUNTER_SET_TYPE_NO_COUNT = 0x00, - /* Count packets and bytes */ - 
MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES = 0x03, - /* Count only packets */ - MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS = 0x05, -}; - /* reg_rauht_counter_set_type * Counter set type for flow counters * Access: RW -- cgit v1.2.3 From a5390278a5eb573b76d2d28ce576b6b62c2200be Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Mon, 25 Sep 2017 10:32:28 +0200 Subject: mlxsw: spectrum: Add support for setting counters on nexthops Add support for setting counters on nexthops based on dpipe's adjacency table counter status. This patch also adds the ability for getting the counter value, which will be used by the dpipe adjacency table dump implementation in the next patches. Signed-off-by: Arkadi Sharshevsky Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 52 ++++++++++++++++++++-- .../net/ethernet/mellanox/mlxsw/spectrum_router.h | 2 + 2 files changed, 51 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index c062b4f666e3..a75064a8ba80 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2052,6 +2052,8 @@ struct mlxsw_sp_nexthop { struct mlxsw_sp_neigh_entry *neigh_entry; struct mlxsw_sp_ipip_entry *ipip_entry; }; + unsigned int counter_index; + bool counter_valid; }; struct mlxsw_sp_nexthop_group { @@ -2068,6 +2070,41 @@ struct mlxsw_sp_nexthop_group { #define nh_rif nexthops[0].rif }; +static void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) +{ + struct devlink *devlink; + + devlink = priv_to_devlink(mlxsw_sp->core); + if (!devlink_dpipe_table_counter_enabled(devlink, + MLXSW_SP_DPIPE_TABLE_NAME_ADJ)) + return; + + if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index)) + return; + + nh->counter_valid = true; +} + +static void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) +{ + if (!nh->counter_valid) + return; + mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index); + nh->counter_valid = false; +} + +int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh, u64 *p_counter) +{ + if (!nh->counter_valid) + return -EINVAL; + + return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index, + p_counter, NULL); +} + struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router, struct mlxsw_sp_nexthop *nh) { @@ -2396,8 +2433,8 @@ static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp, return 0; } -static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, - struct mlxsw_sp_nexthop *nh) +static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, + struct mlxsw_sp_nexthop *nh) { struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry; char ratr_pl[MLXSW_REG_RATR_LEN]; @@ -2406,6 +2443,11 @@ static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, true, MLXSW_REG_RATR_TYPE_ETHERNET, adj_index, neigh_entry->rif); mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha); + if (nh->counter_valid) + mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true); + else + mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); } @@ -2440,7 +2482,7 @@ mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp, if (nh->update || reallocate) { switch 
(nh->type) { case MLXSW_SP_NEXTHOP_TYPE_ETH: - err = mlxsw_sp_nexthop_mac_update + err = mlxsw_sp_nexthop_update (mlxsw_sp, adj_index, nh); break; case MLXSW_SP_NEXTHOP_TYPE_IPIP: @@ -2857,6 +2899,7 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp, if (err) return err; + mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh); list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list); if (!dev) @@ -2883,6 +2926,7 @@ static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp, { mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh); list_del(&nh->router_list_node); + mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh); mlxsw_sp_nexthop_remove(mlxsw_sp, nh); } @@ -4120,6 +4164,7 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp, nh->nh_grp = nh_grp; memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr)); + mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh); list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list); @@ -4135,6 +4180,7 @@ static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp, { mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh); list_del(&nh->router_list_node); + mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh); } static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index d6951d516cf4..a6e86590939f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -120,5 +120,7 @@ bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh); #define mlxsw_sp_nexthop_for_each(nh, router) \ for (nh = mlxsw_sp_nexthop_next(router, NULL); nh; \ nh = mlxsw_sp_nexthop_next(router, nh)) +int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh, u64 *p_counter); #endif /* _MLXSW_ROUTER_H_*/ -- cgit v1.2.3 From 190d38a52a73ef8ac05c1931dda730e0f9b79095 Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Mon, 25 Sep 2017 10:32:29 +0200 Subject: mlxsw: spectrum_dpipe: Add support for adjacency table dump Add support for adjacency table dump. Signed-off-by: Arkadi Sharshevsky Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 238 +++++++++++++++++++++ 1 file changed, 238 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c index ca16f8924c0a..e6755a96b269 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c @@ -895,6 +895,243 @@ static u64 mlxsw_sp_dpipe_table_adj_size(struct mlxsw_sp *mlxsw_sp) return size; } +enum mlxsw_sp_dpipe_table_adj_match { + MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_INDEX, + MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_HASH_INDEX, + MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT, +}; + +enum mlxsw_sp_dpipe_table_adj_action { + MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_DST_MAC, + MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_ERIF_PORT, + MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT, +}; + +static void +mlxsw_sp_dpipe_table_adj_match_action_prepare(struct devlink_dpipe_match *matches, + struct devlink_dpipe_action *actions) +{ + struct devlink_dpipe_action *action; + struct devlink_dpipe_match *match; + + match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_INDEX]; + match->type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT; + match->header = &mlxsw_sp_dpipe_header_metadata; + match->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_INDEX; + + match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_HASH_INDEX]; + match->type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT; + match->header = &mlxsw_sp_dpipe_header_metadata; + match->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_HASH_INDEX; + + action = &actions[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_DST_MAC]; + action->type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY; + action->header = &devlink_dpipe_header_ethernet; + action->field_id = DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC; + + action = &actions[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_ERIF_PORT]; + action->type = DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY; + action->header = &mlxsw_sp_dpipe_header_metadata; + action->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ERIF_PORT; +} + +static int +mlxsw_sp_dpipe_table_adj_entry_prepare(struct devlink_dpipe_entry *entry, + struct devlink_dpipe_value *match_values, + struct devlink_dpipe_match *matches, + struct devlink_dpipe_value *action_values, + struct devlink_dpipe_action *actions) +{ struct devlink_dpipe_value *action_value; + struct devlink_dpipe_value *match_value; + struct devlink_dpipe_action *action; + struct devlink_dpipe_match *match; + + entry->match_values = match_values; + entry->match_values_count = MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT; + + entry->action_values = action_values; + entry->action_values_count = MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT; + + match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_INDEX]; + match_value = &match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_INDEX]; + + match_value->match = match; + match_value->value_size = sizeof(u32); + match_value->value = kmalloc(match_value->value_size, GFP_KERNEL); + if (!match_value->value) + return -ENOMEM; + + match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_HASH_INDEX]; + match_value = &match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_HASH_INDEX]; + + match_value->match = match; + match_value->value_size = sizeof(u32); + match_value->value = kmalloc(match_value->value_size, GFP_KERNEL); + if (!match_value->value) + return -ENOMEM; + + action = &actions[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_DST_MAC]; + action_value = &action_values[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_DST_MAC]; + + action_value->action = action; + action_value->value_size = sizeof(u64); + 
action_value->value = kmalloc(action_value->value_size, GFP_KERNEL); + if (!action_value->value) + return -ENOMEM; + + action = &actions[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_ERIF_PORT]; + action_value = &action_values[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_ERIF_PORT]; + + action_value->action = action; + action_value->value_size = sizeof(u32); + action_value->value = kmalloc(action_value->value_size, GFP_KERNEL); + if (!action_value->value) + return -ENOMEM; + + return 0; +} + +static void +__mlxsw_sp_dpipe_table_adj_entry_fill(struct devlink_dpipe_entry *entry, + u32 adj_index, u32 adj_hash_index, + unsigned char *ha, + struct mlxsw_sp_rif *rif) +{ + struct devlink_dpipe_value *value; + u32 *p_rif_value; + u32 *p_index; + + value = &entry->match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_INDEX]; + p_index = value->value; + *p_index = adj_index; + + value = &entry->match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_HASH_INDEX]; + p_index = value->value; + *p_index = adj_hash_index; + + value = &entry->action_values[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_DST_MAC]; + ether_addr_copy(value->value, ha); + + value = &entry->action_values[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_ERIF_PORT]; + p_rif_value = value->value; + *p_rif_value = mlxsw_sp_rif_index(rif); + value->mapping_value = mlxsw_sp_rif_dev_ifindex(rif); + value->mapping_valid = true; +} + +static void mlxsw_sp_dpipe_table_adj_entry_fill(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh, + struct devlink_dpipe_entry *entry) +{ + struct mlxsw_sp_rif *rif = mlxsw_sp_nexthop_rif(nh); + unsigned char *ha = mlxsw_sp_nexthop_ha(nh); + u32 adj_hash_index = 0; + u32 adj_index = 0; + int err; + + mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_hash_index); + __mlxsw_sp_dpipe_table_adj_entry_fill(entry, adj_index, + adj_hash_index, ha, rif); + err = mlxsw_sp_nexthop_counter_get(mlxsw_sp, nh, &entry->counter); + if (!err) + entry->counter_valid = true; +} + +static int +mlxsw_sp_dpipe_table_adj_entries_get(struct mlxsw_sp *mlxsw_sp, + struct devlink_dpipe_entry *entry, + bool counters_enabled, + struct devlink_dpipe_dump_ctx *dump_ctx) +{ + struct mlxsw_sp_nexthop *nh; + int entry_index = 0; + int nh_count_max; + int nh_count = 0; + int nh_skip; + int j; + int err; + + rtnl_lock(); + nh_count_max = mlxsw_sp_dpipe_table_adj_size(mlxsw_sp); +start_again: + err = devlink_dpipe_entry_ctx_prepare(dump_ctx); + if (err) + goto err_ctx_prepare; + j = 0; + nh_skip = nh_count; + mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) { + if (!mlxsw_sp_nexthop_offload(nh) || + mlxsw_sp_nexthop_group_has_ipip(nh)) + continue; + + if (nh_count < nh_skip) + goto skip; + + mlxsw_sp_dpipe_table_adj_entry_fill(mlxsw_sp, nh, entry); + entry->index = entry_index; + err = devlink_dpipe_entry_ctx_append(dump_ctx, entry); + if (err) { + if (err == -EMSGSIZE) { + if (!j) + goto err_entry_append; + break; + } + goto err_entry_append; + } + entry_index++; + j++; +skip: + nh_count++; + } + + devlink_dpipe_entry_ctx_close(dump_ctx); + if (nh_count != nh_count_max) + goto start_again; + rtnl_unlock(); + + return 0; + +err_ctx_prepare: +err_entry_append: + rtnl_unlock(); + return err; +} + +static int +mlxsw_sp_dpipe_table_adj_entries_dump(void *priv, bool counters_enabled, + struct devlink_dpipe_dump_ctx *dump_ctx) +{ + struct devlink_dpipe_value action_values[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT]; + struct devlink_dpipe_value match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT]; + struct devlink_dpipe_action actions[MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT]; + struct devlink_dpipe_match 
matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT]; + struct devlink_dpipe_entry entry = {0}; + struct mlxsw_sp *mlxsw_sp = priv; + int err; + + memset(matches, 0, MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT * + sizeof(matches[0])); + memset(match_values, 0, MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT * + sizeof(match_values[0])); + memset(actions, 0, MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT * + sizeof(actions[0])); + memset(action_values, 0, MLXSW_SP_DPIPE_TABLE_ADJ_ACTION_COUNT * + sizeof(action_values[0])); + + mlxsw_sp_dpipe_table_adj_match_action_prepare(matches, actions); + err = mlxsw_sp_dpipe_table_adj_entry_prepare(&entry, + match_values, matches, + action_values, actions); + if (err) + goto out; + + err = mlxsw_sp_dpipe_table_adj_entries_get(mlxsw_sp, &entry, + counters_enabled, dump_ctx); +out: + devlink_dpipe_entry_clear(&entry); + return err; +} + static u64 mlxsw_sp_dpipe_table_adj_size_get(void *priv) { @@ -911,6 +1148,7 @@ mlxsw_sp_dpipe_table_adj_size_get(void *priv) static struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = { .matches_dump = mlxsw_sp_dpipe_table_adj_matches_dump, .actions_dump = mlxsw_sp_dpipe_table_adj_actions_dump, + .entries_dump = mlxsw_sp_dpipe_table_adj_entries_dump, .size_get = mlxsw_sp_dpipe_table_adj_size_get, }; -- cgit v1.2.3 From 427e652aa34d90960f729c0b902c3c4a8a821b2e Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Mon, 25 Sep 2017 10:32:30 +0200 Subject: mlxsw: spectrum_dpipe: Add support for controlling nexthop counters Add support for controlling nexthop counters via dpipe. Signed-off-by: Arkadi Sharshevsky Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 24 ++++++++++++++++++++++ .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 12 +++++------ .../net/ethernet/mellanox/mlxsw/spectrum_router.h | 6 ++++++ 3 files changed, 36 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c index e6755a96b269..a056f23d3a0e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c @@ -1132,6 +1132,29 @@ out: return err; } +static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable) +{ + struct mlxsw_sp *mlxsw_sp = priv; + struct mlxsw_sp_nexthop *nh; + u32 adj_hash_index = 0; + u32 adj_index = 0; + + mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) { + if (!mlxsw_sp_nexthop_offload(nh) || + mlxsw_sp_nexthop_group_has_ipip(nh)) + continue; + + mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_hash_index); + if (enable) + mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh); + else + mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh); + mlxsw_sp_nexthop_update(mlxsw_sp, + adj_index + adj_hash_index, nh); + } + return 0; +} + static u64 mlxsw_sp_dpipe_table_adj_size_get(void *priv) { @@ -1149,6 +1172,7 @@ static struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = { .matches_dump = mlxsw_sp_dpipe_table_adj_matches_dump, .actions_dump = mlxsw_sp_dpipe_table_adj_actions_dump, .entries_dump = mlxsw_sp_dpipe_table_adj_entries_dump, + .counters_set_update = mlxsw_sp_dpipe_table_adj_counters_update, .size_get = mlxsw_sp_dpipe_table_adj_size_get, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index a75064a8ba80..321f7356073c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ 
b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2070,8 +2070,8 @@ struct mlxsw_sp_nexthop_group { #define nh_rif nexthops[0].rif }; -static void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop *nh) +void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) { struct devlink *devlink; @@ -2086,8 +2086,8 @@ static void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp, nh->counter_valid = true; } -static void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop *nh) +void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) { if (!nh->counter_valid) return; @@ -2433,8 +2433,8 @@ static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp, return 0; } -static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, - struct mlxsw_sp_nexthop *nh) +int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, + struct mlxsw_sp_nexthop *nh) { struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry; char ratr_pl[MLXSW_REG_RATR_LEN]; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index a6e86590939f..3d449180b035 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -122,5 +122,11 @@ bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh); nh = mlxsw_sp_nexthop_next(router, nh)) int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh, u64 *p_counter); +int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, + struct mlxsw_sp_nexthop *nh); +void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh); +void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh); #endif /* _MLXSW_ROUTER_H_*/ -- cgit v1.2.3 From 2a52a8c6e594cdc562f503492ba89ac7bc0c4074 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 25 Sep 2017 10:58:20 +0200 Subject: mlxsw: spectrum_acl: Propagate errors from mlxsw_afa_block_jump/continue Propagate error instead of doing WARN_ON right away. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 14 ++++++++------ .../net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 4 ++-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 6 +++--- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 10 +++++----- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 6 +++++- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 4 +++- 6 files changed, 26 insertions(+), 18 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index ab3ffe7a8eda..bc55d0e76705 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -399,23 +399,25 @@ u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block) } EXPORT_SYMBOL(mlxsw_afa_block_first_set_kvdl_index); -void mlxsw_afa_block_continue(struct mlxsw_afa_block *block) +int mlxsw_afa_block_continue(struct mlxsw_afa_block *block) { - if (WARN_ON(block->finished)) - return; + if (block->finished) + return -EINVAL; mlxsw_afa_set_goto_set(block->cur_set, MLXSW_AFA_SET_GOTO_BINDING_CMD_NONE, 0); block->finished = true; + return 0; } EXPORT_SYMBOL(mlxsw_afa_block_continue); -void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id) +int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id) { - if (WARN_ON(block->finished)) - return; + if (block->finished) + return -EINVAL; mlxsw_afa_set_goto_set(block->cur_set, MLXSW_AFA_SET_GOTO_BINDING_CMD_JUMP, group_id); block->finished = true; + return 0; } EXPORT_SYMBOL(mlxsw_afa_block_jump); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index 501819c790d6..06b0be432b8f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -57,8 +57,8 @@ void mlxsw_afa_block_destroy(struct mlxsw_afa_block *block); int mlxsw_afa_block_commit(struct mlxsw_afa_block *block); char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block); u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block); -void mlxsw_afa_block_continue(struct mlxsw_afa_block *block); -void mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id); +int mlxsw_afa_block_continue(struct mlxsw_afa_block *block); +int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id); int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block); int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id); int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index e907ec446a73..9355d914a4c8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -468,9 +468,9 @@ void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei, enum mlxsw_afk_element element, const char *key_value, const char *mask_value, unsigned int len); -void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei); -void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei, - u16 group_id); +int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei); +int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei, + u16 group_id); int 
mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei); int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei); int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index eede75fbd585..93dcd315f7d6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -378,15 +378,15 @@ void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei, key_value, mask_value, len); } -void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei) +int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei) { - mlxsw_afa_block_continue(rulei->act_block); + return mlxsw_afa_block_continue(rulei->act_block); } -void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei, - u16 group_id) +int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei, + u16 group_id) { - mlxsw_afa_block_jump(rulei->act_block, group_id); + return mlxsw_afa_block_jump(rulei->act_block, group_id); } int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index 50b40de1fb91..7e8284b46968 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -608,7 +608,10 @@ mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp, goto err_rulei_create; } - mlxsw_sp_acl_rulei_act_continue(rulei); + err = mlxsw_sp_acl_rulei_act_continue(rulei); + if (WARN_ON(err)) + goto err_rulei_act_continue; + err = mlxsw_sp_acl_rulei_commit(rulei); if (err) goto err_rulei_commit; @@ -623,6 +626,7 @@ mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp, err_rule_insert: err_rulei_commit: +err_rulei_act_continue: mlxsw_sp_acl_rulei_destroy(rulei); err_rulei_create: parman_item_remove(region->parman, parman_prio, parman_item); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 8aace9a06a5d..f1cedccb58cc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -84,7 +84,9 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, return PTR_ERR(ruleset); group_id = mlxsw_sp_acl_ruleset_group_id(ruleset); - mlxsw_sp_acl_rulei_act_jump(rulei, group_id); + err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id); + if (err) + return err; } else if (is_tcf_mirred_egress_redirect(a)) { int ifindex = tcf_mirred_ifindex(a); struct net_device *out_dev; -- cgit v1.2.3 From b2925957ec1a9349c6ac42fc5ac95bcf0dd4a6a0 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 25 Sep 2017 10:58:22 +0200 Subject: mlxsw: spectrum_flower: Offload "ok" termination action If action is "gact_ok", offload it to HW. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index f1cedccb58cc..2f0e57857ea4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -63,7 +63,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, tcf_exts_to_list(exts, &actions); list_for_each_entry(a, &actions, list) { - if (is_tcf_gact_shot(a)) { + if (is_tcf_gact_ok(a)) { + err = mlxsw_sp_acl_rulei_act_continue(rulei); + if (err) + return err; + } else if (is_tcf_gact_shot(a)) { err = mlxsw_sp_acl_rulei_act_drop(rulei); if (err) return err; -- cgit v1.2.3 From 79ede4ae2d01b0282bfaaf761308a5ac485c8144 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Mon, 25 Sep 2017 12:23:35 +0200 Subject: nfp: add helper to get flower cmsg length Add a helper function that returns the length of the cmsg data when given the cmsg skb Signed-off-by: John Hurley Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 5 +++++ drivers/net/ethernet/netronome/nfp/flower/metadata.c | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index a2ec60344236..7a5ccf0cc7c2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -323,6 +323,11 @@ static inline void *nfp_flower_cmsg_get_data(struct sk_buff *skb) return (unsigned char *)skb->data + NFP_FLOWER_CMSG_HLEN; } +static inline int nfp_flower_cmsg_get_data_len(struct sk_buff *skb) +{ + return skb->len - NFP_FLOWER_CMSG_HLEN; +} + struct sk_buff * nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports); void diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 3226ddc55f99..193520ef23f0 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -140,7 +140,7 @@ exit_rcu_unlock: void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb) { - unsigned int msg_len = skb->len - NFP_FLOWER_CMSG_HLEN; + unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb); struct nfp_fl_stats_frame *stats_frame; unsigned char *msg; int i; -- cgit v1.2.3 From 611aec101ab7c19755e8ea6d480f679aaffed5ad Mon Sep 17 00:00:00 2001 From: John Hurley Date: Mon, 25 Sep 2017 12:23:36 +0200 Subject: nfp: compile flower vxlan tunnel metadata match fields Compile ovs-tc flower vxlan metadata match fields for offloading. Only support offload of tunnel data when the VXLAN port specifically matches well known port 4789. Signed-off-by: John Hurley Signed-off-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 38 ++++++++++++ drivers/net/ethernet/netronome/nfp/flower/main.h | 2 + drivers/net/ethernet/netronome/nfp/flower/match.c | 60 +++++++++++++++++-- .../net/ethernet/netronome/nfp/flower/offload.c | 70 +++++++++++++++++++--- 4 files changed, 158 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 7a5ccf0cc7c2..af9165b3b652 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -83,6 +83,14 @@ #define NFP_FL_PUSH_VLAN_CFI BIT(12) #define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0) +/* Tunnel ports */ +#define NFP_FL_PORT_TYPE_TUN 0x50000000 + +enum nfp_flower_tun_type { + NFP_FL_TUNNEL_NONE = 0, + NFP_FL_TUNNEL_VXLAN = 2, +}; + struct nfp_fl_output { __be16 a_op; __be16 flags; @@ -230,6 +238,36 @@ struct nfp_flower_ipv6 { struct in6_addr ipv6_dst; }; +/* Flow Frame VXLAN --> Tunnel details (4W/16B) + * ----------------------------------------------------------------- + * 3 2 1 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ipv4_addr_src | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ipv4_addr_dst | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | tun_flags | tos | ttl | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | gpe_flags | Reserved | Next Protocol | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | VNI | Reserved | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ +struct nfp_flower_vxlan { + __be32 ip_src; + __be32 ip_dst; + __be16 tun_flags; + u8 tos; + u8 ttl; + u8 gpe_flags; + u8 reserved[2]; + u8 nxt_proto; + __be32 tun_id; +}; + +#define NFP_FL_TUN_VNI_OFFSET 8 + /* The base header for a control message packet. * Defines an 8-bit version, and an 8-bit type, padded * to a 32-bit word. Rest of the packet is type-specific. 
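
(A note on the tun_id layout in the frame diagram above: the 24-bit VXLAN VNI occupies the top three bytes of the 32-bit tun_id word, with the low byte reserved, hence NFP_FL_TUN_VNI_OFFSET of 8. Below is a minimal stand-alone sketch of the packing that nfp_flower_compile_vxlan() performs later in this patch; it is plain C using host-order values purely for illustration, not kernel code, and nfp_pack_vni() is a hypothetical helper name, not a driver function.)

#include <stdio.h>
#include <stdint.h>

#define NFP_FL_TUN_VNI_OFFSET 8 /* low byte of tun_id is reserved */

/* Shift a 24-bit VNI into the upper three bytes of the 32-bit tun_id
 * word. In the driver the result is then converted with cpu_to_be32()
 * so the on-wire layout matches the "VNI | Reserved" split shown in
 * the frame diagram.
 */
static uint32_t nfp_pack_vni(uint32_t vni)
{
	return vni << NFP_FL_TUN_VNI_OFFSET;
}

int main(void)
{
	uint32_t vni = 0x123456; /* example 24-bit VNI */

	/* prints "tun_id word: 0x12345600" */
	printf("tun_id word: 0x%08x\n", nfp_pack_vni(vni));
	return 0;
}
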
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index c20dd00a1cae..cd695eabce02 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -58,6 +58,8 @@ struct nfp_app; #define NFP_FL_MASK_REUSE_TIME_NS 40000 #define NFP_FL_MASK_ID_LOCATION 1 +#define NFP_FL_VXLAN_PORT 4789 + struct nfp_fl_mask_id { struct circ_buf mask_id_free_list; struct timespec64 *last_used; diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index d25b5038c3a2..1fd1bab0611f 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -77,14 +77,17 @@ nfp_flower_compile_meta(struct nfp_flower_meta_one *frame, u8 key_type) static int nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port, - bool mask_version) + bool mask_version, enum nfp_flower_tun_type tun_type) { if (mask_version) { frame->in_port = cpu_to_be32(~0); return 0; } - frame->in_port = cpu_to_be32(cmsg_port); + if (tun_type) + frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type); + else + frame->in_port = cpu_to_be32(cmsg_port); return 0; } @@ -189,15 +192,53 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame, } } +static void +nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame, + struct tc_cls_flower_offload *flow, + bool mask_version) +{ + struct fl_flow_key *target = mask_version ? flow->mask : flow->key; + struct flow_dissector_key_ipv4_addrs *vxlan_ips; + struct flow_dissector_key_keyid *vni; + + /* Wildcard TOS/TTL/GPE_FLAGS/NXT_PROTO for now. */ + memset(frame, 0, sizeof(struct nfp_flower_vxlan)); + + if (dissector_uses_key(flow->dissector, + FLOW_DISSECTOR_KEY_ENC_KEYID)) { + u32 temp_vni; + + vni = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_ENC_KEYID, + target); + temp_vni = be32_to_cpu(vni->keyid) << NFP_FL_TUN_VNI_OFFSET; + frame->tun_id = cpu_to_be32(temp_vni); + } + + if (dissector_uses_key(flow->dissector, + FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { + vxlan_ips = + skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, + target); + frame->ip_src = vxlan_ips->src; + frame->ip_dst = vxlan_ips->dst; + } +} + int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, struct nfp_fl_key_ls *key_ls, struct net_device *netdev, struct nfp_fl_payload *nfp_flow) { + enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE; int err; u8 *ext; u8 *msk; + if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN) + tun_type = NFP_FL_TUNNEL_VXLAN; + memset(nfp_flow->unmasked_data, 0, key_ls->key_size); memset(nfp_flow->mask_data, 0, key_ls->key_size); @@ -216,14 +257,14 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, /* Populate Exact Port data. */ err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext, nfp_repr_get_port_id(netdev), - false); + false, tun_type); if (err) return err; /* Populate Mask Port Data. */ err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk, nfp_repr_get_port_id(netdev), - true); + true, tun_type); if (err) return err; @@ -291,5 +332,16 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, msk += sizeof(struct nfp_flower_ipv6); } + if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN) { + /* Populate Exact VXLAN Data. */ + nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)ext, + flow, false); + /* Populate Mask VXLAN Data. 
*/ + nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)msk, + flow, true); + ext += sizeof(struct nfp_flower_vxlan); + msk += sizeof(struct nfp_flower_vxlan); + } + return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index a18b4d2b1d3e..637372ba8f55 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -52,8 +52,25 @@ BIT(FLOW_DISSECTOR_KEY_PORTS) | \ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \ BIT(FLOW_DISSECTOR_KEY_VLAN) | \ + BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \ + BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \ + BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \ + BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \ + BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \ BIT(FLOW_DISSECTOR_KEY_IP)) +#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \ + (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \ + BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \ + BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \ + BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \ + BIT(FLOW_DISSECTOR_KEY_ENC_PORTS)) + +#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \ + (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \ + BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \ + BIT(FLOW_DISSECTOR_KEY_ENC_PORTS)) + static int nfp_flower_xmit_flow(struct net_device *netdev, struct nfp_fl_payload *nfp_flow, u8 mtype) @@ -125,15 +142,58 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) return -EOPNOTSUPP; + /* If any tun dissector is used then the required set must be used. */ + if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR && + (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) + != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) + return -EOPNOTSUPP; + + key_layer_two = 0; + key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC; + key_size = sizeof(struct nfp_flower_meta_one) + + sizeof(struct nfp_flower_in_port) + + sizeof(struct nfp_flower_mac_mpls); + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { + struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL; + struct flow_dissector_key_ports *mask_enc_ports = NULL; + struct flow_dissector_key_ports *enc_ports = NULL; struct flow_dissector_key_control *mask_enc_ctl = skb_flow_dissector_target(flow->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL, flow->mask); - /* We are expecting a tunnel. For now we ignore offloading. */ - if (mask_enc_ctl->addr_type) + struct flow_dissector_key_control *enc_ctl = + skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_ENC_CONTROL, + flow->key); + if (mask_enc_ctl->addr_type != 0xffff || + enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) return -EOPNOTSUPP; + + /* These fields are already verified as used. 
*/ + mask_ipv4 = + skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, + flow->mask); + if (mask_ipv4->dst != cpu_to_be32(~0)) + return -EOPNOTSUPP; + + mask_enc_ports = + skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_ENC_PORTS, + flow->mask); + enc_ports = + skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_ENC_PORTS, + flow->key); + + if (mask_enc_ports->dst != cpu_to_be16(~0) || + enc_ports->dst != htons(NFP_FL_VXLAN_PORT)) + return -EOPNOTSUPP; + + key_layer |= NFP_FLOWER_LAYER_VXLAN; + key_size += sizeof(struct nfp_flower_vxlan); } if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { @@ -151,12 +211,6 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, FLOW_DISSECTOR_KEY_IP, flow->mask); - key_layer_two = 0; - key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC; - key_size = sizeof(struct nfp_flower_meta_one) + - sizeof(struct nfp_flower_in_port) + - sizeof(struct nfp_flower_mac_mpls); - if (mask_basic && mask_basic->n_proto) { /* Ethernet type is present in the key. */ switch (key_basic->n_proto) { -- cgit v1.2.3 From b27d6a95a70de551df828de2b658efd949a9864e Mon Sep 17 00:00:00 2001 From: John Hurley Date: Mon, 25 Sep 2017 12:23:37 +0200 Subject: nfp: compile flower vxlan tunnel set actions Compile set tunnel actions for tc flower. Only support VXLAN and ensure a tunnel destination port of 4789 is used. Signed-off-by: John Hurley Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/action.c | 169 ++++++++++++++++++--- drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 31 +++- 2 files changed, 179 insertions(+), 21 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index db9750695dc7..38f3835ae176 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -37,6 +37,7 @@ #include #include #include +#include #include "cmsg.h" #include "main.h" @@ -80,14 +81,27 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan, push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci); } +static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev, + enum nfp_flower_tun_type tun_type) +{ + if (!out_dev->rtnl_link_ops) + return false; + + if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan")) + return tun_type == NFP_FL_TUNNEL_VXLAN; + + return false; +} + static int nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action, struct nfp_fl_payload *nfp_flow, bool last, - struct net_device *in_dev) + struct net_device *in_dev, enum nfp_flower_tun_type tun_type, + int *tun_out_cnt) { size_t act_size = sizeof(struct nfp_fl_output); + u16 tmp_output_op, tmp_flags; struct net_device *out_dev; - u16 tmp_output_op; int ifindex; /* Set action opcode to output action. */ @@ -97,25 +111,114 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action, output->a_op = cpu_to_be16(tmp_output_op); - /* Set action output parameters. */ - output->flags = cpu_to_be16(last ? NFP_FL_OUT_FLAGS_LAST : 0); - ifindex = tcf_mirred_ifindex(action); out_dev = __dev_get_by_index(dev_net(in_dev), ifindex); if (!out_dev) return -EOPNOTSUPP; - /* Only offload egress ports are on the same device as the ingress - * port. + tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0; + + if (tun_type) { + /* Verify the egress netdev matches the tunnel type. 
*/ + if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type)) + return -EOPNOTSUPP; + + if (*tun_out_cnt) + return -EOPNOTSUPP; + (*tun_out_cnt)++; + + output->flags = cpu_to_be16(tmp_flags | + NFP_FL_OUT_FLAGS_USE_TUN); + output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type); + } else { + /* Set action output parameters. */ + output->flags = cpu_to_be16(tmp_flags); + + /* Only offload if egress ports are on the same device as the + * ingress port. + */ + if (!switchdev_port_same_parent_id(in_dev, out_dev)) + return -EOPNOTSUPP; + + output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev)); + if (!output->port) + return -EOPNOTSUPP; + } + nfp_flow->meta.shortcut = output->port; + + return 0; +} + +static bool nfp_fl_supported_tun_port(const struct tc_action *action) +{ + struct ip_tunnel_info *tun = tcf_tunnel_info(action); + + return tun->key.tp_dst == htons(NFP_FL_VXLAN_PORT); +} + +static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len) +{ + size_t act_size = sizeof(struct nfp_fl_pre_tunnel); + struct nfp_fl_pre_tunnel *pre_tun_act; + u16 tmp_pre_tun_op; + + /* Pre_tunnel action must be first on action list. + * If other actions already exist they need pushed forward. */ - if (!switchdev_port_same_parent_id(in_dev, out_dev)) - return -EOPNOTSUPP; + if (act_len) + memmove(act_data + act_size, act_data, act_len); + + pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data; + + memset(pre_tun_act, 0, act_size); + + tmp_pre_tun_op = + FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) | + FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_PRE_TUNNEL); + + pre_tun_act->a_op = cpu_to_be16(tmp_pre_tun_op); - output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev)); - if (!output->port) + return pre_tun_act; +} + +static int +nfp_fl_set_vxlan(struct nfp_fl_set_vxlan *set_vxlan, + const struct tc_action *action, + struct nfp_fl_pre_tunnel *pre_tun) +{ + struct ip_tunnel_info *vxlan = tcf_tunnel_info(action); + size_t act_size = sizeof(struct nfp_fl_set_vxlan); + u32 tmp_set_vxlan_type_index = 0; + u16 tmp_set_vxlan_op; + /* Currently support one pre-tunnel so index is always 0. */ + int pretun_idx = 0; + + if (vxlan->options_len) { + /* Do not support options e.g. vxlan gpe. */ return -EOPNOTSUPP; + } - nfp_flow->meta.shortcut = output->port; + tmp_set_vxlan_op = + FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) | + FIELD_PREP(NFP_FL_ACT_JMP_ID, + NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL); + + set_vxlan->a_op = cpu_to_be16(tmp_set_vxlan_op); + + /* Set tunnel type and pre-tunnel index. */ + tmp_set_vxlan_type_index |= + FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, NFP_FL_TUNNEL_VXLAN) | + FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx); + + set_vxlan->tun_type_index = cpu_to_be32(tmp_set_vxlan_type_index); + + set_vxlan->tun_id = vxlan->key.tun_id; + set_vxlan->tun_flags = vxlan->key.tun_flags; + set_vxlan->ipv4_ttl = vxlan->key.ttl; + set_vxlan->ipv4_tos = vxlan->key.tos; + + /* Complete pre_tunnel action. 
*/ + pre_tun->ipv4_dst = vxlan->key.u.ipv4.dst; return 0; } @@ -123,8 +226,11 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action, static int nfp_flower_loop_action(const struct tc_action *a, struct nfp_fl_payload *nfp_fl, int *a_len, - struct net_device *netdev) + struct net_device *netdev, + enum nfp_flower_tun_type *tun_type, int *tun_out_cnt) { + struct nfp_fl_pre_tunnel *pre_tun; + struct nfp_fl_set_vxlan *s_vxl; struct nfp_fl_push_vlan *psh_v; struct nfp_fl_pop_vlan *pop_v; struct nfp_fl_output *output; @@ -137,7 +243,8 @@ nfp_flower_loop_action(const struct tc_action *a, return -EOPNOTSUPP; output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len]; - err = nfp_fl_output(output, a, nfp_fl, true, netdev); + err = nfp_fl_output(output, a, nfp_fl, true, netdev, *tun_type, + tun_out_cnt); if (err) return err; @@ -147,7 +254,8 @@ nfp_flower_loop_action(const struct tc_action *a, return -EOPNOTSUPP; output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len]; - err = nfp_fl_output(output, a, nfp_fl, false, netdev); + err = nfp_fl_output(output, a, nfp_fl, false, netdev, *tun_type, + tun_out_cnt); if (err) return err; @@ -170,6 +278,29 @@ nfp_flower_loop_action(const struct tc_action *a, nfp_fl_push_vlan(psh_v, a); *a_len += sizeof(struct nfp_fl_push_vlan); + } else if (is_tcf_tunnel_set(a) && nfp_fl_supported_tun_port(a)) { + /* Pre-tunnel action is required for tunnel encap. + * This checks for next hop entries on NFP. + * If none, the packet falls back before applying other actions. + */ + if (*a_len + sizeof(struct nfp_fl_pre_tunnel) + + sizeof(struct nfp_fl_set_vxlan) > NFP_FL_MAX_A_SIZ) + return -EOPNOTSUPP; + + *tun_type = NFP_FL_TUNNEL_VXLAN; + pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len); + nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL); + *a_len += sizeof(struct nfp_fl_pre_tunnel); + + s_vxl = (struct nfp_fl_set_vxlan *)&nfp_fl->action_data[*a_len]; + err = nfp_fl_set_vxlan(s_vxl, a, pre_tun); + if (err) + return err; + + *a_len += sizeof(struct nfp_fl_set_vxlan); + } else if (is_tcf_tunnel_release(a)) { + /* Tunnel decap is handled by default so accept action. */ + return 0; } else { /* Currently we do not handle any other actions. 
*/ return -EOPNOTSUPP; @@ -182,18 +313,22 @@ int nfp_flower_compile_action(struct tc_cls_flower_offload *flow, struct net_device *netdev, struct nfp_fl_payload *nfp_flow) { - int act_len, act_cnt, err; + int act_len, act_cnt, err, tun_out_cnt; + enum nfp_flower_tun_type tun_type; const struct tc_action *a; LIST_HEAD(actions); memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ); nfp_flow->meta.act_len = 0; + tun_type = NFP_FL_TUNNEL_NONE; act_len = 0; act_cnt = 0; + tun_out_cnt = 0; tcf_exts_to_list(flow->exts, &actions); list_for_each_entry(a, &actions, list) { - err = nfp_flower_loop_action(a, nfp_flow, &act_len, netdev); + err = nfp_flower_loop_action(a, nfp_flow, &act_len, netdev, + &tun_type, &tun_out_cnt); if (err) return err; act_cnt++; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index af9165b3b652..ff42ce8a1e9c 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -67,10 +67,12 @@ #define NFP_FL_LW_SIZ 2 /* Action opcodes */ -#define NFP_FL_ACTION_OPCODE_OUTPUT 0 -#define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1 -#define NFP_FL_ACTION_OPCODE_POP_VLAN 2 -#define NFP_FL_ACTION_OPCODE_NUM 32 +#define NFP_FL_ACTION_OPCODE_OUTPUT 0 +#define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1 +#define NFP_FL_ACTION_OPCODE_POP_VLAN 2 +#define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6 +#define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17 +#define NFP_FL_ACTION_OPCODE_NUM 32 #define NFP_FL_ACT_JMP_ID GENMASK(15, 8) #define NFP_FL_ACT_LEN_LW GENMASK(7, 0) @@ -85,6 +87,8 @@ /* Tunnel ports */ #define NFP_FL_PORT_TYPE_TUN 0x50000000 +#define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4) +#define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0) enum nfp_flower_tun_type { NFP_FL_TUNNEL_NONE = 0, @@ -123,6 +127,25 @@ struct nfp_flower_meta_one { u16 reserved; }; +struct nfp_fl_pre_tunnel { + __be16 a_op; + __be16 reserved; + __be32 ipv4_dst; + /* reserved for use with IPv6 addresses */ + __be32 extra[3]; +}; + +struct nfp_fl_set_vxlan { + __be16 a_op; + __be16 reserved; + __be64 tun_id; + __be32 tun_type_index; + __be16 tun_flags; + u8 ipv4_ttl; + u8 ipv4_tos; + __be32 extra[2]; +} __packed; + /* Metadata with L2 (1W/4B) * ---------------------------------------------------------------- * 3 2 1 -- cgit v1.2.3 From fd0dd1ab1e107369c950796bb9b0e8eab6134bf1 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Mon, 25 Sep 2017 12:23:38 +0200 Subject: nfp: offload flower vxlan endpoint MAC addresses Generate a list of MAC addresses of netdevs that could be used as VXLAN tunnel end points. Give offloaded MACs an index for storage on the NFP in the ranges: 0x100-0x1ff physical port representors 0x200-0x2ff VF port representors 0x300-0x3ff other offloads (e.g. vxlan netdevs, ovs bridges) Assign phys and vf indexes based on unique 8 bit values in the port num. Maintain list of other netdevs to ensure same netdev is not offloaded twice and each gets a unique ID without exhausting the entries. Because the IDs are unique but constant for a netdev, any changes are implemented by overwriting the index on NFP. Signed-off-by: John Hurley Signed-off-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/Makefile | 3 +- drivers/net/ethernet/netronome/nfp/flower/cmsg.c | 7 - drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 9 + drivers/net/ethernet/netronome/nfp/flower/main.c | 13 + drivers/net/ethernet/netronome/nfp/flower/main.h | 18 + drivers/net/ethernet/netronome/nfp/flower/match.c | 7 + .../ethernet/netronome/nfp/flower/tunnel_conf.c | 374 +++++++++++++++++++++ 7 files changed, 423 insertions(+), 8 deletions(-) create mode 100644 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index 96e579a15cbe..becaacf1554d 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -37,7 +37,8 @@ nfp-objs += \ flower/main.o \ flower/match.o \ flower/metadata.o \ - flower/offload.o + flower/offload.o \ + flower/tunnel_conf.o endif ifeq ($(CONFIG_BPF_SYSCALL),y) diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index c3ca05d10fe1..b756006dba6f 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -38,17 +38,10 @@ #include #include "main.h" -#include "../nfpcore/nfp_cpp.h" #include "../nfp_net.h" #include "../nfp_net_repr.h" #include "./cmsg.h" -#define nfp_flower_cmsg_warn(app, fmt, args...) \ - do { \ - if (net_ratelimit()) \ - nfp_warn((app)->cpp, fmt, ## args); \ - } while (0) - static struct nfp_flower_cmsg_hdr * nfp_flower_cmsg_get_hdr(struct sk_buff *skb) { diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index ff42ce8a1e9c..dc248193c996 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -39,6 +39,7 @@ #include #include "../nfp_app.h" +#include "../nfpcore/nfp_cpp.h" #define NFP_FLOWER_LAYER_META BIT(0) #define NFP_FLOWER_LAYER_PORT BIT(1) @@ -90,6 +91,12 @@ #define NFP_FL_IPV4_TUNNEL_TYPE GENMASK(7, 4) #define NFP_FL_IPV4_PRE_TUN_INDEX GENMASK(2, 0) +#define nfp_flower_cmsg_warn(app, fmt, args...) 
\ + do { \ + if (net_ratelimit()) \ + nfp_warn((app)->cpp, fmt, ## args); \ + } while (0) + enum nfp_flower_tun_type { NFP_FL_TUNNEL_NONE = 0, NFP_FL_TUNNEL_VXLAN = 2, @@ -310,6 +317,7 @@ enum nfp_flower_cmsg_type_port { NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2, NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7, NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8, + NFP_FLOWER_CMSG_TYPE_TUN_MAC = 11, NFP_FLOWER_CMSG_TYPE_FLOW_STATS = 15, NFP_FLOWER_CMSG_TYPE_PORT_ECHO = 16, NFP_FLOWER_CMSG_TYPE_MAX = 32, @@ -343,6 +351,7 @@ enum nfp_flower_cmsg_port_type { NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC = 0x0, NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT = 0x1, NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT = 0x2, + NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT = 0x3, }; enum nfp_flower_cmsg_port_vnic_type { diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 91fe03617106..e46e7c60d491 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -436,6 +436,16 @@ static void nfp_flower_clean(struct nfp_app *app) app->priv = NULL; } +static int nfp_flower_start(struct nfp_app *app) +{ + return nfp_tunnel_config_start(app); +} + +static void nfp_flower_stop(struct nfp_app *app) +{ + nfp_tunnel_config_stop(app); +} + const struct nfp_app_type app_flower = { .id = NFP_APP_FLOWER_NIC, .name = "flower", @@ -453,6 +463,9 @@ const struct nfp_app_type app_flower = { .repr_open = nfp_flower_repr_netdev_open, .repr_stop = nfp_flower_repr_netdev_stop, + .start = nfp_flower_start, + .stop = nfp_flower_stop, + .ctrl_msg_rx = nfp_flower_cmsg_rx, .sriov_enable = nfp_flower_sriov_enable, diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index cd695eabce02..9de375acc254 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -84,6 +84,13 @@ struct nfp_fl_stats_id { * @flow_table: Hash table used to store flower rules * @cmsg_work: Workqueue for control messages processing * @cmsg_skbs: List of skbs for control message processing + * @nfp_mac_off_list: List of MAC addresses to offload + * @nfp_mac_index_list: List of unique 8-bit indexes for non NFP netdevs + * @nfp_mac_off_lock: Lock for the MAC address list + * @nfp_mac_index_lock: Lock for the MAC index list + * @nfp_mac_off_ids: IDA to manage id assignment for offloaded macs + * @nfp_mac_off_count: Number of MACs in address list + * @nfp_tun_mac_nb: Notifier to monitor link state */ struct nfp_flower_priv { struct nfp_app *app; @@ -96,6 +103,13 @@ struct nfp_flower_priv { DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS); struct work_struct cmsg_work; struct sk_buff_head cmsg_skbs; + struct list_head nfp_mac_off_list; + struct list_head nfp_mac_index_list; + struct mutex nfp_mac_off_lock; + struct mutex nfp_mac_index_lock; + struct ida nfp_mac_off_ids; + int nfp_mac_off_count; + struct notifier_block nfp_tun_mac_nb; }; struct nfp_fl_key_ls { @@ -165,4 +179,8 @@ nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie); void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb); +int nfp_tunnel_config_start(struct nfp_app *app); +void nfp_tunnel_config_stop(struct nfp_app *app); +void nfp_tunnel_write_macs(struct nfp_app *app); + #endif diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 1fd1bab0611f..cb3ff6c126e8 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ 
b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -232,6 +232,7 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, struct nfp_fl_payload *nfp_flow) { enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE; + struct nfp_repr *netdev_repr; int err; u8 *ext; u8 *msk; @@ -341,6 +342,12 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, flow, true); ext += sizeof(struct nfp_flower_vxlan); msk += sizeof(struct nfp_flower_vxlan); + + /* Configure tunnel end point MAC. */ + if (nfp_netdev_is_nfp_repr(netdev)) { + netdev_repr = netdev_priv(netdev); + nfp_tunnel_write_macs(netdev_repr->app); + } } return 0; diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c new file mode 100644 index 000000000000..34be85803020 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -0,0 +1,374 @@ +/* + * Copyright (C) 2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include + +#include "cmsg.h" +#include "main.h" +#include "../nfp_net_repr.h" +#include "../nfp_net.h" + +/** + * struct nfp_tun_mac_addr - configure MAC address of tunnel EP on NFP + * @reserved: reserved for future use + * @count: number of MAC addresses in the message + * @index: index of MAC address in the lookup table + * @addr: interface MAC address + * @addresses: series of MACs to offload + */ +struct nfp_tun_mac_addr { + __be16 reserved; + __be16 count; + struct index_mac_addr { + __be16 index; + u8 addr[ETH_ALEN]; + } addresses[]; +}; + +/** + * struct nfp_tun_mac_offload_entry - list of MACs to offload + * @index: index of MAC address for offloading + * @addr: interface MAC address + * @list: list pointer + */ +struct nfp_tun_mac_offload_entry { + __be16 index; + u8 addr[ETH_ALEN]; + struct list_head list; +}; + +#define NFP_MAX_MAC_INDEX 0xff + +/** + * struct nfp_tun_mac_non_nfp_idx - converts non NFP netdev ifindex to 8-bit id + * @ifindex: netdev ifindex of the device + * @index: index of netdevs mac on NFP + * @list: list pointer + */ +struct nfp_tun_mac_non_nfp_idx { + int ifindex; + u8 index; + struct list_head list; +}; + +static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev) +{ + if (!netdev->rtnl_link_ops) + return false; + if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch")) + return true; + if (!strcmp(netdev->rtnl_link_ops->kind, "vxlan")) + return true; + + return false; +} + +static int +nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata) +{ + struct sk_buff *skb; + unsigned char *msg; + + skb = nfp_flower_cmsg_alloc(app, plen, mtype); + if (!skb) + return -ENOMEM; + + msg = nfp_flower_cmsg_get_data(skb); + memcpy(msg, pdata, nfp_flower_cmsg_get_data_len(skb)); + + nfp_ctrl_tx(app->ctrl, skb); + return 0; +} + +void nfp_tunnel_write_macs(struct nfp_app *app) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_tun_mac_offload_entry *entry; + struct nfp_tun_mac_addr *payload; + struct list_head *ptr, *storage; + int mac_count, err, pay_size; + + mutex_lock(&priv->nfp_mac_off_lock); + if (!priv->nfp_mac_off_count) { + mutex_unlock(&priv->nfp_mac_off_lock); + return; + } + + pay_size = sizeof(struct nfp_tun_mac_addr) + + sizeof(struct index_mac_addr) * priv->nfp_mac_off_count; + + payload = kzalloc(pay_size, GFP_KERNEL); + if (!payload) { + mutex_unlock(&priv->nfp_mac_off_lock); + return; + } + + payload->count = cpu_to_be16(priv->nfp_mac_off_count); + + mac_count = 0; + list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) { + entry = list_entry(ptr, struct nfp_tun_mac_offload_entry, + list); + payload->addresses[mac_count].index = entry->index; + ether_addr_copy(payload->addresses[mac_count].addr, + entry->addr); + mac_count++; + } + + err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC, + pay_size, payload); + + kfree(payload); + + if (err) { + mutex_unlock(&priv->nfp_mac_off_lock); + /* Write failed so retain list for future retry. */ + return; + } + + /* If list was successfully offloaded, flush it. 
*/ + list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) { + entry = list_entry(ptr, struct nfp_tun_mac_offload_entry, + list); + list_del(&entry->list); + kfree(entry); + } + + priv->nfp_mac_off_count = 0; + mutex_unlock(&priv->nfp_mac_off_lock); +} + +static int nfp_tun_get_mac_idx(struct nfp_app *app, int ifindex) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_tun_mac_non_nfp_idx *entry; + struct list_head *ptr, *storage; + int idx; + + mutex_lock(&priv->nfp_mac_index_lock); + list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) { + entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list); + if (entry->ifindex == ifindex) { + idx = entry->index; + mutex_unlock(&priv->nfp_mac_index_lock); + return idx; + } + } + + idx = ida_simple_get(&priv->nfp_mac_off_ids, 0, + NFP_MAX_MAC_INDEX, GFP_KERNEL); + if (idx < 0) { + mutex_unlock(&priv->nfp_mac_index_lock); + return idx; + } + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + mutex_unlock(&priv->nfp_mac_index_lock); + return -ENOMEM; + } + entry->ifindex = ifindex; + entry->index = idx; + list_add_tail(&entry->list, &priv->nfp_mac_index_list); + mutex_unlock(&priv->nfp_mac_index_lock); + + return idx; +} + +static void nfp_tun_del_mac_idx(struct nfp_app *app, int ifindex) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_tun_mac_non_nfp_idx *entry; + struct list_head *ptr, *storage; + + mutex_lock(&priv->nfp_mac_index_lock); + list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) { + entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list); + if (entry->ifindex == ifindex) { + ida_simple_remove(&priv->nfp_mac_off_ids, + entry->index); + list_del(&entry->list); + kfree(entry); + break; + } + } + mutex_unlock(&priv->nfp_mac_index_lock); +} + +static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev, + struct nfp_app *app) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_tun_mac_offload_entry *entry; + u16 nfp_mac_idx; + int port = 0; + + /* Check if MAC should be offloaded. */ + if (!is_valid_ether_addr(netdev->dev_addr)) + return; + + if (nfp_netdev_is_nfp_repr(netdev)) + port = nfp_repr_get_port_id(netdev); + else if (!nfp_tun_is_netdev_to_offload(netdev)) + return; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + nfp_flower_cmsg_warn(app, "Mem fail when offloading MAC.\n"); + return; + } + + if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) == + NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT) { + nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT; + } else if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) == + NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT) { + port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port); + nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT; + } else { + /* Must assign our own unique 8-bit index. 
*/ + int idx = nfp_tun_get_mac_idx(app, netdev->ifindex); + + if (idx < 0) { + nfp_flower_cmsg_warn(app, "Can't assign non-repr MAC index.\n"); + kfree(entry); + return; + } + nfp_mac_idx = idx << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT; + } + + entry->index = cpu_to_be16(nfp_mac_idx); + ether_addr_copy(entry->addr, netdev->dev_addr); + + mutex_lock(&priv->nfp_mac_off_lock); + priv->nfp_mac_off_count++; + list_add_tail(&entry->list, &priv->nfp_mac_off_list); + mutex_unlock(&priv->nfp_mac_off_lock); +} + +static int nfp_tun_mac_event_handler(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct nfp_flower_priv *app_priv; + struct net_device *netdev; + struct nfp_app *app; + + if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) { + app_priv = container_of(nb, struct nfp_flower_priv, + nfp_tun_mac_nb); + app = app_priv->app; + netdev = netdev_notifier_info_to_dev(ptr); + + /* If non-nfp netdev then free its offload index. */ + if (nfp_tun_is_netdev_to_offload(netdev)) + nfp_tun_del_mac_idx(app, netdev->ifindex); + } else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR || + event == NETDEV_REGISTER) { + app_priv = container_of(nb, struct nfp_flower_priv, + nfp_tun_mac_nb); + app = app_priv->app; + netdev = netdev_notifier_info_to_dev(ptr); + + nfp_tun_add_to_mac_offload_list(netdev, app); + + /* Force a list write to keep NFP up to date. */ + nfp_tunnel_write_macs(app); + } + return NOTIFY_OK; +} + +int nfp_tunnel_config_start(struct nfp_app *app) +{ + struct nfp_flower_priv *priv = app->priv; + struct net_device *netdev; + int err; + + /* Initialise priv data for MAC offloading. */ + priv->nfp_mac_off_count = 0; + mutex_init(&priv->nfp_mac_off_lock); + INIT_LIST_HEAD(&priv->nfp_mac_off_list); + priv->nfp_tun_mac_nb.notifier_call = nfp_tun_mac_event_handler; + mutex_init(&priv->nfp_mac_index_lock); + INIT_LIST_HEAD(&priv->nfp_mac_index_list); + ida_init(&priv->nfp_mac_off_ids); + + err = register_netdevice_notifier(&priv->nfp_tun_mac_nb); + if (err) + goto err_free_mac_ida; + + /* Parse netdevs already registered for MACs that need offloaded. */ + rtnl_lock(); + for_each_netdev(&init_net, netdev) + nfp_tun_add_to_mac_offload_list(netdev, app); + rtnl_unlock(); + + return 0; + +err_free_mac_ida: + ida_destroy(&priv->nfp_mac_off_ids); + return err; +} + +void nfp_tunnel_config_stop(struct nfp_app *app) +{ + struct nfp_tun_mac_offload_entry *mac_entry; + struct nfp_flower_priv *priv = app->priv; + struct nfp_tun_mac_non_nfp_idx *mac_idx; + struct list_head *ptr, *storage; + + unregister_netdevice_notifier(&priv->nfp_tun_mac_nb); + + /* Free any memory that may be occupied by MAC list. */ + mutex_lock(&priv->nfp_mac_off_lock); + list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) { + mac_entry = list_entry(ptr, struct nfp_tun_mac_offload_entry, + list); + list_del(&mac_entry->list); + kfree(mac_entry); + } + mutex_unlock(&priv->nfp_mac_off_lock); + + /* Free any memory that may be occupied by MAC index list. 
*/ + mutex_lock(&priv->nfp_mac_index_lock); + list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) { + mac_idx = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, + list); + list_del(&mac_idx->list); + kfree(mac_idx); + } + mutex_unlock(&priv->nfp_mac_index_lock); + + ida_destroy(&priv->nfp_mac_off_ids); +} -- cgit v1.2.3 From 2d9ad71a8ce67eea9ee38512a215e1893bd5cf87 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Mon, 25 Sep 2017 12:23:39 +0200 Subject: nfp: offload vxlan IPv4 endpoints of flower rules Maintain a list of IPv4 addresses used as the tunnel destination IP match fields in currently active flower rules. Offload the entire list of NFP_FL_IPV4_ADDRS_MAX (even if some are unused) when new IPs are added or removed. The NFP should only be aware of tunnel end points that are currently used by rules on the device Signed-off-by: John Hurley Reviewed-by: Simon Horman Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 1 + drivers/net/ethernet/netronome/nfp/flower/main.h | 7 ++ drivers/net/ethernet/netronome/nfp/flower/match.c | 14 ++- .../net/ethernet/netronome/nfp/flower/offload.c | 4 + .../ethernet/netronome/nfp/flower/tunnel_conf.c | 120 +++++++++++++++++++++ 5 files changed, 143 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index dc248193c996..6540bb1ceefb 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -318,6 +318,7 @@ enum nfp_flower_cmsg_type_port { NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7, NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8, NFP_FLOWER_CMSG_TYPE_TUN_MAC = 11, + NFP_FLOWER_CMSG_TYPE_TUN_IPS = 14, NFP_FLOWER_CMSG_TYPE_FLOW_STATS = 15, NFP_FLOWER_CMSG_TYPE_PORT_ECHO = 16, NFP_FLOWER_CMSG_TYPE_MAX = 32, diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 9de375acc254..53306af6cfe8 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -86,8 +86,10 @@ struct nfp_fl_stats_id { * @cmsg_skbs: List of skbs for control message processing * @nfp_mac_off_list: List of MAC addresses to offload * @nfp_mac_index_list: List of unique 8-bit indexes for non NFP netdevs + * @nfp_ipv4_off_list: List of IPv4 addresses to offload * @nfp_mac_off_lock: Lock for the MAC address list * @nfp_mac_index_lock: Lock for the MAC index list + * @nfp_ipv4_off_lock: Lock for the IPv4 address list * @nfp_mac_off_ids: IDA to manage id assignment for offloaded macs * @nfp_mac_off_count: Number of MACs in address list * @nfp_tun_mac_nb: Notifier to monitor link state @@ -105,8 +107,10 @@ struct nfp_flower_priv { struct sk_buff_head cmsg_skbs; struct list_head nfp_mac_off_list; struct list_head nfp_mac_index_list; + struct list_head nfp_ipv4_off_list; struct mutex nfp_mac_off_lock; struct mutex nfp_mac_index_lock; + struct mutex nfp_ipv4_off_lock; struct ida nfp_mac_off_ids; int nfp_mac_off_count; struct notifier_block nfp_tun_mac_nb; @@ -142,6 +146,7 @@ struct nfp_fl_payload { struct rcu_head rcu; spinlock_t lock; /* lock stats */ struct nfp_fl_stats stats; + __be32 nfp_tun_ipv4_addr; char *unmasked_data; char *mask_data; char *action_data; @@ -182,5 +187,7 @@ void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb); int nfp_tunnel_config_start(struct nfp_app *app); void nfp_tunnel_config_stop(struct nfp_app 
*app); void nfp_tunnel_write_macs(struct nfp_app *app); +void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4); +void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4); #endif diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index cb3ff6c126e8..865a815ab92a 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -195,7 +195,7 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame, static void nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame, struct tc_cls_flower_offload *flow, - bool mask_version) + bool mask_version, __be32 *tun_dst) { struct fl_flow_key *target = mask_version ? flow->mask : flow->key; struct flow_dissector_key_ipv4_addrs *vxlan_ips; @@ -223,6 +223,7 @@ nfp_flower_compile_vxlan(struct nfp_flower_vxlan *frame, target); frame->ip_src = vxlan_ips->src; frame->ip_dst = vxlan_ips->dst; + *tun_dst = vxlan_ips->dst; } } @@ -232,6 +233,7 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, struct nfp_fl_payload *nfp_flow) { enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE; + __be32 tun_dst, tun_dst_mask = 0; struct nfp_repr *netdev_repr; int err; u8 *ext; @@ -336,10 +338,10 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN) { /* Populate Exact VXLAN Data. */ nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)ext, - flow, false); + flow, false, &tun_dst); /* Populate Mask VXLAN Data. */ nfp_flower_compile_vxlan((struct nfp_flower_vxlan *)msk, - flow, true); + flow, true, &tun_dst_mask); ext += sizeof(struct nfp_flower_vxlan); msk += sizeof(struct nfp_flower_vxlan); @@ -347,6 +349,12 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, if (nfp_netdev_is_nfp_repr(netdev)) { netdev_repr = netdev_priv(netdev); nfp_tunnel_write_macs(netdev_repr->app); + + /* Store the tunnel destination in the rule data. + * This must be present and be an exact match. 
+ */ + nfp_flow->nfp_tun_ipv4_addr = tun_dst; + nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst); } } diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 637372ba8f55..3d9537ebdea4 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -306,6 +306,7 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer) if (!flow_pay->action_data) goto err_free_mask; + flow_pay->nfp_tun_ipv4_addr = 0; flow_pay->meta.flags = 0; spin_lock_init(&flow_pay->lock); @@ -415,6 +416,9 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, if (err) goto err_free_flow; + if (nfp_flow->nfp_tun_ipv4_addr) + nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr); + err = nfp_flower_xmit_flow(netdev, nfp_flow, NFP_FLOWER_CMSG_TYPE_FLOW_DEL); if (err) diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 34be85803020..185505140f5e 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -32,6 +32,7 @@ */ #include +#include #include #include @@ -40,6 +41,30 @@ #include "../nfp_net_repr.h" #include "../nfp_net.h" +#define NFP_FL_IPV4_ADDRS_MAX 32 + +/** + * struct nfp_tun_ipv4_addr - set the IP address list on the NFP + * @count: number of IPs populated in the array + * @ipv4_addr: array of IPV4_ADDRS_MAX 32 bit IPv4 addresses + */ +struct nfp_tun_ipv4_addr { + __be32 count; + __be32 ipv4_addr[NFP_FL_IPV4_ADDRS_MAX]; +}; + +/** + * struct nfp_ipv4_addr_entry - cached IPv4 addresses + * @ipv4_addr: IP address + * @ref_count: number of rules currently using this IP + * @list: list pointer + */ +struct nfp_ipv4_addr_entry { + __be32 ipv4_addr; + int ref_count; + struct list_head list; +}; + /** * struct nfp_tun_mac_addr - configure MAC address of tunnel EP on NFP * @reserved: reserved for future use @@ -112,6 +137,87 @@ nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata) return 0; } +static void nfp_tun_write_ipv4_list(struct nfp_app *app) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_ipv4_addr_entry *entry; + struct nfp_tun_ipv4_addr payload; + struct list_head *ptr, *storage; + int count; + + memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr)); + mutex_lock(&priv->nfp_ipv4_off_lock); + count = 0; + list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) { + if (count >= NFP_FL_IPV4_ADDRS_MAX) { + mutex_unlock(&priv->nfp_ipv4_off_lock); + nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n"); + return; + } + entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list); + payload.ipv4_addr[count++] = entry->ipv4_addr; + } + payload.count = cpu_to_be32(count); + mutex_unlock(&priv->nfp_ipv4_off_lock); + + nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS, + sizeof(struct nfp_tun_ipv4_addr), + &payload); +} + +void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_ipv4_addr_entry *entry; + struct list_head *ptr, *storage; + + mutex_lock(&priv->nfp_ipv4_off_lock); + list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) { + entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list); + if (entry->ipv4_addr == ipv4) { + entry->ref_count++; + mutex_unlock(&priv->nfp_ipv4_off_lock); + return; + } + } + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + 
mutex_unlock(&priv->nfp_ipv4_off_lock); + nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n"); + return; + } + entry->ipv4_addr = ipv4; + entry->ref_count = 1; + list_add_tail(&entry->list, &priv->nfp_ipv4_off_list); + mutex_unlock(&priv->nfp_ipv4_off_lock); + + nfp_tun_write_ipv4_list(app); +} + +void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_ipv4_addr_entry *entry; + struct list_head *ptr, *storage; + + mutex_lock(&priv->nfp_ipv4_off_lock); + list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) { + entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list); + if (entry->ipv4_addr == ipv4) { + entry->ref_count--; + if (!entry->ref_count) { + list_del(&entry->list); + kfree(entry); + } + break; + } + } + mutex_unlock(&priv->nfp_ipv4_off_lock); + + nfp_tun_write_ipv4_list(app); +} + void nfp_tunnel_write_macs(struct nfp_app *app) { struct nfp_flower_priv *priv = app->priv; @@ -324,6 +430,10 @@ int nfp_tunnel_config_start(struct nfp_app *app) INIT_LIST_HEAD(&priv->nfp_mac_index_list); ida_init(&priv->nfp_mac_off_ids); + /* Initialise priv data for IPv4 offloading. */ + mutex_init(&priv->nfp_ipv4_off_lock); + INIT_LIST_HEAD(&priv->nfp_ipv4_off_list); + err = register_netdevice_notifier(&priv->nfp_tun_mac_nb); if (err) goto err_free_mac_ida; @@ -346,6 +456,7 @@ void nfp_tunnel_config_stop(struct nfp_app *app) struct nfp_tun_mac_offload_entry *mac_entry; struct nfp_flower_priv *priv = app->priv; struct nfp_tun_mac_non_nfp_idx *mac_idx; + struct nfp_ipv4_addr_entry *ip_entry; struct list_head *ptr, *storage; unregister_netdevice_notifier(&priv->nfp_tun_mac_nb); @@ -371,4 +482,13 @@ void nfp_tunnel_config_stop(struct nfp_app *app) mutex_unlock(&priv->nfp_mac_index_lock); ida_destroy(&priv->nfp_mac_off_ids); + + /* Free any memory that may be occupied by ipv4 list. */ + mutex_lock(&priv->nfp_ipv4_off_lock); + list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) { + ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list); + list_del(&ip_entry->list); + kfree(ip_entry); + } + mutex_unlock(&priv->nfp_ipv4_off_lock); } -- cgit v1.2.3 From 8e6a9046b66a7dfb11ae8be226afaaf417649411 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Mon, 25 Sep 2017 12:23:40 +0200 Subject: nfp: flower vxlan neighbour offload Receive a request when the NFP does not know the next hop for a packet that is to be encapsulated in a VXLAN tunnel. Do a route lookup, determine the next hop entry and update neighbour table on NFP. Monitor the kernel neighbour table for link changes and update NFP with relevant information. Overwrite routes with zero values on the NFP when they expire. Signed-off-by: John Hurley Reviewed-by: Simon Horman Signed-off-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/flower/cmsg.c | 6 + drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 2 + drivers/net/ethernet/netronome/nfp/flower/main.h | 7 + .../ethernet/netronome/nfp/flower/tunnel_conf.c | 253 +++++++++++++++++++++ 4 files changed, 268 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index b756006dba6f..862787daaa68 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -181,6 +181,12 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) case NFP_FLOWER_CMSG_TYPE_FLOW_STATS: nfp_flower_rx_flow_stats(app, skb); break; + case NFP_FLOWER_CMSG_TYPE_NO_NEIGH: + nfp_tunnel_request_route(app, skb); + break; + case NFP_FLOWER_CMSG_TYPE_TUN_NEIGH: + /* Acks from the NFP that the route is added - ignore. */ + break; default: nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n", type); diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 6540bb1ceefb..1dc72a1ed577 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -317,7 +317,9 @@ enum nfp_flower_cmsg_type_port { NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2, NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7, NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8, + NFP_FLOWER_CMSG_TYPE_NO_NEIGH = 10, NFP_FLOWER_CMSG_TYPE_TUN_MAC = 11, + NFP_FLOWER_CMSG_TYPE_TUN_NEIGH = 13, NFP_FLOWER_CMSG_TYPE_TUN_IPS = 14, NFP_FLOWER_CMSG_TYPE_FLOW_STATS = 15, NFP_FLOWER_CMSG_TYPE_PORT_ECHO = 16, diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 53306af6cfe8..93ad969c3653 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -87,12 +87,15 @@ struct nfp_fl_stats_id { * @nfp_mac_off_list: List of MAC addresses to offload * @nfp_mac_index_list: List of unique 8-bit indexes for non NFP netdevs * @nfp_ipv4_off_list: List of IPv4 addresses to offload + * @nfp_neigh_off_list: List of neighbour offloads * @nfp_mac_off_lock: Lock for the MAC address list * @nfp_mac_index_lock: Lock for the MAC index list * @nfp_ipv4_off_lock: Lock for the IPv4 address list + * @nfp_neigh_off_lock: Lock for the neighbour address list * @nfp_mac_off_ids: IDA to manage id assignment for offloaded macs * @nfp_mac_off_count: Number of MACs in address list * @nfp_tun_mac_nb: Notifier to monitor link state + * @nfp_tun_neigh_nb: Notifier to monitor neighbour state */ struct nfp_flower_priv { struct nfp_app *app; @@ -108,12 +111,15 @@ struct nfp_flower_priv { struct list_head nfp_mac_off_list; struct list_head nfp_mac_index_list; struct list_head nfp_ipv4_off_list; + struct list_head nfp_neigh_off_list; struct mutex nfp_mac_off_lock; struct mutex nfp_mac_index_lock; struct mutex nfp_ipv4_off_lock; + struct mutex nfp_neigh_off_lock; struct ida nfp_mac_off_ids; int nfp_mac_off_count; struct notifier_block nfp_tun_mac_nb; + struct notifier_block nfp_tun_neigh_nb; }; struct nfp_fl_key_ls { @@ -189,5 +195,6 @@ void nfp_tunnel_config_stop(struct nfp_app *app); void nfp_tunnel_write_macs(struct nfp_app *app); void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4); void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4); +void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb); #endif diff --git 
a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 185505140f5e..8c6b88a1306b 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -33,6 +33,7 @@ #include #include +#include #include #include @@ -41,6 +42,44 @@ #include "../nfp_net_repr.h" #include "../nfp_net.h" +/** + * struct nfp_tun_neigh - neighbour/route entry on the NFP + * @dst_ipv4: destination IPv4 address + * @src_ipv4: source IPv4 address + * @dst_addr: destination MAC address + * @src_addr: source MAC address + * @port_id: NFP port to output packet on - associated with source IPv4 + */ +struct nfp_tun_neigh { + __be32 dst_ipv4; + __be32 src_ipv4; + u8 dst_addr[ETH_ALEN]; + u8 src_addr[ETH_ALEN]; + __be32 port_id; +}; + +/** + * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup + * @ingress_port: ingress port of packet that signalled request + * @ipv4_addr: destination ipv4 address for route + * @reserved: reserved for future use + */ +struct nfp_tun_req_route_ipv4 { + __be32 ingress_port; + __be32 ipv4_addr; + __be32 reserved[2]; +}; + +/** + * struct nfp_ipv4_route_entry - routes that are offloaded to the NFP + * @ipv4_addr: destination of route + * @list: list pointer + */ +struct nfp_ipv4_route_entry { + __be32 ipv4_addr; + struct list_head list; +}; + #define NFP_FL_IPV4_ADDRS_MAX 32 /** @@ -137,6 +176,197 @@ nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata) return 0; } +static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_ipv4_route_entry *entry; + struct list_head *ptr, *storage; + + mutex_lock(&priv->nfp_neigh_off_lock); + list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) { + entry = list_entry(ptr, struct nfp_ipv4_route_entry, list); + if (entry->ipv4_addr == ipv4_addr) { + mutex_unlock(&priv->nfp_neigh_off_lock); + return true; + } + } + mutex_unlock(&priv->nfp_neigh_off_lock); + return false; +} + +static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_ipv4_route_entry *entry; + struct list_head *ptr, *storage; + + mutex_lock(&priv->nfp_neigh_off_lock); + list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) { + entry = list_entry(ptr, struct nfp_ipv4_route_entry, list); + if (entry->ipv4_addr == ipv4_addr) { + mutex_unlock(&priv->nfp_neigh_off_lock); + return; + } + } + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + mutex_unlock(&priv->nfp_neigh_off_lock); + nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n"); + return; + } + + entry->ipv4_addr = ipv4_addr; + list_add_tail(&entry->list, &priv->nfp_neigh_off_list); + mutex_unlock(&priv->nfp_neigh_off_lock); +} + +static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr) +{ + struct nfp_flower_priv *priv = app->priv; + struct nfp_ipv4_route_entry *entry; + struct list_head *ptr, *storage; + + mutex_lock(&priv->nfp_neigh_off_lock); + list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) { + entry = list_entry(ptr, struct nfp_ipv4_route_entry, list); + if (entry->ipv4_addr == ipv4_addr) { + list_del(&entry->list); + kfree(entry); + break; + } + } + mutex_unlock(&priv->nfp_neigh_off_lock); +} + +static void +nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, + struct flowi4 *flow, struct neighbour *neigh) +{ + struct 
nfp_tun_neigh payload; + + /* Only offload representor IPv4s for now. */ + if (!nfp_netdev_is_nfp_repr(netdev)) + return; + + memset(&payload, 0, sizeof(struct nfp_tun_neigh)); + payload.dst_ipv4 = flow->daddr; + + /* If entry has expired send dst IP with all other fields 0. */ + if (!(neigh->nud_state & NUD_VALID)) { + nfp_tun_del_route_from_cache(app, payload.dst_ipv4); + /* Trigger ARP to verify invalid neighbour state. */ + neigh_event_send(neigh, NULL); + goto send_msg; + } + + /* Have a valid neighbour so populate rest of entry. */ + payload.src_ipv4 = flow->saddr; + ether_addr_copy(payload.src_addr, netdev->dev_addr); + neigh_ha_snapshot(payload.dst_addr, neigh, netdev); + payload.port_id = cpu_to_be32(nfp_repr_get_port_id(netdev)); + /* Add destination of new route to NFP cache. */ + nfp_tun_add_route_to_cache(app, payload.dst_ipv4); + +send_msg: + nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH, + sizeof(struct nfp_tun_neigh), + (unsigned char *)&payload); +} + +static int +nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, + void *ptr) +{ + struct nfp_flower_priv *app_priv; + struct netevent_redirect *redir; + struct flowi4 flow = {}; + struct neighbour *n; + struct nfp_app *app; + struct rtable *rt; + int err; + + switch (event) { + case NETEVENT_REDIRECT: + redir = (struct netevent_redirect *)ptr; + n = redir->neigh; + break; + case NETEVENT_NEIGH_UPDATE: + n = (struct neighbour *)ptr; + break; + default: + return NOTIFY_DONE; + } + + flow.daddr = *(__be32 *)n->primary_key; + + /* Only concerned with route changes for representors. */ + if (!nfp_netdev_is_nfp_repr(n->dev)) + return NOTIFY_DONE; + + app_priv = container_of(nb, struct nfp_flower_priv, nfp_tun_neigh_nb); + app = app_priv->app; + + /* Only concerned with changes to routes already added to NFP. */ + if (!nfp_tun_has_route(app, flow.daddr)) + return NOTIFY_DONE; + +#if IS_ENABLED(CONFIG_INET) + /* Do a route lookup to populate flow data. */ + rt = ip_route_output_key(dev_net(n->dev), &flow); + err = PTR_ERR_OR_ZERO(rt); + if (err) + return NOTIFY_DONE; +#else + return NOTIFY_DONE; +#endif + + flow.flowi4_proto = IPPROTO_UDP; + nfp_tun_write_neigh(n->dev, app, &flow, n); + + return NOTIFY_OK; +} + +void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb) +{ + struct nfp_tun_req_route_ipv4 *payload; + struct net_device *netdev; + struct flowi4 flow = {}; + struct neighbour *n; + struct rtable *rt; + int err; + + payload = nfp_flower_cmsg_get_data(skb); + + netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port)); + if (!netdev) + goto route_fail_warning; + + flow.daddr = payload->ipv4_addr; + flow.flowi4_proto = IPPROTO_UDP; + +#if IS_ENABLED(CONFIG_INET) + /* Do a route lookup on same namespace as ingress port. 
*/ + rt = ip_route_output_key(dev_net(netdev), &flow); + err = PTR_ERR_OR_ZERO(rt); + if (err) + goto route_fail_warning; +#else + goto route_fail_warning; +#endif + + /* Get the neighbour entry for the lookup */ + n = dst_neigh_lookup(&rt->dst, &flow.daddr); + ip_rt_put(rt); + if (!n) + goto route_fail_warning; + nfp_tun_write_neigh(n->dev, app, &flow, n); + neigh_release(n); + return; + +route_fail_warning: + nfp_flower_cmsg_warn(app, "Requested route not found.\n"); +} + static void nfp_tun_write_ipv4_list(struct nfp_app *app) { struct nfp_flower_priv *priv = app->priv; @@ -434,10 +664,19 @@ int nfp_tunnel_config_start(struct nfp_app *app) mutex_init(&priv->nfp_ipv4_off_lock); INIT_LIST_HEAD(&priv->nfp_ipv4_off_list); + /* Initialise priv data for neighbour offloading. */ + mutex_init(&priv->nfp_neigh_off_lock); + INIT_LIST_HEAD(&priv->nfp_neigh_off_list); + priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler; + err = register_netdevice_notifier(&priv->nfp_tun_mac_nb); if (err) goto err_free_mac_ida; + err = register_netevent_notifier(&priv->nfp_tun_neigh_nb); + if (err) + goto err_unreg_mac_nb; + /* Parse netdevs already registered for MACs that need offloaded. */ rtnl_lock(); for_each_netdev(&init_net, netdev) @@ -446,6 +685,8 @@ int nfp_tunnel_config_start(struct nfp_app *app) return 0; +err_unreg_mac_nb: + unregister_netdevice_notifier(&priv->nfp_tun_mac_nb); err_free_mac_ida: ida_destroy(&priv->nfp_mac_off_ids); return err; @@ -455,11 +696,13 @@ void nfp_tunnel_config_stop(struct nfp_app *app) { struct nfp_tun_mac_offload_entry *mac_entry; struct nfp_flower_priv *priv = app->priv; + struct nfp_ipv4_route_entry *route_entry; struct nfp_tun_mac_non_nfp_idx *mac_idx; struct nfp_ipv4_addr_entry *ip_entry; struct list_head *ptr, *storage; unregister_netdevice_notifier(&priv->nfp_tun_mac_nb); + unregister_netevent_notifier(&priv->nfp_tun_neigh_nb); /* Free any memory that may be occupied by MAC list. */ mutex_lock(&priv->nfp_mac_off_lock); @@ -491,4 +734,14 @@ void nfp_tunnel_config_stop(struct nfp_app *app) kfree(ip_entry); } mutex_unlock(&priv->nfp_ipv4_off_lock); + + /* Free any memory that may be occupied by the route list. */ + mutex_lock(&priv->nfp_neigh_off_lock); + list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) { + route_entry = list_entry(ptr, struct nfp_ipv4_route_entry, + list); + list_del(&route_entry->list); + kfree(route_entry); + } + mutex_unlock(&priv->nfp_neigh_off_lock); } -- cgit v1.2.3 From 856f5b135758ad80053a49f7ce9d1dc0166e3006 Mon Sep 17 00:00:00 2001 From: John Hurley Date: Mon, 25 Sep 2017 12:23:41 +0200 Subject: nfp: flower vxlan neighbour keep-alive Periodically receive messages containing the destination IPs of tunnels that have recently forwarded traffic. Update the 'used' value of the neighbour entries for these IPs' next hops. Rather than letting the neighbour entry expire on timeout, this signals an ARP to verify the connection. From an NFP perspective, packets will not fall back mid-flow unless the link is verified to be down. Signed-off-by: John Hurley Reviewed-by: Simon Horman Signed-off-by: Simon Horman Signed-off-by: David S. Miller
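The keep-alive payload is fixed-layout, so it can be validated with plain arithmetic before any entry is touched, which is exactly what the new nfp_tunnel_keep_alive() below does. A minimal user-space sketch of the same check (struct and function names are illustrative stand-ins, not driver code):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Mirrors the layout of struct nfp_tun_active_tuns in the diff below. */
struct ex_route_ip_info {
	uint32_t ipv4;
	uint32_t egress_port;
	uint32_t extra[2];
};

struct ex_active_tuns {
	uint32_t seq;
	uint32_t count;
	uint32_t flags;
	struct ex_route_ip_info tun_info[];
};

/* Well-formed only if the length is exactly the 12-byte header plus one
 * 16-byte record per reported tunnel (and the count is bounded). */
static bool ex_keep_alive_len_ok(size_t pay_len, uint32_t count)
{
	if (count > 32) /* NFP_FL_MAX_ROUTES in the driver */
		return false;
	return pay_len == sizeof(struct ex_active_tuns) +
			  count * sizeof(struct ex_route_ip_info);
}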
--- drivers/net/ethernet/netronome/nfp/flower/cmsg.c | 3 + drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 1 + drivers/net/ethernet/netronome/nfp/flower/main.h | 1 + .../ethernet/netronome/nfp/flower/tunnel_conf.c | 64 ++++++++++++++++++++++ 4 files changed, 69 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index 862787daaa68..6b71c719deba 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -184,6 +184,9 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) case NFP_FLOWER_CMSG_TYPE_NO_NEIGH: nfp_tunnel_request_route(app, skb); break; + case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS: + nfp_tunnel_keep_alive(app, skb); + break; case NFP_FLOWER_CMSG_TYPE_TUN_NEIGH: /* Acks from the NFP that the route is added - ignore. */ break; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 1dc72a1ed577..504ddaa21701 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -319,6 +319,7 @@ enum nfp_flower_cmsg_type_port { NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8, NFP_FLOWER_CMSG_TYPE_NO_NEIGH = 10, NFP_FLOWER_CMSG_TYPE_TUN_MAC = 11, + NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS = 12, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH = 13, NFP_FLOWER_CMSG_TYPE_TUN_IPS = 14, NFP_FLOWER_CMSG_TYPE_FLOW_STATS = 15, diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 93ad969c3653..12c319a219d8 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -196,5 +196,6 @@ void nfp_tunnel_write_macs(struct nfp_app *app); void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4); void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4); void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb); +void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb); #endif diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 8c6b88a1306b..c495f8f38506 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -36,12 +36,36 @@ #include #include #include +#include #include "cmsg.h" #include "main.h" #include "../nfp_net_repr.h" #include "../nfp_net.h" +#define NFP_FL_MAX_ROUTES 32 + +/** + * struct nfp_tun_active_tuns - periodic message of active tunnels + * @seq: sequence number of the message + * @count: number of tunnels reported in the message + * @flags: options part of the request + * @ipv4: dest IPv4 address of active route + * @egress_port: port the encapsulated packet egressed + * @extra: reserved for future use + * @tun_info: tunnels that have sent traffic in the reported period + */ +struct nfp_tun_active_tuns { + __be32 seq; + __be32 count; + __be32 flags; + struct route_ip_info { + __be32 ipv4; + __be32 egress_port; + __be32 extra[2]; + } tun_info[]; +}; + /** * struct nfp_tun_neigh - neighbour/route entry on the NFP * @dst_ipv4: destination IPv4 address @@ -147,6 +171,46 @@ struct nfp_tun_mac_non_nfp_idx { struct list_head list; }; +void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb) +{ + struct nfp_tun_active_tuns *payload; + struct net_device *netdev; + int count, i, pay_len; + struct neighbour *n;
+ __be32 ipv4_addr; + u32 port; + + payload = nfp_flower_cmsg_get_data(skb); + count = be32_to_cpu(payload->count); + if (count > NFP_FL_MAX_ROUTES) { + nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n"); + return; + } + + pay_len = nfp_flower_cmsg_get_data_len(skb); + if (pay_len != sizeof(struct nfp_tun_active_tuns) + + sizeof(struct route_ip_info) * count) { + nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n"); + return; + } + + for (i = 0; i < count; i++) { + ipv4_addr = payload->tun_info[i].ipv4; + port = be32_to_cpu(payload->tun_info[i].egress_port); + netdev = nfp_app_repr_get(app, port); + if (!netdev) + continue; + + n = neigh_lookup(&arp_tbl, &ipv4_addr, netdev); + if (!n) + continue; + + /* Update the used timestamp of the neighbour */ + neigh_event_send(n, NULL); + neigh_release(n); + } +} + static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev) { if (!netdev->rtnl_link_ops) -- cgit v1.2.3 From c011ec1bbfd69e091ca8d77e13fc251a07be57dc Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Wed, 27 Sep 2017 08:23:17 +0200 Subject: mlxsw: spectrum: Add the multicast routing offloading logic Add the multicast router offloading logic, which is in charge of handling the VIF and MFC notifications and translating them into the hardware logic API. The offloading logic has to overcome several obstacles in order to safely comply with the kernel multicast router user API: - It must keep track of the mapping between VIFs and netdevices. The user can add an MFC cache entry pointing to a VIF, delete the VIF and re-add it with a different netdevice. The offloading logic has to handle this in order to be compatible with the kernel logic. - It must keep track of the mapping between netdevices and spectrum RIFs, as the current hardware implementation assumes having a RIF for every port in a multicast router. - It must trap routes pointing to the pimreg device to the kernel, as those packets should be delivered to userspace. - It must handle routes pointing to tunnel VIFs. The current implementation does not support multicast forwarding to tunnels, thus routes that point to a tunnel should be trapped to the kernel. - It must be aware of proxy multicast routes, which include both (*,*) routes and duplicate routes. Currently proxy routes are not offloaded and trigger the abort mechanism: all routes are removed from hardware and traffic goes through the kernel. The multicast routing offloading logic also updates the counters of the offloaded MFC routes in a periodic work item. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller
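The trapping rules listed above reduce to a single decision over a route's VIFs, re-evaluated whenever a VIF or RIF changes. A rough standalone sketch of that policy (all types and names are illustrative stand-ins, and the (*,G) ingress-in-egress special case is omitted for brevity):

#include <stdbool.h>

/* Illustrative stand-ins for the driver's VIF bookkeeping. */
struct ex_vif {
	bool regular;    /* neither a tunnel nor the pimreg device */
	bool has_netdev; /* currently bound to a netdevice */
	bool has_rif;    /* the netdevice has a matching Spectrum RIF */
};

enum ex_action { EX_TRAP, EX_FORWARD };

static bool ex_vif_valid(const struct ex_vif *vif)
{
	return vif->regular && vif->has_netdev && vif->has_rif;
}

static bool ex_vif_blocks_offload(const struct ex_vif *vif)
{
	/* Tunnel/pimreg VIFs, and VIFs whose netdevice has no RIF yet,
	 * cannot be expressed in the ASIC's egress RIF list. */
	return !vif->regular || (vif->has_netdev && !vif->has_rif);
}

/* Trap whenever any VIF on the route cannot be represented in hardware;
 * only fully resolvable routes are forwarded by the ASIC. */
static enum ex_action ex_route_action(const struct ex_vif *ivif,
				      const struct ex_vif *evifs,
				      int num_evifs)
{
	int i, valid = 0;

	if (!ex_vif_valid(ivif))
		return EX_TRAP;
	for (i = 0; i < num_evifs; i++) {
		if (ex_vif_blocks_offload(&evifs[i]))
			return EX_TRAP;
		if (ex_vif_valid(&evifs[i]))
			valid++;
	}
	return valid ? EX_FORWARD : EX_TRAP;
}

Trapped routes stay on the kernel's slow path, so correctness is preserved even when a route cannot be offloaded.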
--- drivers/net/ethernet/mellanox/mlxsw/Makefile | 3 +- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 1 + drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c | 1014 +++++++++++++++++++++ drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h | 133 +++ 4 files changed, 1150 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 4b88158173f3..9b29764905f3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -17,7 +17,8 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_kvdl.o spectrum_acl_tcam.o \ spectrum_acl.o spectrum_flower.o \ spectrum_cnt.o spectrum_fid.o \ - spectrum_ipip.o spectrum_acl_flex_actions.o + spectrum_ipip.o spectrum_acl_flex_actions.o \ + spectrum_mr.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 9355d914a4c8..44c5259e5548 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -153,6 +153,7 @@ struct mlxsw_sp { struct mlxsw_sp_sb *sb; struct mlxsw_sp_bridge *bridge; struct mlxsw_sp_router *router; + struct mlxsw_sp_mr *mr; struct mlxsw_afa *afa; struct mlxsw_sp_acl *acl; struct mlxsw_sp_fid_core *fid_core; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c new file mode 100644 index 000000000000..09120259a45d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c @@ -0,0 +1,1014 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Yotam Gigi + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include + +#include "spectrum_mr.h" +#include "spectrum_router.h" + +struct mlxsw_sp_mr { + const struct mlxsw_sp_mr_ops *mr_ops; + void *catchall_route_priv; + struct delayed_work stats_update_dw; + struct list_head table_list; +#define MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL 5000 /* ms */ + unsigned long priv[0]; + /* priv has to be always the last item */ +}; + +struct mlxsw_sp_mr_vif { + struct net_device *dev; + const struct mlxsw_sp_rif *rif; + unsigned long vif_flags; + + /* A list of route_vif_entry structs that point to routes that the VIF + * instance is used as one of the egress VIFs + */ + struct list_head route_evif_list; + + /* A list of route_vif_entry structs that point to routes that the VIF + * instance is used as an ingress VIF + */ + struct list_head route_ivif_list; +}; + +struct mlxsw_sp_mr_route_vif_entry { + struct list_head vif_node; + struct list_head route_node; + struct mlxsw_sp_mr_vif *mr_vif; + struct mlxsw_sp_mr_route *mr_route; +}; + +struct mlxsw_sp_mr_table { + struct list_head node; + enum mlxsw_sp_l3proto proto; + struct mlxsw_sp *mlxsw_sp; + u32 vr_id; + struct mlxsw_sp_mr_vif vifs[MAXVIFS]; + struct list_head route_list; + struct rhashtable route_ht; + char catchall_route_priv[0]; + /* catchall_route_priv has to be always the last item */ +}; + +struct mlxsw_sp_mr_route { + struct list_head node; + struct rhash_head ht_node; + struct mlxsw_sp_mr_route_key key; + enum mlxsw_sp_mr_route_action route_action; + u16 min_mtu; + struct mfc_cache *mfc4; + void *route_priv; + const struct mlxsw_sp_mr_table *mr_table; + /* A list of route_vif_entry structs that point to the egress VIFs */ + struct list_head evif_list; + /* A route_vif_entry struct that point to the ingress VIF */ + struct mlxsw_sp_mr_route_vif_entry ivif; +}; + +static const struct rhashtable_params mlxsw_sp_mr_route_ht_params = { + .key_len = sizeof(struct mlxsw_sp_mr_route_key), + .key_offset = offsetof(struct mlxsw_sp_mr_route, key), + .head_offset = offsetof(struct mlxsw_sp_mr_route, ht_node), + .automatic_shrinking = true, +}; + +static bool mlxsw_sp_mr_vif_regular(const struct mlxsw_sp_mr_vif *vif) +{ + return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER)); +} + +static bool mlxsw_sp_mr_vif_valid(const struct mlxsw_sp_mr_vif *vif) +{ + return mlxsw_sp_mr_vif_regular(vif) && vif->dev && vif->rif; +} + +static bool mlxsw_sp_mr_vif_rif_invalid(const struct mlxsw_sp_mr_vif *vif) +{ + return mlxsw_sp_mr_vif_regular(vif) && vif->dev && !vif->rif; +} + +static bool +mlxsw_sp_mr_route_ivif_in_evifs(const struct mlxsw_sp_mr_route *mr_route) +{ + vifi_t ivif; + + switch (mr_route->mr_table->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + ivif = mr_route->mfc4->mfc_parent; + return mr_route->mfc4->mfc_un.res.ttls[ivif] != 255; + case MLXSW_SP_L3_PROTO_IPV6: + /* fall through */ + default: + WARN_ON_ONCE(1); + } + return false; +} + +static int +mlxsw_sp_mr_route_valid_evifs_num(const struct mlxsw_sp_mr_route *mr_route) +{ + struct mlxsw_sp_mr_route_vif_entry 
*rve; + int valid_evifs; + + valid_evifs = 0; + list_for_each_entry(rve, &mr_route->evif_list, route_node) + if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) + valid_evifs++; + return valid_evifs; +} + +static bool mlxsw_sp_mr_route_starg(const struct mlxsw_sp_mr_route *mr_route) +{ + switch (mr_route->mr_table->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + return mr_route->key.source_mask.addr4 == INADDR_ANY; + case MLXSW_SP_L3_PROTO_IPV6: + /* fall through */ + default: + WARN_ON_ONCE(1); + } + return false; +} + +static enum mlxsw_sp_mr_route_action +mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route) +{ + struct mlxsw_sp_mr_route_vif_entry *rve; + + /* If the ingress port is not regular and resolved, trap the route */ + if (!mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif)) + return MLXSW_SP_MR_ROUTE_ACTION_TRAP; + + /* The kernel does not match a (*,G) route that the ingress interface is + * not one of the egress interfaces, so trap these kind of routes. + */ + if (mlxsw_sp_mr_route_starg(mr_route) && + !mlxsw_sp_mr_route_ivif_in_evifs(mr_route)) + return MLXSW_SP_MR_ROUTE_ACTION_TRAP; + + /* If the route has no valid eVIFs, trap it. */ + if (!mlxsw_sp_mr_route_valid_evifs_num(mr_route)) + return MLXSW_SP_MR_ROUTE_ACTION_TRAP; + + /* If either one of the eVIFs is not regular (VIF of type pimreg or + * tunnel) or one of the VIFs has no matching RIF, trap the packet. + */ + list_for_each_entry(rve, &mr_route->evif_list, route_node) { + if (!mlxsw_sp_mr_vif_regular(rve->mr_vif) || + mlxsw_sp_mr_vif_rif_invalid(rve->mr_vif)) + return MLXSW_SP_MR_ROUTE_ACTION_TRAP; + } + return MLXSW_SP_MR_ROUTE_ACTION_FORWARD; +} + +static enum mlxsw_sp_mr_route_prio +mlxsw_sp_mr_route_prio(const struct mlxsw_sp_mr_route *mr_route) +{ + return mlxsw_sp_mr_route_starg(mr_route) ? + MLXSW_SP_MR_ROUTE_PRIO_STARG : MLXSW_SP_MR_ROUTE_PRIO_SG; +} + +static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table, + struct mlxsw_sp_mr_route_key *key, + const struct mfc_cache *mfc) +{ + bool starg = (mfc->mfc_origin == INADDR_ANY); + + memset(key, 0, sizeof(*key)); + key->vrid = mr_table->vr_id; + key->proto = mr_table->proto; + key->group.addr4 = mfc->mfc_mcastgrp; + key->group_mask.addr4 = 0xffffffff; + key->source.addr4 = mfc->mfc_origin; + key->source_mask.addr4 = starg ? 
0 : 0xffffffff; +} + +static int mlxsw_sp_mr_route_evif_link(struct mlxsw_sp_mr_route *mr_route, + struct mlxsw_sp_mr_vif *mr_vif) +{ + struct mlxsw_sp_mr_route_vif_entry *rve; + + rve = kzalloc(sizeof(*rve), GFP_KERNEL); + if (!rve) + return -ENOMEM; + rve->mr_route = mr_route; + rve->mr_vif = mr_vif; + list_add_tail(&rve->route_node, &mr_route->evif_list); + list_add_tail(&rve->vif_node, &mr_vif->route_evif_list); + return 0; +} + +static void +mlxsw_sp_mr_route_evif_unlink(struct mlxsw_sp_mr_route_vif_entry *rve) +{ + list_del(&rve->route_node); + list_del(&rve->vif_node); + kfree(rve); +} + +static void mlxsw_sp_mr_route_ivif_link(struct mlxsw_sp_mr_route *mr_route, + struct mlxsw_sp_mr_vif *mr_vif) +{ + mr_route->ivif.mr_route = mr_route; + mr_route->ivif.mr_vif = mr_vif; + list_add_tail(&mr_route->ivif.vif_node, &mr_vif->route_ivif_list); +} + +static void mlxsw_sp_mr_route_ivif_unlink(struct mlxsw_sp_mr_route *mr_route) +{ + list_del(&mr_route->ivif.vif_node); +} + +static int +mlxsw_sp_mr_route_info_create(struct mlxsw_sp_mr_table *mr_table, + struct mlxsw_sp_mr_route *mr_route, + struct mlxsw_sp_mr_route_info *route_info) +{ + struct mlxsw_sp_mr_route_vif_entry *rve; + u16 *erif_indices; + u16 irif_index; + u16 erif = 0; + + erif_indices = kmalloc_array(MAXVIFS, sizeof(*erif_indices), + GFP_KERNEL); + if (!erif_indices) + return -ENOMEM; + + list_for_each_entry(rve, &mr_route->evif_list, route_node) { + if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) { + u16 rifi = mlxsw_sp_rif_index(rve->mr_vif->rif); + + erif_indices[erif++] = rifi; + } + } + + if (mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif)) + irif_index = mlxsw_sp_rif_index(mr_route->ivif.mr_vif->rif); + else + irif_index = 0; + + route_info->irif_index = irif_index; + route_info->erif_indices = erif_indices; + route_info->min_mtu = mr_route->min_mtu; + route_info->route_action = mr_route->route_action; + route_info->erif_num = erif; + return 0; +} + +static void +mlxsw_sp_mr_route_info_destroy(struct mlxsw_sp_mr_route_info *route_info) +{ + kfree(route_info->erif_indices); +} + +static int mlxsw_sp_mr_route_write(struct mlxsw_sp_mr_table *mr_table, + struct mlxsw_sp_mr_route *mr_route, + bool replace) +{ + struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp; + struct mlxsw_sp_mr_route_info route_info; + struct mlxsw_sp_mr *mr = mlxsw_sp->mr; + int err; + + err = mlxsw_sp_mr_route_info_create(mr_table, mr_route, &route_info); + if (err) + return err; + + if (!replace) { + struct mlxsw_sp_mr_route_params route_params; + + mr_route->route_priv = kzalloc(mr->mr_ops->route_priv_size, + GFP_KERNEL); + if (!mr_route->route_priv) { + err = -ENOMEM; + goto out; + } + + route_params.key = mr_route->key; + route_params.value = route_info; + route_params.prio = mlxsw_sp_mr_route_prio(mr_route); + err = mr->mr_ops->route_create(mlxsw_sp, mr->priv, + mr_route->route_priv, + &route_params); + if (err) + kfree(mr_route->route_priv); + } else { + err = mr->mr_ops->route_update(mlxsw_sp, mr_route->route_priv, + &route_info); + } +out: + mlxsw_sp_mr_route_info_destroy(&route_info); + return err; +} + +static void mlxsw_sp_mr_route_erase(struct mlxsw_sp_mr_table *mr_table, + struct mlxsw_sp_mr_route *mr_route) +{ + struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp; + struct mlxsw_sp_mr *mr = mlxsw_sp->mr; + + mr->mr_ops->route_destroy(mlxsw_sp, mr->priv, mr_route->route_priv); + kfree(mr_route->route_priv); +} + +static struct mlxsw_sp_mr_route * +mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table, + struct mfc_cache *mfc) +{ + struct 
mlxsw_sp_mr_route_vif_entry *rve, *tmp; + struct mlxsw_sp_mr_route *mr_route; + int err; + int i; + + /* Allocate and init a new route and fill it with parameters */ + mr_route = kzalloc(sizeof(*mr_route), GFP_KERNEL); + if (!mr_route) + return ERR_PTR(-ENOMEM); + INIT_LIST_HEAD(&mr_route->evif_list); + mlxsw_sp_mr_route4_key(mr_table, &mr_route->key, mfc); + + /* Find min_mtu and link iVIF and eVIFs */ + mr_route->min_mtu = ETH_MAX_MTU; + ipmr_cache_hold(mfc); + mr_route->mfc4 = mfc; + mr_route->mr_table = mr_table; + for (i = 0; i < MAXVIFS; i++) { + if (mfc->mfc_un.res.ttls[i] != 255) { + err = mlxsw_sp_mr_route_evif_link(mr_route, + &mr_table->vifs[i]); + if (err) + goto err; + if (mr_table->vifs[i].dev && + mr_table->vifs[i].dev->mtu < mr_route->min_mtu) + mr_route->min_mtu = mr_table->vifs[i].dev->mtu; + } + } + mlxsw_sp_mr_route_ivif_link(mr_route, &mr_table->vifs[mfc->mfc_parent]); + + mr_route->route_action = mlxsw_sp_mr_route_action(mr_route); + return mr_route; +err: + ipmr_cache_put(mfc); + list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node) + mlxsw_sp_mr_route_evif_unlink(rve); + kfree(mr_route); + return ERR_PTR(err); +} + +static void mlxsw_sp_mr_route4_destroy(struct mlxsw_sp_mr_table *mr_table, + struct mlxsw_sp_mr_route *mr_route) +{ + struct mlxsw_sp_mr_route_vif_entry *rve, *tmp; + + mlxsw_sp_mr_route_ivif_unlink(mr_route); + ipmr_cache_put(mr_route->mfc4); + list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node) + mlxsw_sp_mr_route_evif_unlink(rve); + kfree(mr_route); +} + +static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table, + struct mlxsw_sp_mr_route *mr_route) +{ + switch (mr_table->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + mlxsw_sp_mr_route4_destroy(mr_table, mr_route); + break; + case MLXSW_SP_L3_PROTO_IPV6: + /* fall through */ + default: + WARN_ON_ONCE(1); + } +} + +static void mlxsw_sp_mr_mfc_offload_set(struct mlxsw_sp_mr_route *mr_route, + bool offload) +{ + switch (mr_route->mr_table->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + if (offload) + mr_route->mfc4->mfc_flags |= MFC_OFFLOAD; + else + mr_route->mfc4->mfc_flags &= ~MFC_OFFLOAD; + break; + case MLXSW_SP_L3_PROTO_IPV6: + /* fall through */ + default: + WARN_ON_ONCE(1); + } +} + +static void mlxsw_sp_mr_mfc_offload_update(struct mlxsw_sp_mr_route *mr_route) +{ + bool offload; + + offload = mr_route->route_action != MLXSW_SP_MR_ROUTE_ACTION_TRAP; + mlxsw_sp_mr_mfc_offload_set(mr_route, offload); +} + +static void __mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table, + struct mlxsw_sp_mr_route *mr_route) +{ + mlxsw_sp_mr_mfc_offload_set(mr_route, false); + mlxsw_sp_mr_route_erase(mr_table, mr_route); + rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node, + mlxsw_sp_mr_route_ht_params); + list_del(&mr_route->node); + mlxsw_sp_mr_route_destroy(mr_table, mr_route); +} + +int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table, + struct mfc_cache *mfc, bool replace) +{ + struct mlxsw_sp_mr_route *mr_orig_route = NULL; + struct mlxsw_sp_mr_route *mr_route; + int err; + + /* If the route is a (*,*) route, abort, as this kind of route is + * used for proxy routes.
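+ * Note that mlxsw_sp_mr_route4_key() always installs a full group mask, so a true (*,*) match could not be expressed to the hardware in any case.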
+ */ + if (mfc->mfc_origin == INADDR_ANY && mfc->mfc_mcastgrp == INADDR_ANY) { + dev_warn(mr_table->mlxsw_sp->bus_info->dev, + "Offloading proxy routes is not supported.\n"); + return -EINVAL; + } + + /* Create a new route */ + mr_route = mlxsw_sp_mr_route4_create(mr_table, mfc); + if (IS_ERR(mr_route)) + return PTR_ERR(mr_route); + + /* Find any route with a matching key */ + mr_orig_route = rhashtable_lookup_fast(&mr_table->route_ht, + &mr_route->key, + mlxsw_sp_mr_route_ht_params); + if (replace) { + /* On replace case, make the route point to the new route_priv. + */ + if (WARN_ON(!mr_orig_route)) { + err = -ENOENT; + goto err_no_orig_route; + } + mr_route->route_priv = mr_orig_route->route_priv; + } else if (mr_orig_route) { + /* On non replace case, if another route with the same key was + * found, abort, as duplicate routes are used for proxy routes. + */ + dev_warn(mr_table->mlxsw_sp->bus_info->dev, + "Offloading proxy routes is not supported.\n"); + err = -EINVAL; + goto err_duplicate_route; + } + + /* Put it in the table data-structures */ + list_add_tail(&mr_route->node, &mr_table->route_list); + err = rhashtable_insert_fast(&mr_table->route_ht, + &mr_route->ht_node, + mlxsw_sp_mr_route_ht_params); + if (err) + goto err_rhashtable_insert; + + /* Write the route to the hardware */ + err = mlxsw_sp_mr_route_write(mr_table, mr_route, replace); + if (err) + goto err_mr_route_write; + + /* Destroy the original route */ + if (replace) { + rhashtable_remove_fast(&mr_table->route_ht, + &mr_orig_route->ht_node, + mlxsw_sp_mr_route_ht_params); + list_del(&mr_orig_route->node); + mlxsw_sp_mr_route4_destroy(mr_table, mr_orig_route); + } + + mlxsw_sp_mr_mfc_offload_update(mr_route); + return 0; + +err_mr_route_write: + rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node, + mlxsw_sp_mr_route_ht_params); +err_rhashtable_insert: + list_del(&mr_route->node); +err_no_orig_route: +err_duplicate_route: + mlxsw_sp_mr_route4_destroy(mr_table, mr_route); + return err; +} + +void mlxsw_sp_mr_route4_del(struct mlxsw_sp_mr_table *mr_table, + struct mfc_cache *mfc) +{ + struct mlxsw_sp_mr_route *mr_route; + struct mlxsw_sp_mr_route_key key; + + mlxsw_sp_mr_route4_key(mr_table, &key, mfc); + mr_route = rhashtable_lookup_fast(&mr_table->route_ht, &key, + mlxsw_sp_mr_route_ht_params); + if (mr_route) + __mlxsw_sp_mr_route_del(mr_table, mr_route); +} + +/* Should be called after the VIF struct is updated */ +static int +mlxsw_sp_mr_route_ivif_resolve(struct mlxsw_sp_mr_table *mr_table, + struct mlxsw_sp_mr_route_vif_entry *rve) +{ + struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp; + enum mlxsw_sp_mr_route_action route_action; + struct mlxsw_sp_mr *mr = mlxsw_sp->mr; + u16 irif_index; + int err; + + route_action = mlxsw_sp_mr_route_action(rve->mr_route); + if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP) + return 0; + + /* rve->mr_vif->rif is guaranteed to be valid at this stage */ + irif_index = mlxsw_sp_rif_index(rve->mr_vif->rif); + err = mr->mr_ops->route_irif_update(mlxsw_sp, rve->mr_route->route_priv, + irif_index); + if (err) + return err; + + err = mr->mr_ops->route_action_update(mlxsw_sp, + rve->mr_route->route_priv, + route_action); + if (err) + /* No need to rollback here because the iRIF change only takes + * place after the action has been updated. 
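+ * Until the action update succeeds the route keeps its previous trap action, so a failure here leaves the hardware in a consistent state.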
+ */ + return err; + + rve->mr_route->route_action = route_action; + mlxsw_sp_mr_mfc_offload_update(rve->mr_route); + return 0; +} + +static void +mlxsw_sp_mr_route_ivif_unresolve(struct mlxsw_sp_mr_table *mr_table, + struct mlxsw_sp_mr_route_vif_entry *rve) +{ + struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp; + struct mlxsw_sp_mr *mr = mlxsw_sp->mr; + + mr->mr_ops->route_action_update(mlxsw_sp, rve->mr_route->route_priv, + MLXSW_SP_MR_ROUTE_ACTION_TRAP); + rve->mr_route->route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP; + mlxsw_sp_mr_mfc_offload_update(rve->mr_route); +} + +/* Should be called after the RIF struct is updated */ +static int +mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table, + struct mlxsw_sp_mr_route_vif_entry *rve) +{ + struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp; + enum mlxsw_sp_mr_route_action route_action; + struct mlxsw_sp_mr *mr = mlxsw_sp->mr; + u16 erif_index = 0; + int err; + + /* Update the route action, as the new eVIF can be a tunnel or a pimreg + * device which will require updating the action. + */ + route_action = mlxsw_sp_mr_route_action(rve->mr_route); + if (route_action != rve->mr_route->route_action) { + err = mr->mr_ops->route_action_update(mlxsw_sp, + rve->mr_route->route_priv, + route_action); + if (err) + return err; + } + + /* Add the eRIF */ + if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) { + erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif); + err = mr->mr_ops->route_erif_add(mlxsw_sp, + rve->mr_route->route_priv, + erif_index); + if (err) + goto err_route_erif_add; + } + + /* Update the minimum MTU */ + if (rve->mr_vif->dev->mtu < rve->mr_route->min_mtu) { + rve->mr_route->min_mtu = rve->mr_vif->dev->mtu; + err = mr->mr_ops->route_min_mtu_update(mlxsw_sp, + rve->mr_route->route_priv, + rve->mr_route->min_mtu); + if (err) + goto err_route_min_mtu_update; + } + + rve->mr_route->route_action = route_action; + mlxsw_sp_mr_mfc_offload_update(rve->mr_route); + return 0; + +err_route_min_mtu_update: + if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) + mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv, + erif_index); +err_route_erif_add: + if (route_action != rve->mr_route->route_action) + mr->mr_ops->route_action_update(mlxsw_sp, + rve->mr_route->route_priv, + rve->mr_route->route_action); + return err; +} + +/* Should be called before the RIF struct is updated */ +static void +mlxsw_sp_mr_route_evif_unresolve(struct mlxsw_sp_mr_table *mr_table, + struct mlxsw_sp_mr_route_vif_entry *rve) +{ + struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp; + enum mlxsw_sp_mr_route_action route_action; + struct mlxsw_sp_mr *mr = mlxsw_sp->mr; + u16 rifi; + + /* If the unresolved RIF was not valid, no need to delete it */ + if (!mlxsw_sp_mr_vif_valid(rve->mr_vif)) + return; + + /* Update the route action: if there is only one valid eVIF in the + * route, set the action to trap as the VIF deletion will lead to zero + * valid eVIFs. On any other case, use the mlxsw_sp_mr_route_action to + * determine the route action. 
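+ * (mlxsw_sp_mr_route_action() alone would not do here: it still counts the eVIF that is about to be removed.)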
+ */ + if (mlxsw_sp_mr_route_valid_evifs_num(rve->mr_route) == 1) + route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP; + else + route_action = mlxsw_sp_mr_route_action(rve->mr_route); + if (route_action != rve->mr_route->route_action) + mr->mr_ops->route_action_update(mlxsw_sp, + rve->mr_route->route_priv, + route_action); + + /* Delete the erif from the route */ + rifi = mlxsw_sp_rif_index(rve->mr_vif->rif); + mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv, rifi); + rve->mr_route->route_action = route_action; + mlxsw_sp_mr_mfc_offload_update(rve->mr_route); +} + +static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table, + struct net_device *dev, + struct mlxsw_sp_mr_vif *mr_vif, + unsigned long vif_flags, + const struct mlxsw_sp_rif *rif) +{ + struct mlxsw_sp_mr_route_vif_entry *irve, *erve; + int err; + + /* Update the VIF */ + mr_vif->dev = dev; + mr_vif->rif = rif; + mr_vif->vif_flags = vif_flags; + + /* Update all routes where this VIF is used as an unresolved iRIF */ + list_for_each_entry(irve, &mr_vif->route_ivif_list, vif_node) { + err = mlxsw_sp_mr_route_ivif_resolve(mr_table, irve); + if (err) + goto err_irif_unresolve; + } + + /* Update all routes where this VIF is used as an unresolved eRIF */ + list_for_each_entry(erve, &mr_vif->route_evif_list, vif_node) { + err = mlxsw_sp_mr_route_evif_resolve(mr_table, erve); + if (err) + goto err_erif_unresolve; + } + return 0; + +err_erif_unresolve: + list_for_each_entry_from_reverse(erve, &mr_vif->route_evif_list, + vif_node) + mlxsw_sp_mr_route_evif_unresolve(mr_table, erve); +err_irif_unresolve: + list_for_each_entry_from_reverse(irve, &mr_vif->route_ivif_list, + vif_node) + mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve); + mr_vif->rif = NULL; + return err; +} + +static void mlxsw_sp_mr_vif_unresolve(struct mlxsw_sp_mr_table *mr_table, + struct net_device *dev, + struct mlxsw_sp_mr_vif *mr_vif) +{ + struct mlxsw_sp_mr_route_vif_entry *rve; + + /* Update all routes where this VIF is used as an unresolved eRIF */ + list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node) + mlxsw_sp_mr_route_evif_unresolve(mr_table, rve); + + /* Update all routes where this VIF is used as an unresolved iRIF */ + list_for_each_entry(rve, &mr_vif->route_ivif_list, vif_node) + mlxsw_sp_mr_route_ivif_unresolve(mr_table, rve); + + /* Update the VIF */ + mr_vif->dev = dev; + mr_vif->rif = NULL; +} + +int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table, + struct net_device *dev, vifi_t vif_index, + unsigned long vif_flags, const struct mlxsw_sp_rif *rif) +{ + struct mlxsw_sp_mr_vif *mr_vif = &mr_table->vifs[vif_index]; + + if (WARN_ON(vif_index >= MAXVIFS)) + return -EINVAL; + if (mr_vif->dev) + return -EEXIST; + return mlxsw_sp_mr_vif_resolve(mr_table, dev, mr_vif, vif_flags, rif); +} + +void mlxsw_sp_mr_vif_del(struct mlxsw_sp_mr_table *mr_table, vifi_t vif_index) +{ + struct mlxsw_sp_mr_vif *mr_vif = &mr_table->vifs[vif_index]; + + if (WARN_ON(vif_index >= MAXVIFS)) + return; + if (WARN_ON(!mr_vif->dev)) + return; + mlxsw_sp_mr_vif_unresolve(mr_table, NULL, mr_vif); +} + +struct mlxsw_sp_mr_vif * +mlxsw_sp_mr_dev_vif_lookup(struct mlxsw_sp_mr_table *mr_table, + const struct net_device *dev) +{ + vifi_t vif_index; + + for (vif_index = 0; vif_index < MAXVIFS; vif_index++) + if (mr_table->vifs[vif_index].dev == dev) + return &mr_table->vifs[vif_index]; + return NULL; +} + +int mlxsw_sp_mr_rif_add(struct mlxsw_sp_mr_table *mr_table, + const struct mlxsw_sp_rif *rif) +{ + const struct net_device *rif_dev = 
mlxsw_sp_rif_dev(rif); + struct mlxsw_sp_mr_vif *mr_vif; + + if (!rif_dev) + return 0; + + mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev); + if (!mr_vif) + return 0; + return mlxsw_sp_mr_vif_resolve(mr_table, mr_vif->dev, mr_vif, + mr_vif->vif_flags, rif); +} + +void mlxsw_sp_mr_rif_del(struct mlxsw_sp_mr_table *mr_table, + const struct mlxsw_sp_rif *rif) +{ + const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif); + struct mlxsw_sp_mr_vif *mr_vif; + + if (!rif_dev) + return; + + mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev); + if (!mr_vif) + return; + mlxsw_sp_mr_vif_unresolve(mr_table, mr_vif->dev, mr_vif); +} + +void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table, + const struct mlxsw_sp_rif *rif, int mtu) +{ + const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif); + struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp; + struct mlxsw_sp_mr_route_vif_entry *rve; + struct mlxsw_sp_mr *mr = mlxsw_sp->mr; + struct mlxsw_sp_mr_vif *mr_vif; + + if (!rif_dev) + return; + + /* Search for a VIF that use that RIF */ + mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev); + if (!mr_vif) + return; + + /* Update all the routes that uses that VIF as eVIF */ + list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node) { + if (mtu < rve->mr_route->min_mtu) { + rve->mr_route->min_mtu = mtu; + mr->mr_ops->route_min_mtu_update(mlxsw_sp, + rve->mr_route->route_priv, + mtu); + } + } +} + +struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp, + u32 vr_id, + enum mlxsw_sp_l3proto proto) +{ + struct mlxsw_sp_mr_route_params catchall_route_params = { + .prio = MLXSW_SP_MR_ROUTE_PRIO_CATCHALL, + .key = { + .vrid = vr_id, + }, + .value = { + .route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP, + } + }; + struct mlxsw_sp_mr *mr = mlxsw_sp->mr; + struct mlxsw_sp_mr_table *mr_table; + int err; + int i; + + mr_table = kzalloc(sizeof(*mr_table) + mr->mr_ops->route_priv_size, + GFP_KERNEL); + if (!mr_table) + return ERR_PTR(-ENOMEM); + + mr_table->vr_id = vr_id; + mr_table->mlxsw_sp = mlxsw_sp; + mr_table->proto = proto; + INIT_LIST_HEAD(&mr_table->route_list); + + err = rhashtable_init(&mr_table->route_ht, + &mlxsw_sp_mr_route_ht_params); + if (err) + goto err_route_rhashtable_init; + + for (i = 0; i < MAXVIFS; i++) { + INIT_LIST_HEAD(&mr_table->vifs[i].route_evif_list); + INIT_LIST_HEAD(&mr_table->vifs[i].route_ivif_list); + } + + err = mr->mr_ops->route_create(mlxsw_sp, mr->priv, + mr_table->catchall_route_priv, + &catchall_route_params); + if (err) + goto err_ops_route_create; + list_add_tail(&mr_table->node, &mr->table_list); + return mr_table; + +err_ops_route_create: + rhashtable_destroy(&mr_table->route_ht); +err_route_rhashtable_init: + kfree(mr_table); + return ERR_PTR(err); +} + +void mlxsw_sp_mr_table_destroy(struct mlxsw_sp_mr_table *mr_table) +{ + struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp; + struct mlxsw_sp_mr *mr = mlxsw_sp->mr; + + WARN_ON(!mlxsw_sp_mr_table_empty(mr_table)); + list_del(&mr_table->node); + mr->mr_ops->route_destroy(mlxsw_sp, mr->priv, + &mr_table->catchall_route_priv); + rhashtable_destroy(&mr_table->route_ht); + kfree(mr_table); +} + +void mlxsw_sp_mr_table_flush(struct mlxsw_sp_mr_table *mr_table) +{ + struct mlxsw_sp_mr_route *mr_route, *tmp; + int i; + + list_for_each_entry_safe(mr_route, tmp, &mr_table->route_list, node) + __mlxsw_sp_mr_route_del(mr_table, mr_route); + + for (i = 0; i < MAXVIFS; i++) { + mr_table->vifs[i].dev = NULL; + mr_table->vifs[i].rif = NULL; + } +} + +bool mlxsw_sp_mr_table_empty(const struct 
mlxsw_sp_mr_table *mr_table) +{ + int i; + + for (i = 0; i < MAXVIFS; i++) + if (mr_table->vifs[i].dev) + return false; + return list_empty(&mr_table->route_list); +} + +static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mr_route *mr_route) +{ + struct mlxsw_sp_mr *mr = mlxsw_sp->mr; + u64 packets, bytes; + + if (mr_route->route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP) + return; + + mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets, + &bytes); + + switch (mr_route->mr_table->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + if (mr_route->mfc4->mfc_un.res.pkt != packets) + mr_route->mfc4->mfc_un.res.lastuse = jiffies; + mr_route->mfc4->mfc_un.res.pkt = packets; + mr_route->mfc4->mfc_un.res.bytes = bytes; + break; + case MLXSW_SP_L3_PROTO_IPV6: + /* fall through */ + default: + WARN_ON_ONCE(1); + } +} + +static void mlxsw_sp_mr_stats_update(struct work_struct *work) +{ + struct mlxsw_sp_mr *mr = container_of(work, struct mlxsw_sp_mr, + stats_update_dw.work); + struct mlxsw_sp_mr_table *mr_table; + struct mlxsw_sp_mr_route *mr_route; + unsigned long interval; + + rtnl_lock(); + list_for_each_entry(mr_table, &mr->table_list, node) + list_for_each_entry(mr_route, &mr_table->route_list, node) + mlxsw_sp_mr_route_stats_update(mr_table->mlxsw_sp, + mr_route); + rtnl_unlock(); + + interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL); + mlxsw_core_schedule_dw(&mr->stats_update_dw, interval); +} + +int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_mr_ops *mr_ops) +{ + struct mlxsw_sp_mr *mr; + unsigned long interval; + int err; + + mr = kzalloc(sizeof(*mr) + mr_ops->priv_size, GFP_KERNEL); + if (!mr) + return -ENOMEM; + mr->mr_ops = mr_ops; + mlxsw_sp->mr = mr; + INIT_LIST_HEAD(&mr->table_list); + + err = mr_ops->init(mlxsw_sp, mr->priv); + if (err) + goto err; + + /* Create the delayed work for counter updates */ + INIT_DELAYED_WORK(&mr->stats_update_dw, mlxsw_sp_mr_stats_update); + interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL); + mlxsw_core_schedule_dw(&mr->stats_update_dw, interval); + return 0; +err: + kfree(mr); + return err; +} + +void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_mr *mr = mlxsw_sp->mr; + + cancel_delayed_work_sync(&mr->stats_update_dw); + mr->mr_ops->fini(mr->priv); + kfree(mr); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h new file mode 100644 index 000000000000..c851b237d253 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h @@ -0,0 +1,133 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Yotam Gigi + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MLXSW_SPECTRUM_MCROUTER_H +#define _MLXSW_SPECTRUM_MCROUTER_H + +#include +#include "spectrum_router.h" +#include "spectrum.h" + +enum mlxsw_sp_mr_route_action { + MLXSW_SP_MR_ROUTE_ACTION_FORWARD, + MLXSW_SP_MR_ROUTE_ACTION_TRAP, +}; + +enum mlxsw_sp_mr_route_prio { + MLXSW_SP_MR_ROUTE_PRIO_SG, + MLXSW_SP_MR_ROUTE_PRIO_STARG, + MLXSW_SP_MR_ROUTE_PRIO_CATCHALL, + __MLXSW_SP_MR_ROUTE_PRIO_MAX +}; + +#define MLXSW_SP_MR_ROUTE_PRIO_MAX (__MLXSW_SP_MR_ROUTE_PRIO_MAX - 1) + +struct mlxsw_sp_mr_route_key { + int vrid; + enum mlxsw_sp_l3proto proto; + union mlxsw_sp_l3addr group; + union mlxsw_sp_l3addr group_mask; + union mlxsw_sp_l3addr source; + union mlxsw_sp_l3addr source_mask; +}; + +struct mlxsw_sp_mr_route_info { + enum mlxsw_sp_mr_route_action route_action; + u16 irif_index; + u16 *erif_indices; + size_t erif_num; + u16 min_mtu; +}; + +struct mlxsw_sp_mr_route_params { + struct mlxsw_sp_mr_route_key key; + struct mlxsw_sp_mr_route_info value; + enum mlxsw_sp_mr_route_prio prio; +}; + +struct mlxsw_sp_mr_ops { + int priv_size; + int route_priv_size; + int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv); + int (*route_create)(struct mlxsw_sp *mlxsw_sp, void *priv, + void *route_priv, + struct mlxsw_sp_mr_route_params *route_params); + int (*route_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv, + struct mlxsw_sp_mr_route_info *route_info); + int (*route_stats)(struct mlxsw_sp *mlxsw_sp, void *route_priv, + u64 *packets, u64 *bytes); + int (*route_action_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv, + enum mlxsw_sp_mr_route_action route_action); + int (*route_min_mtu_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv, + u16 min_mtu); + int (*route_irif_update)(struct mlxsw_sp *mlxsw_sp, void *route_priv, + u16 irif_index); + int (*route_erif_add)(struct mlxsw_sp *mlxsw_sp, void *route_priv, + u16 erif_index); + int (*route_erif_del)(struct mlxsw_sp *mlxsw_sp, void *route_priv, + u16 erif_index); + void (*route_destroy)(struct mlxsw_sp *mlxsw_sp, void *priv, + void *route_priv); + void (*fini)(void *priv); +}; + +struct mlxsw_sp_mr; +struct mlxsw_sp_mr_table; + +int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_mr_ops *mr_ops); +void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp); +int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table, + struct mfc_cache *mfc, bool replace); +void mlxsw_sp_mr_route4_del(struct mlxsw_sp_mr_table *mr_table, + struct mfc_cache *mfc); +int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table, + struct net_device *dev, vifi_t vif_index, + 
unsigned long vif_flags, + const struct mlxsw_sp_rif *rif); +void mlxsw_sp_mr_vif_del(struct mlxsw_sp_mr_table *mr_table, vifi_t vif_index); +int mlxsw_sp_mr_rif_add(struct mlxsw_sp_mr_table *mr_table, + const struct mlxsw_sp_rif *rif); +void mlxsw_sp_mr_rif_del(struct mlxsw_sp_mr_table *mr_table, + const struct mlxsw_sp_rif *rif); +void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table, + const struct mlxsw_sp_rif *rif, int mtu); +struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp, + u32 tb_id, + enum mlxsw_sp_l3proto proto); +void mlxsw_sp_mr_table_destroy(struct mlxsw_sp_mr_table *mr_table); +void mlxsw_sp_mr_table_flush(struct mlxsw_sp_mr_table *mr_table); +bool mlxsw_sp_mr_table_empty(const struct mlxsw_sp_mr_table *mr_table); + +#endif -- cgit v1.2.3 From 0e14c7777acb6d58250cb746685dde0a74d60fe8 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Wed, 27 Sep 2017 08:23:18 +0200 Subject: mlxsw: spectrum: Add the multicast routing hardware logic Implement the multicast routing hardware API introduced in the previous patch for the specific Spectrum hardware. The Spectrum hardware multicast routes are written using the RMFT2 register and point to an ACL flexible action set. The actions used for multicast routes are: - Counter action, which allows counting bytes and packets on multicast routes. - Multicast route action, which provides the RPF check and does the actual packet duplication to a list of RIFs. - Trap action, in case the route action specified by the caller is trap. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller
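Since RMFT2 entries are value/mask TCAM matches, (S,G) and (*,G) routes share one lookup: a (*,G) entry simply carries an all-zero source mask. A small self-contained sketch of that matching model (names and layout are illustrative only, not the actual register format):

#include <stdint.h>
#include <stdio.h>

struct ex_mc_tcam_key {
	uint32_t group, group_mask;
	uint32_t source, source_mask;
};

/* source == 0 denotes (*,G): match the group from any source. */
static struct ex_mc_tcam_key ex_make_key(uint32_t group, uint32_t source)
{
	struct ex_mc_tcam_key key = {
		.group = group,
		.group_mask = 0xffffffff,
		.source = source,
		.source_mask = source ? 0xffffffff : 0,
	};
	return key;
}

static int ex_key_matches(const struct ex_mc_tcam_key *key,
			  uint32_t group, uint32_t source)
{
	return (group & key->group_mask) == key->group &&
	       (source & key->source_mask) == key->source;
}

int main(void)
{
	/* (*,G) entry for 232.1.1.1 (0xe8010101). */
	struct ex_mc_tcam_key starg = ex_make_key(0xe8010101, 0);

	printf("%d\n", ex_key_matches(&starg, 0xe8010101, 0x0a000001)); /* 1 */
	printf("%d\n", ex_key_matches(&starg, 0xe8010102, 0x0a000001)); /* 0 */
	return 0;
}

Because the TCAM is priority-ordered, with (S,G) entries ahead of (*,G) entries ahead of the per-table catch-all, the first hit is always the most specific route.

--- drivers/net/ethernet/mellanox/mlxsw/Makefile | 2 +- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 1 + .../net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c | 828 +++++++++++++++++++++ .../net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h | 43 ++ 4 files changed, 873 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 9b29764905f3..4816504419fc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -18,7 +18,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_acl.o spectrum_flower.o \ spectrum_cnt.o spectrum_fid.o \ spectrum_ipip.o spectrum_acl_flex_actions.o \ - spectrum_mr.o + spectrum_mr.o spectrum_mr_tcam.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 44c5259e5548..ae67e6046098 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -139,6 +139,7 @@ struct mlxsw_sp_port_mall_tc_entry { struct mlxsw_sp_sb; struct mlxsw_sp_bridge; struct mlxsw_sp_router; +struct mlxsw_sp_mr; struct mlxsw_sp_acl; struct mlxsw_sp_counter_pool; struct mlxsw_sp_fid_core; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c new file mode 100644 index 000000000000..cda9e9ad10e3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c @@ -0,0 +1,828 @@ +/* + *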
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Yotam Gigi + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include + +#include "reg.h" +#include "spectrum.h" +#include "core_acl_flex_actions.h" +#include "spectrum_mr.h" + +struct mlxsw_sp_mr_tcam_region { + struct mlxsw_sp *mlxsw_sp; + enum mlxsw_reg_rtar_key_type rtar_key_type; + struct parman *parman; + struct parman_prio *parman_prios; +}; + +struct mlxsw_sp_mr_tcam { + struct mlxsw_sp_mr_tcam_region ipv4_tcam_region; +}; + +/* This struct maps to one RIGR2 register entry */ +struct mlxsw_sp_mr_erif_sublist { + struct list_head list; + u32 rigr2_kvdl_index; + int num_erifs; + u16 erif_indices[MLXSW_REG_RIGR2_MAX_ERIFS]; + bool synced; +}; + +struct mlxsw_sp_mr_tcam_erif_list { + struct list_head erif_sublists; + u32 kvdl_index; +}; + +static bool +mlxsw_sp_mr_erif_sublist_full(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mr_erif_sublist *erif_sublist) +{ + int erif_list_entries = MLXSW_CORE_RES_GET(mlxsw_sp->core, + MC_ERIF_LIST_ENTRIES); + + return erif_sublist->num_erifs == erif_list_entries; +} + +static void +mlxsw_sp_mr_erif_list_init(struct mlxsw_sp_mr_tcam_erif_list *erif_list) +{ + INIT_LIST_HEAD(&erif_list->erif_sublists); +} + +#define MLXSW_SP_KVDL_RIGR2_SIZE 1 + +static struct mlxsw_sp_mr_erif_sublist * +mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mr_tcam_erif_list *erif_list) +{ + struct mlxsw_sp_mr_erif_sublist *erif_sublist; + int err; + + erif_sublist = kzalloc(sizeof(*erif_sublist), GFP_KERNEL); + if (!erif_sublist) + return ERR_PTR(-ENOMEM); + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_RIGR2_SIZE, + &erif_sublist->rigr2_kvdl_index); + if (err) { + kfree(erif_sublist); + return ERR_PTR(err); + } + + list_add_tail(&erif_sublist->list, &erif_list->erif_sublists); + return erif_sublist; +} + +static void +mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mr_erif_sublist *erif_sublist) +{ + list_del(&erif_sublist->list); + mlxsw_sp_kvdl_free(mlxsw_sp, erif_sublist->rigr2_kvdl_index); + kfree(erif_sublist); +} + +static int +mlxsw_sp_mr_erif_list_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mr_tcam_erif_list *erif_list, + u16 erif_index) +{ + struct mlxsw_sp_mr_erif_sublist *sublist; + + /* If either there is no erif_entry or the last one is full, allocate a + * new one. 
+ */ + if (list_empty(&erif_list->erif_sublists)) { + sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp, erif_list); + if (IS_ERR(sublist)) + return PTR_ERR(sublist); + erif_list->kvdl_index = sublist->rigr2_kvdl_index; + } else { + sublist = list_last_entry(&erif_list->erif_sublists, + struct mlxsw_sp_mr_erif_sublist, + list); + sublist->synced = false; + if (mlxsw_sp_mr_erif_sublist_full(mlxsw_sp, sublist)) { + sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp, + erif_list); + if (IS_ERR(sublist)) + return PTR_ERR(sublist); + } + } + + /* Add the eRIF to the last entry's last index */ + sublist->erif_indices[sublist->num_erifs++] = erif_index; + return 0; +} + +static void +mlxsw_sp_mr_erif_list_flush(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mr_tcam_erif_list *erif_list) +{ + struct mlxsw_sp_mr_erif_sublist *erif_sublist, *tmp; + + list_for_each_entry_safe(erif_sublist, tmp, &erif_list->erif_sublists, + list) + mlxsw_sp_mr_erif_sublist_destroy(mlxsw_sp, erif_sublist); +} + +static int +mlxsw_sp_mr_erif_list_commit(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mr_tcam_erif_list *erif_list) +{ + struct mlxsw_sp_mr_erif_sublist *curr_sublist; + char rigr2_pl[MLXSW_REG_RIGR2_LEN]; + int err; + int i; + + list_for_each_entry(curr_sublist, &erif_list->erif_sublists, list) { + if (curr_sublist->synced) + continue; + + /* If the sublist is not the last one, pack the next index */ + if (list_is_last(&curr_sublist->list, + &erif_list->erif_sublists)) { + mlxsw_reg_rigr2_pack(rigr2_pl, + curr_sublist->rigr2_kvdl_index, + false, 0); + } else { + struct mlxsw_sp_mr_erif_sublist *next_sublist; + + next_sublist = list_next_entry(curr_sublist, list); + mlxsw_reg_rigr2_pack(rigr2_pl, + curr_sublist->rigr2_kvdl_index, + true, + next_sublist->rigr2_kvdl_index); + } + + /* Pack all the erifs */ + for (i = 0; i < curr_sublist->num_erifs; i++) { + u16 erif_index = curr_sublist->erif_indices[i]; + + mlxsw_reg_rigr2_erif_entry_pack(rigr2_pl, i, true, + erif_index); + } + + /* Write the entry */ + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rigr2), + rigr2_pl); + if (err) + /* No need of a rollback here because this + * hardware entry should not be pointed yet. 
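+ * Nothing can reach this entry until an RMFT2 rule's action block points at the list's KVD index, so a partially written chain is never hit by a lookup.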
+ */ + return err; + curr_sublist->synced = true; + } + return 0; +} + +static void mlxsw_sp_mr_erif_list_move(struct mlxsw_sp_mr_tcam_erif_list *to, + struct mlxsw_sp_mr_tcam_erif_list *from) +{ + list_splice(&from->erif_sublists, &to->erif_sublists); + to->kvdl_index = from->kvdl_index; +} + +struct mlxsw_sp_mr_tcam_route { + struct mlxsw_sp_mr_tcam_erif_list erif_list; + struct mlxsw_afa_block *afa_block; + u32 counter_index; + struct parman_item parman_item; + struct parman_prio *parman_prio; + enum mlxsw_sp_mr_route_action action; + struct mlxsw_sp_mr_route_key key; + u16 irif_index; + u16 min_mtu; +}; + +static struct mlxsw_afa_block * +mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_mr_route_action route_action, + u16 irif_index, u32 counter_index, + u16 min_mtu, + struct mlxsw_sp_mr_tcam_erif_list *erif_list) +{ + struct mlxsw_afa_block *afa_block; + int err; + + afa_block = mlxsw_afa_block_create(mlxsw_sp->afa); + if (IS_ERR(afa_block)) + return afa_block; + + err = mlxsw_afa_block_append_counter(afa_block, counter_index); + if (err) + goto err; + + switch (route_action) { + case MLXSW_SP_MR_ROUTE_ACTION_TRAP: + err = mlxsw_afa_block_append_trap(afa_block, + MLXSW_TRAP_ID_ACL1); + if (err) + goto err; + break; + case MLXSW_SP_MR_ROUTE_ACTION_FORWARD: + /* If we are about to append a multicast router action, commit + * the erif_list. + */ + err = mlxsw_sp_mr_erif_list_commit(mlxsw_sp, erif_list); + if (err) + goto err; + + err = mlxsw_afa_block_append_mcrouter(afa_block, irif_index, + min_mtu, false, + erif_list->kvdl_index); + if (err) + goto err; + break; + default: + err = -EINVAL; + goto err; + } + + err = mlxsw_afa_block_commit(afa_block); + if (err) + goto err; + return afa_block; +err: + mlxsw_afa_block_destroy(afa_block); + return ERR_PTR(err); +} + +static void +mlxsw_sp_mr_tcam_afa_block_destroy(struct mlxsw_afa_block *afa_block) +{ + mlxsw_afa_block_destroy(afa_block); +} + +static int mlxsw_sp_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp, + struct parman_item *parman_item, + struct mlxsw_sp_mr_route_key *key, + struct mlxsw_afa_block *afa_block) +{ + char rmft2_pl[MLXSW_REG_RMFT2_LEN]; + + switch (key->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index, + key->vrid, + MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0, + ntohl(key->group.addr4), + ntohl(key->group_mask.addr4), + ntohl(key->source.addr4), + ntohl(key->source_mask.addr4), + mlxsw_afa_block_first_set(afa_block)); + break; + case MLXSW_SP_L3_PROTO_IPV6: + default: + WARN_ON_ONCE(1); + } + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl); +} + +static int mlxsw_sp_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, int vrid, + struct parman_item *parman_item) +{ + char rmft2_pl[MLXSW_REG_RMFT2_LEN]; + + mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index, vrid, + 0, 0, 0, 0, 0, 0, NULL); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl); +} + +static int +mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mr_tcam_erif_list *erif_list, + struct mlxsw_sp_mr_route_info *route_info) +{ + int err; + int i; + + for (i = 0; i < route_info->erif_num; i++) { + u16 erif_index = route_info->erif_indices[i]; + + err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, erif_list, + erif_index); + if (err) + return err; + } + return 0; +} + +static int +mlxsw_sp_mr_tcam_route_parman_item_add(struct mlxsw_sp_mr_tcam *mr_tcam, + struct mlxsw_sp_mr_tcam_route *route, + enum mlxsw_sp_mr_route_prio 
prio) +{ + struct parman_prio *parman_prio = NULL; + int err; + + switch (route->key.proto) { + case MLXSW_SP_L3_PROTO_IPV4: + parman_prio = &mr_tcam->ipv4_tcam_region.parman_prios[prio]; + err = parman_item_add(mr_tcam->ipv4_tcam_region.parman, + parman_prio, &route->parman_item); + if (err) + return err; + break; + case MLXSW_SP_L3_PROTO_IPV6: + default: + WARN_ON_ONCE(1); + } + route->parman_prio = parman_prio; + return 0; +} + +static void +mlxsw_sp_mr_tcam_route_parman_item_remove(struct mlxsw_sp_mr_tcam *mr_tcam, + struct mlxsw_sp_mr_tcam_route *route) +{ + switch (route->key.proto) { + case MLXSW_SP_L3_PROTO_IPV4: + parman_item_remove(mr_tcam->ipv4_tcam_region.parman, + route->parman_prio, &route->parman_item); + break; + case MLXSW_SP_L3_PROTO_IPV6: + default: + WARN_ON_ONCE(1); + } +} + +static int +mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv, + void *route_priv, + struct mlxsw_sp_mr_route_params *route_params) +{ + struct mlxsw_sp_mr_tcam_route *route = route_priv; + struct mlxsw_sp_mr_tcam *mr_tcam = priv; + int err; + + route->key = route_params->key; + route->irif_index = route_params->value.irif_index; + route->min_mtu = route_params->value.min_mtu; + route->action = route_params->value.route_action; + + /* Create the egress RIFs list */ + mlxsw_sp_mr_erif_list_init(&route->erif_list); + err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &route->erif_list, + &route_params->value); + if (err) + goto err_erif_populate; + + /* Create the flow counter */ + err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &route->counter_index); + if (err) + goto err_counter_alloc; + + /* Create the flexible action block */ + route->afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, + route->action, + route->irif_index, + route->counter_index, + route->min_mtu, + &route->erif_list); + if (IS_ERR(route->afa_block)) { + err = PTR_ERR(route->afa_block); + goto err_afa_block_create; + } + + /* Allocate place in the TCAM */ + err = mlxsw_sp_mr_tcam_route_parman_item_add(mr_tcam, route, + route_params->prio); + if (err) + goto err_parman_item_add; + + /* Write the route to the TCAM */ + err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, + &route->key, route->afa_block); + if (err) + goto err_route_replace; + return 0; + +err_route_replace: + mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route); +err_parman_item_add: + mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block); +err_afa_block_create: + mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index); +err_erif_populate: +err_counter_alloc: + mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list); + return err; +} + +static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, + void *priv, void *route_priv) +{ + struct mlxsw_sp_mr_tcam_route *route = route_priv; + struct mlxsw_sp_mr_tcam *mr_tcam = priv; + + mlxsw_sp_mr_tcam_route_remove(mlxsw_sp, route->key.vrid, + &route->parman_item); + mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route); + mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block); + mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index); + mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list); +} + +static int mlxsw_sp_mr_tcam_route_stats(struct mlxsw_sp *mlxsw_sp, + void *route_priv, u64 *packets, + u64 *bytes) +{ + struct mlxsw_sp_mr_tcam_route *route = route_priv; + + return mlxsw_sp_flow_counter_get(mlxsw_sp, route->counter_index, + packets, bytes); +} + +static int +mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp, + void *route_priv, + enum 
mlxsw_sp_mr_route_action route_action) +{ + struct mlxsw_sp_mr_tcam_route *route = route_priv; + struct mlxsw_afa_block *afa_block; + int err; + + /* Create a new flexible action block */ + afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route_action, + route->irif_index, + route->counter_index, + route->min_mtu, + &route->erif_list); + if (IS_ERR(afa_block)) + return PTR_ERR(afa_block); + + /* Update the TCAM route entry */ + err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, + &route->key, afa_block); + if (err) + goto err; + + /* Delete the old one */ + mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block); + route->afa_block = afa_block; + route->action = route_action; + return 0; +err: + mlxsw_sp_mr_tcam_afa_block_destroy(afa_block); + return err; +} + +static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp, + void *route_priv, u16 min_mtu) +{ + struct mlxsw_sp_mr_tcam_route *route = route_priv; + struct mlxsw_afa_block *afa_block; + int err; + + /* Create a new flexible action block */ + afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, + route->action, + route->irif_index, + route->counter_index, + min_mtu, + &route->erif_list); + if (IS_ERR(afa_block)) + return PTR_ERR(afa_block); + + /* Update the TCAM route entry */ + err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, + &route->key, afa_block); + if (err) + goto err; + + /* Delete the old one */ + mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block); + route->afa_block = afa_block; + route->min_mtu = min_mtu; + return 0; +err: + mlxsw_sp_mr_tcam_afa_block_destroy(afa_block); + return err; +} + +static int mlxsw_sp_mr_tcam_route_irif_update(struct mlxsw_sp *mlxsw_sp, + void *route_priv, u16 irif_index) +{ + struct mlxsw_sp_mr_tcam_route *route = route_priv; + + if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP) + return -EINVAL; + route->irif_index = irif_index; + return 0; +} + +static int mlxsw_sp_mr_tcam_route_erif_add(struct mlxsw_sp *mlxsw_sp, + void *route_priv, u16 erif_index) +{ + struct mlxsw_sp_mr_tcam_route *route = route_priv; + int err; + + err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &route->erif_list, + erif_index); + if (err) + return err; + + /* Commit the action only if the route action is not TRAP */ + if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP) + return mlxsw_sp_mr_erif_list_commit(mlxsw_sp, + &route->erif_list); + return 0; +} + +static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp, + void *route_priv, u16 erif_index) +{ + struct mlxsw_sp_mr_tcam_route *route = route_priv; + struct mlxsw_sp_mr_erif_sublist *erif_sublist; + struct mlxsw_sp_mr_tcam_erif_list erif_list; + struct mlxsw_afa_block *afa_block; + int err; + int i; + + /* Create a copy of the original erif_list without the deleted entry */ + mlxsw_sp_mr_erif_list_init(&erif_list); + list_for_each_entry(erif_sublist, &route->erif_list.erif_sublists, list) { + for (i = 0; i < erif_sublist->num_erifs; i++) { + u16 curr_erif = erif_sublist->erif_indices[i]; + + if (curr_erif == erif_index) + continue; + err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &erif_list, + curr_erif); + if (err) + goto err_erif_list_add; + } + } + + /* Create the flexible action block pointing to the new erif_list */ + afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route->action, + route->irif_index, + route->counter_index, + route->min_mtu, + &erif_list); + if (IS_ERR(afa_block)) { + err = PTR_ERR(afa_block); + goto err_afa_block_create; + } + + /* Update the TCAM route entry */ + err = 
mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, + &route->key, afa_block); + if (err) + goto err_route_write; + + mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block); + mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list); + route->afa_block = afa_block; + mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list); + return 0; + +err_route_write: + mlxsw_sp_mr_tcam_afa_block_destroy(afa_block); +err_afa_block_create: +err_erif_list_add: + mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list); + return err; +} + +static int +mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv, + struct mlxsw_sp_mr_route_info *route_info) +{ + struct mlxsw_sp_mr_tcam_route *route = route_priv; + struct mlxsw_sp_mr_tcam_erif_list erif_list; + struct mlxsw_afa_block *afa_block; + int err; + + /* Create a new erif_list */ + mlxsw_sp_mr_erif_list_init(&erif_list); + err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &erif_list, route_info); + if (err) + goto err_erif_populate; + + /* Create the flexible action block pointing to the new erif_list */ + afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, + route_info->route_action, + route_info->irif_index, + route->counter_index, + route_info->min_mtu, + &erif_list); + if (IS_ERR(afa_block)) { + err = PTR_ERR(afa_block); + goto err_afa_block_create; + } + + /* Update the TCAM route entry */ + err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, + &route->key, afa_block); + if (err) + goto err_route_write; + + mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block); + mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list); + route->afa_block = afa_block; + mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list); + route->action = route_info->route_action; + route->irif_index = route_info->irif_index; + route->min_mtu = route_info->min_mtu; + return 0; + +err_route_write: + mlxsw_sp_mr_tcam_afa_block_destroy(afa_block); +err_afa_block_create: +err_erif_populate: + mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list); + return err; +} + +#define MLXSW_SP_MR_TCAM_REGION_BASE_COUNT 16 +#define MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP 16 + +static int +mlxsw_sp_mr_tcam_region_alloc(struct mlxsw_sp_mr_tcam_region *mr_tcam_region) +{ + struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; + char rtar_pl[MLXSW_REG_RTAR_LEN]; + + mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE, + mr_tcam_region->rtar_key_type, + MLXSW_SP_MR_TCAM_REGION_BASE_COUNT); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl); +} + +static void +mlxsw_sp_mr_tcam_region_free(struct mlxsw_sp_mr_tcam_region *mr_tcam_region) +{ + struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; + char rtar_pl[MLXSW_REG_RTAR_LEN]; + + mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE, + mr_tcam_region->rtar_key_type, 0); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl); +} + +static int mlxsw_sp_mr_tcam_region_parman_resize(void *priv, + unsigned long new_count) +{ + struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv; + struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; + char rtar_pl[MLXSW_REG_RTAR_LEN]; + u64 max_tcam_rules; + + max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES); + if (new_count > max_tcam_rules) + return -EINVAL; + mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE, + mr_tcam_region->rtar_key_type, new_count); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl); +} + +static void mlxsw_sp_mr_tcam_region_parman_move(void *priv, + unsigned long from_index, + unsigned 
long to_index, + unsigned long count) +{ + struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv; + struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp; + char rrcr_pl[MLXSW_REG_RRCR_LEN]; + + mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE, + from_index, count, + mr_tcam_region->rtar_key_type, to_index); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl); +} + +static const struct parman_ops mlxsw_sp_mr_tcam_region_parman_ops = { + .base_count = MLXSW_SP_MR_TCAM_REGION_BASE_COUNT, + .resize_step = MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP, + .resize = mlxsw_sp_mr_tcam_region_parman_resize, + .move = mlxsw_sp_mr_tcam_region_parman_move, + .algo = PARMAN_ALGO_TYPE_LSORT, +}; + +static int +mlxsw_sp_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_mr_tcam_region *mr_tcam_region, + enum mlxsw_reg_rtar_key_type rtar_key_type) +{ + struct parman_prio *parman_prios; + struct parman *parman; + int err; + int i; + + mr_tcam_region->rtar_key_type = rtar_key_type; + mr_tcam_region->mlxsw_sp = mlxsw_sp; + + err = mlxsw_sp_mr_tcam_region_alloc(mr_tcam_region); + if (err) + return err; + + parman = parman_create(&mlxsw_sp_mr_tcam_region_parman_ops, + mr_tcam_region); + if (!parman) { + err = -ENOMEM; + goto err_parman_create; + } + mr_tcam_region->parman = parman; + + parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1, + sizeof(*parman_prios), GFP_KERNEL); + if (!parman_prios) + goto err_parman_prios_alloc; + mr_tcam_region->parman_prios = parman_prios; + + for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++) + parman_prio_init(mr_tcam_region->parman, + &mr_tcam_region->parman_prios[i], i); + return 0; + +err_parman_prios_alloc: + parman_destroy(parman); +err_parman_create: + mlxsw_sp_mr_tcam_region_free(mr_tcam_region); + return err; +} + +static void +mlxsw_sp_mr_tcam_region_fini(struct mlxsw_sp_mr_tcam_region *mr_tcam_region) +{ + int i; + + for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++) + parman_prio_fini(&mr_tcam_region->parman_prios[i]); + kfree(mr_tcam_region->parman_prios); + parman_destroy(mr_tcam_region->parman); + mlxsw_sp_mr_tcam_region_free(mr_tcam_region); +} + +static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv) +{ + struct mlxsw_sp_mr_tcam *mr_tcam = priv; + + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES) || + !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES)) + return -EIO; + + return mlxsw_sp_mr_tcam_region_init(mlxsw_sp, + &mr_tcam->ipv4_tcam_region, + MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST); +} + +static void mlxsw_sp_mr_tcam_fini(void *priv) +{ + struct mlxsw_sp_mr_tcam *mr_tcam = priv; + + mlxsw_sp_mr_tcam_region_fini(&mr_tcam->ipv4_tcam_region); +} + +const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = { + .priv_size = sizeof(struct mlxsw_sp_mr_tcam), + .route_priv_size = sizeof(struct mlxsw_sp_mr_tcam_route), + .init = mlxsw_sp_mr_tcam_init, + .route_create = mlxsw_sp_mr_tcam_route_create, + .route_update = mlxsw_sp_mr_tcam_route_update, + .route_stats = mlxsw_sp_mr_tcam_route_stats, + .route_action_update = mlxsw_sp_mr_tcam_route_action_update, + .route_min_mtu_update = mlxsw_sp_mr_tcam_route_min_mtu_update, + .route_irif_update = mlxsw_sp_mr_tcam_route_irif_update, + .route_erif_add = mlxsw_sp_mr_tcam_route_erif_add, + .route_erif_del = mlxsw_sp_mr_tcam_route_erif_del, + .route_destroy = mlxsw_sp_mr_tcam_route_destroy, + .fini = mlxsw_sp_mr_tcam_fini, +}; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h new file mode 
100644 index 000000000000..f9b59ee25406 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h @@ -0,0 +1,43 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.h + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Yotam Gigi + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _MLXSW_SPECTRUM_MCROUTER_TCAM_H +#define _MLXSW_SPECTRUM_MCROUTER_TCAM_H + +#include "spectrum.h" +#include "spectrum_mr.h" + +extern const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops; + +#endif -- cgit v1.2.3 From 7e50d435759accec4e17764a8d5a1ef63b79ffd6 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Wed, 27 Sep 2017 08:23:19 +0200 Subject: mlxsw: spectrum: router: Squash the default route table to main Currently, the mlxsw Spectrum driver offloads only either the RT_TABLE_MAIN FIB table or the VRF tables, so the RT_TABLE_LOCAL table is squashed to the RT_TABLE_MAIN table to allow local routes to be offloaded too. By default, multicast MFC routes which are not assigned to any user-requested table are put in the RT_TABLE_DEFAULT table. Because support for offloading multicast MFC routes is about to be introduced in the Spectrum router logic, squash the default table to MAIN too. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S.
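To make the squashing rule concrete, the following stand-alone C sketch renders the same logic the diff below adds; the RT_TABLE_* values are hard-coded stand-ins for the uapi rtnetlink constants and the function name is illustrative, not the driver's code:

enum {
    RT_TABLE_DEFAULT = 253, /* stand-ins for the uapi values */
    RT_TABLE_MAIN    = 254,
    RT_TABLE_LOCAL   = 255,
};

/* Squash the local and default tables into the main one. */
static unsigned int fix_tb_id(unsigned int tb_id)
{
    if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
        tb_id = RT_TABLE_MAIN;
    return tb_id;
}

With this rule, a virtual router bound to the main table also serves routes that the kernel files under the local or default tables.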
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 321f7356073c..28c0c84bc966 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -693,8 +693,8 @@ static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp, static u32 mlxsw_sp_fix_tb_id(u32 tb_id) { - /* For our purpose, squash main and local table into one */ - if (tb_id == RT_TABLE_LOCAL) + /* For our purpose, squash main, default and local tables into one */ + if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT) tb_id = RT_TABLE_MAIN; return tb_id; } -- cgit v1.2.3 From d42b0965b1d4fe0808a2103a3f7c015515b1112e Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Wed, 27 Sep 2017 08:23:20 +0200 Subject: mlxsw: spectrum_router: Add multicast routes notification handling functionality Add functionality for calling the multicast routing offloading logic upon MFC and VIF add and delete notifications. In addition, call the multicast routing upon RIF addition and deletion events. As the multicast routing offload logic may sleep, the actual calls are done in a deferred work. To ensure the MFC object is not freed in that interval, a reference is held to it. In case of a failure, the abort mechanism is used, which ejects all the routes from the hardware and triggers the traffic to flow through the kernel. Note: At that stage, the FIB notifications are still ignored, and will be enabled in a further patch. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
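The copy-reference-defer pattern this message describes can be modeled in user-space C as below; every type and helper name here (mfc_cache, mfc_hold, fib_event, and so on) is invented for illustration and only mimics the shape of the kernel objects involved:

#include <stdlib.h>
#include <string.h>

struct mfc_cache { int refcnt; /* route data elided */ };
struct mfc_info { struct mfc_cache *mfc; unsigned int tb_id; };

struct fib_work {
    struct mfc_info men_info; /* copied notifier payload */
    /* the kernel version also embeds a work_struct */
};

static void mfc_hold(struct mfc_cache *mfc) { mfc->refcnt++; }

static void mfc_put(struct mfc_cache *mfc)
{
    if (--mfc->refcnt == 0)
        free(mfc);
}

/* Notifier (atomic) context: copy the payload and pin the MFC entry. */
static struct fib_work *fib_event(const struct mfc_info *info)
{
    struct fib_work *w = malloc(sizeof(*w));

    if (!w)
        return NULL;
    memcpy(&w->men_info, info, sizeof(w->men_info));
    mfc_hold(w->men_info.mfc); /* keep it alive until the worker runs */
    return w;
}

/* Worker (process) context: program the hardware, then unpin. */
static void fib_work_run(struct fib_work *w)
{
    /* ... sleeping hardware offload of w->men_info goes here ... */
    mfc_put(w->men_info.mfc);
    free(w);
}

The reference is what makes the deferral safe: the notifier runs in atomic context, so the sleeping hardware programming must happen later, and the refcount guarantees the MFC entry is still valid when it does.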
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 187 ++++++++++++++++++++- 1 file changed, 185 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 28c0c84bc966..77584422ed08 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -65,6 +65,8 @@ #include "spectrum_cnt.h" #include "spectrum_dpipe.h" #include "spectrum_ipip.h" +#include "spectrum_mr.h" +#include "spectrum_mr_tcam.h" #include "spectrum_router.h" struct mlxsw_sp_vr; @@ -459,6 +461,7 @@ struct mlxsw_sp_vr { unsigned int rif_count; struct mlxsw_sp_fib *fib4; struct mlxsw_sp_fib *fib6; + struct mlxsw_sp_mr_table *mr4_table; }; static const struct rhashtable_params mlxsw_sp_fib_ht_params; @@ -653,7 +656,7 @@ static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp) static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr) { - return !!vr->fib4 || !!vr->fib6; + return !!vr->fib4 || !!vr->fib6 || !!vr->mr4_table; } static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp) @@ -744,9 +747,18 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, err = PTR_ERR(vr->fib6); goto err_fib6_create; } + vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id, + MLXSW_SP_L3_PROTO_IPV4); + if (IS_ERR(vr->mr4_table)) { + err = PTR_ERR(vr->mr4_table); + goto err_mr_table_create; + } vr->tb_id = tb_id; return vr; +err_mr_table_create: + mlxsw_sp_fib_destroy(vr->fib6); + vr->fib6 = NULL; err_fib6_create: mlxsw_sp_fib_destroy(vr->fib4); vr->fib4 = NULL; @@ -755,6 +767,8 @@ err_fib6_create: static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr) { + mlxsw_sp_mr_table_destroy(vr->mr4_table); + vr->mr4_table = NULL; mlxsw_sp_fib_destroy(vr->fib6); vr->fib6 = NULL; mlxsw_sp_fib_destroy(vr->fib4); @@ -775,7 +789,8 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id) static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr) { if (!vr->rif_count && list_empty(&vr->fib4->node_list) && - list_empty(&vr->fib6->node_list)) + list_empty(&vr->fib6->node_list) && + mlxsw_sp_mr_table_empty(vr->mr4_table)) mlxsw_sp_vr_destroy(vr); } @@ -4731,6 +4746,75 @@ static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp, return 0; } +static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp, + struct mfc_entry_notifier_info *men_info, + bool replace) +{ + struct mlxsw_sp_vr *vr; + + if (mlxsw_sp->router->aborted) + return 0; + + vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id); + if (IS_ERR(vr)) + return PTR_ERR(vr); + + return mlxsw_sp_mr_route4_add(vr->mr4_table, men_info->mfc, replace); +} + +static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp, + struct mfc_entry_notifier_info *men_info) +{ + struct mlxsw_sp_vr *vr; + + if (mlxsw_sp->router->aborted) + return; + + vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id); + if (WARN_ON(!vr)) + return; + + mlxsw_sp_mr_route4_del(vr->mr4_table, men_info->mfc); + mlxsw_sp_vr_put(vr); +} + +static int +mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp, + struct vif_entry_notifier_info *ven_info) +{ + struct mlxsw_sp_rif *rif; + struct mlxsw_sp_vr *vr; + + if (mlxsw_sp->router->aborted) + return 0; + + vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id); + if (IS_ERR(vr)) + return PTR_ERR(vr); + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev); + return mlxsw_sp_mr_vif_add(vr->mr4_table, 
ven_info->dev, + ven_info->vif_index, + ven_info->vif_flags, rif); +} + +static void +mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp, + struct vif_entry_notifier_info *ven_info) +{ + struct mlxsw_sp_vr *vr; + + if (mlxsw_sp->router->aborted) + return; + + vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id); + if (WARN_ON(!vr)) + return; + + mlxsw_sp_mr_vif_del(vr->mr4_table, ven_info->vif_index); + mlxsw_sp_vr_put(vr); +} + static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) { enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4; @@ -4741,6 +4825,10 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) if (err) return err; + /* The multicast router code does not need an abort trap as by default, + * packets that don't match any routes are trapped to the CPU. + */ + proto = MLXSW_REG_RALXX_PROTOCOL_IPV6; return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto, MLXSW_SP_LPM_TREE_MIN + 1); @@ -4822,6 +4910,8 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) if (!mlxsw_sp_vr_is_used(vr)) continue; + + mlxsw_sp_mr_table_flush(vr->mr4_table); mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4); /* If virtual router was only used for IPv4, then it's no @@ -4854,6 +4944,8 @@ struct mlxsw_sp_fib_event_work { struct fib_entry_notifier_info fen_info; struct fib_rule_notifier_info fr_info; struct fib_nh_notifier_info fnh_info; + struct mfc_entry_notifier_info men_info; + struct vif_entry_notifier_info ven_info; }; struct mlxsw_sp *mlxsw_sp; unsigned long event; @@ -4940,6 +5032,55 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work) kfree(fib_work); } +static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work) +{ + struct mlxsw_sp_fib_event_work *fib_work = + container_of(work, struct mlxsw_sp_fib_event_work, work); + struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; + struct fib_rule *rule; + bool replace; + int err; + + rtnl_lock(); + switch (fib_work->event) { + case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_ADD: + replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; + + err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info, + replace); + if (err) + mlxsw_sp_router_fib_abort(mlxsw_sp); + ipmr_cache_put(fib_work->men_info.mfc); + break; + case FIB_EVENT_ENTRY_DEL: + mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info); + ipmr_cache_put(fib_work->men_info.mfc); + break; + case FIB_EVENT_VIF_ADD: + err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp, + &fib_work->ven_info); + if (err) + mlxsw_sp_router_fib_abort(mlxsw_sp); + dev_put(fib_work->ven_info.dev); + break; + case FIB_EVENT_VIF_DEL: + mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, + &fib_work->ven_info); + dev_put(fib_work->ven_info.dev); + break; + case FIB_EVENT_RULE_ADD: /* fall through */ + case FIB_EVENT_RULE_DEL: + rule = fib_work->fr_info.rule; + if (!ipmr_rule_default(rule) && !rule->l3mdev) + mlxsw_sp_router_fib_abort(mlxsw_sp); + fib_rule_put(rule); + break; + } + rtnl_unlock(); + kfree(fib_work); +} + static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work, struct fib_notifier_info *info) { @@ -4985,6 +5126,30 @@ static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work, } } +static void +mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work, + struct fib_notifier_info *info) +{ + switch (fib_work->event) { + case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_ADD: /* fall through */ + case FIB_EVENT_ENTRY_DEL: + 
memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info)); + ipmr_cache_hold(fib_work->men_info.mfc); + break; + case FIB_EVENT_VIF_ADD: /* fall through */ + case FIB_EVENT_VIF_DEL: + memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info)); + dev_hold(fib_work->ven_info.dev); + break; + case FIB_EVENT_RULE_ADD: /* fall through */ + case FIB_EVENT_RULE_DEL: + memcpy(&fib_work->fr_info, info, sizeof(fib_work->fr_info)); + fib_rule_get(fib_work->fr_info.rule); + break; + } +} + /* Called with rcu_read_lock() */ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, unsigned long event, void *ptr) @@ -5014,6 +5179,10 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work); mlxsw_sp_router_fib6_event(fib_work, info); break; + case RTNL_FAMILY_IPMR: + INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work); + mlxsw_sp_router_fibmr_event(fib_work, info); + break; } mlxsw_core_schedule_work(&fib_work->work); @@ -5227,12 +5396,18 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_configure; + err = mlxsw_sp_mr_rif_add(vr->mr4_table, rif); + if (err) + goto err_mr_rif_add; + mlxsw_sp_rif_counters_alloc(rif); mlxsw_sp->router->rifs[rif_index] = rif; vr->rif_count++; return rif; +err_mr_rif_add: + ops->deconfigure(rif); err_configure: if (fid) mlxsw_sp_fid_put(fid); @@ -5257,6 +5432,7 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif) vr->rif_count--; mlxsw_sp->router->rifs[rif->rif_index] = NULL; mlxsw_sp_rif_counters_free(rif); + mlxsw_sp_mr_rif_del(vr->mr4_table, rif); ops->deconfigure(rif); if (fid) /* Loopback RIFs are not associated with a FID. */ @@ -6120,6 +6296,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) if (err) goto err_lpm_init; + err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops); + if (err) + goto err_mr_init; + err = mlxsw_sp_vrs_init(mlxsw_sp); if (err) goto err_vrs_init; @@ -6141,6 +6321,8 @@ err_register_fib_notifier: err_neigh_init: mlxsw_sp_vrs_fini(mlxsw_sp); err_vrs_init: + mlxsw_sp_mr_fini(mlxsw_sp); +err_mr_init: mlxsw_sp_lpm_fini(mlxsw_sp); err_lpm_init: rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht); @@ -6162,6 +6344,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) unregister_fib_notifier(&mlxsw_sp->router->fib_nb); mlxsw_sp_neigh_fini(mlxsw_sp); mlxsw_sp_vrs_fini(mlxsw_sp); + mlxsw_sp_mr_fini(mlxsw_sp); mlxsw_sp_lpm_fini(mlxsw_sp); rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht); rhashtable_destroy(&mlxsw_sp->router->nexthop_ht); -- cgit v1.2.3 From fd890fe98f8b026642d39011d03d22fb4aa66b0f Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Wed, 27 Sep 2017 08:23:21 +0200 Subject: mlxsw: spectrum: Notify multicast router on RIF MTU changes Because multicast routes hold the minimum MTU of all the egress RIFs and trap packets that don't meet it, notify the multicast router code on RIF MTU changes. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S.
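The invariant behind this commit is that a multicast route may only forward packets that fit the smallest egress RIF MTU, so an MTU change forces that minimum to be recomputed. A hedged sketch of the recomputation (the struct layout and fixed-size array are invented for illustration):

#include <stdint.h>

struct mc_route {
    uint16_t min_mtu;
    uint16_t erif_mtu[8]; /* MTUs of the egress RIFs */
    int erif_count;
};

/* Recompute the minimum MTU over all egress RIFs of one route. */
static void mc_route_update_min_mtu(struct mc_route *route)
{
    uint16_t min = UINT16_MAX;
    int i;

    for (i = 0; i < route->erif_count; i++)
        if (route->erif_mtu[i] < min)
            min = route->erif_mtu[i];
    route->min_mtu = route->erif_count ? min : 0;
}

Packets larger than min_mtu are then trapped to the CPU instead of being forwarded in hardware.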
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 77584422ed08..dbd9c196fcfb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -5773,6 +5773,17 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev) if (err) goto err_rif_fdb_op; + if (rif->mtu != dev->mtu) { + struct mlxsw_sp_vr *vr; + + /* The RIF is relevant only to its mr_table instance, as unlike + * unicast routing, in multicast routing a RIF cannot be shared + * between several multicast routing tables. + */ + vr = &mlxsw_sp->router->vrs[rif->vr_id]; + mlxsw_sp_mr_rif_mtu_update(vr->mr4_table, rif, dev->mtu); + } + ether_addr_copy(rif->addr, dev->dev_addr); rif->mtu = dev->mtu; -- cgit v1.2.3 From 664375e9567b5eeece8d9ebf85eaf5107cab382d Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Wed, 27 Sep 2017 08:23:22 +0200 Subject: mlxsw: spectrum: router: Don't ignore IPMR notifications Make the Spectrum router logic not ignore the RTNL_FAMILY_IPMR FIB notifications. Past commits added the IPMR VIF and MFC add/del notifications via the fib_notifier chain, and code for handling these notifications was added to the Spectrum router logic. Handle these notifications and forward the requests to the Spectrum multicast router offloading logic. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index dbd9c196fcfb..ef4b86b3aa9b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -5159,7 +5159,8 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, struct mlxsw_sp_router *router; if (!net_eq(info->net, &init_net) || - (info->family != AF_INET && info->family != AF_INET6)) + (info->family != AF_INET && info->family != AF_INET6 && + info->family != RTNL_FAMILY_IPMR)) return NOTIFY_DONE; fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); -- cgit v1.2.3 From 4d8806fd14e1492cd4fb2021f709b163ea3364ad Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 26 Sep 2017 16:14:09 +0100 Subject: cxgb4: make function ch_flower_stats_cb static, fixes warning The function ch_flower_stats_cb is local to the source and does not need to be in global scope, so make it static. Cleans up sparse warnings: symbol 'ch_flower_stats_cb' was not declared. Should it be static? Signed-off-by: Colin Ian King Signed-off-by: David S.
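The cxgb4 change below is purely about linkage: a callback used in only one translation unit should be declared static so it gets internal linkage, which is exactly what silences the sparse warning. A generic illustration, not the driver's code:

/* Without 'static' this symbol has external linkage and sparse asks
 * for a declaration in a header; with it, the symbol stays file-local.
 */
static void stats_cb(unsigned long data)
{
    (void)data; /* callback body elided */
}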
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index a36bd66d2834..92a311767381 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -366,7 +366,7 @@ err: return ret; } -void ch_flower_stats_cb(unsigned long data) +static void ch_flower_stats_cb(unsigned long data) { struct adapter *adap = (struct adapter *)data; struct ch_tc_flower_entry *flower_entry; -- cgit v1.2.3 From 352f58b0d9f26d283b10f4c9f21e8717141c1334 Mon Sep 17 00:00:00 2001 From: Aviad Krawczyk Date: Wed, 27 Sep 2017 01:57:50 +0800 Subject: net-next/hinic: Set Rxq irq to specific cpu for NUMA Set Rxq irq to specific cpu for allocating and receiving the skb from the same node. Signed-off-by: Aviad Krawczyk Signed-off-by: David S. Miller --- drivers/net/ethernet/huawei/hinic/hinic_rx.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c index 1d4f712b15a8..e2e5cdc7119c 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include "hinic_common.h" @@ -171,11 +172,10 @@ static int rx_alloc_pkts(struct hinic_rxq *rxq) struct hinic_sge sge; dma_addr_t dma_addr; struct sk_buff *skb; - int i, alloc_more; u16 prod_idx; + int i; free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq); - alloc_more = 0; /* Limit the allocation chunks */ if (free_wqebbs > nic_dev->rx_weight) @@ -185,7 +185,6 @@ static int rx_alloc_pkts(struct hinic_rxq *rxq) skb = rx_alloc_skb(rxq, &dma_addr); if (!skb) { netdev_err(rxq->netdev, "Failed to alloc Rx skb\n"); - alloc_more = 1; goto skb_out; } @@ -195,7 +194,6 @@ static int rx_alloc_pkts(struct hinic_rxq *rxq) &prod_idx); if (!rq_wqe) { rx_free_skb(rxq, skb, dma_addr); - alloc_more = 1; goto skb_out; } @@ -211,9 +209,7 @@ skb_out: hinic_rq_update(rxq->rq, prod_idx); } - if (alloc_more) - tasklet_schedule(&rxq->rx_task); - + tasklet_schedule(&rxq->rx_task); return i; } @@ -357,7 +353,7 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget) } if (pkts) - tasklet_schedule(&rxq->rx_task); /* hinic_rx_alloc_pkts */ + tasklet_schedule(&rxq->rx_task); /* rx_alloc_pkts */ u64_stats_update_begin(&rxq->rxq_stats.syncp); rxq->rxq_stats.pkts += pkts; @@ -417,6 +413,8 @@ static int rx_request_irq(struct hinic_rxq *rxq) struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); struct hinic_hwdev *hwdev = nic_dev->hwdev; struct hinic_rq *rq = rxq->rq; + struct hinic_qp *qp; + struct cpumask mask; int err; rx_add_napi(rxq); @@ -432,7 +430,9 @@ static int rx_request_irq(struct hinic_rxq *rxq) return err; } - return 0; + qp = container_of(rq, struct hinic_qp, rq); + cpumask_set_cpu(qp->q_id % num_online_cpus(), &mask); + return irq_set_affinity_hint(rq->irq, &mask); } static void rx_free_irq(struct hinic_rxq *rxq) -- cgit v1.2.3 From bbdc9e687fb3c2920961d7716f1c5519ff7bc595 Mon Sep 17 00:00:00 2001 From: Aviad Krawczyk Date: Wed, 27 Sep 2017 02:11:33 +0800 Subject: net-next/hinic: Fix a case of Tx Queue is Stopped forever Fix the following scenario: 1. tx_free_poll is running on cpu X 2. xmit function is running on cpu Y and fails to get sq wqe 3. 
tx_free_poll frees wqes on cpu X and checks that the queue is not stopped 4. xmit function stops the queue after failing to get an sq wqe 5. The queue is stopped forever Signed-off-by: Aviad Krawczyk Signed-off-by: David S. Miller --- drivers/net/ethernet/huawei/hinic/hinic_tx.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index abe3e38cd342..9128858479c4 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -212,10 +212,19 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); if (!sq_wqe) { - tx_unmap_skb(nic_dev, skb, txq->sges); - netif_stop_subqueue(netdev, qp->q_id); + /* Check for the case free_tx_poll is called in another cpu + * and we stopped the subqueue after free_tx_poll check. + */ + sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); + if (sq_wqe) { + netif_wake_subqueue(nic_dev->netdev, qp->q_id); + goto process_sq_wqe; + } + + tx_unmap_skb(nic_dev, skb, txq->sges); + u64_stats_update_begin(&txq->txq_stats.syncp); txq->txq_stats.tx_busy++; u64_stats_update_end(&txq->txq_stats.syncp); @@ -223,6 +232,7 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) goto flush_skbs; } +process_sq_wqe: hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges); hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size); -- cgit v1.2.3 From 9ffe79a9c2eec0f30687c2fd8b452bda5c8287b0 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Wed, 27 Sep 2017 09:45:23 +0800 Subject: net: hns3: Support for dynamically assigning tx buffer to TC This patch adds support for dynamically assigning tx buffer to a TC when the TC is enabled. It saves buffer space for the rx direction to avoid packet loss. Signed-off-by: Yunsheng Lin Signed-off-by: David S.
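The allocation rule the patch introduces is simple: every enabled TC receives one fixed default TX buffer out of the total packet buffer, disabled TCs receive none, and the calculation fails once the budget runs short. A stand-alone sketch under assumed constants (the constant value and field names are redeclared here for illustration; the diff follows below):

#include <errno.h>
#include <stdint.h>

#define MAX_TC_NUM     8
#define DEFAULT_TX_BUF 0x4000 /* illustrative per-TC default size */

struct priv_buf { uint32_t tx_buf_size; };

/* Give each enabled TC the default TX buffer out of the total budget. */
static int tx_buffer_calc(struct priv_buf *priv, uint8_t hw_tc_map,
                          uint32_t total_size)
{
    int i;

    for (i = 0; i < MAX_TC_NUM; i++) {
        if (total_size < DEFAULT_TX_BUF)
            return -ENOMEM;

        priv[i].tx_buf_size = (hw_tc_map & (1u << i)) ?
                              DEFAULT_TX_BUF : 0;
        total_size -= priv[i].tx_buf_size;
    }
    return 0;
}

Whatever remains of total_size after this loop is what the RX direction gets to keep, which is the point of the patch.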
Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 1 + .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 64 ++++++++++++++++++---- 2 files changed, 55 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 758cf3948131..a81c6cb93ed5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -311,6 +311,7 @@ struct hclge_tc_thrd { struct hclge_priv_buf { struct hclge_waterline wl; /* Waterline for low and high*/ u32 buf_size; /* TC private buffer size */ + u32 tx_buf_size; u32 enable; /* Enable TC private buffer or not */ }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index e0685e630afe..eaa3fc355568 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1324,7 +1324,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) return 0; } -static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, u16 buf_size) +static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev) { /* TX buffer size is unit by 128 byte */ #define HCLGE_BUF_SIZE_UNIT_SHIFT 7 @@ -1337,10 +1337,13 @@ static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, u16 buf_size) req = (struct hclge_tx_buff_alloc *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); - for (i = 0; i < HCLGE_TC_NUM; i++) + for (i = 0; i < HCLGE_TC_NUM; i++) { + u32 buf_size = hdev->priv_buf[i].tx_buf_size; + req->tx_pkt_buff[i] = cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) | HCLGE_BUF_SIZE_UPDATE_EN_MSK); + } ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { @@ -1352,9 +1355,9 @@ static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, u16 buf_size) return 0; } -static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, u32 buf_size) +static int hclge_tx_buffer_alloc(struct hclge_dev *hdev) { - int ret = hclge_cmd_alloc_tx_buff(hdev, buf_size); + int ret = hclge_cmd_alloc_tx_buff(hdev); if (ret) { dev_err(&hdev->pdev->dev, @@ -1433,6 +1436,16 @@ static u32 hclge_get_rx_priv_buff_alloced(struct hclge_dev *hdev) return rx_priv; } +static u32 hclge_get_tx_buff_alloced(struct hclge_dev *hdev) +{ + u32 i, total_tx_size = 0; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) + total_tx_size += hdev->priv_buf[i].tx_buf_size; + + return total_tx_size; +} + static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all) { u32 shared_buf_min, shared_buf_tc, shared_std; @@ -1477,18 +1490,43 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all) return true; } +static int hclge_tx_buffer_calc(struct hclge_dev *hdev) +{ + u32 i, total_size; + + total_size = hdev->pkt_buf_size; + + /* alloc tx buffer for all enabled tc */ + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + struct hclge_priv_buf *priv = &hdev->priv_buf[i]; + + if (total_size < HCLGE_DEFAULT_TX_BUF) + return -ENOMEM; + + if (hdev->hw_tc_map & BIT(i)) + priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF; + else + priv->tx_buf_size = 0; + + total_size -= priv->tx_buf_size; + } + + return 0; +} + /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs * @hdev: pointer to struct hclge_dev - * @tx_size: the allocated tx buffer for all TCs * @return: 0: calculate sucessful, negative: fail */ -int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size) +int 
hclge_rx_buffer_calc(struct hclge_dev *hdev) { - u32 rx_all = hdev->pkt_buf_size - tx_size; + u32 rx_all = hdev->pkt_buf_size; int no_pfc_priv_num, pfc_priv_num; struct hclge_priv_buf *priv; int i; + rx_all -= hclge_get_tx_buff_alloced(hdev); + /* When DCB is not supported, rx private * buffer is not allocated. */ @@ -1771,7 +1809,6 @@ static int hclge_common_wl_config(struct hclge_dev *hdev) int hclge_buffer_alloc(struct hclge_dev *hdev) { - u32 tx_buf_size = HCLGE_DEFAULT_TX_BUF; int ret; hdev->priv_buf = devm_kmalloc_array(&hdev->pdev->dev, HCLGE_MAX_TC_NUM, @@ -1780,14 +1817,21 @@ int hclge_buffer_alloc(struct hclge_dev *hdev) if (!hdev->priv_buf) return -ENOMEM; - ret = hclge_tx_buffer_alloc(hdev, tx_buf_size); + ret = hclge_tx_buffer_calc(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not calc tx buffer size for all TCs %d\n", ret); + return ret; + } + + ret = hclge_tx_buffer_alloc(hdev); if (ret) { dev_err(&hdev->pdev->dev, "could not alloc tx buffers %d\n", ret); return ret; } - ret = hclge_rx_buffer_calc(hdev, tx_buf_size); + ret = hclge_rx_buffer_calc(hdev); if (ret) { dev_err(&hdev->pdev->dev, "could not calc rx priv buffer size for all TCs %d\n", -- cgit v1.2.3 From acf61ecd44feae2a78c13d0d7cb8e386741c5cf0 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Wed, 27 Sep 2017 09:45:24 +0800 Subject: net: hns3: Add support for dynamic buffer reallocation Currently, buffer allocation can only happen at init; when doing buffer reallocation after init, care must be taken with the memory that priv_buf points to. This patch fixes it by using dynamically allocated temporary memory. Because buffer reallocation is only done at init or when setting up the DCB parameters, and priv_buf is only used during the buffer allocation process, it is ok to use dynamically allocated temporary memory. Signed-off-by: Yunsheng Lin Signed-off-by: David S.
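Structurally the fix turns long-lived buffer-calculation state into scratch memory with the classic allocate/compute/free-on-every-path shape, sketched here in plain C with the intermediate steps stubbed out (all names illustrative):

#include <errno.h>
#include <stdlib.h>

struct pkt_buf_alloc { int per_tc_sizes[8]; /* details elided */ };

static int buffer_calc(struct pkt_buf_alloc *b)  { (void)b; return 0; }
static int buffer_apply(struct pkt_buf_alloc *b) { (void)b; return 0; }

/* Allocate scratch state, run the calculation, free it on every path. */
static int buffer_alloc(void)
{
    struct pkt_buf_alloc *pkt_buf;
    int ret;

    pkt_buf = calloc(1, sizeof(*pkt_buf)); /* kzalloc() stand-in */
    if (!pkt_buf)
        return -ENOMEM;

    ret = buffer_calc(pkt_buf);
    if (ret)
        goto out;

    ret = buffer_apply(pkt_buf);
out:
    free(pkt_buf); /* the scratch data never outlives the call */
    return ret;
}

Because the scratch structure exists only for the duration of one pass, a later reallocation can never observe stale per-TC state from a previous one.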
Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 5 + .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 150 +++++++++++---------- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 2 - 3 files changed, 87 insertions(+), 70 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index a81c6cb93ed5..6b6d28eff664 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -322,6 +322,11 @@ struct hclge_shared_buf { u32 buf_size; }; +struct hclge_pkt_buf_alloc { + struct hclge_priv_buf priv_buf[HCLGE_MAX_TC_NUM]; + struct hclge_shared_buf s_buf; +}; + #define HCLGE_RX_COM_WL_EN_B 15 struct hclge_rx_com_wl_buf { __le16 high_wl; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index eaa3fc355568..61632feb8c4e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1324,7 +1324,8 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) return 0; } -static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev) +static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) { /* TX buffer size is unit by 128 byte */ #define HCLGE_BUF_SIZE_UNIT_SHIFT 7 @@ -1338,7 +1339,7 @@ static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); for (i = 0; i < HCLGE_TC_NUM; i++) { - u32 buf_size = hdev->priv_buf[i].tx_buf_size; + u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size; req->tx_pkt_buff[i] = cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) | @@ -1355,9 +1356,10 @@ static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev) return 0; } -static int hclge_tx_buffer_alloc(struct hclge_dev *hdev) +static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) { - int ret = hclge_cmd_alloc_tx_buff(hdev); + int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc); if (ret) { dev_err(&hdev->pdev->dev, @@ -1390,13 +1392,14 @@ static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev) } /* Get the number of pfc enabled TCs, which have private buffer */ -static int hclge_get_pfc_priv_num(struct hclge_dev *hdev) +static int hclge_get_pfc_priv_num(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) { struct hclge_priv_buf *priv; int i, cnt = 0; for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - priv = &hdev->priv_buf[i]; + priv = &buf_alloc->priv_buf[i]; if ((hdev->tm_info.hw_pfc_map & BIT(i)) && priv->enable) cnt++; @@ -1406,13 +1409,14 @@ static int hclge_get_pfc_priv_num(struct hclge_dev *hdev) } /* Get the number of pfc disabled TCs, which have private buffer */ -static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev) +static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) { struct hclge_priv_buf *priv; int i, cnt = 0; for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - priv = &hdev->priv_buf[i]; + priv = &buf_alloc->priv_buf[i]; if (hdev->hw_tc_map & BIT(i) && !(hdev->tm_info.hw_pfc_map & BIT(i)) && priv->enable) @@ -1422,31 +1426,33 @@ static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev) return cnt; } -static u32 hclge_get_rx_priv_buff_alloced(struct hclge_dev *hdev) +static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) { struct hclge_priv_buf 
*priv; u32 rx_priv = 0; int i; for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - priv = &hdev->priv_buf[i]; + priv = &buf_alloc->priv_buf[i]; if (priv->enable) rx_priv += priv->buf_size; } return rx_priv; } -static u32 hclge_get_tx_buff_alloced(struct hclge_dev *hdev) +static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc) { u32 i, total_tx_size = 0; for (i = 0; i < HCLGE_MAX_TC_NUM; i++) - total_tx_size += hdev->priv_buf[i].tx_buf_size; + total_tx_size += buf_alloc->priv_buf[i].tx_buf_size; return total_tx_size; } -static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all) +static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc, + u32 rx_all) { u32 shared_buf_min, shared_buf_tc, shared_std; int tc_num, pfc_enable_num; @@ -1467,30 +1473,31 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all) hdev->mps; shared_std = max_t(u32, shared_buf_min, shared_buf_tc); - rx_priv = hclge_get_rx_priv_buff_alloced(hdev); + rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc); if (rx_all <= rx_priv + shared_std) return false; shared_buf = rx_all - rx_priv; - hdev->s_buf.buf_size = shared_buf; - hdev->s_buf.self.high = shared_buf; - hdev->s_buf.self.low = 2 * hdev->mps; + buf_alloc->s_buf.buf_size = shared_buf; + buf_alloc->s_buf.self.high = shared_buf; + buf_alloc->s_buf.self.low = 2 * hdev->mps; for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { if ((hdev->hw_tc_map & BIT(i)) && (hdev->tm_info.hw_pfc_map & BIT(i))) { - hdev->s_buf.tc_thrd[i].low = hdev->mps; - hdev->s_buf.tc_thrd[i].high = 2 * hdev->mps; + buf_alloc->s_buf.tc_thrd[i].low = hdev->mps; + buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps; } else { - hdev->s_buf.tc_thrd[i].low = 0; - hdev->s_buf.tc_thrd[i].high = hdev->mps; + buf_alloc->s_buf.tc_thrd[i].low = 0; + buf_alloc->s_buf.tc_thrd[i].high = hdev->mps; } } return true; } -static int hclge_tx_buffer_calc(struct hclge_dev *hdev) +static int hclge_tx_buffer_calc(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) { u32 i, total_size; @@ -1498,7 +1505,7 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev) /* alloc tx buffer for all enabled tc */ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - struct hclge_priv_buf *priv = &hdev->priv_buf[i]; + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; if (total_size < HCLGE_DEFAULT_TX_BUF) return -ENOMEM; @@ -1516,22 +1523,24 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev) /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs * @hdev: pointer to struct hclge_dev + * @buf_alloc: pointer to buffer calculation data * @return: 0: calculate sucessful, negative: fail */ -int hclge_rx_buffer_calc(struct hclge_dev *hdev) +int hclge_rx_buffer_calc(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) { u32 rx_all = hdev->pkt_buf_size; int no_pfc_priv_num, pfc_priv_num; struct hclge_priv_buf *priv; int i; - rx_all -= hclge_get_tx_buff_alloced(hdev); + rx_all -= hclge_get_tx_buff_alloced(buf_alloc); /* When DCB is not supported, rx private * buffer is not allocated. 
*/ if (!hnae3_dev_dcb_supported(hdev)) { - if (!hclge_is_rx_buf_ok(hdev, rx_all)) + if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) return -ENOMEM; return 0; @@ -1539,7 +1548,7 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev) /* step 1, try to alloc private buffer for all enabled tc */ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - priv = &hdev->priv_buf[i]; + priv = &buf_alloc->priv_buf[i]; if (hdev->hw_tc_map & BIT(i)) { priv->enable = 1; if (hdev->tm_info.hw_pfc_map & BIT(i)) { @@ -1560,14 +1569,14 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev) } } - if (hclge_is_rx_buf_ok(hdev, rx_all)) + if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) return 0; /* step 2, try to decrease the buffer size of * no pfc TC's private buffer */ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - priv = &hdev->priv_buf[i]; + priv = &buf_alloc->priv_buf[i]; priv->enable = 0; priv->wl.low = 0; @@ -1590,18 +1599,18 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev) } } - if (hclge_is_rx_buf_ok(hdev, rx_all)) + if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) return 0; /* step 3, try to reduce the number of pfc disabled TCs, * which have private buffer */ /* get the total no pfc enable TC number, which have private buffer */ - no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev); + no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc); /* let the last to be cleared first */ for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { - priv = &hdev->priv_buf[i]; + priv = &buf_alloc->priv_buf[i]; if (hdev->hw_tc_map & BIT(i) && !(hdev->tm_info.hw_pfc_map & BIT(i))) { @@ -1613,22 +1622,22 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev) no_pfc_priv_num--; } - if (hclge_is_rx_buf_ok(hdev, rx_all) || + if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || no_pfc_priv_num == 0) break; } - if (hclge_is_rx_buf_ok(hdev, rx_all)) + if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) return 0; /* step 4, try to reduce the number of pfc enabled TCs * which have private buffer. 
*/ - pfc_priv_num = hclge_get_pfc_priv_num(hdev); + pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc); /* let the last to be cleared first */ for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { - priv = &hdev->priv_buf[i]; + priv = &buf_alloc->priv_buf[i]; if (hdev->hw_tc_map & BIT(i) && hdev->tm_info.hw_pfc_map & BIT(i)) { @@ -1640,17 +1649,18 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev) pfc_priv_num--; } - if (hclge_is_rx_buf_ok(hdev, rx_all) || + if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) || pfc_priv_num == 0) break; } - if (hclge_is_rx_buf_ok(hdev, rx_all)) + if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all)) return 0; return -ENOMEM; } -static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev) +static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) { struct hclge_rx_priv_buff *req; struct hclge_desc desc; @@ -1662,7 +1672,7 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev) /* Alloc private buffer TCs */ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - struct hclge_priv_buf *priv = &hdev->priv_buf[i]; + struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; req->buf_num[i] = cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); @@ -1671,7 +1681,7 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev) } req->shared_buf = - cpu_to_le16((hdev->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | + cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) | (1 << HCLGE_TC0_PRI_BUF_EN_B)); ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -1686,7 +1696,8 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev) #define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0) -static int hclge_rx_priv_wl_config(struct hclge_dev *hdev) +static int hclge_rx_priv_wl_config(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) { struct hclge_rx_priv_wl_buf *req; struct hclge_priv_buf *priv; @@ -1706,7 +1717,9 @@ static int hclge_rx_priv_wl_config(struct hclge_dev *hdev) desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { - priv = &hdev->priv_buf[i * HCLGE_TC_NUM_ONE_DESC + j]; + u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j; + + priv = &buf_alloc->priv_buf[idx]; req->tc_wl[j].high = cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); req->tc_wl[j].high |= @@ -1731,9 +1744,10 @@ static int hclge_rx_priv_wl_config(struct hclge_dev *hdev) return 0; } -static int hclge_common_thrd_config(struct hclge_dev *hdev) +static int hclge_common_thrd_config(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) { - struct hclge_shared_buf *s_buf = &hdev->s_buf; + struct hclge_shared_buf *s_buf = &buf_alloc->s_buf; struct hclge_rx_com_thrd *req; struct hclge_desc desc[2]; struct hclge_tc_thrd *tc; @@ -1777,9 +1791,10 @@ static int hclge_common_thrd_config(struct hclge_dev *hdev) return 0; } -static int hclge_common_wl_config(struct hclge_dev *hdev) +static int hclge_common_wl_config(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) { - struct hclge_shared_buf *buf = &hdev->s_buf; + struct hclge_shared_buf *buf = &buf_alloc->s_buf; struct hclge_rx_com_wl *req; struct hclge_desc desc; int ret; @@ -1809,69 +1824,68 @@ static int hclge_common_wl_config(struct hclge_dev *hdev) int hclge_buffer_alloc(struct hclge_dev *hdev) { + struct hclge_pkt_buf_alloc *pkt_buf; int ret; - hdev->priv_buf = devm_kmalloc_array(&hdev->pdev->dev, HCLGE_MAX_TC_NUM, - sizeof(struct hclge_priv_buf), - GFP_KERNEL | __GFP_ZERO); - if (!hdev->priv_buf) + pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL); + if (!pkt_buf) return -ENOMEM; - ret 
= hclge_tx_buffer_calc(hdev, pkt_buf); + ret = hclge_tx_buffer_calc(hdev, pkt_buf); if (ret) { dev_err(&hdev->pdev->dev, "could not calc tx buffer size for all TCs %d\n", ret); - return ret; + goto out; } - ret = hclge_tx_buffer_alloc(hdev); + ret = hclge_tx_buffer_alloc(hdev, pkt_buf); if (ret) { dev_err(&hdev->pdev->dev, "could not alloc tx buffers %d\n", ret); - return ret; + goto out; } - ret = hclge_rx_buffer_calc(hdev); + ret = hclge_rx_buffer_calc(hdev, pkt_buf); if (ret) { dev_err(&hdev->pdev->dev, "could not calc rx priv buffer size for all TCs %d\n", ret); - return ret; + goto out; } - ret = hclge_rx_priv_buf_alloc(hdev); + ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf); if (ret) { dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", ret); - return ret; + goto out; } if (hnae3_dev_dcb_supported(hdev)) { - ret = hclge_rx_priv_wl_config(hdev); + ret = hclge_rx_priv_wl_config(hdev, pkt_buf); if (ret) { dev_err(&hdev->pdev->dev, "could not configure rx private waterline %d\n", ret); - return ret; + goto out; } - ret = hclge_common_thrd_config(hdev); + ret = hclge_common_thrd_config(hdev, pkt_buf); if (ret) { dev_err(&hdev->pdev->dev, "could not configure common threshold %d\n", ret); - return ret; + goto out; } } - ret = hclge_common_wl_config(hdev); - if (ret) { + ret = hclge_common_wl_config(hdev, pkt_buf); + if (ret) dev_err(&hdev->pdev->dev, "could not configure common waterline %d\n", ret); - return ret; - } - return 0; +out: + kfree(pkt_buf); + return ret; } static int hclge_init_roce_base_info(struct hclge_vport *vport) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 9fcfd9395424..4fc36f04c971 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -463,8 +463,6 @@ struct hclge_dev { u32 pkt_buf_size; /* Total pf buf size for tx/rx */ u32 mps; /* Max packet size */ - struct hclge_priv_buf *priv_buf; - struct hclge_shared_buf s_buf; enum hclge_mta_dmac_sel_type mta_mac_sel_type; bool enable_mta; /* Mutilcast filter enable */ -- cgit v1.2.3 From 9dc2145d910e94d1987fd165ac7643777fcf17c4 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Wed, 27 Sep 2017 09:45:25 +0800 Subject: net: hns3: Add support for PFC setting in TM module This patch adds a pfc_pause_en cmd and uses it to configure the PFC option according to fc_mode in hdev->tm_info. Signed-off-by: Yunsheng Lin Signed-off-by: David S.
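The MAC pause half of this setup reduces to mapping the four non-PFC flow-control modes onto a (tx_en, rx_en) pair, as the hclge_mac_pause_setup_hw() hunk below does; here is a compact stand-alone rendering of that mapping, with an illustrative enum:

#include <stdbool.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL, FC_PFC };

/* Translate the flow-control mode into MAC pause enables. */
static void fc_mode_to_pause(enum fc_mode mode, bool *tx_en, bool *rx_en)
{
    switch (mode) {
    case FC_NONE:
        *tx_en = false; *rx_en = false;
        break;
    case FC_RX_PAUSE:
        *tx_en = false; *rx_en = true;
        break;
    case FC_TX_PAUSE:
        *tx_en = true;  *rx_en = false;
        break;
    case FC_FULL:
    default: /* FC_PFC never reaches this path; the caller sends the
              * dedicated pfc_pause_en command instead. */
        *tx_en = true;  *rx_en = true;
        break;
    }
}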
Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 68 ++++++++++++++++++++-- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 5 ++ 2 files changed, 68 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 73a75d7cc551..0b4b5d9b0798 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -124,6 +124,20 @@ static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx) return hclge_cmd_send(&hdev->hw, &desc, 1); } +static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap, + u8 pfc_bitmap) +{ + struct hclge_desc desc; + struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)&desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false); + + pfc->tx_rx_en_bitmap = tx_rx_bitmap; + pfc->pri_en_bitmap = pfc_bitmap; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id) { u8 tc; @@ -969,20 +983,64 @@ static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev) return hclge_tm_schd_mode_hw(hdev); } +static int hclge_pfc_setup_hw(struct hclge_dev *hdev) +{ + u8 enable_bitmap = 0; + + if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) + enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK | + HCLGE_RX_MAC_PAUSE_EN_MSK; + + return hclge_pfc_pause_en_cfg(hdev, enable_bitmap, + hdev->tm_info.hw_pfc_map); +} + +static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev) +{ + bool tx_en, rx_en; + + switch (hdev->tm_info.fc_mode) { + case HCLGE_FC_NONE: + tx_en = false; + rx_en = false; + break; + case HCLGE_FC_RX_PAUSE: + tx_en = false; + rx_en = true; + break; + case HCLGE_FC_TX_PAUSE: + tx_en = true; + rx_en = false; + break; + case HCLGE_FC_FULL: + tx_en = true; + rx_en = true; + break; + default: + tx_en = true; + rx_en = true; + } + + return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en); +} + int hclge_pause_setup_hw(struct hclge_dev *hdev) { - bool en = hdev->tm_info.fc_mode != HCLGE_FC_PFC; int ret; u8 i; - ret = hclge_mac_pause_en_cfg(hdev, en, en); - if (ret) - return ret; + if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) + return hclge_mac_pause_setup_hw(hdev); - /* Only DCB-supported dev supports qset back pressure setting */ + /* Only DCB-supported dev supports qset back pressure and pfc cmd */ if (!hnae3_dev_dcb_supported(hdev)) return 0; + /* When MAC is GE Mode, hdev does not support pfc setting */ + ret = hclge_pfc_setup_hw(hdev); + if (ret) + dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret); + for (i = 0; i < hdev->tm_info.num_tc; i++) { ret = hclge_tm_qs_bp_cfg(hdev, i); if (ret) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 85158b0d73fe..8ecd83c50f47 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -94,6 +94,11 @@ struct hclge_bp_to_qs_map_cmd { u32 rsvd1; }; +struct hclge_pfc_en_cmd { + u8 tx_rx_en_bitmap; + u8 pri_en_bitmap; +}; + #define hclge_tm_set_field(dest, string, val) \ hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \ (HCLGE_TM_SHAP_##string##_LSH), val) -- cgit v1.2.3 From 0a5677d39ef12739c9c10ef6e8e5f4b0805bfe71 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Wed, 27 Sep 2017 09:45:26 +0800 Subject: net: hns3: Add support for port shaper setting in TM module This patch 
adds a tm_port_shaper cmd and sets the port shaper to HCLGE_ETHER_MAX_RATE during the TM initialization process. Signed-off-by: Yunsheng Lin Signed-off-by: David S. Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 32 ++++++++++++++++++++++ .../net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 4 +++ 2 files changed, 36 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 0b4b5d9b0798..f79cebd7c95b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -301,6 +301,34 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, return hclge_cmd_send(&hdev->hw, &desc, 1); } +static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) +{ + struct hclge_port_shapping_cmd *shap_cfg_cmd; + struct hclge_desc desc; + u32 shapping_para = 0; + u8 ir_u, ir_b, ir_s; + int ret; + + ret = hclge_shaper_para_calc(HCLGE_ETHER_MAX_RATE, + HCLGE_SHAPER_LVL_PORT, + &ir_b, &ir_u, &ir_s); + if (ret) + return ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false); + shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; + + hclge_tm_set_field(shapping_para, IR_B, ir_b); + hclge_tm_set_field(shapping_para, IR_U, ir_u); + hclge_tm_set_field(shapping_para, IR_S, ir_s); + hclge_tm_set_field(shapping_para, BS_B, HCLGE_SHAPER_BS_U_DEF); + hclge_tm_set_field(shapping_para, BS_S, HCLGE_SHAPER_BS_S_DEF); + + shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev, enum hclge_shap_bucket bucket, u8 pri_id, u8 ir_b, u8 ir_u, u8 ir_s, @@ -864,6 +892,10 @@ static int hclge_tm_shaper_cfg(struct hclge_dev *hdev) { int ret; + ret = hclge_tm_port_shaper_cfg(hdev); + if (ret) + return ret; + ret = hclge_tm_pg_shaper_cfg(hdev); if (ret) return ret; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 8ecd83c50f47..19a01e41c8b0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -99,6 +99,10 @@ struct hclge_pfc_en_cmd { u8 pri_en_bitmap; }; +struct hclge_port_shapping_cmd { + __le32 port_shapping_para; +}; + #define hclge_tm_set_field(dest, string, val) \ hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \ (HCLGE_TM_SHAP_##string##_LSH), val) -- cgit v1.2.3 From cc9bb43ab394f14096a55ee6101af0e804c05f0f Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Wed, 27 Sep 2017 09:45:27 +0800 Subject: net: hns3: Add tc-based TM support for sriov enabled port When SR-IOV is enabled and TM is in tc-based mode, the VFs' TM parameters are not set during the TM initialization process. This patch adds tc-based TM support for SR-IOV enabled ports using the information in the vport struct. Signed-off-by: Yunsheng Lin Signed-off-by: David S.
Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 49 ++++++++++++++-------- 1 file changed, 31 insertions(+), 18 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index f79cebd7c95b..ea94d23a79f7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -388,13 +388,13 @@ static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id) return hclge_cmd_send(&hdev->hw, &desc, 1); } -static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id) +static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode) { struct hclge_desc desc; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false); - if (hdev->tm_info.tc_info[qs_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR) + if (mode == HCLGE_SCH_MODE_DWRR) desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK); else desc.data[1] = 0; @@ -638,17 +638,18 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev) { struct hclge_vport *vport = hdev->vport; int ret; - u32 i; + u32 i, k; if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { /* Cfg qs -> pri mapping, one by one mapping */ - for (i = 0; i < hdev->tm_info.num_tc; i++) { - ret = hclge_tm_qs_to_pri_map_cfg(hdev, i, i); - if (ret) - return ret; - } + for (k = 0; k < hdev->num_alloc_vport; k++) + for (i = 0; i < hdev->tm_info.num_tc; i++) { + ret = hclge_tm_qs_to_pri_map_cfg( + hdev, vport[k].qs_offset + i, i); + if (ret) + return ret; + } } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) { - int k; /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */ for (k = 0; k < hdev->num_alloc_vport; k++) for (i = 0; i < HNAE3_MAX_TC; i++) { @@ -797,10 +798,11 @@ static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev) static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev) { + struct hclge_vport *vport = hdev->vport; struct hclge_pg_info *pg_info; u8 dwrr; int ret; - u32 i; + u32 i, k; for (i = 0; i < hdev->tm_info.num_tc; i++) { pg_info = @@ -811,9 +813,13 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev) if (ret) return ret; - ret = hclge_tm_qs_weight_cfg(hdev, i, dwrr); - if (ret) - return ret; + for (k = 0; k < hdev->num_alloc_vport; k++) { + ret = hclge_tm_qs_weight_cfg( + hdev, vport[k].qs_offset + i, + vport[k].dwrr); + if (ret) + return ret; + } } return 0; @@ -944,7 +950,10 @@ static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport) return ret; for (i = 0; i < kinfo->num_tc; i++) { - ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i); + u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode; + + ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i, + sch_mode); if (ret) return ret; } @@ -956,7 +965,7 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev) { struct hclge_vport *vport = hdev->vport; int ret; - u8 i; + u8 i, k; if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { for (i = 0; i < hdev->tm_info.num_tc; i++) { @@ -964,9 +973,13 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev) if (ret) return ret; - ret = hclge_tm_qs_schd_mode_cfg(hdev, i); - if (ret) - return ret; + for (k = 0; k < hdev->num_alloc_vport; k++) { + ret = hclge_tm_qs_schd_mode_cfg( + hdev, vport[k].qs_offset + i, + HCLGE_SCH_MODE_DWRR); + if (ret) + return ret; + } } } else { for (i = 0; i < hdev->num_alloc_vport; i++) { -- cgit v1.2.3 From 
77f255c1c695c72acb1d1c47d30323a273774ae6 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Wed, 27 Sep 2017 09:45:28 +0800 Subject: net: hns3: Add some interface for the support of DCB feature This patch adds some interfaces and exports some interfaces from hclge_tm and hclge_main to support the upcoming DCB feature. Signed-off-by: Yunsheng Lin Signed-off-by: David S. Miller --- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 3 +- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 3 ++ .../net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 48 ++++++++++++++++++++-- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 6 +++ 4 files changed, 55 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 61632feb8c4e..644f7ff54081 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -30,7 +30,6 @@ #define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f)) #define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f)) -static int hclge_rss_init_hw(struct hclge_dev *hdev); static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, enum hclge_mta_dmac_sel_type mta_mac_sel, bool enable); @@ -2655,7 +2654,7 @@ static int hclge_get_tc_size(struct hnae3_handle *handle) return hdev->rss_size_max; } -static int hclge_rss_init_hw(struct hclge_dev *hdev) +int hclge_rss_init_hw(struct hclge_dev *hdev) { const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ; struct hclge_vport *vport = hdev->vport; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 4fc36f04c971..394b58788065 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -515,4 +515,7 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue) int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex); int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid, bool is_kill, u16 vlan, u8 qos, __be16 proto); + +int hclge_buffer_alloc(struct hclge_dev *hdev); +int hclge_rss_init_hw(struct hclge_dev *hdev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index ea94d23a79f7..8295684da5ba 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -883,10 +883,14 @@ static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev) return 0; } -static int hclge_tm_map_cfg(struct hclge_dev *hdev) +int hclge_tm_map_cfg(struct hclge_dev *hdev) { int ret; + ret = hclge_up_to_tc_map(hdev); + if (ret) + return ret; + ret = hclge_tm_pg_to_pri_map(hdev); if (ret) return ret; @@ -994,7 +998,7 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev) return 0; } -static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev) +int hclge_tm_schd_mode_hw(struct hclge_dev *hdev) { int ret; @@ -1092,7 +1096,45 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev) return ret; } - return hclge_up_to_tc_map(hdev); + return 0; +} + +int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) +{ + struct hclge_vport *vport = hdev->vport; + struct hnae3_knic_private_info *kinfo; + u32 i, k; + + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { + if (prio_tc[i] >= hdev->tm_info.num_tc) + return -EINVAL; +
hdev->tm_info.prio_tc[i] = prio_tc[i]; + + for (k = 0; k < hdev->num_alloc_vport; k++) { + kinfo = &vport[k].nic.kinfo; + kinfo->prio_tc[i] = prio_tc[i]; + } + } + return 0; +} + +void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) +{ + u8 i, bit_map = 0; + + hdev->tm_info.num_tc = num_tc; + + for (i = 0; i < hdev->tm_info.num_tc; i++) + bit_map |= BIT(i); + + if (!bit_map) { + bit_map = 1; + hdev->tm_info.num_tc = 1; + } + + hdev->hw_tc_map = bit_map; + + hclge_tm_schd_info_init(hdev); } int hclge_tm_init_hw(struct hclge_dev *hdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 19a01e41c8b0..bf59961918ab 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -112,4 +112,10 @@ struct hclge_port_shapping_cmd { int hclge_tm_schd_init(struct hclge_dev *hdev); int hclge_pause_setup_hw(struct hclge_dev *hdev); +int hclge_tm_schd_mode_hw(struct hclge_dev *hdev); +int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc); +void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc); +int hclge_tm_dwrr_cfg(struct hclge_dev *hdev); +int hclge_tm_map_cfg(struct hclge_dev *hdev); +int hclge_tm_init_hw(struct hclge_dev *hdev); #endif -- cgit v1.2.3 From cacde272dd00496c2c1c36606a56b340cd967603 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Wed, 27 Sep 2017 09:45:29 +0800 Subject: net: hns3: Add hclge_dcb module for the support of DCB feature The hclge_dcb module calls the interfaces from hclge_main/tm and provides an interface for the dcb netlink layer. This patch also updates the Makefiles required to build the DCB-supported code in the HNS3 Ethernet driver and updates the existing Kconfig file in the hisilicon folder. Signed-off-by: Yunsheng Lin Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/Kconfig | 9 + drivers/net/ethernet/hisilicon/hns3/hnae3.h | 17 ++ .../net/ethernet/hisilicon/hns3/hns3pf/Makefile | 2 + .../net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c | 304 +++++++++++++++++++++ .../net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h | 21 ++ .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 25 +- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 3 + 7 files changed, 375 insertions(+), 6 deletions(-) create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index 91c7bdb9b43c..9d7cb0387bf7 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -103,4 +103,13 @@ config HNS3_ENET family of SoCs. This module depends upon HNAE3 driver to access the HNAE3 devices and their associated operations. +config HNS3_DCB + bool "Hisilicon HNS3 Data Center Bridge Support" + default n + depends on HNS3 && HNS3_HCLGE && DCB + ---help--- + Say Y here if you want to use Data Center Bridging (DCB) in the HNS3 driver. + + If unsure, say N.
+ endif # NET_VENDOR_HISILICON diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 1a01cadfe5f3..c677530841cf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -28,6 +28,7 @@ */ #include +#include #include #include #include @@ -131,6 +132,7 @@ struct hnae3_client_ops { int (*init_instance)(struct hnae3_handle *handle); void (*uninit_instance)(struct hnae3_handle *handle, bool reset); void (*link_status_change)(struct hnae3_handle *handle, bool state); + int (*setup_tc)(struct hnae3_handle *handle, u8 tc); }; #define HNAE3_CLIENT_NAME_LENGTH 16 @@ -363,6 +365,20 @@ struct hnae3_ae_ops { u16 vlan, u8 qos, __be16 proto); }; +struct hnae3_dcb_ops { + /* IEEE 802.1Qaz std */ + int (*ieee_getets)(struct hnae3_handle *, struct ieee_ets *); + int (*ieee_setets)(struct hnae3_handle *, struct ieee_ets *); + int (*ieee_getpfc)(struct hnae3_handle *, struct ieee_pfc *); + int (*ieee_setpfc)(struct hnae3_handle *, struct ieee_pfc *); + + /* DCBX configuration */ + u8 (*getdcbx)(struct hnae3_handle *); + u8 (*setdcbx)(struct hnae3_handle *, u8); + + int (*map_update)(struct hnae3_handle *); +}; + struct hnae3_ae_algo { const struct hnae3_ae_ops *ops; struct list_head node; @@ -394,6 +410,7 @@ struct hnae3_knic_private_info { u16 num_tqps; /* total number of TQPs in this handle */ struct hnae3_queue **tqp; /* array base of all TQPs in this instance */ + const struct hnae3_dcb_ops *dcb_ops; }; struct hnae3_roce_private_info { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile index 162e8a42acd0..7023dc878086 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile @@ -7,5 +7,7 @@ ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 obj-$(CONFIG_HNS3_HCLGE) += hclge.o hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o +hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o + obj-$(CONFIG_HNS3_ENET) += hns3.o hns3-objs = hns3_enet.o hns3_ethtool.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c new file mode 100644 index 000000000000..1b30a6f966d8 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -0,0 +1,304 @@ +/* + * Copyright (c) 2016-2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include "hclge_main.h" +#include "hclge_tm.h" +#include "hnae3.h" + +#define BW_PERCENT 100 + +static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev, + struct ieee_ets *ets) +{ + u8 i; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + hdev->tm_info.tc_info[i].tc_sch_mode = + HCLGE_SCH_MODE_SP; + hdev->tm_info.pg_info[0].tc_dwrr[i] = 0; + break; + case IEEE_8021QAZ_TSA_ETS: + hdev->tm_info.tc_info[i].tc_sch_mode = + HCLGE_SCH_MODE_DWRR; + hdev->tm_info.pg_info[0].tc_dwrr[i] = + ets->tc_tx_bw[i]; + break; + default: + /* Hardware only supports SP (strict priority) + * or ETS (enhanced transmission selection) + * algorithms, if we receive some other value + * from dcbnl, then throw an error. 
+ */ + return -EINVAL; + } + } + + return hclge_tm_prio_tc_info_update(hdev, ets->prio_tc); +} + +static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev, + struct ieee_ets *ets) +{ + u32 i; + + memset(ets, 0, sizeof(*ets)); + ets->willing = 1; + ets->ets_cap = hdev->tc_max; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + ets->prio_tc[i] = hdev->tm_info.prio_tc[i]; + ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i]; + + if (hdev->tm_info.tc_info[i].tc_sch_mode == + HCLGE_SCH_MODE_SP) + ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT; + else + ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS; + } +} + +/* IEEE std */ +static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct hclge_dev *hdev = vport->back; + + hclge_tm_info_to_ieee_ets(hdev, ets); + + return 0; +} + +static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, + u8 *tc, bool *changed) +{ + u32 total_ets_bw = 0; + u8 max_tc = 0; + u8 i; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (ets->prio_tc[i] >= hdev->tc_max || + i >= hdev->tc_max) + return -EINVAL; + + if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i]) + *changed = true; + + if (ets->prio_tc[i] > max_tc) + max_tc = ets->prio_tc[i]; + + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_STRICT: + if (hdev->tm_info.tc_info[i].tc_sch_mode != + HCLGE_SCH_MODE_SP) + *changed = true; + break; + case IEEE_8021QAZ_TSA_ETS: + if (hdev->tm_info.tc_info[i].tc_sch_mode != + HCLGE_SCH_MODE_DWRR) + *changed = true; + + total_ets_bw += ets->tc_tx_bw[i]; + break; + default: + return -EINVAL; + } + } + + if (total_ets_bw != BW_PERCENT) + return -EINVAL; + + *tc = max_tc + 1; + if (*tc != hdev->tm_info.num_tc) + *changed = true; + + return 0; +} + +static int hclge_map_update(struct hnae3_handle *h) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct hclge_dev *hdev = vport->back; + int ret; + + ret = hclge_tm_map_cfg(hdev); + if (ret) + return ret; + + ret = hclge_tm_schd_mode_hw(hdev); + if (ret) + return ret; + + ret = hclge_pause_setup_hw(hdev); + if (ret) + return ret; + + ret = hclge_buffer_alloc(hdev); + if (ret) + return ret; + + return hclge_rss_init_hw(hdev); +} + +static int hclge_client_setup_tc(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + struct hnae3_client *client; + struct hnae3_handle *handle; + int ret; + u32 i; + + for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { + handle = &vport[i].nic; + client = handle->client; + + if (!client || !client->ops || !client->ops->setup_tc) + continue; + + ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct hclge_dev *hdev = vport->back; + bool map_changed = false; + u8 num_tc = 0; + int ret; + + if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed); + if (ret) + return ret; + + hclge_tm_schd_info_update(hdev, num_tc); + + ret = hclge_ieee_ets_to_tm_info(hdev, ets); + if (ret) + return ret; + + if (map_changed) { + ret = hclge_client_setup_tc(hdev); + if (ret) + return ret; + } + + return hclge_tm_dwrr_cfg(hdev); +} + +static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct hclge_dev *hdev = vport->back; + u8 i, j, pfc_map, *prio_tc; + + memset(pfc, 0, sizeof(*pfc)); + 
pfc->pfc_cap = hdev->pfc_max; + prio_tc = hdev->tm_info.prio_tc; + pfc_map = hdev->tm_info.hw_pfc_map; + + /* Pfc setting is based on TC */ + for (i = 0; i < hdev->tm_info.num_tc; i++) { + for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) { + if ((prio_tc[j] == i) && (pfc_map & BIT(i))) + pfc->pfc_en |= BIT(j); + } + } + + return 0; +} + +static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct hclge_dev *hdev = vport->back; + u8 i, j, pfc_map, *prio_tc; + + if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + return -EINVAL; + + prio_tc = hdev->tm_info.prio_tc; + pfc_map = 0; + + for (i = 0; i < hdev->tm_info.num_tc; i++) { + for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) { + if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) { + pfc_map |= BIT(i); + break; + } + } + } + + if (pfc_map == hdev->tm_info.hw_pfc_map) + return 0; + + hdev->tm_info.hw_pfc_map = pfc_map; + + return hclge_pause_setup_hw(hdev); +} + +/* DCBX configuration */ +static u8 hclge_getdcbx(struct hnae3_handle *h) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct hclge_dev *hdev = vport->back; + + return hdev->dcbx_cap; +} + +static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct hclge_dev *hdev = vport->back; + + /* No support for LLD_MANAGED modes or CEE */ + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || + (mode & DCB_CAP_DCBX_VER_CEE) || + !(mode & DCB_CAP_DCBX_HOST)) + return 1; + + hdev->dcbx_cap = mode; + + return 0; +} + +static const struct hnae3_dcb_ops hns3_dcb_ops = { + .ieee_getets = hclge_ieee_getets, + .ieee_setets = hclge_ieee_setets, + .ieee_getpfc = hclge_ieee_getpfc, + .ieee_setpfc = hclge_ieee_setpfc, + .getdcbx = hclge_getdcbx, + .setdcbx = hclge_setdcbx, + .map_update = hclge_map_update, +}; + +void hclge_dcb_ops_set(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + struct hnae3_knic_private_info *kinfo; + + /* Hdev does not support DCB or vport is + * not a pf, then dcb_ops is not set. + */ + if (!hnae3_dev_dcb_supported(hdev) || + vport->vport_id != 0) + return; + + kinfo = &vport->nic.kinfo; + kinfo->dcb_ops = &hns3_dcb_ops; + hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h new file mode 100644 index 000000000000..7d808ee96694 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __HCLGE_DCB_H__ +#define __HCLGE_DCB_H__ + +#include "hclge_main.h" + +#ifdef CONFIG_HNS3_DCB +void hclge_dcb_ops_set(struct hclge_dev *hdev); +#else +static inline void hclge_dcb_ops_set(struct hclge_dev *hdev) {} +#endif + +#endif /* __HCLGE_DCB_H__ */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 644f7ff54081..dd220eab7f53 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -19,6 +19,7 @@ #include #include "hclge_cmd.h" +#include "hclge_dcb.h" #include "hclge_main.h" #include "hclge_mdio.h" #include "hclge_tm.h" @@ -1057,7 +1058,7 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->hw.mac.phy_addr = cfg.phy_addr; hdev->num_desc = cfg.tqp_desc_num; hdev->tm_info.num_pg = 1; - hdev->tm_info.num_tc = cfg.tc_num; + hdev->tc_max = cfg.tc_num; hdev->tm_info.hw_pfc_map = 0; ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); @@ -1066,15 +1067,25 @@ static int hclge_configure(struct hclge_dev *hdev) return ret; } - if ((hdev->tm_info.num_tc > HNAE3_MAX_TC) || - (hdev->tm_info.num_tc < 1)) { + if ((hdev->tc_max > HNAE3_MAX_TC) || + (hdev->tc_max < 1)) { dev_warn(&hdev->pdev->dev, "TC num = %d.\n", - hdev->tm_info.num_tc); - hdev->tm_info.num_tc = 1; + hdev->tc_max); + hdev->tc_max = 1; } + /* Dev does not support DCB */ + if (!hnae3_dev_dcb_supported(hdev)) { + hdev->tc_max = 1; + hdev->pfc_max = 0; + } else { + hdev->pfc_max = hdev->tc_max; + } + + hdev->tm_info.num_tc = hdev->tc_max; + /* Currently not support uncontiuous tc */ - for (i = 0; i < cfg.tc_num; i++) + for (i = 0; i < hdev->tm_info.num_tc; i++) hnae_set_bit(hdev->hw_tc_map, i, 1); if (!hdev->num_vmdq_vport && !hdev->num_req_vfs) @@ -4238,6 +4249,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + hclge_dcb_ops_set(hdev); + setup_timer(&hdev->service_timer, hclge_service_timer, (unsigned long)hdev); INIT_WORK(&hdev->service_task, hclge_service_task); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 394b58788065..7c66c00e8a3e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -421,8 +421,11 @@ struct hclge_dev { #define HCLGE_FLAG_TC_BASE_SCH_MODE 1 #define HCLGE_FLAG_VNET_BASE_SCH_MODE 2 u8 tx_sch_mode; + u8 tc_max; + u8 pfc_max; u8 default_up; + u8 dcbx_cap; struct hclge_tm_info tm_info; u16 num_msi; -- cgit v1.2.3 From 986743dbf0a70211bba594b5abee33b6661feaa9 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Wed, 27 Sep 2017 09:45:30 +0800 Subject: net: hns3: Add dcb netlink interface for the support of DCB feature This patch adds the dcb netlink interface by calling the interfaces from the hclge_dcb module. It also updates the Makefile in order to build the hns3_dcbnl module. Signed-off-by: Yunsheng Lin Signed-off-by: David S.
Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/Makefile | 2 + .../ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c | 106 +++++++++++++++++++++ .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 2 + .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h | 7 ++ 4 files changed, 117 insertions(+) create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile index 7023dc878086..d2b20d01a58c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile @@ -11,3 +11,5 @@ hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o obj-$(CONFIG_HNS3_ENET) += hns3.o hns3-objs = hns3_enet.o hns3_ethtool.o + +hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c new file mode 100644 index 000000000000..9832172bfb08 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2016-2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include "hnae3.h" +#include "hns3_enet.h" + +static +int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->kinfo.dcb_ops->ieee_getets) + return h->kinfo.dcb_ops->ieee_getets(h, ets); + + return -EOPNOTSUPP; +} + +static +int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->kinfo.dcb_ops->ieee_setets) + return h->kinfo.dcb_ops->ieee_setets(h, ets); + + return -EOPNOTSUPP; +} + +static +int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->kinfo.dcb_ops->ieee_getpfc) + return h->kinfo.dcb_ops->ieee_getpfc(h, pfc); + + return -EOPNOTSUPP; +} + +static +int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->kinfo.dcb_ops->ieee_setpfc) + return h->kinfo.dcb_ops->ieee_setpfc(h, pfc); + + return -EOPNOTSUPP; +} + +/* DCBX configuration */ +static u8 hns3_dcbnl_getdcbx(struct net_device *ndev) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->kinfo.dcb_ops->getdcbx) + return h->kinfo.dcb_ops->getdcbx(h); + + return 0; +} + +/* return 0 if successful, otherwise fail */ +static u8 hns3_dcbnl_setdcbx(struct net_device *ndev, u8 mode) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->kinfo.dcb_ops->setdcbx) + return h->kinfo.dcb_ops->setdcbx(h, mode); + + return 1; +} + +static const struct dcbnl_rtnl_ops hns3_dcbnl_ops = { + .ieee_getets = hns3_dcbnl_ieee_getets, + .ieee_setets = hns3_dcbnl_ieee_setets, + .ieee_getpfc = hns3_dcbnl_ieee_getpfc, + .ieee_setpfc = hns3_dcbnl_ieee_setpfc, + .getdcbx = hns3_dcbnl_getdcbx, + .setdcbx = hns3_dcbnl_setdcbx, +}; + +/* 
hclge_dcbnl_setup - DCBNL setup + * @handle: the corresponding vport handle + * Set up DCBNL + */ +void hns3_dcbnl_setup(struct hnae3_handle *handle) +{ + struct net_device *dev = handle->kinfo.netdev; + + if (!handle->kinfo.dcb_ops) + return; + + dev->dcbnl_ops = &hns3_dcbnl_ops; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 35369e1c8036..11dab26f3543 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -2790,6 +2790,8 @@ static int hns3_client_init(struct hnae3_handle *handle) goto out_reg_netdev_fail; } + hns3_dcbnl_setup(handle); + /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */ netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h index 7e8746189747..481eada73e2d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h @@ -590,4 +590,11 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) void hns3_ethtool_set_ops(struct net_device *netdev); int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); + +#ifdef CONFIG_HNS3_DCB +void hns3_dcbnl_setup(struct hnae3_handle *handle); +#else +static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {} +#endif + #endif -- cgit v1.2.3 From 7979a223305016625d211dd051569933c433f81e Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Wed, 27 Sep 2017 09:45:31 +0800 Subject: net: hns3: Setting for fc_mode and dcb enable flag in TM module After the DCB feature is supported, fc_mode and dcb enable flag must be set according to the DCB parameter. Signed-off-by: Yunsheng Lin Signed-off-by: David S. Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 34 +++++++++++++++++++--- 1 file changed, 30 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 8295684da5ba..359ee670d1e1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -486,7 +486,11 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev) hdev->tm_info.prio_tc[i] = (i >= hdev->tm_info.num_tc) ? 0 : i; - hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE; + /* DCB is enabled if we have more than 1 TC */ + if (hdev->tm_info.num_tc > 1) + hdev->flag |= HCLGE_FLAG_DCB_ENABLE; + else + hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE; } static void hclge_tm_pg_info_init(struct hclge_dev *hdev) @@ -512,6 +516,24 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev) } } +static void hclge_pfc_info_init(struct hclge_dev *hdev) +{ + if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) { + if (hdev->fc_mode_last_time == HCLGE_FC_PFC) + dev_warn(&hdev->pdev->dev, + "DCB is disable, but last mode is FC_PFC\n"); + + hdev->tm_info.fc_mode = hdev->fc_mode_last_time; + } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) { + /* fc_mode_last_time record the last fc_mode when + * DCB is enabled, so that fc_mode can be set to + * the correct value when DCB is disabled. 
+ */ + hdev->fc_mode_last_time = hdev->tm_info.fc_mode; + hdev->tm_info.fc_mode = HCLGE_FC_PFC; + } +} + static int hclge_tm_schd_info_init(struct hclge_dev *hdev) { if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) && @@ -524,8 +546,7 @@ static int hclge_tm_schd_info_init(struct hclge_dev *hdev) hclge_tm_vport_info_update(hdev); - hdev->tm_info.fc_mode = HCLGE_FC_NONE; - hdev->fc_mode_last_time = hdev->tm_info.fc_mode; + hclge_pfc_info_init(hdev); return 0; } @@ -1158,8 +1179,13 @@ int hclge_tm_init_hw(struct hclge_dev *hdev) int hclge_tm_schd_init(struct hclge_dev *hdev) { - int ret = hclge_tm_schd_info_init(hdev); + int ret; + + /* fc_mode is HCLGE_FC_FULL on reset */ + hdev->tm_info.fc_mode = HCLGE_FC_FULL; + hdev->fc_mode_last_time = hdev->tm_info.fc_mode; + ret = hclge_tm_schd_info_init(hdev); if (ret) return ret; -- cgit v1.2.3 From 9df8f79a4d2957fa3083e8fda0843d8c010351a7 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Wed, 27 Sep 2017 09:45:32 +0800 Subject: net: hns3: Add DCB support when interacting with network stack When using lldptool to configure DCB parameters, the hclge_dcb module calls client_ops->setup_tc to tell the network stack which queues and priorities are used for a specific TC. Signed-off-by: Yunsheng Lin Signed-off-by: David S. Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 102 ++++++++++++++++++--- 1 file changed, 87 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 11dab26f3543..4a0890f98b70 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -196,6 +196,32 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector) tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW; } +static int hns3_nic_set_real_num_queue(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + struct hnae3_knic_private_info *kinfo = &h->kinfo; + unsigned int queue_size = kinfo->rss_size * kinfo->num_tc; + int ret; + + ret = netif_set_real_num_tx_queues(netdev, queue_size); + if (ret) { + netdev_err(netdev, + "netif_set_real_num_tx_queues fail, ret=%d!\n", + ret); + return ret; + } + + ret = netif_set_real_num_rx_queues(netdev, queue_size); + if (ret) { + netdev_err(netdev, + "netif_set_real_num_rx_queues fail, ret=%d!\n", ret); + return ret; + } + + return 0; +} + static int hns3_nic_net_up(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); @@ -232,26 +258,13 @@ out_start_err: static int hns3_nic_net_open(struct net_device *netdev) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; int ret; netif_carrier_off(netdev); - ret = netif_set_real_num_tx_queues(netdev, h->kinfo.num_tqps); - if (ret) { - netdev_err(netdev, - "netif_set_real_num_tx_queues fail, ret=%d!\n", - ret); - return ret; - } - - ret = netif_set_real_num_rx_queues(netdev, h->kinfo.num_tqps); - if (ret) { - netdev_err(netdev, - "netif_set_real_num_rx_queues fail, ret=%d!\n", ret); + ret = hns3_nic_set_real_num_queue(netdev); + if (ret) return ret; - } ret = hns3_nic_net_up(netdev); if (ret) { @@ -2848,10 +2861,69 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) } } +static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct
net_device *ndev = kinfo->netdev; + bool if_running = netif_running(ndev); + int ret; + u8 i; + + if (tc > HNAE3_MAX_TC) + return -EINVAL; + + if (!ndev) + return -ENODEV; + + ret = netdev_set_num_tc(ndev, tc); + if (ret) + return ret; + + if (if_running) { + (void)hns3_nic_net_stop(ndev); + msleep(100); + } + + ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ? + kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP; + if (ret) + goto err_out; + + if (tc <= 1) { + netdev_reset_tc(ndev); + goto out; + } + + for (i = 0; i < HNAE3_MAX_TC; i++) { + struct hnae3_tc_info *tc_info = &kinfo->tc_info[i]; + + if (tc_info->enable) + netdev_set_tc_queue(ndev, + tc_info->tc, + tc_info->tqp_count, + tc_info->tqp_offset); + } + + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { + netdev_set_prio_tc_map(ndev, i, + kinfo->prio_tc[i]); + } + +out: + ret = hns3_nic_set_real_num_queue(ndev); + +err_out: + if (if_running) + (void)hns3_nic_net_open(ndev); + + return ret; +} + const struct hnae3_client_ops client_ops = { .init_instance = hns3_client_init, .uninit_instance = hns3_client_uninit, .link_status_change = hns3_link_status_change, + .setup_tc = hns3_client_setup_tc, }; /* hns3_init_module - Driver registration routine -- cgit v1.2.3 From 2b634bb0686e43a6338fe779fbabd72b6b928fdc Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 14 Jul 2017 09:10:14 -0400 Subject: i40e/i40evf: rename bytes_per_int to bytes_per_usec This value is not calculating bytes_per_int, which would actually just be bytes/ITR_COUNTDOWN_START, but rather it's calculating bytes/usecs. Rename the variable for clarity so that future developers understand what the value is actually calculating. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 12 ++++++------ drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index f426762bd83a..d9fdf69bbc6e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -960,14 +960,14 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) { enum i40e_latency_range new_latency_range = rc->latency_range; u32 new_itr = rc->itr; - int bytes_per_int; + int bytes_per_usec; unsigned int usecs, estimated_usecs; if (rc->total_packets == 0 || !rc->itr) return false; usecs = (rc->itr << 1) * ITR_COUNTDOWN_START; - bytes_per_int = rc->total_bytes / usecs; + bytes_per_usec = rc->total_bytes / usecs; /* The calculations in this algorithm depend on interrupts actually * firing at the ITR rate. 
This may not happen if the packet rate is @@ -993,18 +993,18 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) */ switch (new_latency_range) { case I40E_LOWEST_LATENCY: - if (bytes_per_int > 10) + if (bytes_per_usec > 10) new_latency_range = I40E_LOW_LATENCY; break; case I40E_LOW_LATENCY: - if (bytes_per_int > 20) + if (bytes_per_usec > 20) new_latency_range = I40E_BULK_LATENCY; - else if (bytes_per_int <= 10) + else if (bytes_per_usec <= 10) new_latency_range = I40E_LOWEST_LATENCY; break; case I40E_BULK_LATENCY: default: - if (bytes_per_int <= 20) + if (bytes_per_usec <= 20) new_latency_range = I40E_LOW_LATENCY; break; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index c32c62462c84..37e1de886d48 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -358,14 +358,14 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) { enum i40e_latency_range new_latency_range = rc->latency_range; u32 new_itr = rc->itr; - int bytes_per_int; + int bytes_per_usec; unsigned int usecs, estimated_usecs; if (rc->total_packets == 0 || !rc->itr) return false; usecs = (rc->itr << 1) * ITR_COUNTDOWN_START; - bytes_per_int = rc->total_bytes / usecs; + bytes_per_usec = rc->total_bytes / usecs; /* The calculations in this algorithm depend on interrupts actually * firing at the ITR rate. This may not happen if the packet rate is @@ -391,18 +391,18 @@ static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc) */ switch (new_latency_range) { case I40E_LOWEST_LATENCY: - if (bytes_per_int > 10) + if (bytes_per_usec > 10) new_latency_range = I40E_LOW_LATENCY; break; case I40E_LOW_LATENCY: - if (bytes_per_int > 20) + if (bytes_per_usec > 20) new_latency_range = I40E_BULK_LATENCY; - else if (bytes_per_int <= 10) + else if (bytes_per_usec <= 10) new_latency_range = I40E_LOWEST_LATENCY; break; case I40E_BULK_LATENCY: default: - if (bytes_per_int <= 20) + if (bytes_per_usec <= 20) new_latency_range = I40E_LOW_LATENCY; break; } -- cgit v1.2.3 From 16badf758b25bd00528246ab9af938296b9d368d Mon Sep 17 00:00:00 2001 From: Sudheer Mogilappagari Date: Fri, 14 Jul 2017 09:10:15 -0400 Subject: i40e: Fix unqualified module message while bringing link up In the current driver, when ifconfig ethx up is done, the link state doesn't transition to UP inside i40e_open(); it changes after the AQ command response is handled in i40e_handle_link_event(). When pf->hw.phy.link_info.link_info is DOWN inside i40e_open(), the state is transient and invalid, so the log message gets printed based on incorrect info (i.e. link_info and an_info). This commit removes the check for an unqualified module inside i40e_up_complete(). The existing check in i40e_handle_link_event() logs the error message based on correct link state information.
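
As a sanity check on the bytes_per_usec arithmetic from the i40e/i40evf rename above, here is a minimal user-space sketch; it is illustrative only, ITR_COUNTDOWN_START = 100 is an assumption rather than a value quoted in this digest, and the 10/20 thresholds come from the switch statement in that diff:

#include <stdio.h>

#define ITR_COUNTDOWN_START 100 /* assumed, see note above */

int main(void)
{
	unsigned int itr = 50;              /* ITR register value, in 2-usec units */
	unsigned long total_bytes = 300000; /* bytes moved since the last adjustment */

	/* measurement window in usecs, mirroring the driver's expression */
	unsigned int usecs = (itr << 1) * ITR_COUNTDOWN_START;  /* 10000 */
	unsigned long bytes_per_usec = total_bytes / usecs;     /* 30 */

	/* 30 > 20, so this ring would move toward I40E_BULK_LATENCY */
	printf("usecs=%u bytes_per_usec=%lu\n", usecs, bytes_per_usec);
	return 0;
}
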
Signed-off-by: Sudheer Mogilappagari Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 9 --------- 1 file changed, 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 6498da8806cb..b235a27232a8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5470,15 +5470,6 @@ static int i40e_up_complete(struct i40e_vsi *vsi) i40e_print_link_message(vsi, true); netif_tx_start_all_queues(vsi->netdev); netif_carrier_on(vsi->netdev); - } else if (vsi->netdev) { - i40e_print_link_message(vsi, false); - /* need to check for qualified module here*/ - if ((pf->hw.phy.link_info.link_info & - I40E_AQ_MEDIA_AVAILABLE) && - (!(pf->hw.phy.link_info.an_info & - I40E_AQ_QUALIFIED_MODULE))) - netdev_err(vsi->netdev, - "the driver failed to link because an unqualified module was detected."); } /* replay FDIR SB filters */ -- cgit v1.2.3 From 9a03449d3ea0f6b497ff3a3bf6203a5e72c7e6be Mon Sep 17 00:00:00 2001 From: Sudheer Mogilappagari Date: Fri, 14 Jul 2017 09:10:16 -0400 Subject: i40e: Fix link down message when interface is brought up i40e_print_link_message() is intended to compare the new link state with the current link state and print a log message only if the new state is different from the current state. However, in the current driver the new state does not get updated when the link is going down, because of the if condition. When an interface is brought down, vsi->state is set to I40E_VSI_DOWN in i40e_vsi_close(), and later i40e_print_link_message() does not get invoked in i40e_link_event() due to the if condition. Hence the link down message doesn't appear when the link is going down; the down state is seen later during i40e_open(), and the old state gets printed. The actual link state doesn't get updated in i40e_close() or i40e_open(), but only when i40e_handle_link_event() is called inside i40e_clean_adminq_subtask(). This change allows i40e_print_link_message() to be called when the interface is going down and keeps the state information updated. Signed-off-by: Sudheer Mogilappagari Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index b235a27232a8..2e8fe6186b38 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6420,8 +6420,7 @@ static void i40e_link_event(struct i40e_pf *pf) new_link == netif_carrier_ok(vsi->netdev))) return; - if (!test_bit(__I40E_VSI_DOWN, vsi->state)) - i40e_print_link_message(vsi, new_link); + i40e_print_link_message(vsi, new_link); /* Notify the base of the switch tree connected to * the link. Floating VEBs are not notified. -- cgit v1.2.3 From 3fded4663b07f8fa99b9424ca3d5c46b79f6b27e Mon Sep 17 00:00:00 2001 From: Sudheer Mogilappagari Date: Fri, 14 Jul 2017 09:10:18 -0400 Subject: i40e: simplify member variable accesses This commit replaces usage of vsi->back in i40e_print_link_message() (which is actually a PF pointer) with a temporary variable.
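
The temporary-variable pattern applied above is generic: hoist a repeatedly dereferenced pointer chain into a local. A self-contained sketch, with stand-in struct definitions rather than the driver's real ones:

#include <stdio.h>

struct pf  { int link_speed; };
struct vsi { struct pf *back; };

/* Before: every access repeats the vsi->back dereference. */
static void report_verbose(struct vsi *vsi)
{
	printf("speed=%d\n", vsi->back->link_speed);
}

/* After: one local shortens the code and makes the aliasing explicit. */
static void report_terse(struct vsi *vsi)
{
	struct pf *pf = vsi->back;

	printf("speed=%d\n", pf->link_speed);
}

int main(void)
{
	struct pf pf = { .link_speed = 40 };
	struct vsi vsi = { .back = &pf };

	report_verbose(&vsi);
	report_terse(&vsi);
	return 0;
}
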
Signed-off-by: Sudheer Mogilappagari Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 2e8fe6186b38..3c650917b54f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5346,13 +5346,14 @@ out: void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) { enum i40e_aq_link_speed new_speed; + struct i40e_pf *pf = vsi->back; char *speed = "Unknown"; char *fc = "Unknown"; char *fec = ""; char *req_fec = ""; char *an = ""; - new_speed = vsi->back->hw.phy.link_info.link_speed; + new_speed = pf->hw.phy.link_info.link_speed; if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed)) return; @@ -5366,13 +5367,13 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) /* Warn user if link speed on NPAR enabled partition is not at * least 10GB */ - if (vsi->back->hw.func_caps.npar_enable && - (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB || - vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB)) + if (pf->hw.func_caps.npar_enable && + (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB || + pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB)) netdev_warn(vsi->netdev, "The partition detected link speed that is less than 10Gbps\n"); - switch (vsi->back->hw.phy.link_info.link_speed) { + switch (pf->hw.phy.link_info.link_speed) { case I40E_LINK_SPEED_40GB: speed = "40 G"; break; @@ -5395,7 +5396,7 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) break; } - switch (vsi->back->hw.fc.current_mode) { + switch (pf->hw.fc.current_mode) { case I40E_FC_FULL: fc = "RX/TX"; break; @@ -5410,18 +5411,18 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup) break; } - if (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) { + if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) { req_fec = ", Requested FEC: None"; fec = ", FEC: None"; an = ", Autoneg: False"; - if (vsi->back->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) + if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) an = ", Autoneg: True"; - if (vsi->back->hw.phy.link_info.fec_info & + if (pf->hw.phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) fec = ", FEC: CL74 FC-FEC/BASE-R"; - else if (vsi->back->hw.phy.link_info.fec_info & + else if (pf->hw.phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) fec = ", FEC: CL108 RS-FEC"; -- cgit v1.2.3 From e04ea00217904fc3f6fddac0b74e74e5ac488fda Mon Sep 17 00:00:00 2001 From: Mariusz Stachura Date: Fri, 14 Jul 2017 09:10:19 -0400 Subject: i40e: relax warning message in case of version mismatch Fortville and Fort Park devices are often on different firmware release schedules. This change relaxes the minor version warning message, so it is only displayed for old firmware, Fortville 3 or earlier.
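
In effect the relaxation tightens the "too old" predicate, as the diff below shows. A standalone sketch of the before/after logic; the driver minor of 6 and the helper names are illustrative, while the 1.4 cutoff comes from the diff:

#include <stdbool.h>
#include <stdio.h>

#define FW_API_VERSION_MAJOR 1
#define FW_API_VERSION_MINOR 6 /* illustrative driver expectation */

/* Old check: warn whenever the device minor lags the driver's by more than one. */
static bool old_fw_too_old(unsigned int maj, unsigned int min)
{
	return maj < FW_API_VERSION_MAJOR || min < (FW_API_VERSION_MINOR - 1);
}

/* New check: warn only for firmware with an API older than 1.4
 * (i.e. Fortville 3 or earlier). */
static bool new_fw_too_old(unsigned int maj, unsigned int min)
{
	return maj == 1 && min < 4;
}

int main(void)
{
	/* A device at API 1.4 behind a driver built against 1.6:
	 * the old check warned, the relaxed one stays quiet. */
	printf("old=%d new=%d\n", old_fw_too_old(1, 4), new_fw_too_old(1, 4));
	return 0;
}
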
Signed-off-by: Mariusz Stachura Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 3c650917b54f..a887087d08cd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -11374,8 +11374,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) dev_info(&pdev->dev, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); - else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR || - hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1)) + else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) dev_info(&pdev->dev, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); -- cgit v1.2.3 From 0dc8692e914ac49931d69b5217d5fe0171fc026e Mon Sep 17 00:00:00 2001 From: Mariusz Stachura Date: Fri, 14 Jul 2017 09:27:00 -0400 Subject: i40e: fix for flow director counters not wrapping as expected An errata with GLQF_PCNT causes it to not wrap as expected. This can cause an error in flow director statistics. This patch resets affected counters just after reading. Signed-off-by: Mariusz Stachura Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 35 +++++++++++++++++++---------- 1 file changed, 23 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index a887087d08cd..638f5bad0bd7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -599,6 +599,20 @@ static void i40e_stat_update32(struct i40e_hw *hw, u32 reg, *stat = (u32)((new_data + BIT_ULL(32)) - *offset); } +/** + * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat + * @hw: ptr to the hardware info + * @reg: the hw reg to read and clear + * @stat: ptr to the stat + **/ +static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat) +{ + u32 new_data = rd32(hw, reg); + + wr32(hw, reg, 1); /* must write a nonzero value to clear register */ + *stat += new_data; +} + /** * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters. 
* @vsi: the VSI to be updated @@ -1040,18 +1054,15 @@ static void i40e_update_pf_stats(struct i40e_pf *pf) &osd->rx_jabber, &nsd->rx_jabber); /* FDIR stats */ - i40e_stat_update32(hw, - I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)), - pf->stat_offsets_loaded, - &osd->fd_atr_match, &nsd->fd_atr_match); - i40e_stat_update32(hw, - I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)), - pf->stat_offsets_loaded, - &osd->fd_sb_match, &nsd->fd_sb_match); - i40e_stat_update32(hw, - I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)), - pf->stat_offsets_loaded, - &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match); + i40e_stat_update_and_clear32(hw, + I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)), + &nsd->fd_atr_match); + i40e_stat_update_and_clear32(hw, + I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)), + &nsd->fd_sb_match); + i40e_stat_update_and_clear32(hw, + I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)), + &nsd->fd_atr_tunnel_match); val = rd32(hw, I40E_PRTPM_EEE_STAT); nsd->tx_lpi_status = -- cgit v1.2.3 From 905770fa3e6f30b393829ba1c238554e7f238aee Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Fri, 14 Jul 2017 09:27:01 -0400 Subject: i40evf: lower message level We see this message regularly on VF reset or unload (which invokes a reset). It's essentially meaningless unless it's happening constantly. To prevent consternation, lower the log level to debug so it's not seen under normal circumstances. Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 85876f4fb1fb..2bb0fe00361f 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -52,7 +52,7 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter, err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL); if (err) - dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n", + dev_dbg(&adapter->pdev->dev, "Unable to send opcode %d to PF, err %s, aq_err %s\n", op, i40evf_stat_str(hw, err), i40evf_aq_str(hw, hw->aq.asq_last_status)); return err; -- cgit v1.2.3 From c17401a1dd210a5f22ab1ec7c7366037c158a14c Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 14 Jul 2017 09:27:02 -0400 Subject: i40e: use separate state bit for miscellaneous IRQ setup We currently (mis)use the __I40E_RECOVERY_PENDING bit to determine when we should actually request a new IRQ in i40e_setup_misc_vector(). This led to a design mistake where we open-coded the re-setup of the miscellaneous vector in i40e_resume() instead of using the function provided. If we did not open-code this and instead tried to use the i40e_setup_misc_vector() function, it would lead to never reallocating the IRQ. This would lead to a second i40e_suspend() call failing to free the vector due to a NULL pointer dereference. A future patch is going to re-work how the i40e_suspend() and i40e_resume() flows work to clear all IRQ vectors, which would require us to use i40e_setup_misc_vector() directly. Since during this time the __I40E_RECOVERY_PENDING bit is set, we'll never re-allocate the vector. Rather than leaving the open-coded setup in i40e_resume(), let's just fix the problem properly in i40e_setup_misc_vector().
Introduce a new state bit which indicates when the IRQ has been assigned, which will be set when i40e_setup_misc_vector is first called. This ultimately resolves the issue of re-requesting the vector, without overloading the __I40E_RECOVERY_PENDING state. This ensures that the suspend/resume cycle can use the setup function instead of open-coding the re-request during resume. Additionally, since the only callers of i40e_stop_misc_vector also want to free it, move this code directly into the function to avoid duplication. Due to the new functionality, rename it to i40e_free_misc_vector(). This lets us drop the extra calls to free and re-enable the vector during i40e_suspend() and i40e_resume(). We don't need to call i40e_setup_misc_vector() in i40e_resume() because it gets called by the i40e_rebuild() call. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 1 + drivers/net/ethernet/intel/i40e/i40e_main.c | 39 +++++++++++------------------ 2 files changed, 15 insertions(+), 25 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index d0c1bf5441d8..b7a539cdca00 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -136,6 +136,7 @@ enum i40e_state_t { __I40E_MDD_EVENT_PENDING, __I40E_VFLR_EVENT_PENDING, __I40E_RESET_RECOVERY_PENDING, + __I40E_MISC_IRQ_REQUESTED, __I40E_RESET_INTR_RECEIVED, __I40E_REINIT_REQUESTED, __I40E_PF_RESET_REQUESTED, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 638f5bad0bd7..3ea4f8b942c3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3604,14 +3604,20 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi) } /** - * i40e_stop_misc_vector - Stop the vector that handles non-queue events + * i40e_free_misc_vector - Free the vector that handles non-queue events * @pf: board private structure **/ -static void i40e_stop_misc_vector(struct i40e_pf *pf) +static void i40e_free_misc_vector(struct i40e_pf *pf) { /* Disable ICR 0 */ wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); i40e_flush(&pf->hw); + + if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) { + synchronize_irq(pf->msix_entries[0].vector); + free_irq(pf->msix_entries[0].vector, pf); + clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state); + } } /** @@ -4466,11 +4472,7 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) { int i; - i40e_stop_misc_vector(pf); - if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) { - synchronize_irq(pf->msix_entries[0].vector); - free_irq(pf->msix_entries[0].vector, pf); - } + i40e_free_misc_vector(pf); i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector, I40E_IWARP_IRQ_PILE_ID); @@ -8365,13 +8367,12 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) struct i40e_hw *hw = &pf->hw; int err = 0; - /* Only request the irq if this is the first time through, and - * not when we're rebuilding after a Reset - */ - if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) { + /* Only request the IRQ once, the first time through.
*/ + if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) { err = request_irq(pf->msix_entries[0].vector, i40e_intr, 0, pf->int_name, pf); if (err) { + clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state); dev_info(&pf->pdev->dev, "request_irq for %s failed: %d\n", pf->int_name, err); @@ -12069,11 +12070,8 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); - i40e_stop_misc_vector(pf); - if (pf->msix_entries) { - synchronize_irq(pf->msix_entries[0].vector); - free_irq(pf->msix_entries[0].vector, pf); - } + i40e_free_misc_vector(pf); + retval = pci_save_state(pdev); if (retval) return retval; @@ -12113,15 +12111,6 @@ static int i40e_resume(struct pci_dev *pdev) /* handling the reset will rebuild the device state */ if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) { clear_bit(__I40E_DOWN, pf->state); - if (pf->msix_entries) { - err = request_irq(pf->msix_entries[0].vector, - i40e_intr, 0, pf->int_name, pf); - if (err) { - dev_err(&pf->pdev->dev, - "request_irq for %s failed: %d\n", - pf->int_name, err); - } - } i40e_reset_and_rebuild(pf, false, false); } -- cgit v1.2.3 From 0e5d3da400558b7d30586a2cc1afe02276445636 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 14 Jul 2017 09:27:03 -0400 Subject: i40e: use newer generic PM support instead of legacy PM callbacks Stop using the old legacy PM support, since we now have stable support for the newer generic PM callbacks. This has several advantages. First, we no longer have to manage our own pci_save_state() and power changes, as it's preferred to have the PCI stack do this. Second, these routines get called for both hibernate and suspend to ram, so we can have the driver properly handle all the suspend/resume flows that it needs to. 
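For readers unfamiliar with the generic PM pattern, a minimal sketch of what the conversion looks like for a PCI driver; the my_* names are placeholders, and the real i40e hook bodies are in the diff that follows:

#include <linux/pci.h>
#include <linux/pm.h>

static int my_suspend(struct device *dev)
{
	/* device-specific quiesce only; the PCI core now performs
	 * pci_save_state() and the D-state transition on our behalf
	 */
	return 0;
}

static int my_resume(struct device *dev)
{
	/* device-specific rebuild only; no pci_restore_state() etc. */
	return 0;
}

static SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

static struct pci_driver my_driver = {
	.name = "my_pci_driver",
	.driver = {
		.pm = &my_pm_ops,
	},
};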
Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 54 +++++++++-------------------- 1 file changed, 17 insertions(+), 37 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 3ea4f8b942c3..c82360437024 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -12050,14 +12050,14 @@ static void i40e_shutdown(struct pci_dev *pdev) #ifdef CONFIG_PM /** - * i40e_suspend - PCI callback for moving to D3 - * @pdev: PCI device information struct + * i40e_suspend - PM callback for moving to D3 + * @dev: generic device information structure **/ -static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) +static int i40e_suspend(struct device *dev) { + struct pci_dev *pdev = to_pci_dev(dev); struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_hw *hw = &pf->hw; - int retval = 0; set_bit(__I40E_SUSPENDED, pf->state); set_bit(__I40E_DOWN, pf->state); @@ -12072,41 +12072,17 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) i40e_free_misc_vector(pf); - retval = pci_save_state(pdev); - if (retval) - return retval; - - pci_wake_from_d3(pdev, pf->wol_en); - pci_set_power_state(pdev, PCI_D3hot); - - return retval; + return 0; } /** - * i40e_resume - PCI callback for waking up from D3 - * @pdev: PCI device information struct + * i40e_resume - PM callback for waking up from D3 + * @dev: generic device information structure **/ -static int i40e_resume(struct pci_dev *pdev) +static int i40e_resume(struct device *dev) { + struct pci_dev *pdev = to_pci_dev(dev); struct i40e_pf *pf = pci_get_drvdata(pdev); - u32 err; - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - /* pci_restore_state() clears dev->state_saves, so - * call pci_save_state() again to restore it. - */ - pci_save_state(pdev); - - err = pci_enable_device_mem(pdev); - if (err) { - dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); - return err; - } - pci_set_master(pdev); - - /* no wakeup events while running */ - pci_wake_from_d3(pdev, false); /* handling the reset will rebuild the device state */ if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) { @@ -12117,22 +12093,26 @@ static int i40e_resume(struct pci_dev *pdev) return 0; } -#endif +#endif /* CONFIG_PM */ + static const struct pci_error_handlers i40e_err_handler = { .error_detected = i40e_pci_error_detected, .slot_reset = i40e_pci_error_slot_reset, .resume = i40e_pci_error_resume, }; +static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume); + static struct pci_driver i40e_driver = { .name = i40e_driver_name, .id_table = i40e_pci_tbl, .probe = i40e_probe, .remove = i40e_remove, #ifdef CONFIG_PM - .suspend = i40e_suspend, - .resume = i40e_resume, -#endif + .driver = { + .pm = &i40e_pm_ops, + }, +#endif /* CONFIG_PM */ .shutdown = i40e_shutdown, .err_handler = &i40e_err_handler, .sriov_configure = i40e_pci_sriov_configure, -- cgit v1.2.3 From 401586c2b9bb16147f3dcc64d3596013625e2c44 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 14 Jul 2017 09:27:04 -0400 Subject: i40e: don't clear suspended state until we finish resuming When handling suspend and resume callbacks we want to make sure that (a) we don't suspend again if we're already suspended and (b) we don't resume again if we're already resuming. 
Let's make sure we test_and_set the __I40E_SUSPENDED bit in i40e_suspend which ensures that a suspend call when already suspended will exit early. Additionally, if __I40E_SUSPENDED is not set when we begin resuming, exit early as well. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index c82360437024..494cafde6b26 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -12059,7 +12059,10 @@ static int i40e_suspend(struct device *dev) struct i40e_pf *pf = pci_get_drvdata(pdev); struct i40e_hw *hw = &pf->hw; - set_bit(__I40E_SUSPENDED, pf->state); + /* If we're already suspended, then there is nothing to do */ + if (test_and_set_bit(__I40E_SUSPENDED, pf->state)) + return 0; + set_bit(__I40E_DOWN, pf->state); if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) @@ -12084,11 +12087,15 @@ static int i40e_resume(struct device *dev) struct pci_dev *pdev = to_pci_dev(dev); struct i40e_pf *pf = pci_get_drvdata(pdev); - /* handling the reset will rebuild the device state */ - if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) { - clear_bit(__I40E_DOWN, pf->state); - i40e_reset_and_rebuild(pf, false, false); - } + /* If we're not suspended, then there is nothing to do */ + if (!test_bit(__I40E_SUSPENDED, pf->state)) + return 0; + + clear_bit(__I40E_DOWN, pf->state); + i40e_reset_and_rebuild(pf, false, false); + + /* Clear suspended state last after everything is recovered */ + clear_bit(__I40E_SUSPENDED, pf->state); return 0; } -- cgit v1.2.3 From 5c499228803a77bd4e878c7119fbd40a1dc6d773 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 14 Jul 2017 09:27:05 -0400 Subject: i40e: prevent service task from running while we're suspended Although the service task does check the suspended status before running, it might already be part way through running when we go to suspend. Let's ensure that the service task is stopped and will not be restarted again until we finish resuming. This ensures that service task code does not cause strange interactions with the suspend/resume handlers.
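The quiescing idiom this patch applies is the generic timer-plus-work one; a standalone sketch with hypothetical my_* names, not the actual i40e code:

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

static struct timer_list my_service_timer;
static struct work_struct my_service_task;

static void my_suspend_quiesce(void)
{
	/* stop the periodic trigger first so it cannot requeue the
	 * work, then wait for any in-flight work item to finish
	 */
	del_timer_sync(&my_service_timer);
	cancel_work_sync(&my_service_task);
}

static void my_resume_restart(void)
{
	/* re-arm the periodic trigger only once resume is complete */
	mod_timer(&my_service_timer, round_jiffies(jiffies + HZ));
}

The ordering matters: cancelling the work before stopping the timer would leave a window in which the timer fires and queues the work again.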
Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 494cafde6b26..368373459ad5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -12065,6 +12065,10 @@ static int i40e_suspend(struct device *dev) set_bit(__I40E_DOWN, pf->state); + /* Ensure service task will not be running */ + del_timer_sync(&pf->service_timer); + cancel_work_sync(&pf->service_task); + if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE)) i40e_enable_mc_magic_wake(pf); @@ -12097,6 +12101,10 @@ static int i40e_resume(struct device *dev) /* Clear suspended state last after everything is recovered */ clear_bit(__I40E_SUSPENDED, pf->state); + /* Restart the service task */ + mod_timer(&pf->service_timer, + round_jiffies(jiffies + pf->service_timer_period)); + return 0; } -- cgit v1.2.3 From b980c0634fe56928a45cc3c0f688d96e36705403 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 14 Jul 2017 09:27:06 -0400 Subject: i40e: shutdown all IRQs and disable MSI-X when suspended On some platforms with a large number of CPUs, we will allocate many IRQ vectors. When hibernating, the system will attempt to migrate all of the vectors back to CPU0 when shutting down all the other CPUs. It is possible that we have so many vectors that it cannot re-assign them to CPU0. This is even more likely if we have many devices installed in one platform. The end result is failure to hibernate, as it is not possible to shutdown the CPUs. We can avoid this by disabling MSI-X and clearing our interrupt scheme when the device is suspended. A more ideal solution would be some method for the stack to properly handle this for all drivers, rather than on a case-by-case basis for each driver to fix itself. However, until this more ideal solution exists, we can do our part and shutdown our IRQs during suspend, which should allow systems with a large number of CPUs to safely suspend or hibernate. It may be worth investigating if we should shut down even further when we suspend as it may make the path cleaner, but this was the minimum fix for the hibernation issue mentioned here. Testing-hints: This affects systems with a large number of CPUs, and with multiple devices enabled. Without this change, those platforms are unable to hibernate at all. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 68 ++++++++++++++++++++++++- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 2 +- 2 files changed, 68 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 368373459ad5..8a44793d5390 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8354,6 +8354,57 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) return 0; } +#ifdef CONFIG_PM +/** + * i40e_restore_interrupt_scheme - Restore the interrupt scheme + * @pf: private board data structure + * + * Restore the interrupt scheme that was cleared when we suspended the + * device. This should be called during resume to re-allocate the q_vectors + * and reacquire IRQs. 
+ */ +static int i40e_restore_interrupt_scheme(struct i40e_pf *pf) +{ + int err, i; + + /* We cleared the MSI and MSI-X flags when disabling the old interrupt + * scheme. We need to re-enabled them here in order to attempt to + * re-acquire the MSI or MSI-X vectors + */ + pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); + + err = i40e_init_interrupt_scheme(pf); + if (err) + return err; + + /* Now that we've re-acquired IRQs, we need to remap the vectors and + * rings together again. + */ + for (i = 0; i < pf->num_alloc_vsi; i++) { + if (pf->vsi[i]) { + err = i40e_vsi_alloc_q_vectors(pf->vsi[i]); + if (err) + goto err_unwind; + i40e_vsi_map_rings_to_vectors(pf->vsi[i]); + } + } + + err = i40e_setup_misc_vector(pf); + if (err) + goto err_unwind; + + return 0; + +err_unwind: + while (i--) { + if (pf->vsi[i]) + i40e_vsi_free_q_vectors(pf->vsi[i]); + } + + return err; +} +#endif /* CONFIG_PM */ + /** * i40e_setup_misc_vector - Setup the misc vector to handle non queue events * @pf: board private structure @@ -12077,7 +12128,12 @@ static int i40e_suspend(struct device *dev) wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); - i40e_free_misc_vector(pf); + /* Clear the interrupt scheme and release our IRQs so that the system + * can safely hibernate even when there are a large number of CPUs. + * Otherwise hibernation might fail when mapping all the vectors back + * to CPU0. + */ + i40e_clear_interrupt_scheme(pf); return 0; } @@ -12090,11 +12146,21 @@ static int i40e_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct i40e_pf *pf = pci_get_drvdata(pdev); + int err; /* If we're not suspended, then there is nothing to do */ if (!test_bit(__I40E_SUSPENDED, pf->state)) return 0; + /* We cleared the interrupt scheme when we suspended, so we need to + * restore it now to resume device functionality. + */ + err = i40e_restore_interrupt_scheme(pf); + if (err) { + dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n", + err); + } + clear_bit(__I40E_DOWN, pf->state); i40e_reset_and_rebuild(pf, false, false); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index c243f9da95ae..80ade6510279 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -46,7 +46,7 @@ static const char i40evf_driver_string[] = #define DRV_VERSION_MAJOR 3 #define DRV_VERSION_MINOR 0 -#define DRV_VERSION_BUILD 0 +#define DRV_VERSION_BUILD 1 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) \ -- cgit v1.2.3 From c97fc9b6a798f4253c176231ba0aceda6b59b058 Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Fri, 14 Jul 2017 09:27:07 -0400 Subject: i40evf: fix ring to vector mapping The current implementation for mapping queues to vectors is broken because it attempts to map each Tx and Rx ring to its own vector, however we use combined queues so we should actually be mapping the Tx/Rx rings together on one vector. Also in the current implementation, in the case where we have more queues than vectors, we attempt to group the queues together into 'chunks' and map each 'chunk' of queues to a vector. 
Chunking them together would be more ideal if, and only if, we only had RSS, because of the way the hashing algorithm works; but with a future patch that enables VF ADq in mind, round-robin assignment is better and still works with RSS. This patch resolves both those issues and simplifies the code needed to accomplish this. Instead of treating the case where we have more queues than vectors as special, if our vector index reaches the number of vectors, reset the vector index to zero and continue mapping. This should ensure that in both cases, whether we have enough vectors for each queue or not, the queues get appropriately mapped. Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 48 ++++++------------------- 1 file changed, 10 insertions(+), 38 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 80ade6510279..69ef6c1d5364 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -432,52 +432,24 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) **/ static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) { + int rings_remaining = adapter->num_active_queues; + int ridx = 0, vidx = 0; int q_vectors; - int v_start = 0; - int rxr_idx = 0, txr_idx = 0; - int rxr_remaining = adapter->num_active_queues; - int txr_remaining = adapter->num_active_queues; - int i, j; - int rqpv, tqpv; int err = 0; q_vectors = adapter->num_msix_vectors - NONQ_VECS; - /* The ideal configuration... - * We have enough vectors to map one per queue. - */ - if (q_vectors >= (rxr_remaining * 2)) { - for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) - i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx); + for (; ridx < rings_remaining; ridx++) { + i40evf_map_vector_to_rxq(adapter, vidx, ridx); + i40evf_map_vector_to_txq(adapter, vidx, ridx); - for (; txr_idx < txr_remaining; v_start++, txr_idx++) - i40evf_map_vector_to_txq(adapter, v_start, txr_idx); - goto out; - } - - /* If we don't have enough vectors for a 1-to-1 - * mapping, we'll have to group them so there are - * multiple queues per vector. - * Re-adjusting *qpv takes care of the remainder. - */ - for (i = v_start; i < q_vectors; i++) { - rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); - for (j = 0; j < rqpv; j++) { - i40evf_map_vector_to_rxq(adapter, i, rxr_idx); - rxr_idx++; - rxr_remaining--; - } - } - for (i = v_start; i < q_vectors; i++) { - tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); - for (j = 0; j < tqpv; j++) { - i40evf_map_vector_to_txq(adapter, i, txr_idx); - txr_idx++; - txr_remaining--; - } + /* In the case where we have more queues than vectors, continue + * round-robin on vectors until all queues are mapped. + */ + if (++vidx >= q_vectors) + vidx = 0; } -out: adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; return err; -- cgit v1.2.3 From a3f5aa907340b5d7b54223ddbaa90410f168864d Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Fri, 14 Jul 2017 09:27:08 -0400 Subject: i40e: Enable VF to negotiate number of allocated queues Currently the PF allocates a default number of queues for each VF, and this number cannot be changed. This patch enables the VF to request a different number of queues allocated to it. This patch also adds a new virtchnl op and capability flag to facilitate this negotiation.
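For illustration, the VF side of the negotiation could look roughly like this, using the struct and opcode added by this patch. The send helper is shown schematically with the shape of the driver's existing i40evf_send_pf_msg(), and error handling is omitted:

/* sketch: VF asks the PF for a different number of queue pairs */
static int my_vf_request_queues(struct i40evf_adapter *adapter, u16 num)
{
	struct virtchnl_vf_res_request vfres = {
		.num_queue_pairs = num,
	};

	/* queued on the VF's admin-queue/mailbox path to the PF; the
	 * reply carries the granted count, which takes effect after
	 * the VF resets
	 */
	return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES,
				  (u8 *)&vfres, sizeof(vfres));
}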
After the PF receives a request message, it will set a requested number of queues for that VF. Then when the VF resets, its VSI will get a new number of queues allocated to it. This is a best-effort request: since we only allocate a guaranteed default number, if the VF asks for more than the guaranteed number, there may not be enough in HW to accommodate it unless queues for other VFs are freed. It should also be noted that decreasing the number of queues allocated to a VF to below the default will NOT enable the allocation of more than 32 VFs per PF and will not free queues guaranteed to each VF by default. Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 1 + drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 75 ++++++++++++++++++++++ drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 1 + include/linux/avf/virtchnl.h | 20 ++++++ 4 files changed, 97 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index b7a539cdca00..439c63cb2a0c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -77,6 +77,7 @@ #define i40e_default_queues_per_vmdq(pf) \ (((pf)->hw_features & I40E_HW_RSS_AQ_CAPABLE) ? 4 : 1) #define I40E_DEFAULT_QUEUES_PER_VF 4 +#define I40E_MAX_VF_QUEUES 16 #define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */ #define i40e_pf_get_max_q_per_tc(pf) \ (((pf)->hw_features & I40E_HW_128_QP_RSS_CAPABLE) ? 128 : 64) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 4d1e670f490e..a75396c157d9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -815,6 +815,14 @@ static void i40e_free_vf_res(struct i40e_vf *vf) */ clear_bit(I40E_VF_STATE_INIT, &vf->vf_states); + /* It's possible the VF had requeuested more queues than the default so + * do the accounting here when we're about to free them. + */ + if (vf->num_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) { + pf->queues_left += vf->num_queue_pairs - + I40E_DEFAULT_QUEUES_PER_VF; + } + /* free vsi & disconnect it from the parent uplink */ if (vf->lan_vsi_idx) { i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]); @@ -868,12 +876,27 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf) int total_queue_pairs = 0; int ret; + if (vf->num_req_queues && + vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF) + pf->num_vf_qps = vf->num_req_queues; + else + pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; + /* allocate hw vsi context & associated resources */ ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV); if (ret) goto error_alloc; total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; + /* We account for each VF to get a default number of queue pairs. If + * the VF has now requested more, we need to account for that to make + * certain we never request more queues than we actually have left in + * HW.
+ */ + if (total_queue_pairs > I40E_DEFAULT_QUEUES_PER_VF) + pf->queues_left -= + total_queue_pairs - I40E_DEFAULT_QUEUES_PER_VF; + if (vf->trusted) set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps); else @@ -1579,6 +1602,9 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; } + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; + vfres->num_vsis = num_vsis; vfres->num_queue_pairs = vf->num_queue_pairs; vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; @@ -1986,6 +2012,52 @@ error_param: aq_ret); } +/** + * i40e_vc_request_queues_msg + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer + * @msglen: msg length + * + * VFs get a default number of queues but can use this message to request a + * different number. Will respond with either the number requested or the + * maximum we can support. + **/ +static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen) +{ + struct virtchnl_vf_res_request *vfres = + (struct virtchnl_vf_res_request *)msg; + int req_pairs = vfres->num_queue_pairs; + int cur_pairs = vf->num_queue_pairs; + struct i40e_pf *pf = vf->pf; + + if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) + return -EINVAL; + + if (req_pairs <= 0) { + dev_err(&pf->pdev->dev, + "VF %d tried to request %d queues. Ignoring.\n", + vf->vf_id, req_pairs); + } else if (req_pairs > I40E_MAX_VF_QUEUES) { + dev_err(&pf->pdev->dev, + "VF %d tried to request more than %d queues.\n", + vf->vf_id, + I40E_MAX_VF_QUEUES); + vfres->num_queue_pairs = I40E_MAX_VF_QUEUES; + } else if (req_pairs - cur_pairs > pf->queues_left) { + dev_warn(&pf->pdev->dev, + "VF %d requested %d more queues, but only %d left.\n", + vf->vf_id, + req_pairs - cur_pairs, + pf->queues_left); + vfres->num_queue_pairs = pf->queues_left + cur_pairs; + } else { + vf->num_req_queues = req_pairs; + } + + return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0, + (u8 *)vfres, sizeof(vfres)); +} + /** * i40e_vc_get_stats_msg * @vf: pointer to the VF info @@ -2708,6 +2780,9 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: ret = i40e_vc_disable_vlan_stripping(vf, msg, msglen); break; + case VIRTCHNL_OP_REQUEST_QUEUES: + ret = i40e_vc_request_queues_msg(vf, msg, msglen); + break; case VIRTCHNL_OP_UNKNOWN: default: diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 1f4b0c504368..5111d05d5f2f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -97,6 +97,7 @@ struct i40e_vf { u16 lan_vsi_id; /* ID as used by firmware */ u8 num_queue_pairs; /* num of qps assigned to VF vsis */ + u8 num_req_queues; /* num of requested qps */ u64 num_mdd_events; /* num of mdd events detected */ /* num of continuous malformed or invalid msgs detected */ u64 num_invalid_msgs; diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index 2b038442c352..60e5d90cb18a 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -135,6 +135,7 @@ enum virtchnl_ops { VIRTCHNL_OP_SET_RSS_HENA = 26, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING = 27, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING = 28, + VIRTCHNL_OP_REQUEST_QUEUES = 29, }; /* This macro is used to generate a compilation error if a structure @@ -235,6 +236,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); #define 
VIRTCHNL_VF_OFFLOAD_RSS_AQ 0x00000008 #define VIRTCHNL_VF_OFFLOAD_RSS_REG 0x00000010 #define VIRTCHNL_VF_OFFLOAD_WB_ON_ITR 0x00000020 +#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES 0x00000040 #define VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 #define VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 #define VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 @@ -325,6 +327,21 @@ struct virtchnl_vsi_queue_config_info { struct virtchnl_queue_pair_info qpair[1]; }; +/* VIRTCHNL_OP_REQUEST_QUEUES + * VF sends this message to request the PF to allocate additional queues to + * this VF. Each VF gets a guaranteed number of queues on init but asking for + * additional queues must be negotiated. This is a best effort request as it + * is possible the PF does not have enough queues left to support the request. + * If the PF cannot support the number requested it will respond with the + * maximum number it is able to support; otherwise it will respond with the + * number requested. + */ + +/* VF resource request */ +struct virtchnl_vf_res_request { + u16 num_queue_pairs; +}; + VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_vsi_queue_config_info); /* VIRTCHNL_OP_CONFIG_IRQ_MAP @@ -691,6 +708,9 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING: break; + case VIRTCHNL_OP_REQUEST_QUEUES: + valid_len = sizeof(struct virtchnl_vf_res_request); + break; /* These are always errors coming from the VF. */ case VIRTCHNL_OP_EVENT: case VIRTCHNL_OP_UNKNOWN: -- cgit v1.2.3 From 22b96551f213d7e7d743442c923c266a10306b9b Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Fri, 14 Jul 2017 09:27:09 -0400 Subject: i40e: refactor FW version checking The i40e driver now supports two different devices with two different firmware versions. So be smart about how we handle these. Move the FW version macros to the appropriate header file, and add a convenience macro that checks the version based on the device. Then use this macro to check whether or not the driver can use the new link info API. Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 10 +++++++++- drivers/net/ethernet/intel/i40e/i40e_common.c | 6 ++++-- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 10 +++++++++- 4 files changed, 23 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 5d5f422cbae5..e2a9ec80a623 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -34,7 +34,15 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0005 +#define I40E_FW_API_VERSION_MINOR_X722 0x0005 +#define I40E_FW_API_VERSION_MINOR_X710 0x0007 + +#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? 
\ + I40E_FW_API_VERSION_MINOR_X710 : \ + I40E_FW_API_VERSION_MINOR_X722) + +/* API version 1.7 implements additional link and PHY-specific APIs */ +#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007 struct i40e_aq_desc { __le16 flags; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 111426ba5fbc..7346d8850c8e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1593,8 +1593,10 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw, status = I40E_ERR_UNKNOWN_PHY; if (report_init) { - hw->phy.phy_types = le32_to_cpu(abilities->phy_type); - hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32); + if (hw->mac.type == I40E_MAC_XL710 && + hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && + hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) + status = i40e_aq_get_link_info(hw, true, NULL, NULL); } return status; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 8a44793d5390..47f71d7c3ae0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -11434,7 +11434,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i40e_nvm_version_str(hw)); if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && - hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) + hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) dev_info(&pdev->dev, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n"); else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index 83e63e55c4b4..f9f48d1900b0 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -34,7 +34,15 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0005 +#define I40E_FW_API_VERSION_MINOR_X722 0x0005 +#define I40E_FW_API_VERSION_MINOR_X710 0x0007 + +#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \ + I40E_FW_API_VERSION_MINOR_X710 : \ + I40E_FW_API_VERSION_MINOR_X722) + +/* API version 1.7 implements additional link and PHY-specific APIs */ +#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007 struct i40e_aq_desc { __le16 flags; -- cgit v1.2.3 From e876a8a7e9dd89dc88c12ca2e81beb478dbe9897 Mon Sep 17 00:00:00 2001 From: Mick Tarsel Date: Thu, 28 Sep 2017 13:53:18 -0700 Subject: ibmvnic: Set state UP State is initially reported as UNKNOWN. Before registering the netdev, call netif_carrier_off(). Once the device is opened, call netif_carrier_on() in order to set the state to UP. Signed-off-by: Mick Tarsel Signed-off-by: David S.
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index cb8182f4fdfa..4bc14a901571 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -927,6 +927,7 @@ static int ibmvnic_open(struct net_device *netdev) } rc = __ibmvnic_open(netdev); + netif_carrier_on(netdev); mutex_unlock(&adapter->reset_lock); return rc; @@ -3899,6 +3900,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) if (rc) goto ibmvnic_init_fail; + netif_carrier_off(netdev); rc = register_netdev(netdev); if (rc) { dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); -- cgit v1.2.3 From 075cfdd659cb1e86f948f11ba577f27706f0756e Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 29 Sep 2017 20:51:23 +0100 Subject: net: hns3: fix null pointer dereference before null check pointer ndev is being dereferenced with the call to netif_running before it is being null checked. Re-order the code to only dereference ndev after it has been null checked. Detected by CoverityScan, CID#1457206 ("Dereference before null check") Fixes: 9df8f79a4d29 ("net: hns3: Add DCB support when interacting with network stack") Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 4a0890f98b70..c31506514e5d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -2865,7 +2865,7 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct net_device *ndev = kinfo->netdev; - bool if_running = netif_running(ndev); + bool if_running; int ret; u8 i; @@ -2875,6 +2875,8 @@ static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) if (!ndev) return -ENODEV; + if_running = netif_running(ndev); + ret = netdev_set_num_tc(ndev, tc); if (ret) return ret; -- cgit v1.2.3 From 9c86b846ce02f7e35d7234cf090b80553eba5389 Mon Sep 17 00:00:00 2001 From: Jonas Gorski Date: Sun, 1 Oct 2017 13:02:15 +0200 Subject: bcm63xx_enet: correct clock usage Check the return code of prepare_enable and change one last instance of enable only to prepare_enable. Also properly disable and release the clock in error paths and on remove for enetsw. Signed-off-by: Jonas Gorski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 31 +++++++++++++++++++++------- 1 file changed, 23 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index c6221f04a748..a45ec97b5b1e 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1773,7 +1773,9 @@ static int bcm_enet_probe(struct platform_device *pdev) ret = PTR_ERR(priv->mac_clk); goto out; } - clk_prepare_enable(priv->mac_clk); + ret = clk_prepare_enable(priv->mac_clk); + if (ret) + goto out_put_clk_mac; /* initialize default and fetch platform data */ priv->rx_ring_size = BCMENET_DEF_RX_DESC; @@ -1805,9 +1807,11 @@ static int bcm_enet_probe(struct platform_device *pdev) if (IS_ERR(priv->phy_clk)) { ret = PTR_ERR(priv->phy_clk); priv->phy_clk = NULL; - goto out_put_clk_mac; + goto out_disable_clk_mac; } - clk_prepare_enable(priv->phy_clk); + ret = clk_prepare_enable(priv->phy_clk); + if (ret) + goto out_put_clk_phy; } /* do minimal hardware init to be able to probe mii bus */ @@ -1900,13 +1904,16 @@ out_free_mdio: out_uninit_hw: /* turn off mdc clock */ enet_writel(priv, 0, ENET_MIISC_REG); - if (priv->phy_clk) { + if (priv->phy_clk) clk_disable_unprepare(priv->phy_clk); + +out_put_clk_phy: + if (priv->phy_clk) clk_put(priv->phy_clk); - } -out_put_clk_mac: +out_disable_clk_mac: clk_disable_unprepare(priv->mac_clk); +out_put_clk_mac: clk_put(priv->mac_clk); out: free_netdev(dev); @@ -2748,7 +2755,9 @@ static int bcm_enetsw_probe(struct platform_device *pdev) ret = PTR_ERR(priv->mac_clk); goto out_unmap; } - clk_enable(priv->mac_clk); + ret = clk_prepare_enable(priv->mac_clk); + if (ret) + goto out_put_clk; priv->rx_chan = 0; priv->tx_chan = 1; @@ -2769,7 +2778,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev) ret = register_netdev(dev); if (ret) - goto out_put_clk; + goto out_disable_clk; netif_carrier_off(dev); platform_set_drvdata(pdev, dev); @@ -2778,6 +2787,9 @@ static int bcm_enetsw_probe(struct platform_device *pdev) return 0; +out_disable_clk: + clk_disable_unprepare(priv->mac_clk); + out_put_clk: clk_put(priv->mac_clk); @@ -2809,6 +2821,9 @@ static int bcm_enetsw_remove(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); + clk_disable_unprepare(priv->mac_clk); + clk_put(priv->mac_clk); + free_netdev(dev); return 0; } -- cgit v1.2.3 From d6213c1f2ad54a964b77471690264ed685718928 Mon Sep 17 00:00:00 2001 From: Jonas Gorski Date: Sun, 1 Oct 2017 13:02:16 +0200 Subject: bcm63xx_enet: do not write to random DMA channel on BCM6345 The DMA controller regs actually point to DMA channel 0, so the write to ENETDMA_CFG_REG will actually modify a random DMA channel. Since DMA controller registers do not exist on BCM6345, guard the write with the usual check for dma_has_sram. Signed-off-by: Jonas Gorski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index a45ec97b5b1e..a1e1e12e187a 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1062,7 +1062,8 @@ static int bcm_enet_open(struct net_device *dev) val = enet_readl(priv, ENET_CTL_REG); val |= ENET_CTL_ENABLE_MASK; enet_writel(priv, val, ENET_CTL_REG); - enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); + if (priv->dma_has_sram) + enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); enet_dmac_writel(priv, priv->dma_chan_en_mask, ENETDMAC_CHANCFG, priv->rx_chan); -- cgit v1.2.3 From 527a48713b01057d94aeec8f4383b1e20c82522c Mon Sep 17 00:00:00 2001 From: Jonas Gorski Date: Sun, 1 Oct 2017 13:02:17 +0200 Subject: bcm63xx_enet: do not rely on probe order Do not rely on the shared device being probed before the enet(sw) devices. This makes it easier to eventually move out the shared device as a dma controller driver (what it should be). Signed-off-by: Jonas Gorski Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index a1e1e12e187a..8caf6abab3a6 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1722,10 +1722,8 @@ static int bcm_enet_probe(struct platform_device *pdev) const char *clk_name; int i, ret; - /* stop if shared driver failed, assume driver->probe will be - * called in the same order we register devices (correct ?) */ if (!bcm_enet_shared_base[0]) - return -ENODEV; + return -EPROBE_DEFER; res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1); @@ -2696,11 +2694,8 @@ static int bcm_enetsw_probe(struct platform_device *pdev) struct resource *res_mem; int ret, irq_rx, irq_tx; - /* stop if shared driver failed, assume driver->probe will be - * called in the same order we register devices (correct ?) - */ if (!bcm_enet_shared_base[0]) - return -ENODEV; + return -EPROBE_DEFER; res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq_rx = platform_get_irq(pdev, 0); -- cgit v1.2.3 From 7e697ce99ceb09538cdc1dfa9ebb3db60236b0a7 Mon Sep 17 00:00:00 2001 From: Jonas Gorski Date: Sun, 1 Oct 2017 13:02:18 +0200 Subject: bcm63xx_enet: use managed functions for clock/ioremap Use managed functions where possible to reduce the amount of resource handling on error and remove paths. Signed-off-by: Jonas Gorski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 54 +++++++--------------------- 1 file changed, 12 insertions(+), 42 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 8caf6abab3a6..059ef4f1d137 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1767,14 +1767,14 @@ static int bcm_enet_probe(struct platform_device *pdev) clk_name = "enet1"; } - priv->mac_clk = clk_get(&pdev->dev, clk_name); + priv->mac_clk = devm_clk_get(&pdev->dev, clk_name); if (IS_ERR(priv->mac_clk)) { ret = PTR_ERR(priv->mac_clk); goto out; } ret = clk_prepare_enable(priv->mac_clk); if (ret) - goto out_put_clk_mac; + goto out; /* initialize default and fetch platform data */ priv->rx_ring_size = BCMENET_DEF_RX_DESC; @@ -1802,7 +1802,7 @@ static int bcm_enet_probe(struct platform_device *pdev) if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) { /* using internal PHY, enable clock */ - priv->phy_clk = clk_get(&pdev->dev, "ephy"); + priv->phy_clk = devm_clk_get(&pdev->dev, "ephy"); if (IS_ERR(priv->phy_clk)) { ret = PTR_ERR(priv->phy_clk); priv->phy_clk = NULL; @@ -1810,7 +1810,7 @@ static int bcm_enet_probe(struct platform_device *pdev) } ret = clk_prepare_enable(priv->phy_clk); if (ret) - goto out_put_clk_phy; + goto out_disable_clk_mac; } /* do minimal hardware init to be able to probe mii bus */ @@ -1906,14 +1906,8 @@ out_uninit_hw: if (priv->phy_clk) clk_disable_unprepare(priv->phy_clk); -out_put_clk_phy: - if (priv->phy_clk) - clk_put(priv->phy_clk); - out_disable_clk_mac: clk_disable_unprepare(priv->mac_clk); -out_put_clk_mac: - clk_put(priv->mac_clk); out: free_netdev(dev); return ret; @@ -1949,12 +1943,10 @@ static int bcm_enet_remove(struct platform_device *pdev) } /* disable hw block clocks */ - if (priv->phy_clk) { + if (priv->phy_clk) clk_disable_unprepare(priv->phy_clk); - clk_put(priv->phy_clk); - } + clk_disable_unprepare(priv->mac_clk); - clk_put(priv->mac_clk); free_netdev(dev); return 0; @@ -2734,26 +2726,20 @@ static int bcm_enetsw_probe(struct platform_device *pdev) if (ret) goto out; - if (!request_mem_region(res_mem->start, resource_size(res_mem), - "bcm63xx_enetsw")) { - ret = -EBUSY; + priv->base = devm_ioremap_resource(&pdev->dev, res_mem); + if (IS_ERR(priv->base)) { + ret = PTR_ERR(priv->base); goto out; } - priv->base = ioremap(res_mem->start, resource_size(res_mem)); - if (priv->base == NULL) { - ret = -ENOMEM; - goto out_release_mem; - } - - priv->mac_clk = clk_get(&pdev->dev, "enetsw"); + priv->mac_clk = devm_clk_get(&pdev->dev, "enetsw"); if (IS_ERR(priv->mac_clk)) { ret = PTR_ERR(priv->mac_clk); - goto out_unmap; + goto out; } ret = clk_prepare_enable(priv->mac_clk); if (ret) - goto out_put_clk; + goto out; priv->rx_chan = 0; priv->tx_chan = 1; @@ -2785,15 +2771,6 @@ static int bcm_enetsw_probe(struct platform_device *pdev) out_disable_clk: clk_disable_unprepare(priv->mac_clk); - -out_put_clk: - clk_put(priv->mac_clk); - -out_unmap: - iounmap(priv->base); - -out_release_mem: - release_mem_region(res_mem->start, resource_size(res_mem)); out: free_netdev(dev); return ret; @@ -2805,20 +2782,13 @@ static int bcm_enetsw_remove(struct platform_device *pdev) { struct bcm_enet_priv *priv; struct net_device *dev; - struct resource *res; /* stop netdevice */ dev = platform_get_drvdata(pdev); priv = netdev_priv(dev); unregister_netdev(dev); - /* release device resources */ - iounmap(priv->base); - res = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); clk_disable_unprepare(priv->mac_clk); free_netdev(dev); return 0; -- cgit v1.2.3 From 4e78e5c5d881bf2d6267545a554c1baf245257b7 Mon Sep 17 00:00:00 2001 From: Jonas Gorski Date: Sun, 1 Oct 2017 13:02:19 +0200 Subject: bcm63xx_enet: drop unneeded NULL phy_clk check clk_disable and clk_unprepare are NULL-safe, so there is no need to duplicate the NULL check before calling them. Signed-off-by: Jonas Gorski Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 059ef4f1d137..f6bc13fe8a99 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1903,8 +1903,7 @@ out_free_mdio: out_uninit_hw: /* turn off mdc clock */ enet_writel(priv, 0, ENET_MIISC_REG); - if (priv->phy_clk) - clk_disable_unprepare(priv->phy_clk); + clk_disable_unprepare(priv->phy_clk); out_disable_clk_mac: clk_disable_unprepare(priv->mac_clk); @@ -1943,9 +1942,7 @@ static int bcm_enet_remove(struct platform_device *pdev) } /* disable hw block clocks */ - if (priv->phy_clk) - clk_disable_unprepare(priv->phy_clk); - + clk_disable_unprepare(priv->phy_clk); clk_disable_unprepare(priv->mac_clk); free_netdev(dev); return 0; -- cgit v1.2.3 From 840f922317fb5c20841d6d7f3853ead506546ade Mon Sep 17 00:00:00 2001 From: Jonas Gorski Date: Sun, 1 Oct 2017 13:02:20 +0200 Subject: bcm63xx_enet: remove unneeded include We don't use anything from that file, so drop it. Signed-off-by: Jonas Gorski Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bcm63xx_enet.h | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h index 0a1b7b2e55bd..dd6ae3077433 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h @@ -8,7 +8,6 @@ #include #include -#include #include #include -- cgit v1.2.3 From 45bfbc013b4294cadafbef821d377d3a99c7ab1e Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sun, 1 Oct 2017 17:27:35 +0100 Subject: mlxsw: spectrum: fix uninitialized value in err In the unlikely event that mfc->mfc_un.res.ttls[i] is 255 for all values of i from 0 to MAXVIFS-1, the err is not set at all and hence has a garbage value on the error return at the end of the function, so initialize it to 0. Also, the error check on err and the goto to err: inside the for loop make it impossible for err to be non-zero at the end of the for loop, so we can remove the redundant err check at the end of the loop. Detected by CoverityScan CID#1457207 ("Uninitialized scalar value") Fixes: c011ec1bbfd6 ("mlxsw: spectrum: Add the multicast routing offloading logic") Signed-off-by: Colin Ian King Reviewed-by: Yotam Gigi Signed-off-by: David S.
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c index 09120259a45d..4aaf6ca1be7c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c @@ -349,7 +349,7 @@ mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table, { struct mlxsw_sp_mr_route_vif_entry *rve, *tmp; struct mlxsw_sp_mr_route *mr_route; - int err; + int err = 0; int i; /* Allocate and init a new route and fill it with parameters */ @@ -376,8 +376,6 @@ mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table, } } mlxsw_sp_mr_route_ivif_link(mr_route, &mr_table->vifs[mfc->mfc_parent]); - if (err) - goto err; mr_route->route_action = mlxsw_sp_mr_route_action(mr_route); return mr_route; -- cgit v1.2.3 From 17a91809942ca32c70026d2d5ba3348a2c4fdf8f Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 2 Oct 2017 07:17:50 -0700 Subject: fm10k: ensure we process SM mbx when processing VF mbx When we process VF mailboxes, the driver is likely going to also queue up messages to the switch manager. This process merely queues up the FIFO, but doesn't actually begin the transmission process. Because we hold the mailbox lock during this VF processing, the PF<->SM mailbox is not getting processed at this time. Ensure that we actually process the PF<->SM mailbox in between each PF<->VF mailbox. This should ensure prompt transmission of the messages queued up after each VF message is received and handled. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index 5f4dac0d36ef..2ec49116fe91 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -126,6 +126,9 @@ process_mbx: struct fm10k_mbx_info *mbx = &vf_info->mbx; u16 glort = vf_info->glort; + /* process the SM mailbox first to drain outgoing messages */ + hw->mbx.ops.process(hw, &hw->mbx); + /* verify port mapping is valid, if not reset port */ if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort)) hw->iov.ops.reset_lport(hw, vf_info); -- cgit v1.2.3 From b52b7f7059f2df8eb3258a25bc69e12dc21ebcd7 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 8 Mar 2017 15:55:43 -0800 Subject: fm10k: reschedule service event if we stall the PF<->SM mailbox When we are handling PF<->VF mailbox messages, it is possible that the VF will send us so many messages that the PF<->SM FIFO will fill up. In this case, we stop the loop and wait until the service event is rescheduled. Normally this should happen due to an interrupt. But it is possible that we don't get another interrupt for a while, in which case we won't run again until the service timer fires and reschedules us. Instead, simply reschedule immediately which will cause the service event to be run again as soon as we exit. This ensures that we promptly handle all of the PF<->VF messages with minimal delay, while still giving time for the SM mailbox to drain.
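The rescheduling trick relies on standard workqueue semantics; a minimal sketch with hypothetical my_* names, not the actual fm10k code:

#include <linux/workqueue.h>

static struct work_struct my_service_task;

static void my_service_event_schedule(void)
{
	/* schedule_work() will not double-queue a still-pending item,
	 * but once the handler starts running its PENDING bit is
	 * clear, so calling this from inside the handler re-arms it
	 * for one more pass as soon as the current pass returns
	 */
	schedule_work(&my_service_task);
}

static void my_service_task_fn(struct work_struct *work)
{
	bool mbx_backlog = true;	/* e.g. SM mailbox FIFO still full */

	if (mbx_backlog)
		my_service_event_schedule();
}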
Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index 2ec49116fe91..d8356c494f06 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -143,6 +143,10 @@ process_mbx: if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) { /* keep track of how many times this occurs */ interface->hw_sm_mbx_full++; + + /* make sure we try again momentarily */ + fm10k_service_event_schedule(interface); + break; } -- cgit v1.2.3 From 95f49d4bdee34dd0f68446bb260ab537f62ed9b3 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Mon, 8 May 2017 18:18:09 +0200 Subject: fm10k: Use seq_putc() in fm10k_dbg_desc_break() Two single characters should be put into a sequence. Thus use the corresponding function "seq_putc". This issue was detected by using the Coccinelle software. Signed-off-by: Markus Elfring Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c index 5116fd043630..14df09e2d964 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c @@ -52,9 +52,9 @@ static void fm10k_dbg_desc_seq_stop(struct seq_file __always_unused *s, static void fm10k_dbg_desc_break(struct seq_file *s, int i) { while (i--) - seq_puts(s, "-"); + seq_putc(s, '-'); - seq_puts(s, "\n"); + seq_putc(s, '\n'); } static int fm10k_dbg_tx_desc_seq_show(struct seq_file *s, void *v) -- cgit v1.2.3 From 5c66d1251d67714e9f6e6b0af18ca989109b876f Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 10 Jul 2017 13:23:04 -0700 Subject: fm10k: stop spurious link down messages when Tx FIFO is full In fm10k_get_host_state_generic, we check the mailbox tx_ready() function to ensure that the mailbox is still open. This function also checks to make sure we have space to transmit another message. Unfortunately, if we just recently sent a bunch of messages (such as enabling hundreds of VLANs on a VF) this can result in a race where the watchdog task thinks the link went down just because we haven't had time to process all these messages yet. Instead, let's just check whether the mailbox is still open. This ensures that we don't race with the Tx FIFO, and we only link down once the mailbox is not open. This is safe, because if the FIFO fills up and we're unable to send a message for too long, we'll end up triggering the timeout detection which results in a reset. Additionally, since we still check to ensure the mailbox state is OPEN, we'll transition to link down whenever the mailbox closes as well.
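The distinction at the heart of this change can be sketched as follows; illustrative only, with a hypothetical helper name around the fm10k mailbox's real state enum:

/* a full Tx FIFO (!tx_ready()) can be a transient condition; only a
 * closed mailbox means the host is really unreachable
 */
static bool my_host_mbx_usable(struct fm10k_mbx_info *mbx)
{
	return mbx->state == FM10K_STATE_OPEN;
}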
Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_common.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c index 62a6ad9b3eed..736a9f087bc9 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c @@ -1,5 +1,5 @@ /* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. + * Copyright(c) 2013 - 2017 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -517,8 +517,8 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready) goto out; } - /* verify Mailbox is still valid */ - if (!mbx->ops.tx_ready(mbx, FM10K_VFMBX_MSG_MTU)) + /* verify Mailbox is still open */ + if (mbx->state != FM10K_STATE_OPEN) goto out; /* interface cannot receive traffic without logical ports */ -- cgit v1.2.3 From 375ce90eab7ee1c87eefa2cd312b0be9ac961082 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 10 Jul 2017 13:23:05 -0700 Subject: fm10k: fix typos on fall through comments Newer versions of GCC since version 7 now warn when a case statement may fall through without an explicit comment. "Fallthough" does not count as it is misspelled. Fix the typos for these comments to appease the new warnings. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 4 ++-- drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c index 334088a101c3..244d3ad58ca7 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c @@ -1,5 +1,5 @@ /* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. + * Copyright(c) 2013 - 2017 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -1586,7 +1586,7 @@ s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, mbx->mbmem_reg = FM10K_MBMEM_VF(id, 0); break; } - /* fallthough */ + /* fall through */ default: return FM10K_MBX_ERR_NO_MBX; } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 40ee0242a80a..9e4fb3a44376 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1,5 +1,5 @@ /* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. + * Copyright(c) 2013 - 2017 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -1334,19 +1334,19 @@ static u8 fm10k_iov_supported_xcast_mode_pf(struct fm10k_vf_info *vf_info, case FM10K_XCAST_MODE_PROMISC: if (vf_flags & FM10K_VF_FLAG_PROMISC_CAPABLE) return FM10K_XCAST_MODE_PROMISC; - /* fallthough */ + /* fall through */ case FM10K_XCAST_MODE_ALLMULTI: if (vf_flags & FM10K_VF_FLAG_ALLMULTI_CAPABLE) return FM10K_XCAST_MODE_ALLMULTI; - /* fallthough */ + /* fall through */ case FM10K_XCAST_MODE_MULTI: if (vf_flags & FM10K_VF_FLAG_MULTI_CAPABLE) return FM10K_XCAST_MODE_MULTI; - /* fallthough */ + /* fall through */ case FM10K_XCAST_MODE_NONE: if (vf_flags & FM10K_VF_FLAG_NONE_CAPABLE) return FM10K_XCAST_MODE_NONE; - /* fallthough */ + /* fall through */ default: break; } -- cgit v1.2.3 From b94dd008c401fc73a8d843e3219356255f40c1ed Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 10 Jul 2017 13:23:06 -0700 Subject: fm10k: avoid possible truncation of q_vector->name GCC versions 7 and newer warn about possible truncation in calls to snprintf. We can fix this and avoid false positives. First, we should pass the full buffer size to snprintf, because it already guarantees a terminating NUL character within the passed length, so passing len-1 simply wastes a byte of possible storage. Second, if we make the ri and ti variables unsigned, the compiler is able to correctly reason that the value never gets larger than 256, so it doesn't need to warn about the full space required to print a signed integer. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 63784576ae8b..9212b3fa3b62 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1544,7 +1544,7 @@ int fm10k_qv_request_irq(struct fm10k_intfc *interface) struct net_device *dev = interface->netdev; struct fm10k_hw *hw = &interface->hw; struct msix_entry *entry; - int ri = 0, ti = 0; + unsigned int ri = 0, ti = 0; int vector, err; entry = &interface->msix_entries[NON_Q_VECTORS(hw)]; @@ -1554,15 +1554,15 @@ int fm10k_qv_request_irq(struct fm10k_intfc *interface) /* name the vector */ if (q_vector->tx.count && q_vector->rx.count) { - snprintf(q_vector->name, sizeof(q_vector->name) - 1, - "%s-TxRx-%d", dev->name, ri++); + snprintf(q_vector->name, sizeof(q_vector->name), + "%s-TxRx-%u", dev->name, ri++); ti++; } else if (q_vector->rx.count) { - snprintf(q_vector->name, sizeof(q_vector->name) - 1, - "%s-rx-%d", dev->name, ri++); + snprintf(q_vector->name, sizeof(q_vector->name), + "%s-rx-%u", dev->name, ri++); } else if (q_vector->tx.count) { - snprintf(q_vector->name, sizeof(q_vector->name) - 1, - "%s-tx-%d", dev->name, ti++); + snprintf(q_vector->name, sizeof(q_vector->name), + "%s-tx-%u", dev->name, ti++); } else { /* skip this unused q_vector */ continue; -- cgit v1.2.3 From 523a0b558db4ca205522976077911e5efe235781 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 10 Jul 2017 13:23:07 -0700 Subject: fm10k: add missing fall through comment GCC versions 7 and newer additionally warn when a case statement may fall through without an explicit comment mentioning it.
Add such a comment to silence the warning, as this is expected. Unfortunately the comment must come directly before the next case statement, so we put it outside the #ifdef. Otherwise, the compiler cannot properly detect it and thus the warning is displayed regardless. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 9dffaba85ae6..189d52a8a605 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -876,6 +876,7 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring, case IPPROTO_GRE: if (skb->encapsulation) break; + /* fall through */ default: if (unlikely(net_ratelimit())) { dev_warn(tx_ring->dev, -- cgit v1.2.3 From 8bac58be1700dab3cac8cb53ed0651da40777024 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 10 Jul 2017 13:23:08 -0700 Subject: fm10k: avoid needless delay when loading driver When we load the driver, we set the last_reset to be in the future, which delays the initial driver reset. Additionally, the service task isn't scheduled to run automatically until the timer runs out. This needlessly delays the first reset, and with it the point at which we begin talking to the switch manager. We can avoid this by simply not setting last_reset and immediately scheduling the service task while in probe. This allows the device to wake up faster, and avoids this delay. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 9212b3fa3b62..6c2c4bffaedf 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1800,9 +1800,6 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, netdev->vlan_features |= NETIF_F_HIGHDMA; } - /* delay any future reset requests */ - interface->last_reset = jiffies + (10 * HZ); - /* reset and initialize the hardware so it is in a known state */ err = hw->mac.ops.reset_hw(hw); if (err) { @@ -2079,8 +2076,9 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* enable SR-IOV after registering netdev to enforce PF/VF ordering */ fm10k_iov_configure(pdev, 0); - /* clear the service task disable bit to allow service task to start */ + /* clear the service task disable bit and kick off service task */ clear_bit(__FM10K_SERVICE_DISABLE, interface->state); + fm10k_service_event_schedule(interface); return 0; -- cgit v1.2.3 From 4abf01b43b62525e4f1a20dd1a2bc4a1967d8928 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 10 Jul 2017 13:23:09 -0700 Subject: fm10k: simplify reading PFVFLRE register We're doing a really convoluted bitshift and read for the PFVFLRE register. Just reading PFVFLRE(1), shifting it left by 32, then OR-ing in PFVFLRE(0) should be sufficient.
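As a sanity check on the simplification, a small stand-alone sketch of the intended composition (plain C, with the two register halves passed in rather than read from hardware): the high half comes from PFVFLRE(1), the low half from PFVFLRE(0), and no double-swap is needed in between:

    #include <stdint.h>
    #include <assert.h>

    /* Combine the two 32-bit VFLRE register halves into one 64-bit image. */
    static uint64_t combine_vflre(uint32_t hi, uint32_t lo)
    {
            uint64_t vflre = hi;

            vflre <<= 32;
            vflre |= lo;
            return vflre;
    }

    int main(void)
    {
            assert(combine_vflre(0x1, 0x2) == 0x100000002ULL);
            return 0;
    }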
Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index d8356c494f06..dfc88a463735 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -1,5 +1,5 @@ /* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. + * Copyright(c) 2013 - 2017 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -67,10 +67,8 @@ s32 fm10k_iov_event(struct fm10k_intfc *interface) /* read VFLRE to determine if any VFs have been reset */ do { - vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(0)); + vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(1)); vflre <<= 32; - vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(1)); - vflre = (vflre << 32) | (vflre >> 32); vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0)); i = iov_data->num_vfs; -- cgit v1.2.3 From d876c1583bb1b7f7264880265b824e88b791aa5d Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 10 Jul 2017 13:23:10 -0700 Subject: fm10k: don't loop while resetting VFs due to VFLR event We've always had a really weird looping construction for resetting VFs. We read the VFLRE register and reset the VF if the corresponding bit is set, which makes sense. However, we loop continuously until we no longer have any bits set. At first this makes sense, as a sort of "keep trying until we succeed" concept. Unfortunately this causes a problem if we happen to surprise remove while this code is executing, because in this case we'll always read all 1s for the VFLRE register. This results in a hard lockup on the CPU because the loop will never terminate. Because our own reset function will always clear the VFLR event register (except when we've lost the PCIe link, obviously), there is no real reason to loop. In practice, we'll loop once more and find that no VFs are pending anymore. Let's just check once. Since we clear the notification when we reset, there's no benefit to the loop. Additionally, there shouldn't be a race as future VFLRE events should trigger an interrupt. Additionally, we didn't warn or do anything in the looped case anyway.
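The per-VF walk that survives the change tests the top bit of the register image once per VF. A self-contained sketch of that idiom is below; it uses an unsigned shift and an explicit top-bit test in place of the driver's signed doubling and sign test, and it assumes 1 <= num_vfs <= 64:

    #include <stdint.h>
    #include <stdio.h>

    static void for_each_flr_vf(uint64_t vflre, unsigned int num_vfs)
    {
            unsigned int i = num_vfs;

            /* Align bit (num_vfs - 1) with bit 63, then shift one VF per
             * iteration; a set top bit means VF "i" saw a VFLR event.
             */
            for (vflre <<= 64 - i; vflre && i--; vflre <<= 1) {
                    if (!(vflre & (1ULL << 63)))
                            continue;
                    printf("VF %u needs reset\n", i);
            }
    }

    int main(void)
    {
            for_each_flr_vf(0x5, 4);        /* reports VFs 2 and 0 */
            return 0;
    }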
Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index dfc88a463735..03897720bf0b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -66,23 +66,21 @@ s32 fm10k_iov_event(struct fm10k_intfc *interface) goto read_unlock; /* read VFLRE to determine if any VFs have been reset */ - do { - vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(1)); - vflre <<= 32; - vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0)); + vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(1)); + vflre <<= 32; + vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0)); - i = iov_data->num_vfs; + i = iov_data->num_vfs; - for (vflre <<= 64 - i; vflre && i--; vflre += vflre) { - struct fm10k_vf_info *vf_info = &iov_data->vf_info[i]; + for (vflre <<= 64 - i; vflre && i--; vflre += vflre) { + struct fm10k_vf_info *vf_info = &iov_data->vf_info[i]; - if (vflre >= 0) - continue; + if (vflre >= 0) + continue; - hw->iov.ops.reset_resources(hw, vf_info); - vf_info->mbx.ops.connect(hw, &vf_info->mbx); - } - } while (i != iov_data->num_vfs); + hw->iov.ops.reset_resources(hw, vf_info); + vf_info->mbx.ops.connect(hw, &vf_info->mbx); + } read_unlock: rcu_read_unlock(); -- cgit v1.2.3 From dd5eede2b711350f684e8510300cb3762a821ae6 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 10 Jul 2017 13:23:11 -0700 Subject: fm10k: avoid divide by zero in rare cases when device is resetting It is possible that under rare circumstances, such as when a PFLR occurs, the device is undergoing a reset while simultaneously transmitting. In this case, we might attempt to divide by zero when finding the proper r_idx. Instead, let's read num_tx_queues once, and make sure it's non-zero.
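The fix pattern generalizes: snapshot the count once, before both the zero check and the modulo, so a concurrent reset cannot zero it in between. A portable sketch follows, with a relaxed atomic load playing the role the kernel's READ_ONCE() plays in the diff below (the function and parameter names are illustrative):

    #include <stdatomic.h>

    /* Returns the ring index to use, or -1 when no queues exist (the
     * caller would return NETDEV_TX_BUSY in that case).
     */
    static int pick_tx_queue(const atomic_int *num_tx_queues, unsigned int r_idx)
    {
            int n = atomic_load_explicit(num_tx_queues, memory_order_relaxed);

            if (!n)
                    return -1;
            /* Divide by the snapshot, never by the live field, so the
             * divisor cannot change between the check and the use.
             */
            return (int)(r_idx % (unsigned int)n);
    }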
Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index e69d49d91d67..77d495fedced 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -643,9 +643,13 @@ int fm10k_close(struct net_device *netdev) static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev) { struct fm10k_intfc *interface = netdev_priv(dev); + int num_tx_queues = READ_ONCE(interface->num_tx_queues); unsigned int r_idx = skb->queue_mapping; int err; + if (!num_tx_queues) + return NETDEV_TX_BUSY; + if ((skb->protocol == htons(ETH_P_8021Q)) && !skb_vlan_tag_present(skb)) { /* FM10K only supports hardware tagging, any tags in frame @@ -698,8 +702,8 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev) __skb_put(skb, pad_len); } - if (r_idx >= interface->num_tx_queues) - r_idx %= interface->num_tx_queues; + if (r_idx >= num_tx_queues) + r_idx %= num_tx_queues; err = fm10k_xmit_frame_ring(skb, interface->tx_ring[r_idx]); -- cgit v1.2.3 From 65b0a469e9e6d025ce9cc46f6cc94d28abf5561c Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 10 Jul 2017 13:23:12 -0700 Subject: fm10k: move fm10k_prepare_for_reset and fm10k_handle_reset A future patch needs these functions defined earlier in the file. Move them up, closer to where they will be called. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 58 ++++++++++++++-------------- 1 file changed, 29 insertions(+), 29 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 6c2c4bffaedf..41335154d6b1 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -132,35 +132,6 @@ static void fm10k_service_timer(unsigned long data) fm10k_service_event_schedule(interface); } -static void fm10k_detach_subtask(struct fm10k_intfc *interface) -{ - struct net_device *netdev = interface->netdev; - u32 __iomem *hw_addr; - u32 value; - - /* do nothing if device is still present or hw_addr is set */ - if (netif_device_present(netdev) || interface->hw.hw_addr) - return; - - /* check the real address space to see if we've recovered */ - hw_addr = READ_ONCE(interface->uc_addr); - value = readl(hw_addr); - if (~value) { - interface->hw.hw_addr = interface->uc_addr; - netif_device_attach(netdev); - set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags); - netdev_warn(netdev, "PCIe link restored, device now attached\n"); - return; - } - - rtnl_lock(); - - if (netif_running(netdev)) - dev_close(netdev); - - rtnl_unlock(); -} - static void fm10k_prepare_for_reset(struct fm10k_intfc *interface) { struct net_device *netdev = interface->netdev; @@ -270,6 +241,35 @@ reinit_err: return err; } +static void fm10k_detach_subtask(struct fm10k_intfc *interface) +{ + struct net_device *netdev = interface->netdev; + u32 __iomem *hw_addr; + u32 value; + + /* do nothing if device is still present or hw_addr is set */ + if (netif_device_present(netdev) || interface->hw.hw_addr) + return; + + /* check the real address space to see if we've recovered */ + hw_addr = 
READ_ONCE(interface->uc_addr); + value = readl(hw_addr); + if (~value) { + interface->hw.hw_addr = interface->uc_addr; + netif_device_attach(netdev); + set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags); + netdev_warn(netdev, "PCIe link restored, device now attached\n"); + return; + } + + rtnl_lock(); + + if (netif_running(netdev)) + dev_close(netdev); + + rtnl_unlock(); +} + static void fm10k_reinit(struct fm10k_intfc *interface) { int err; -- cgit v1.2.3 From 04914390f5a197da1e042f585e1263ad8ebff632 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 10 Jul 2017 13:23:13 -0700 Subject: fm10k: prevent race condition of __FM10K_SERVICE_SCHED Although very unlikely, it is possible that cancel_work_sync() may stop the service_task before it actually started. In this case, the __FM10K_SERVICE_SCHED bit will never be cleared. This results in the service task being unable to reschedule in the future. Add a helper function which sets the service disable bit, waits for the service task to stop and clears the schedule bit, thus avoiding the race condition. We know the schedule bit is safe to clear because cancel_work_sync() guarantees the service task is not running. Also add a helper function to restart the service task, for symmetry. This is not strictly needed but helps the mental model of how to stop and start the service task. This race could only happen in fm10k_suspend/fm10k_resume as this is the only place where the service task is actually restarted. Thus, suspend/resume testing would be ideal. However, note that the chance of this happening is very slim as the service event is scheduled for immediate execution, and you would have to trigger a suspend at almost the exact same time as the service task was scheduled. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 32 ++++++++++++++++++++------ 1 file changed, 25 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 41335154d6b1..9575f7c1862d 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -118,6 +118,27 @@ static void fm10k_service_event_complete(struct fm10k_intfc *interface) fm10k_service_event_schedule(interface); } +static void fm10k_stop_service_event(struct fm10k_intfc *interface) +{ + set_bit(__FM10K_SERVICE_DISABLE, interface->state); + cancel_work_sync(&interface->service_task); + + /* It's possible that cancel_work_sync stopped the service task from + * running before it could actually start. In this case the + * __FM10K_SERVICE_SCHED bit will never be cleared. Since we know that + * the service task cannot be running at this point, we need to clear + * the scheduled bit, as otherwise the service task may never be + * restarted. 
+ */ + clear_bit(__FM10K_SERVICE_SCHED, interface->state); +} + +static void fm10k_start_service_event(struct fm10k_intfc *interface) +{ + clear_bit(__FM10K_SERVICE_DISABLE, interface->state); + fm10k_service_event_schedule(interface); +} + /** * fm10k_service_timer - Timer Call-back * @data: pointer to interface cast into an unsigned long @@ -2116,8 +2137,7 @@ static void fm10k_remove(struct pci_dev *pdev) del_timer_sync(&interface->service_timer); - set_bit(__FM10K_SERVICE_DISABLE, interface->state); - cancel_work_sync(&interface->service_task); + fm10k_stop_service_event(interface); /* free netdev, this may bounce the interrupts due to setup_tc */ if (netdev->reg_state == NETREG_REGISTERED) @@ -2155,8 +2175,7 @@ static void fm10k_prepare_suspend(struct fm10k_intfc *interface) * stopped. We stop the watchdog task until after we resume software * activity. */ - set_bit(__FM10K_SERVICE_DISABLE, interface->state); - cancel_work_sync(&interface->service_task); + fm10k_stop_service_event(interface); fm10k_prepare_for_reset(interface); } @@ -2183,9 +2202,8 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface) interface->link_down_event = jiffies + (HZ); set_bit(__FM10K_LINK_DOWN, interface->state); - /* clear the service task disable bit to allow service task to start */ - clear_bit(__FM10K_SERVICE_DISABLE, interface->state); - fm10k_service_event_schedule(interface); + /* restart the service task */ + fm10k_start_service_event(interface); return err; } -- cgit v1.2.3 From 7ff176f81dd4675d08951d2395c7ddb1d9974da6 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 2 Oct 2017 12:21:57 +0200 Subject: mlxsw: spectrum_router: Fix a typo Signed-off-by: Petr Machata Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index ef4b86b3aa9b..75078a3fbee8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1680,7 +1680,7 @@ __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp, err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd), rauhtd_pl); if (err) { - dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour talbe\n"); + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n"); break; } num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl); -- cgit v1.2.3 From 85f44a15b1920babc2d28c11f514f5b217a29968 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 2 Oct 2017 12:21:58 +0200 Subject: mlxsw: spectrum_router: Drop a redundant condition Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 75078a3fbee8..58bc04cbbef4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -3251,7 +3251,7 @@ mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry, return; if (mlxsw_sp_fib_entry_should_offload(fib_entry)) mlxsw_sp_fib_entry_offload_set(fib_entry); - else if (!mlxsw_sp_fib_entry_should_offload(fib_entry)) + else mlxsw_sp_fib_entry_offload_unset(fib_entry); return; default: -- cgit v1.2.3 From e50d5751c807853cd0fce0b5c46479cc6274014f Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Mon, 24 Jul 2017 18:17:42 -0700 Subject: i40e: limit lan queue count in large CPU count machine When a machine has more CPUs than queue pairs, e.g. 512 cores, the counting gets a little funky and turns off Flow Director with the message: not enough queues for Flow Director. Flow Director feature is disabled This patch limits the number of lan queues initially allocated to be sure we have some left for FD and other features. Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 47f71d7c3ae0..387f0863f794 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -11093,6 +11093,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) static void i40e_determine_queue_usage(struct i40e_pf *pf) { int queues_left; + int q_max; pf->num_lan_qps = 0; @@ -11139,10 +11140,12 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) I40E_FLAG_DCB_ENABLED); dev_info(&pf->pdev->dev, "not enough queues for DCB. 
DCB is disabled.\n"); } - pf->num_lan_qps = max_t(int, pf->rss_size_max, - num_online_cpus()); - pf->num_lan_qps = min_t(int, pf->num_lan_qps, - pf->hw.func_caps.num_tx_qp); + + /* limit lan qps to the smaller of qps, cpus or msix */ + q_max = max_t(int, pf->rss_size_max, num_online_cpus()); + q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp); + q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors); + pf->num_lan_qps = q_max; queues_left -= pf->num_lan_qps; } -- cgit v1.2.3 From 5872866e166c38ad1c1028fb9cf7dd756c0ef43e Mon Sep 17 00:00:00 2001 From: Lihong Yang Date: Thu, 27 Jul 2017 03:17:09 -0700 Subject: i40e: remove logically dead code This patch removes the !vf condition check that cannot be true in the i40e_ndo_set_vf_trust() function. Detected by CoverityScan, CID 1397531 ("Logically dead code"). Signed-off-by: Lihong Yang Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index a75396c157d9..e6b95e1e1a33 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -3354,8 +3354,6 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) vf = &pf->vf[vf_id]; - if (!vf) - return -EINVAL; if (setting == vf->trusted) goto out; -- cgit v1.2.3 From 54902349ee95045b67e2f0c39b75f5418540064b Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 6 Aug 2017 23:37:01 +0200 Subject: i40e: Fix a potential NULL pointer dereference If 'kzalloc()' fails, a NULL pointer will be dereferenced. Return an error code (-ENOMEM) instead. Signed-off-by: Christophe JAILLET Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index e6b95e1e1a33..9e3667fc7f6a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -423,6 +423,9 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf, (sizeof(struct virtchnl_iwarp_qv_info) * (qvlist_info->num_vectors - 1)); vf->qvlist_info = kzalloc(size, GFP_KERNEL); + if (!vf->qvlist_info) + return -ENOMEM; + vf->qvlist_info->num_vectors = qvlist_info->num_vectors; msix_vf = pf->hw.func_caps.num_msix_vectors_vf; -- cgit v1.2.3 From d60bcc798000e015940fb47eb23b79dd2fda5c9e Mon Sep 17 00:00:00 2001 From: Filip Sadowski Date: Tue, 22 Aug 2017 06:57:43 -0400 Subject: i40e: Fix reporting of supported link modes This patch fixes incorrect reporting of supported link modes on some NICs. 
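The core of the fix, visible in the diff that follows, is assembling a 64-bit PHY-type capability mask from the link-status response: four little-endian bytes give the low 32 bits and the extension byte supplies bits 32-39. A stand-alone sketch of that composition (the memcpy-of-le32 shortcut assumes a little-endian host; the driver uses le32_to_cpu() instead):

    #include <stdint.h>
    #include <string.h>

    /* Build the 64-bit phy_types mask from the firmware response fields. */
    static uint64_t phy_types_from_resp(const uint8_t link_type[4],
                                        uint8_t link_type_ext)
    {
            uint32_t lo;

            memcpy(&lo, link_type, sizeof(lo));     /* le32, LE host assumed */
            return (uint64_t)lo | ((uint64_t)link_type_ext << 32);
    }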
Signed-off-by: Filip Sadowski Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 20 ++++++++++++++++++-- drivers/net/ethernet/intel/i40e/i40e_common.c | 11 ++++++++++- drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 20 ++++++++++++++++++-- 3 files changed, 46 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index e2a9ec80a623..5d0291c1337e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1734,6 +1734,8 @@ enum i40e_aq_phy_type { I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB, I40E_PHY_TYPE_10GBASE_AOC = 0xC, I40E_PHY_TYPE_40GBASE_AOC = 0xD, + I40E_PHY_TYPE_UNRECOGNIZED = 0xE, + I40E_PHY_TYPE_UNSUPPORTED = 0xF, I40E_PHY_TYPE_100BASE_TX = 0x11, I40E_PHY_TYPE_1000BASE_T = 0x12, I40E_PHY_TYPE_10GBASE_T = 0x13, @@ -1752,6 +1754,8 @@ enum i40e_aq_phy_type { I40E_PHY_TYPE_25GBASE_CR = 0x20, I40E_PHY_TYPE_25GBASE_SR = 0x21, I40E_PHY_TYPE_25GBASE_LR = 0x22, + I40E_PHY_TYPE_EMPTY = 0xFE, + I40E_PHY_TYPE_DEFAULT = 0xFF, I40E_PHY_TYPE_MAX }; @@ -1942,19 +1946,31 @@ struct i40e_aqc_get_link_status { #define I40E_AQ_25G_SERDES_UCODE_ERR 0X04 #define I40E_AQ_25G_NIMB_UCODE_ERR 0X05 u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ +/* Since firmware API 1.7 loopback field keeps power class info as well */ +#define I40E_AQ_LOOPBACK_MASK 0x07 +#define I40E_AQ_PWR_CLASS_SHIFT_LB 6 +#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB) __le16 max_frame_size; u8 config; #define I40E_AQ_CONFIG_FEC_KR_ENA 0x01 #define I40E_AQ_CONFIG_FEC_RS_ENA 0x02 #define I40E_AQ_CONFIG_CRC_ENA 0x04 #define I40E_AQ_CONFIG_PACING_MASK 0x78 - u8 power_desc; + union { + struct { + u8 power_desc; #define I40E_AQ_LINK_POWER_CLASS_1 0x00 #define I40E_AQ_LINK_POWER_CLASS_2 0x01 #define I40E_AQ_LINK_POWER_CLASS_3 0x02 #define I40E_AQ_LINK_POWER_CLASS_4 0x03 #define I40E_AQ_PWR_CLASS_MASK 0x03 - u8 reserved[4]; + u8 reserved[4]; + }; + struct { + u8 link_type[4]; + u8 link_type_ext; + }; + }; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 7346d8850c8e..64c15f4c9d2b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1821,7 +1821,7 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA | I40E_AQ_CONFIG_FEC_RS_ENA); hw_link_info->ext_info = resp->ext_info; - hw_link_info->loopback = resp->loopback; + hw_link_info->loopback = resp->loopback & I40E_AQ_LOOPBACK_MASK; hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size); hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK; @@ -1852,6 +1852,15 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw, hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; + if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && + hw->aq.api_min_ver >= 7) { + __le32 tmp; + + memcpy(&tmp, resp->link_type, sizeof(tmp)); + hw->phy.phy_types = le32_to_cpu(tmp); + hw->phy.phy_types |= ((u64)resp->link_type_ext << 32); + } + /* save link status information */ if (link) *link = *hw_link_info; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h 
index f9f48d1900b0..709d114fc305 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -1730,6 +1730,8 @@ enum i40e_aq_phy_type { I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB, I40E_PHY_TYPE_10GBASE_AOC = 0xC, I40E_PHY_TYPE_40GBASE_AOC = 0xD, + I40E_PHY_TYPE_UNRECOGNIZED = 0xE, + I40E_PHY_TYPE_UNSUPPORTED = 0xF, I40E_PHY_TYPE_100BASE_TX = 0x11, I40E_PHY_TYPE_1000BASE_T = 0x12, I40E_PHY_TYPE_10GBASE_T = 0x13, @@ -1748,6 +1750,8 @@ enum i40e_aq_phy_type { I40E_PHY_TYPE_25GBASE_CR = 0x20, I40E_PHY_TYPE_25GBASE_SR = 0x21, I40E_PHY_TYPE_25GBASE_LR = 0x22, + I40E_PHY_TYPE_EMPTY = 0xFE, + I40E_PHY_TYPE_DEFAULT = 0xFF, I40E_PHY_TYPE_MAX }; @@ -1938,19 +1942,31 @@ struct i40e_aqc_get_link_status { #define I40E_AQ_25G_SERDES_UCODE_ERR 0X04 #define I40E_AQ_25G_NIMB_UCODE_ERR 0X05 u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ +/* Since firmware API 1.7 loopback field keeps power class info as well */ +#define I40E_AQ_LOOPBACK_MASK 0x07 +#define I40E_AQ_PWR_CLASS_SHIFT_LB 6 +#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB) __le16 max_frame_size; u8 config; #define I40E_AQ_CONFIG_FEC_KR_ENA 0x01 #define I40E_AQ_CONFIG_FEC_RS_ENA 0x02 #define I40E_AQ_CONFIG_CRC_ENA 0x04 #define I40E_AQ_CONFIG_PACING_MASK 0x78 - u8 power_desc; + union { + struct { + u8 power_desc; #define I40E_AQ_LINK_POWER_CLASS_1 0x00 #define I40E_AQ_LINK_POWER_CLASS_2 0x01 #define I40E_AQ_LINK_POWER_CLASS_3 0x02 #define I40E_AQ_LINK_POWER_CLASS_4 0x03 #define I40E_AQ_PWR_CLASS_MASK 0x03 - u8 reserved[4]; + u8 reserved[4]; + }; + struct { + u8 link_type[4]; + u8 link_type_ext; + }; + }; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); -- cgit v1.2.3 From 9c0e5caf6398d6b892dc5046c421890e32ab5fa3 Mon Sep 17 00:00:00 2001 From: Filip Sadowski Date: Tue, 22 Aug 2017 06:57:44 -0400 Subject: i40e: Add support for 'ethtool -m' This patch adds support for the 'ethtool -m' command, which displays information about the (Q)SFP+ module plugged into the NIC's cage. 
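Before the diff, it may help to see the addressing rule the new ethtool hooks implement for SFP modules. A stand-alone sketch (constants per SFF-8472; the helper name is invented): a flat ethtool EEPROM offset maps to I2C device address 0xA0 for bytes 0-255 and to 0xA2, rebased to zero, beyond that:

    #include <stdint.h>

    #define SFF_8079_LEN    256     /* bytes exposed at I2C address 0xA0 */
    #define EEPROM_ADDR_A0  0xA0
    #define EEPROM_ADDR_A2  0xA2

    /* Split a flat EEPROM offset into (I2C device address, page offset). */
    static void sfp_locate(uint32_t flat, uint8_t *dev_addr, uint32_t *offset)
    {
            if (flat >= SFF_8079_LEN) {
                    *dev_addr = EEPROM_ADDR_A2;
                    *offset = flat - SFF_8079_LEN;
            } else {
                    *dev_addr = EEPROM_ADDR_A0;
                    *offset = flat;
            }
    }

This is the same split performed inside the i40e_get_module_eeprom() read loop shown further below.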
Signed-off-by: Filip Sadowski Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 18 +++ drivers/net/ethernet/intel/i40e/i40e_common.c | 69 +++++++++ drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 154 +++++++++++++++++++++ drivers/net/ethernet/intel/i40e/i40e_prototype.h | 9 ++ drivers/net/ethernet/intel/i40e/i40e_type.h | 13 ++ .../net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 18 +++ drivers/net/ethernet/intel/i40evf/i40e_common.c | 69 +++++++++ drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 9 ++ drivers/net/ethernet/intel/i40evf/i40e_type.h | 12 ++ 9 files changed, 371 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 5d0291c1337e..ed7bbe14bc6e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -244,6 +244,8 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_set_phy_debug = 0x0622, i40e_aqc_opc_upload_ext_phy_fm = 0x0625, i40e_aqc_opc_run_phy_activity = 0x0626, + i40e_aqc_opc_set_phy_register = 0x0628, + i40e_aqc_opc_get_phy_register = 0x0629, /* NVM commands */ i40e_aqc_opc_nvm_read = 0x0701, @@ -2053,6 +2055,22 @@ struct i40e_aqc_run_phy_activity { I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity); +/* Set PHY Register command (0x0628) */ +/* Get PHY Register command (0x0629) */ +struct i40e_aqc_phy_register_access { + u8 phy_interface; +#define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0 +#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1 +#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2 + u8 dev_address; + u8 reserved1[2]; + __le32 reg_address; + __le32 reg_value; + u8 reserved2[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access); + /* NVM Read command (indirect 0x0701) * NVM Erase commands (direct 0x0702) * NVM Update commands (indirect 0x0703) diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 64c15f4c9d2b..fada03799850 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -5062,6 +5062,75 @@ do_retry: wr32(hw, reg_addr, reg_val); } +/** + * i40e_aq_set_phy_register + * @hw: pointer to the hw struct + * @phy_select: select which phy should be accessed + * @dev_addr: PHY device address + * @reg_addr: PHY register address + * @reg_val: new register value + * @cmd_details: pointer to command details structure or NULL + * + * Write the external PHY register. + **/ +i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw, + u8 phy_select, u8 dev_addr, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_phy_register_access *cmd = + (struct i40e_aqc_phy_register_access *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_phy_register); + + cmd->phy_interface = phy_select; + cmd->dev_address = dev_addr; + cmd->reg_address = cpu_to_le32(reg_addr); + cmd->reg_value = cpu_to_le32(reg_val); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_aq_get_phy_register + * @hw: pointer to the hw struct + * @phy_select: select which phy should be accessed + * @dev_addr: PHY device address + * @reg_addr: PHY register address + * @reg_val: read register value + * @cmd_details: pointer to command details structure or NULL + * + * Read the external PHY register. 
+ **/ +i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw, + u8 phy_select, u8 dev_addr, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_phy_register_access *cmd = + (struct i40e_aqc_phy_register_access *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_phy_register); + + cmd->phy_interface = phy_select; + cmd->dev_address = dev_addr; + cmd->reg_address = cpu_to_le32(reg_addr); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + if (!status) + *reg_val = le32_to_cpu(cmd->reg_value); + + return status; +} + /** * i40e_aq_write_ppp - Write pipeline personalization profile (ppp) * @hw: pointer to the hw struct diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 05e89864f781..1136d02e2e95 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -4196,6 +4196,158 @@ flags_complete: return 0; } +/** + * i40e_get_module_info - get (Q)SFP+ module type info + * @netdev: network interface device structure + * @modinfo: module EEPROM size and layout information structure + **/ +static int i40e_get_module_info(struct net_device *netdev, + struct ethtool_modinfo *modinfo) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + u32 sff8472_comp = 0; + u32 sff8472_swap = 0; + u32 sff8636_rev = 0; + i40e_status status; + u32 type = 0; + + /* Check if firmware supports reading module EEPROM. */ + if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) { + netdev_err(vsi->netdev, "Module EEPROM memory read not supported. Please update the NVM image.\n"); + return -EINVAL; + } + + status = i40e_update_link_info(hw); + if (status) + return -EIO; + + if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) { + netdev_err(vsi->netdev, "Cannot read module EEPROM memory. No module connected.\n"); + return -EINVAL; + } + + type = hw->phy.link_info.module_type[0]; + + switch (type) { + case I40E_MODULE_TYPE_SFP: + status = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, + I40E_I2C_EEPROM_DEV_ADDR, + I40E_MODULE_SFF_8472_COMP, + &sff8472_comp, NULL); + if (status) + return -EIO; + + status = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, + I40E_I2C_EEPROM_DEV_ADDR, + I40E_MODULE_SFF_8472_SWAP, + &sff8472_swap, NULL); + if (status) + return -EIO; + + /* Check if the module requires address swap to access + * the other EEPROM memory page. + */ + if (sff8472_swap & I40E_MODULE_SFF_ADDR_MODE) { + netdev_warn(vsi->netdev, "Module address swap to access page 0xA2 is not supported.\n"); + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else if (sff8472_comp == 0x00) { + /* Module is not SFF-8472 compliant */ + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + break; + case I40E_MODULE_TYPE_QSFP_PLUS: + /* Read from memory page 0. 
*/ + status = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, + 0, + I40E_MODULE_REVISION_ADDR, + &sff8636_rev, NULL); + if (status) + return -EIO; + /* Determine revision compliance byte */ + if (sff8636_rev > 0x02) { + /* Module is SFF-8636 compliant */ + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; + } + break; + case I40E_MODULE_TYPE_QSFP28: + modinfo->type = ETH_MODULE_SFF_8636; + modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; + break; + default: + netdev_err(vsi->netdev, "Module type unrecognized\n"); + return -EINVAL; + } + return 0; +} + +/** + * i40e_get_module_eeprom - fills buffer with (Q)SFP+ module memory contents + * @netdev: network interface device structure + * @ee: EEPROM dump request structure + * @data: buffer to be filled with EEPROM contents + **/ +static int i40e_get_module_eeprom(struct net_device *netdev, + struct ethtool_eeprom *ee, + u8 *data) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); + struct i40e_vsi *vsi = np->vsi; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + bool is_sfp = false; + i40e_status status; + u32 value = 0; + int i; + + if (!ee || !ee->len || !data) + return -EINVAL; + + if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP) + is_sfp = true; + + for (i = 0; i < ee->len; i++) { + u32 offset = i + ee->offset; + u32 addr = is_sfp ? I40E_I2C_EEPROM_DEV_ADDR : 0; + + /* Check if we need to access the other memory page */ + if (is_sfp) { + if (offset >= ETH_MODULE_SFF_8079_LEN) { + offset -= ETH_MODULE_SFF_8079_LEN; + addr = I40E_I2C_EEPROM_DEV_ADDR2; + } + } else { + while (offset >= ETH_MODULE_SFF_8436_LEN) { + /* Compute memory page number and offset. 
*/ + offset -= ETH_MODULE_SFF_8436_LEN / 2; + addr++; + } + } + + status = i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE, + addr, offset, &value, NULL); + if (status) + return -EIO; + data[i] = value; + } + return 0; +} + static const struct ethtool_ops i40e_ethtool_ops = { .get_drvinfo = i40e_get_drvinfo, .get_regs_len = i40e_get_regs_len, @@ -4228,6 +4380,8 @@ static const struct ethtool_ops i40e_ethtool_ops = { .set_rxfh = i40e_set_rxfh, .get_channels = i40e_get_channels, .set_channels = i40e_set_channels, + .get_module_info = i40e_get_module_info, + .get_module_eeprom = i40e_get_module_eeprom, .get_ts_info = i40e_get_ts_info, .get_priv_flags = i40e_get_priv_flags, .set_priv_flags = i40e_set_priv_flags, diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index a39b13197891..01502561035c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -360,6 +360,15 @@ i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, u32 reg_addr, u32 reg_val, struct i40e_asq_cmd_details *cmd_details); void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val); +i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw, + u8 phy_select, u8 dev_addr, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw, + u8 phy_select, u8 dev_addr, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details); + i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw, u16 reg, u8 phy_addr, u16 *value); i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw, diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index fd4bbdd88b57..8b0b9f826b7f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -428,6 +428,18 @@ struct i40e_nvm_access { u8 data[1]; }; +/* (Q)SFP module access definitions */ +#define I40E_I2C_EEPROM_DEV_ADDR 0xA0 +#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2 +#define I40E_MODULE_TYPE_ADDR 0x00 +#define I40E_MODULE_REVISION_ADDR 0x01 +#define I40E_MODULE_SFF_8472_COMP 0x5E +#define I40E_MODULE_SFF_8472_SWAP 0x5C +#define I40E_MODULE_SFF_ADDR_MODE 0x04 +#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D +#define I40E_MODULE_TYPE_QSFP28 0x11 +#define I40E_MODULE_QSFP_MAX_LEN 640 + /* PCI bus types */ enum i40e_bus_type { i40e_bus_type_unknown = 0, @@ -598,6 +610,7 @@ struct i40e_hw { struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ #define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0) +#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2) u64 flags; /* debug mask */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index 709d114fc305..eee7ece42b39 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -244,6 +244,8 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_set_phy_debug = 0x0622, i40e_aqc_opc_upload_ext_phy_fm = 0x0625, i40e_aqc_opc_run_phy_activity = 0x0626, + i40e_aqc_opc_set_phy_register = 0x0628, + i40e_aqc_opc_get_phy_register = 0x0629, /* NVM commands */ i40e_aqc_opc_nvm_read = 0x0701, @@ -2046,6 +2048,22 @@ struct i40e_aqc_run_phy_activity { I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity); +/* Set PHY Register command (0x0628) */ +/* Get PHY Register command (0x0629) */ +struct 
i40e_aqc_phy_register_access { + u8 phy_interface; +#define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0 +#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1 +#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2 + u8 dev_address; + u8 reserved1[2]; + __le32 reg_address; + __le32 reg_value; + u8 reserved2[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access); + /* NVM Read command (indirect 0x0701) * NVM Erase commands (direct 0x0702) * NVM Update commands (indirect 0x0703) diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 8d3a2bfe186a..7d70bf69b249 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -1041,6 +1041,75 @@ do_retry: wr32(hw, reg_addr, reg_val); } +/** + * i40evf_aq_set_phy_register + * @hw: pointer to the hw struct + * @phy_select: select which phy should be accessed + * @dev_addr: PHY device address + * @reg_addr: PHY register address + * @reg_val: new register value + * @cmd_details: pointer to command details structure or NULL + * + * Write the external PHY register. + **/ +i40e_status i40evf_aq_set_phy_register(struct i40e_hw *hw, + u8 phy_select, u8 dev_addr, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_phy_register_access *cmd = + (struct i40e_aqc_phy_register_access *)&desc.params.raw; + i40e_status status; + + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_phy_register); + + cmd->phy_interface = phy_select; + cmd->dev_address = dev_addr; + cmd->reg_address = cpu_to_le32(reg_addr); + cmd->reg_value = cpu_to_le32(reg_val); + + status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40evf_aq_get_phy_register + * @hw: pointer to the hw struct + * @phy_select: select which phy should be accessed + * @dev_addr: PHY device address + * @reg_addr: PHY register address + * @reg_val: read register value + * @cmd_details: pointer to command details structure or NULL + * + * Read the external PHY register. 
+ **/ +i40e_status i40evf_aq_get_phy_register(struct i40e_hw *hw, + u8 phy_select, u8 dev_addr, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_phy_register_access *cmd = + (struct i40e_aqc_phy_register_access *)&desc.params.raw; + i40e_status status; + + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_phy_register); + + cmd->phy_interface = phy_select; + cmd->dev_address = dev_addr; + cmd->reg_address = cpu_to_le32(reg_addr); + + status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details); + if (!status) + *reg_val = le32_to_cpu(cmd->reg_value); + + return status; +} + /** * i40e_aq_send_msg_to_pf * @hw: pointer to the hardware structure diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index c9836bba487d..b624b5994075 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -111,6 +111,15 @@ i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw, u32 reg_addr, u32 reg_val, struct i40e_asq_cmd_details *cmd_details); void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val); +i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw, + u8 phy_select, u8 dev_addr, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw, + u8 phy_select, u8 dev_addr, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details); + i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 *value); i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 2ea919d9cdcf..b53584e3d580 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -401,6 +401,18 @@ struct i40e_nvm_access { u8 data[1]; }; +/* (Q)SFP module access definitions */ +#define I40E_I2C_EEPROM_DEV_ADDR 0xA0 +#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2 +#define I40E_MODULE_TYPE_ADDR 0x00 +#define I40E_MODULE_REVISION_ADDR 0x01 +#define I40E_MODULE_SFF_8472_COMP 0x5E +#define I40E_MODULE_SFF_8472_SWAP 0x5C +#define I40E_MODULE_SFF_ADDR_MODE 0x04 +#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D +#define I40E_MODULE_TYPE_QSFP28 0x11 +#define I40E_MODULE_QSFP_MAX_LEN 640 + /* PCI bus types */ enum i40e_bus_type { i40e_bus_type_unknown = 0, -- cgit v1.2.3 From 00f6c2f5e20bbdb638e58c50c6e6b1d8b796d6f6 Mon Sep 17 00:00:00 2001 From: Mariusz Stachura Date: Tue, 22 Aug 2017 06:57:45 -0400 Subject: i40e: use admin queue for setting LEDs behavior Instead of accessing the register directly, use the newly added AQC in order to blink the LEDs. Introduce and utilize a new flag to prevent excessive API version checking. 
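The flag mentioned here replaces repeated firmware API version checks with a single capability bit probed at init time. A minimal sketch of that dispatch pattern (the stub access paths are invented stand-ins for the admin-queue and MDIO register accessors):

    #include <stdbool.h>

    /* Illustrative stand-ins for the two LED register access paths. */
    static int led_get_via_adminq(unsigned int reg, unsigned int *val)
    {
            *val = 0;
            return 0;
    }

    static int led_get_via_mdio(unsigned int reg, unsigned int *val)
    {
            *val = 0;
            return 0;
    }

    /* Probe the firmware API once, cache the answer in a flag, and branch
     * on the flag at every LED access instead of re-checking versions.
     */
    static int led_get_reg(bool aq_capable, unsigned int reg, unsigned int *val)
    {
            return aq_capable ? led_get_via_adminq(reg, val)
                              : led_get_via_mdio(reg, val);
    }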
Signed-off-by: Mariusz Stachura Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 6 ++ drivers/net/ethernet/intel/i40e/i40e_common.c | 115 ++++++++++++++++++++------ drivers/net/ethernet/intel/i40evf/i40e_type.h | 2 + 3 files changed, 99 insertions(+), 24 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index ba04988e0598..08f63226105a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -607,6 +607,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) &oem_lo); hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo; + if (hw->mac.type == I40E_MAC_XL710 && + hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && + hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) { + hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; + } + if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) { ret_code = I40E_ERR_FIRMWARE_API_VERSION; goto init_adminq_free_arq; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index fada03799850..a4838779de5d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -4836,6 +4836,74 @@ phy_blinking_end: return status; } +/** + * i40e_led_get_reg - read LED register + * @hw: pointer to the HW structure + * @led_addr: LED register address + * @reg_val: read register value + **/ +static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr, + u32 *reg_val) +{ + enum i40e_status_code status; + u8 phy_addr = 0; + u8 port_num; + u32 i; + + *reg_val = 0; + if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { + status = + i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL, + I40E_PHY_COM_REG_PAGE, + I40E_PHY_LED_PROV_REG_1, + reg_val, NULL); + } else { + i = rd32(hw, I40E_PFGEN_PORTNUM); + port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); + phy_addr = i40e_get_phy_address(hw, port_num); + status = i40e_read_phy_register_clause45(hw, + I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, + (u16 *)reg_val); + } + return status; +} + +/** + * i40e_led_set_reg - write LED register + * @hw: pointer to the HW structure + * @led_addr: LED register address + * @reg_val: register value to write + **/ +static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr, + u32 reg_val) +{ + enum i40e_status_code status; + u8 phy_addr = 0; + u8 port_num; + u32 i; + + if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { + status = + i40e_aq_set_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL, + I40E_PHY_COM_REG_PAGE, + I40E_PHY_LED_PROV_REG_1, + reg_val, NULL); + } else { + i = rd32(hw, I40E_PFGEN_PORTNUM); + port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); + phy_addr = i40e_get_phy_address(hw, port_num); + status = i40e_write_phy_register_clause45(hw, + I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, + (u16)reg_val); + } + + return status; +} + /** * i40e_led_get_phy - return current on/off mode * @hw: pointer to the hw struct @@ -4853,7 +4921,19 @@ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, u16 temp_addr; u8 port_num; u32 i; - + u32 reg_val_aq; + + if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { + status = + i40e_aq_get_phy_register(hw, + I40E_AQ_PHY_REG_ACCESS_EXTERNAL, + I40E_PHY_COM_REG_PAGE, + I40E_PHY_LED_PROV_REG_1, + ®_val_aq, NULL); + if (status == I40E_SUCCESS) + *val = (u16)reg_val_aq; 
+ return status; + } temp_addr = I40E_PHY_LED_PROV_REG_1; i = rd32(hw, I40E_PFGEN_PORTNUM); port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); @@ -4888,51 +4968,38 @@ i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on, u16 led_addr, u32 mode) { i40e_status status = 0; - u16 led_ctl = 0; - u16 led_reg = 0; - u8 phy_addr = 0; - u8 port_num; - u32 i; + u32 led_ctl = 0; + u32 led_reg = 0; - i = rd32(hw, I40E_PFGEN_PORTNUM); - port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); - phy_addr = i40e_get_phy_address(hw, port_num); - status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, - led_addr, phy_addr, &led_reg); + status = i40e_led_get_reg(hw, led_addr, &led_reg); if (status) return status; led_ctl = led_reg; if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { led_reg = 0; - status = i40e_write_phy_register_clause45(hw, - I40E_PHY_COM_REG_PAGE, - led_addr, phy_addr, - led_reg); + status = i40e_led_set_reg(hw, led_addr, led_reg); if (status) return status; } - status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, - led_addr, phy_addr, &led_reg); + status = i40e_led_get_reg(hw, led_addr, &led_reg); if (status) goto restore_config; if (on) led_reg = I40E_PHY_LED_MANUAL_ON; else led_reg = 0; - status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, - led_addr, phy_addr, led_reg); + + status = i40e_led_set_reg(hw, led_addr, led_reg); if (status) goto restore_config; if (mode & I40E_PHY_LED_MODE_ORIG) { led_ctl = (mode & I40E_PHY_LED_MODE_MASK); - status = i40e_write_phy_register_clause45(hw, - I40E_PHY_COM_REG_PAGE, - led_addr, phy_addr, led_ctl); + status = i40e_led_set_reg(hw, led_addr, led_ctl); } return status; + restore_config: - status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE, - led_addr, phy_addr, led_ctl); + status = i40e_led_set_reg(hw, led_addr, led_ctl); return status; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index b53584e3d580..48eacf5e73e4 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -568,6 +568,8 @@ struct i40e_hw { /* LLDP/DCBX Status */ u16 dcbx_status; +#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2) + /* DCBX info */ struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */ struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */ -- cgit v1.2.3 From ba4e003d29c1d32776f156695fb00adf7df86ee2 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 22 Aug 2017 06:57:46 -0400 Subject: i40e: don't hold spinlock while resetting VF When we refactored handling of the PVID in commit 9af52f60b2d9 ("i40e: use (add|rm)_vlan_all_mac helper functions when changing PVID") we introduced a scheduling while atomic regression. This occurred because we now held the spinlock across a call to i40e_reset_vf(), which results in a usleep_range() call that triggers a scheduling while atomic bug. This was rare as it only occurred if the user configured a VLAN on a VF and also attempted to reconfigure the VF from the host system with a port VLAN. We do need to hold the lock while calling i40e_is_vsi_in_vlan(), but we should not be holding it while we reset the VF. We'll fix this by introducing a separate helper function i40e_vsi_has_vlans which checks whether we have a PVID and whether the VSI has configured VLANs. This helper function will manage its own need for the mac_filter_hash_lock. 
Then, we can move the acquisition of the spinlock to after we reset the VF, which ensures that we do not sleep while holding the lock. Using a separate function like this makes the code clearer and is easier to read than attempting to release and re-acquire the spinlock when we reset the VF. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 36 +++++++++++++++++--- 1 file changed, 32 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 9e3667fc7f6a..53ead127b293 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2925,6 +2925,34 @@ error_param: return ret; } +/** + * i40e_vsi_has_vlans - True if VSI has configured VLANs + * @vsi: pointer to the vsi + * + * Check if a VSI has configured any VLANs. False if we have a port VLAN or if + * we have no configured VLANs. Do not call while holding the + * mac_filter_hash_lock. + */ +static bool i40e_vsi_has_vlans(struct i40e_vsi *vsi) +{ + bool have_vlans; + + /* If we have a port VLAN, then the VSI cannot have any VLANs + * configured, as all MAC/VLAN filters will be assigned to the PVID. + */ + if (vsi->info.pvid) + return false; + + /* Since we don't have a PVID, we know that if the device is in VLAN + * mode it must be because of a VLAN filter configured on this VSI. + */ + spin_lock_bh(&vsi->mac_filter_hash_lock); + have_vlans = i40e_is_vsi_in_vlan(vsi); + spin_unlock_bh(&vsi->mac_filter_hash_lock); + + return have_vlans; +} + /** * i40e_ndo_set_vf_port_vlan * @netdev: network interface device structure @@ -2977,10 +3005,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, /* duplicate request, so just return success */ goto error_pvid; - /* Locked once because multiple functions below iterate list */ - spin_lock_bh(&vsi->mac_filter_hash_lock); - - if (le16_to_cpu(vsi->info.pvid) == 0 && i40e_is_vsi_in_vlan(vsi)) { + if (i40e_vsi_has_vlans(vsi)) { dev_err(&pf->pdev->dev, "VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n", vf_id); @@ -2993,6 +3018,9 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, vsi = pf->vsi[vf->lan_vsi_idx]; } + /* Locked once because multiple functions below iterate list */ + spin_lock_bh(&vsi->mac_filter_hash_lock); + /* Check for condition where there was already a port VLAN ID * filter set and now it is being deleted by setting it to zero. * Additionally check for the condition where there was a port -- cgit v1.2.3 From eeeddbb80640ef63466a54bc118f66c81487bc42 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 22 Aug 2017 06:57:47 -0400 Subject: i40e: drop i40e_pf *pf from i40e_vc_disable_vf() It's never used, and the vf structure can get us back to the PF if necessary. Let's just drop the extra unneeded parameter. 
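The parameter being dropped is recoverable from the one that remains, which is the whole argument of the patch. In sketch form, with the structures reduced to the single relationship that matters (names illustrative):

    struct pf;

    struct vf {
            struct pf *pf;  /* every VF records its owning PF */
    };

    /* Callers need not pass the PF separately: one pointer suffices. */
    static struct pf *vf_to_pf(const struct vf *vf)
    {
            return vf->pf;
    }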
Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 53ead127b293..70a79864177a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -154,12 +154,11 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) /** * i40e_vc_disable_vf - * @pf: pointer to the PF info * @vf: pointer to the VF info * * Disable the VF through a SW reset **/ -static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf) +static inline void i40e_vc_disable_vf(struct i40e_vf *vf) { i40e_vc_notify_vf_reset(vf); i40e_reset_vf(vf, false); @@ -2918,7 +2917,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) } /* Force the VF driver stop so it has to reload with new MAC address */ - i40e_vc_disable_vf(pf, vf); + i40e_vc_disable_vf(vf); dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); error_param: @@ -3013,7 +3012,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, * the right thing by reconfiguring his network correctly * and then reloading the VF driver. */ - i40e_vc_disable_vf(pf, vf); + i40e_vc_disable_vf(vf); /* During reset the VF got a new VSI, so refresh the pointer. */ vsi = pf->vsi[vf->lan_vsi_idx]; } -- cgit v1.2.3 From f18d20218a14d11d8fd6ed32e66ad199c8c93280 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 22 Aug 2017 06:57:48 -0400 Subject: i40e: make use of i40e_vc_disable_vf Replace i40e_vc_notify_vf_reset and i40e_reset_vf with a call to i40e_vc_disable_vf which does this exact thing. This matches similar code patterns throughout the driver. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 70a79864177a..94ee243f110e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -3388,8 +3388,7 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) goto out; vf->trusted = setting; - i40e_vc_notify_vf_reset(vf); - i40e_reset_vf(vf, false); + i40e_vc_disable_vf(vf); dev_info(&pf->pdev->dev, "VF %u is now %strusted\n", vf_id, setting ? "" : "un"); out: -- cgit v1.2.3 From d43d60e5eb9504aa6f8f390aa0313cc8e3816b82 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 22 Aug 2017 06:57:49 -0400 Subject: i40e: ensure reset occurs when disabling VF It is possible, although rare, that we may not actually reset when i40e_vc_disable_vf() is called. This can leave the VF in an inconsistent state, with some values not properly set. Modify i40e_reset_vf() to return a code indicating whether it reset or not. Now, i40e_vc_disable_vf() can wait until a reset actually occurs. If the reset fails to occur within a reasonable time frame, we'll display a warning message.
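The wait amounts to a bounded retry loop. A standalone sketch of the shape, with usleep() standing in for usleep_range() and a made-up try_reset():

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for the new i40e_reset_vf() contract: true if this caller
 * initiated the reset, false if it lost the race to another flow. */
static bool try_reset(void)
{
    static int other_flow_busy = 3;    /* pretend someone holds it briefly */
    return other_flow_busy-- <= 0;
}

int main(void)
{
    int i;

    for (i = 0; i < 20; i++) {
        if (try_reset()) {
            printf("reset initiated on attempt %d\n", i + 1);
            return 0;
        }
        usleep(10000);    /* 10 ms per attempt, ~200 ms total budget */
    }
    fprintf(stderr, "failed to initiate reset after 200 ms\n");
    return 1;
}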
Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 42 +++++++++++++++++----- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 4 +-- 2 files changed, 35 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 94ee243f110e..7742cf3d38d9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -156,12 +156,28 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf) * i40e_vc_disable_vf * @vf: pointer to the VF info * - * Disable the VF through a SW reset + * Disable the VF through a SW reset. **/ static inline void i40e_vc_disable_vf(struct i40e_vf *vf) { + int i; + i40e_vc_notify_vf_reset(vf); - i40e_reset_vf(vf, false); + + /* We want to ensure that an actual reset occurs initiated after this + * function was called. However, we do not want to wait forever, so + * we'll give a reasonable time and print a message if we failed to + * ensure a reset. + */ + for (i = 0; i < 20; i++) { + if (i40e_reset_vf(vf, false)) + return; + usleep_range(10000, 20000); + } + + dev_warn(&vf->pf->pdev->dev, + "Failed to initiate reset for VF %d after 200 milliseconds\n", + vf->vf_id); } /** @@ -1051,9 +1067,9 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf) * @vf: pointer to the VF structure * @flr: VFLR was issued or not * - * reset the VF + * Returns true if the VF is reset, false otherwise. **/ -void i40e_reset_vf(struct i40e_vf *vf, bool flr) +bool i40e_reset_vf(struct i40e_vf *vf, bool flr) { struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; @@ -1061,9 +1077,11 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) u32 reg; int i; - /* If VFs have been disabled, there is no need to reset */ + /* If the VFs have been disabled, this means something else is + * resetting the VF, so we shouldn't continue. + */ if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) - return; + return false; i40e_trigger_vf_reset(vf, flr); @@ -1100,6 +1118,8 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) i40e_flush(hw); clear_bit(__I40E_VF_DISABLE, pf->state); + + return true; } /** @@ -1111,8 +1131,10 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr) * VF, then do all the waiting in one chunk, and finally finish restoring each * VF after the wait. This is useful during PF routines which need to reset * all VFs, as otherwise it must perform these resets in a serialized fashion. + * + * Returns true if any VFs were reset, and false otherwise. 
**/ -void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) +bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) { struct i40e_hw *hw = &pf->hw; struct i40e_vf *vf; @@ -1121,11 +1143,11 @@ void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) /* If we don't have any VFs, then there is nothing to reset */ if (!pf->num_alloc_vfs) - return; + return false; /* If VFs have been disabled, there is no need to reset */ if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) - return; + return false; /* Begin reset on all VFs at once */ for (v = 0; v < pf->num_alloc_vfs; v++) @@ -1200,6 +1222,8 @@ void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) i40e_flush(hw); clear_bit(__I40E_VF_DISABLE, pf->state); + + return true; } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 5111d05d5f2f..5ea42ad094bc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -122,8 +122,8 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs); int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen); int i40e_vc_process_vflr_event(struct i40e_pf *pf); -void i40e_reset_vf(struct i40e_vf *vf, bool flr); -void i40e_reset_all_vfs(struct i40e_pf *pf, bool flr); +bool i40e_reset_vf(struct i40e_vf *vf, bool flr); +bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr); void i40e_vc_notify_vf_reset(struct i40e_vf *vf); /* VF configuration related iplink handlers */ -- cgit v1.2.3 From 5b36e8d04b4439c9ceb814bfdfe1284737f9c632 Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Tue, 22 Aug 2017 06:57:50 -0400 Subject: i40evf: Enable VF to request an alternate queue allocation Currently the VF gets a default number of allocated queues from HW on init and it could choose to enable or disable those allocated queues. This makes it such that the VF can request more or less underlying allocated queues from the PF. First the VF negotiates the number of queues it wants that can be supported by the PF and if successful asks for a reset. During reset the PF will reallocate the HW queues for the VF and will then remap the new queues. 
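The negotiation itself reduces to a request/clamp/confirm exchange. A self-contained sketch of that control flow, with hypothetical names and limits:

#include <stdio.h>

#define PF_MAX_QUEUES_PER_VF 4    /* hypothetical limit */

/* PF side: grant what was asked for, clamped to what it can support. */
static int pf_handle_queue_request(int requested)
{
    return requested > PF_MAX_QUEUES_PER_VF ? PF_MAX_QUEUES_PER_VF : requested;
}

/* VF side: only a confirmed grant is worth a disruptive reset. */
static void vf_request_queues(int requested)
{
    int granted = pf_handle_queue_request(requested);

    if (granted == requested)
        printf("got %d queues, scheduling reset to remap them\n", granted);
    else
        printf("asked for %d, PF can support %d, keeping old config\n",
               requested, granted);
}

int main(void)
{
    vf_request_queues(3);    /* within limits: negotiate, then reset */
    vf_request_queues(8);    /* over the limit: refused, no reset */
    return 0;
}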
Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf.h | 4 + drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 38 ++++++++- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 94 ++++++++++++++++++++-- .../net/ethernet/intel/i40evf/i40evf_virtchnl.c | 44 +++++++++- 4 files changed, 173 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 82f69031e5cd..5982362c5643 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -102,6 +102,7 @@ struct i40e_vsi { #define I40E_TX_CTXTDESC(R, i) \ (&(((struct i40e_tx_context_desc *)((R)->desc))[i])) #define MAX_QUEUES 16 +#define I40EVF_MAX_REQ_QUEUES 4 #define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4) #define I40EVF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4) @@ -200,6 +201,7 @@ struct i40evf_adapter { struct list_head vlan_filter_list; char misc_vector_name[IFNAMSIZ + 9]; int num_active_queues; + int num_req_queues; /* TX */ struct i40e_ring *tx_rings; @@ -235,6 +237,7 @@ struct i40evf_adapter { #define I40EVF_FLAG_PROMISC_ON BIT(18) #define I40EVF_FLAG_ALLMULTI_ON BIT(19) #define I40EVF_FLAG_LEGACY_RX BIT(20) +#define I40EVF_FLAG_REINIT_ITR_NEEDED BIT(21) /* duplicates for common code */ #define I40E_FLAG_DCB_ENABLED 0 #define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED @@ -349,6 +352,7 @@ void i40evf_deconfigure_queues(struct i40evf_adapter *adapter); void i40evf_enable_queues(struct i40evf_adapter *adapter); void i40evf_disable_queues(struct i40evf_adapter *adapter); void i40evf_map_queues(struct i40evf_adapter *adapter); +int i40evf_request_queues(struct i40evf_adapter *adapter, int num); void i40evf_add_ether_addrs(struct i40evf_adapter *adapter); void i40evf_del_ether_addrs(struct i40evf_adapter *adapter); void i40evf_add_vlans(struct i40evf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index 65874d6b3ab9..da006fa3fec1 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -669,7 +669,7 @@ static void i40evf_get_channels(struct net_device *netdev, struct i40evf_adapter *adapter = netdev_priv(netdev); /* Report maximum channels */ - ch->max_combined = adapter->num_active_queues; + ch->max_combined = I40EVF_MAX_REQ_QUEUES; ch->max_other = NONQ_VECS; ch->other_count = NONQ_VECS; @@ -677,6 +677,41 @@ static void i40evf_get_channels(struct net_device *netdev, ch->combined_count = adapter->num_active_queues; } +/** + * i40evf_set_channels: set the new channel count + * @netdev: network interface device structure + * @ch: channel information structure + * + * Negotiate a new number of channels with the PF then do a reset. During + * reset we'll realloc queues and fix the RSS table. Returns 0 on success, + * negative on failure. 
+ **/ +static int i40evf_set_channels(struct net_device *netdev, + struct ethtool_channels *ch) +{ + struct i40evf_adapter *adapter = netdev_priv(netdev); + int num_req = ch->combined_count; + + if (num_req != adapter->num_active_queues && + !(adapter->vf_res->vf_cap_flags & + VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) { + dev_info(&adapter->pdev->dev, "PF is not capable of queue negotiation.\n"); + return -EINVAL; + } + + /* All of these should have already been checked by ethtool before this + * even gets to us, but just to be sure. + */ + if (num_req <= 0 || num_req > I40EVF_MAX_REQ_QUEUES) + return -EINVAL; + + if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS) + return -EINVAL; + + adapter->num_req_queues = num_req; + return i40evf_request_queues(adapter, num_req); +} + /** * i40evf_get_rxfh_key_size - get the RSS hash key size * @netdev: network interface device structure @@ -785,6 +820,7 @@ static const struct ethtool_ops i40evf_ethtool_ops = { .get_rxfh = i40evf_get_rxfh, .set_rxfh = i40evf_set_rxfh, .get_channels = i40evf_get_channels, + .set_channels = i40evf_set_channels, .get_rxfh_key_size = i40evf_get_rxfh_key_size, .get_link_ksettings = i40evf_get_link_ksettings, }; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 69ef6c1d5364..8c513ce84345 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1189,9 +1189,18 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) { int i, num_active_queues; - num_active_queues = min_t(int, - adapter->vsi_res->num_queue_pairs, - (int)(num_online_cpus())); + /* If we're in reset reallocating queues we don't actually know yet for + * certain the PF gave us the number of queues we asked for but we'll + * assume it did. Once basic reset is finished we'll confirm once we + * start negotiating config with PF. 
+ */ + if (adapter->num_req_queues) + num_active_queues = adapter->num_req_queues; + else + num_active_queues = min_t(int, + adapter->vsi_res->num_queue_pairs, + (int)(num_online_cpus())); + adapter->tx_rings = kcalloc(num_active_queues, sizeof(struct i40e_ring), GFP_KERNEL); @@ -1539,6 +1548,48 @@ static void i40evf_free_rss(struct i40evf_adapter *adapter) adapter->rss_lut = NULL; } +/** + * i40evf_reinit_interrupt_scheme - Reallocate queues and vectors + * @adapter: board private structure + * + * Returns 0 on success, negative on failure + **/ +static int i40evf_reinit_interrupt_scheme(struct i40evf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + if (netif_running(netdev)) + i40evf_free_traffic_irqs(adapter); + i40evf_free_misc_irq(adapter); + i40evf_reset_interrupt_capability(adapter); + i40evf_free_q_vectors(adapter); + i40evf_free_queues(adapter); + + err = i40evf_init_interrupt_scheme(adapter); + if (err) + goto err; + + netif_tx_stop_all_queues(netdev); + + err = i40evf_request_misc_irq(adapter); + if (err) + goto err; + + set_bit(__I40E_VSI_DOWN, adapter->vsi.state); + + err = i40evf_map_rings_to_vectors(adapter); + if (err) + goto err; + + if (RSS_AQ(adapter)) + adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS; + else + err = i40evf_init_rss(adapter); +err: + return err; +} + /** * i40evf_watchdog_timer - Periodic call-back timer * @data: pointer to adapter disguised as unsigned long @@ -1885,8 +1936,15 @@ continue_reset: if (err) dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n", err); + adapter->aq_required = 0; - adapter->aq_required = I40EVF_FLAG_AQ_GET_CONFIG; + if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) { + err = i40evf_reinit_interrupt_scheme(adapter); + if (err) + goto reset_err; + } + + adapter->aq_required |= I40EVF_FLAG_AQ_GET_CONFIG; adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; /* re-add all MAC filters */ @@ -1916,6 +1974,15 @@ continue_reset: if (err) goto reset_err; + if (adapter->flags & I40EVF_FLAG_REINIT_ITR_NEEDED) { + err = i40evf_request_traffic_irqs(adapter, + netdev->name); + if (err) + goto reset_err; + + adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; + } + i40evf_configure(adapter); i40evf_up_complete(adapter); @@ -2431,9 +2498,9 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw) int i40evf_process_config(struct i40evf_adapter *adapter) { struct virtchnl_vf_resource *vfres = adapter->vf_res; + int i, num_req_queues = adapter->num_req_queues; struct net_device *netdev = adapter->netdev; struct i40e_vsi *vsi = &adapter->vsi; - int i; netdev_features_t hw_enc_features; netdev_features_t hw_features; @@ -2447,6 +2514,23 @@ int i40evf_process_config(struct i40evf_adapter *adapter) return -ENODEV; } + if (num_req_queues && + num_req_queues != adapter->vsi_res->num_queue_pairs) { + /* Problem. The PF gave us fewer queues than what we had + * negotiated in our request. Need a reset to see if we can't + * get back to a working state. 
+ */ + dev_err(&adapter->pdev->dev, + "Requested %d queues, but PF only gave us %d.\n", + num_req_queues, + adapter->vsi_res->num_queue_pairs); + adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; + adapter->num_req_queues = adapter->vsi_res->num_queue_pairs; + i40evf_schedule_reset(adapter); + return -ENODEV; + } + adapter->num_req_queues = 0; + hw_enc_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 2bb0fe00361f..2bb81c39d85f 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -160,7 +160,8 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 | VIRTCHNL_VF_OFFLOAD_ENCAP | - VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM; + VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM | + VIRTCHNL_VF_OFFLOAD_REQ_QUEUES; adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES; adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; @@ -384,6 +385,32 @@ void i40evf_map_queues(struct i40evf_adapter *adapter) kfree(vimi); } +/** + * i40evf_request_queues + * @adapter: adapter structure + * @num: number of requested queues + * + * We get a default number of queues from the PF. This enables us to request a + * different number. Returns 0 on success, negative on failure + **/ +int i40evf_request_queues(struct i40evf_adapter *adapter, int num) +{ + struct virtchnl_vf_res_request vfres; + + if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { + /* bail because we already have a command pending */ + dev_err(&adapter->pdev->dev, "Cannot request queues, command %d pending\n", + adapter->current_op); + return -EBUSY; + } + + vfres.num_queue_pairs = num; + + adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES; + return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES, + (u8 *)&vfres, sizeof(vfres)); +} + /** * i40evf_add_ether_addrs * @adapter: adapter structure @@ -1068,6 +1095,21 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, "Invalid message %d from PF\n", v_opcode); } break; + case VIRTCHNL_OP_REQUEST_QUEUES: { + struct virtchnl_vf_res_request *vfres = + (struct virtchnl_vf_res_request *)msg; + if (vfres->num_queue_pairs == adapter->num_req_queues) { + adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; + i40evf_schedule_reset(adapter); + } else { + dev_info(&adapter->pdev->dev, + "Requested %d queues, PF can support %d\n", + adapter->num_req_queues, + vfres->num_queue_pairs); + adapter->num_req_queues = 0; + } + } + break; default: if (adapter->current_op && (v_opcode != adapter->current_op)) dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n", -- cgit v1.2.3 From 1b7b7596aeebc21913bad49eb6a2c364c4b2988a Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Tue, 22 Aug 2017 06:57:51 -0400 Subject: i40e: make i40evf_map_rings_to_vectors void This function cannot fail, so why is it returning a value? And why are we checking it? Why shouldn't we just make it void? Why is this commit message made up of only questions? 
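For the record, the loop in question just walks the rings and assigns each one a vector round-robin, with no failure path anywhere. A standalone sketch of the idea, not the driver code itself:

#include <stdio.h>

/* Assign each ring a vector in round-robin order. Every ring lands
 * somewhere and nothing can fail, so void is the honest return type. */
static void map_rings_to_vectors(int rings, int vectors)
{
    int ridx, vidx = 0;

    for (ridx = 0; ridx < rings; ridx++) {
        printf("ring %d -> vector %d\n", ridx, vidx);
        if (++vidx >= vectors)
            vidx = 0;
    }
}

int main(void)
{
    map_rings_to_vectors(10, 4);    /* more rings than vectors is fine */
    return 0;
}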
Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 8c513ce84345..f2f1e754c2ce 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -430,12 +430,11 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) * group the rings as "efficiently" as possible. You would add new * mapping configurations in here. **/ -static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) +static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) { int rings_remaining = adapter->num_active_queues; int ridx = 0, vidx = 0; int q_vectors; - int err = 0; q_vectors = adapter->num_msix_vectors - NONQ_VECS; @@ -451,8 +450,6 @@ static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter) } adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; - - return err; } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -1578,9 +1575,7 @@ static int i40evf_reinit_interrupt_scheme(struct i40evf_adapter *adapter) set_bit(__I40E_VSI_DOWN, adapter->vsi.state); - err = i40evf_map_rings_to_vectors(adapter); - if (err) - goto err; + i40evf_map_rings_to_vectors(adapter); if (RSS_AQ(adapter)) adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS; -- cgit v1.2.3 From 41d0a4d0c8b144e44d92ea95e975d2434748d806 Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Tue, 22 Aug 2017 06:57:52 -0400 Subject: i40e: fix handling of vf_states variable Currently we inappropriately clear the vf_states variable with a null assignment. This is problematic because we should be using atomic bitops on this variable and we don't actually want to clear all the flags. We should just clear the ones we know we want to clear. Additionally remove the I40E_VF_STATE_FCOEENA bit because it is no longer being used. 
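The difference between the two patterns, shown as a C11 userspace analogue (flag names are hypothetical; the driver itself uses clear_bit()):

#include <stdatomic.h>
#include <stdio.h>

#define VF_STATE_ACTIVE     (1UL << 0)
#define VF_STATE_MC_PROMISC (1UL << 1)
#define VF_STATE_UC_PROMISC (1UL << 2)

int main(void)
{
    atomic_ulong vf_states = VF_STATE_ACTIVE | VF_STATE_MC_PROMISC;

    /* The buggy pattern, "vf_states = 0;", wipes every flag at once,
     * including bits other code still relies on. Instead, atomically
     * clear only the bits this path owns: */
    atomic_fetch_and(&vf_states,
                     ~(VF_STATE_MC_PROMISC | VF_STATE_UC_PROMISC));

    printf("states now 0x%lx (ACTIVE preserved)\n",
           (unsigned long)atomic_load(&vf_states));
    return 0;
}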
Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 5 ++++- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 1 - 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 7742cf3d38d9..989a65d60ac9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -879,7 +879,8 @@ static void i40e_free_vf_res(struct i40e_vf *vf) } /* reset some of the state variables keeping track of the resources */ vf->num_queue_pairs = 0; - vf->vf_states = 0; + clear_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states); + clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states); } /** @@ -1586,6 +1587,8 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) { vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP; set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states); + } else { + clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states); } if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 5ea42ad094bc..5efc4f92bb37 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -56,7 +56,6 @@ enum i40e_vf_states { I40E_VF_STATE_INIT = 0, I40E_VF_STATE_ACTIVE, I40E_VF_STATE_IWARPENA, - I40E_VF_STATE_FCOEENA, I40E_VF_STATE_DISABLED, I40E_VF_STATE_MC_PROMISC, I40E_VF_STATE_UC_PROMISC, -- cgit v1.2.3 From c53d11f669c0e7d0daf46a717b6712ad0b09de99 Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Tue, 22 Aug 2017 06:57:53 -0400 Subject: i40e: fix client notify of VF reset Currently there is a bug in which the PF driver fails to inform clients of a VF reset which then causes clients to leak resources. The bug exists because we were incorrectly checking the I40E_VF_STATE_PRE_ENABLE bit. When a VF is first init we go through a reset to initialize variables and allocate resources but we don't want to inform clients of this first reset since the client isn't fully enabled yet so we set a state bit signifying we're in a "pre-enabled" client state. During the first reset we should be clearing the bit, allowing all following resets to notify the client of the reset when the bit is not set. This patch fixes the issue by negating the 'test_and_clear_bit' check to accurately reflect the behavior we want. 
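The semantics hinge on test_and_clear_bit() returning the bit's old value. A C11 analogue of the corrected check, with hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define VF_STATE_PRE_ENABLE (1UL << 3)

static atomic_ulong vf_states = VF_STATE_PRE_ENABLE;    /* set at VF init */

/* Analogue of test_and_clear_bit(): clear the bit, return its old value. */
static bool test_and_clear(atomic_ulong *word, unsigned long bit)
{
    return atomic_fetch_and(word, ~bit) & bit;
}

int main(void)
{
    int reset;

    for (reset = 1; reset <= 2; reset++) {
        /* Notify only when PRE_ENABLE was *not* set: the first (init)
         * reset consumes the bit quietly; every later reset notifies. */
        if (!test_and_clear(&vf_states, VF_STATE_PRE_ENABLE))
            printf("reset %d: notify client\n", reset);
        else
            printf("reset %d: init reset, stay quiet\n", reset);
    }
    return 0;
}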
Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 989a65d60ac9..04568137e029 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1050,8 +1050,8 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf) set_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states); clear_bit(I40E_VF_STATE_DISABLED, &vf->vf_states); /* Do not notify the client during VF init */ - if (test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE, - &vf->vf_states)) + if (!test_and_clear_bit(I40E_VF_STATE_PRE_ENABLE, + &vf->vf_states)) i40e_notify_client_of_vf_reset(pf, abs_vf_id); vf->num_vlan = 0; } -- cgit v1.2.3 From ab243ec9401d164531cc9bc07fb32231d72d1280 Mon Sep 17 00:00:00 2001 From: Scott Peterson Date: Tue, 22 Aug 2017 06:57:54 -0400 Subject: i40e: Stop dropping 802.1ad tags - eth proto 0x88a8 Enable i40e to pass traffic with VLAN tags using the 802.1ad ethernet protocol ID (0x88a8). This requires NIC firmware providing version 1.7 of the API. With older NIC firmware 802.1ad tagged packets will continue to be dropped. No VLAN offloads nor RSS are supported for 802.1ad VLANs. Signed-off-by: Scott Peterson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 6 ++++++ drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 17 ++++++++++++++++- drivers/net/ethernet/intel/i40e/i40e_common.c | 6 +++++- drivers/net/ethernet/intel/i40e/i40e_main.c | 7 +++++++ drivers/net/ethernet/intel/i40e/i40e_type.h | 6 ++++++ drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 17 ++++++++++++++++- drivers/net/ethernet/intel/i40evf/i40e_type.h | 6 ++++++ 7 files changed, 62 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 08f63226105a..9dcb2a961197 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -613,6 +613,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; } + /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */ + if (hw->aq.api_maj_ver > 1 || + (hw->aq.api_maj_ver == 1 && + hw->aq.api_min_ver >= 7)) + hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE; + if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) { ret_code = I40E_ERR_FIRMWARE_API_VERSION; goto init_adminq_free_arq; diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index ed7bbe14bc6e..4c85ea9cd89a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -775,7 +775,22 @@ struct i40e_aqc_set_switch_config { #define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 #define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 __le16 valid_flags; - u8 reserved[12]; + /* The ethertype in switch_tag is dropped on ingress and used + * internally by the switch. Set this to zero for the default + * of 0x88a8 (802.1ad). Should be zero for firmware API + * versions lower than 1.7. 
+ */ + __le16 switch_tag; + /* The ethertypes in first_tag and second_tag are used to + * match the outer and inner VLAN tags (respectively) when HW + * double VLAN tagging is enabled via the set port parameters + * AQ command. Otherwise these are both ignored. Set them to + * zero for their defaults of 0x8100 (802.1Q). Should be zero + * for firmware API versions lower than 1.7. + */ + __le16 first_tag; + __le16 second_tag; + u8 reserved[6]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config); diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index a4838779de5d..60542beda7ad 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -2402,7 +2402,11 @@ enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, i40e_aqc_opc_set_switch_config); scfg->flags = cpu_to_le16(flags); scfg->valid_flags = cpu_to_le16(valid_flags); - + if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) { + scfg->switch_tag = cpu_to_le16(hw->switch_tag); + scfg->first_tag = cpu_to_le16(hw->first_tag); + scfg->second_tag = cpu_to_le16(hw->second_tag); + } status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); return status; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 387f0863f794..3f9e89b054ec 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -11361,6 +11361,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->bus.bus_id = pdev->bus->number; pf->instance = pfs_found; + /* Select something other than the 802.1ad ethertype for the + * switch to use internally and drop on ingress. + */ + hw->switch_tag = 0xffff; + hw->first_tag = ETH_P_8021AD; + hw->second_tag = ETH_P_8021Q; + INIT_LIST_HEAD(&pf->l3_flex_pit_list); INIT_LIST_HEAD(&pf->l4_flex_pit_list); diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 8b0b9f826b7f..4b32b1d38a66 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -610,9 +610,15 @@ struct i40e_hw { struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ #define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0) +#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1) #define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2) u64 flags; + /* Used in set switch config AQ command */ + u16 switch_tag; + u16 first_tag; + u16 second_tag; + /* debug mask */ u32 debug_mask; char err_str[16]; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index eee7ece42b39..ed5602f4bbcd 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -771,7 +771,22 @@ struct i40e_aqc_set_switch_config { #define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 #define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 __le16 valid_flags; - u8 reserved[12]; + /* The ethertype in switch_tag is dropped on ingress and used + * internally by the switch. Set this to zero for the default + * of 0x88a8 (802.1ad). Should be zero for firmware API + * versions lower than 1.7. + */ + __le16 switch_tag; + /* The ethertypes in first_tag and second_tag are used to + * match the outer and inner VLAN tags (respectively) when HW + * double VLAN tagging is enabled via the set port parameters + * AQ command. Otherwise these are both ignored. 
Set them to + * zero for their defaults of 0x8100 (802.1Q). Should be zero + * for firmware API versions lower than 1.7. + */ + __le16 first_tag; + __le16 second_tag; + u8 reserved[6]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 48eacf5e73e4..9364b67fff9c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -568,6 +568,7 @@ struct i40e_hw { /* LLDP/DCBX Status */ u16 dcbx_status; +#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1) #define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2) /* DCBX info */ @@ -575,6 +576,11 @@ struct i40e_hw { struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */ struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ + /* Used in set switch config AQ command */ + u16 switch_tag; + u16 first_tag; + u16 second_tag; + /* debug mask */ u32 debug_mask; char err_str[16]; -- cgit v1.2.3 From 0b40f457488d966878eec413a91f27d9b21e6ce5 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 10 Jul 2017 13:23:14 -0700 Subject: fm10k: prepare_for_reset() when we lose PCIe Link If we lose PCIe link, such as when an unannounced PFLR event occurs, or when a device is surprise removed, we currently detach the device and close the netdev. This unfortunately leaves a lot of things still active, such as the msix_mbx_pf IRQ, and Tx/Rx resources. This can cause problems because the register reads will return potentially invalid values which may result in unknown driver behavior. Begin the process of resetting using fm10k_prepare_for_reset(), much in the same way as the suspend and resume cycle does. This will attempt to shutdown as much as possible, in order to prevent possible issues. A naive implementation for this has issues, because there are now multiple flows calling the reset logic and setting a reset bit. This would cause problems, because the "re-attach" routine might call fm10k_handle_reset() prior to the reset actually finishing. Instead, we'll add state bits to indicate which flow actually initiated the reset. For the general reset flow, we'll assume that if someone else is resetting that we do not need to handle it at all, so it does not need its own state bit. For the suspend case, we will simply issue a warning indicating that we are attempting to recover from this case when resuming. For the detached subtask, we'll simply refuse to re-attach until we've actually initiated a reset as part of that flow. Finally, we'll stop attempting to manage the mailbox subtask when we're detached, since there's nothing we can do if we don't have a PCIe address. Overall this produces a much cleaner shutdown and recovery cycle for a PCIe surprise remove event. 
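At the heart of this is a test-and-set gate that lets exactly one flow own the reset, plus a state bit recording which flow that was. A C11 sketch of the idea, with made-up flag names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define F_RESETTING      (1UL << 0)
#define F_RESET_DETACHED (1UL << 1)    /* records which flow started it */

static atomic_ulong state;

/* Returns true only for the flow that actually started the reset. */
static bool prepare_for_reset(void)
{
    if (atomic_fetch_or(&state, F_RESETTING) & F_RESETTING)
        return false;    /* someone else is already resetting */
    /* ...shut down IRQs, queues and timers here... */
    return true;
}

int main(void)
{
    /* Detach path: remember that we were the ones who initiated it. */
    if (prepare_for_reset())
        atomic_fetch_or(&state, F_RESET_DETACHED);

    /* A racing reset request now backs off instead of double-resetting. */
    if (!prepare_for_reset())
        printf("reset already in progress, ignoring this request\n");
    return 0;
}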
Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k.h | 2 + drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 103 ++++++++++++++++++++------- 2 files changed, 79 insertions(+), 26 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 689c413b7782..ba70c58ca920 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -270,6 +270,8 @@ enum fm10k_flags_t { enum fm10k_state_t { __FM10K_RESETTING, + __FM10K_RESET_DETACHED, + __FM10K_RESET_SUSPENDED, __FM10K_DOWN, __FM10K_SERVICE_SCHED, __FM10K_SERVICE_REQUEST, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 9575f7c1862d..4e5e3e64beda 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -153,7 +153,15 @@ static void fm10k_service_timer(unsigned long data) fm10k_service_event_schedule(interface); } -static void fm10k_prepare_for_reset(struct fm10k_intfc *interface) +/** + * fm10k_prepare_for_reset - Prepare the driver and device for a pending reset + * @interface: fm10k private data structure + * + * This function prepares for a device reset by shutting as much down as we + * can. It does nothing and returns false if __FM10K_RESETTING was already set + * prior to calling this function. It returns true if it actually did work. + */ +static bool fm10k_prepare_for_reset(struct fm10k_intfc *interface) { struct net_device *netdev = interface->netdev; @@ -162,8 +170,9 @@ static void fm10k_prepare_for_reset(struct fm10k_intfc *interface) /* put off any impending NetWatchDogTimeout */ netif_trans_update(netdev); - while (test_and_set_bit(__FM10K_RESETTING, interface->state)) - usleep_range(1000, 2000); + /* Nothing to do if a reset is already in progress */ + if (test_and_set_bit(__FM10K_RESETTING, interface->state)) + return false; rtnl_lock(); @@ -181,6 +190,8 @@ static void fm10k_prepare_for_reset(struct fm10k_intfc *interface) interface->last_reset = jiffies + (10 * HZ); rtnl_unlock(); + + return true; } static int fm10k_handle_reset(struct fm10k_intfc *interface) @@ -189,6 +200,8 @@ static int fm10k_handle_reset(struct fm10k_intfc *interface) struct fm10k_hw *hw = &interface->hw; int err; + WARN_ON(!test_bit(__FM10K_RESETTING, interface->state)); + rtnl_lock(); pci_set_master(interface->pdev); @@ -267,51 +280,75 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface) struct net_device *netdev = interface->netdev; u32 __iomem *hw_addr; u32 value; + int err; - /* do nothing if device is still present or hw_addr is set */ + /* do nothing if netdev is still present or hw_addr is set */ if (netif_device_present(netdev) || interface->hw.hw_addr) return; + /* We've lost the PCIe register space, and can no longer access the + * device. Shut everything except the detach subtask down and prepare + * to reset the device in case we recover. If we actually prepare for + * reset, indicate that we're detached. + */ + if (fm10k_prepare_for_reset(interface)) + set_bit(__FM10K_RESET_DETACHED, interface->state); + /* check the real address space to see if we've recovered */ hw_addr = READ_ONCE(interface->uc_addr); value = readl(hw_addr); if (~value) { + /* Make sure the reset was initiated because we detached, + * otherwise we might race with a different reset flow. 
+ */ + if (!test_and_clear_bit(__FM10K_RESET_DETACHED, + interface->state)) + return; + + /* Restore the hardware address */ interface->hw.hw_addr = interface->uc_addr; + + /* PCIe link has been restored, and the device is active + * again. Restore everything and reset the device. + */ + err = fm10k_handle_reset(interface); + if (err) { + netdev_err(netdev, "Unable to reset device: %d\n", err); + interface->hw.hw_addr = NULL; + return; + } + + /* Re-attach the netdev */ netif_device_attach(netdev); - set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags); netdev_warn(netdev, "PCIe link restored, device now attached\n"); return; } - - rtnl_lock(); - - if (netif_running(netdev)) - dev_close(netdev); - - rtnl_unlock(); } -static void fm10k_reinit(struct fm10k_intfc *interface) +static void fm10k_reset_subtask(struct fm10k_intfc *interface) { int err; - fm10k_prepare_for_reset(interface); - - err = fm10k_handle_reset(interface); - if (err) - dev_err(&interface->pdev->dev, - "fm10k_handle_reset failed: %d\n", err); -} - -static void fm10k_reset_subtask(struct fm10k_intfc *interface) -{ if (!test_and_clear_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags)) return; + /* If another thread has already prepared to reset the device, we + * should not attempt to handle a reset here, since we'd race with + * that thread. This may happen if we suspend the device or if the + * PCIe link is lost. In this case, we'll just ignore the RESET + * request, as it will (eventually) be taken care of when the thread + * which actually started the reset is finished. + */ + if (!fm10k_prepare_for_reset(interface)) + return; + netdev_err(interface->netdev, "Reset interface\n"); - fm10k_reinit(interface); + err = fm10k_handle_reset(interface); + if (err) + dev_err(&interface->pdev->dev, + "fm10k_handle_reset failed: %d\n", err); } /** @@ -381,6 +418,10 @@ static void fm10k_watchdog_update_host_state(struct fm10k_intfc *interface) **/ static void fm10k_mbx_subtask(struct fm10k_intfc *interface) { + /* If we're resetting, bail out */ + if (test_bit(__FM10K_RESETTING, interface->state)) + return; + /* process upstream mailbox and update device state */ fm10k_watchdog_update_host_state(interface); @@ -630,9 +671,11 @@ static void fm10k_service_task(struct work_struct *work) interface = container_of(work, struct fm10k_intfc, service_task); + /* Check whether we're detached first */ + fm10k_detach_subtask(interface); + /* tasks run even when interface is down */ fm10k_mbx_subtask(interface); - fm10k_detach_subtask(interface); fm10k_reset_subtask(interface); /* tasks only run when interface is up */ @@ -2177,7 +2220,8 @@ static void fm10k_prepare_suspend(struct fm10k_intfc *interface) */ fm10k_stop_service_event(interface); - fm10k_prepare_for_reset(interface); + if (fm10k_prepare_for_reset(interface)) + set_bit(__FM10K_RESET_SUSPENDED, interface->state); } static int fm10k_handle_resume(struct fm10k_intfc *interface) @@ -2185,6 +2229,13 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface) struct fm10k_hw *hw = &interface->hw; int err; + /* Even if we didn't properly prepare for reset in + * fm10k_prepare_suspend, we'll attempt to resume anyways. + */ + if (!test_and_clear_bit(__FM10K_RESET_SUSPENDED, interface->state)) + dev_warn(&interface->pdev->dev, + "Device was shut down as part of suspend... 
Attempting to recover\n"); + /* reset statistics starting values */ hw->mac.ops.rebind_hw_stats(hw, &interface->stats); -- cgit v1.2.3 From b4fcd43661df0d84cc4e030ab7a26533114889b9 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 10 Jul 2017 13:23:15 -0700 Subject: fm10k: use spinlock to implement mailbox lock Let's not re-invent the locking wheel. Remove our bitlock and use a proper spinlock instead. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k.h | 15 +++++---------- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 3 +++ 2 files changed, 8 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index ba70c58ca920..74542e9d63c7 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -276,7 +276,6 @@ enum fm10k_state_t { __FM10K_SERVICE_SCHED, __FM10K_SERVICE_REQUEST, __FM10K_SERVICE_DISABLE, - __FM10K_MBX_LOCK, __FM10K_LINK_DOWN, __FM10K_UPDATING_STATS, /* This value must be last and determines the BITMAP size */ @@ -346,6 +345,8 @@ struct fm10k_intfc { struct fm10k_hw_stats stats; struct fm10k_hw hw; + /* Mailbox lock */ + spinlock_t mbx_lock; u32 __iomem *uc_addr; u32 __iomem *sw_addr; u16 msg_enable; @@ -386,23 +387,17 @@ struct fm10k_intfc { static inline void fm10k_mbx_lock(struct fm10k_intfc *interface) { - /* busy loop if we cannot obtain the lock as some calls - * such as ndo_set_rx_mode may be made in atomic context - */ - while (test_and_set_bit(__FM10K_MBX_LOCK, interface->state)) - udelay(20); + spin_lock(&interface->mbx_lock); } static inline void fm10k_mbx_unlock(struct fm10k_intfc *interface) { - /* flush memory to make sure state is correct */ - smp_mb__before_atomic(); - clear_bit(__FM10K_MBX_LOCK, interface->state); + spin_unlock(&interface->mbx_lock); } static inline int fm10k_mbx_trylock(struct fm10k_intfc *interface) { - return !test_and_set_bit(__FM10K_MBX_LOCK, interface->state); + return spin_trylock(&interface->mbx_lock); } /* fm10k_test_staterr - test bits in Rx descriptor status and error fields */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 4e5e3e64beda..240772ad5d69 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -1921,6 +1921,9 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, netdev_rss_key_fill(rss_key, sizeof(rss_key)); memcpy(interface->rssrk, rss_key, sizeof(rss_key)); + /* Initialize the mailbox lock */ + spin_lock_init(&interface->mbx_lock); + /* Start off interface as being down */ set_bit(__FM10K_DOWN, interface->state); set_bit(__FM10K_UPDATING_STATS, interface->state); -- cgit v1.2.3 From 8249c47c6ba48cd3eba7c3ca7f8e733ee815c39b Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 10 Jul 2017 13:23:16 -0700 Subject: fm10k: use generic PM hooks instead of legacy PCIe power hooks Replace the PCI-specific legacy power management hooks with the new generic power management hooks which work properly for both suspend and hibernate. The new generic system is better and properly handles the lower-level PCIe power management rather than forcing the driver to handle it.
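The gain is a division of labor: the driver supplies only device-specific callbacks while the PM core owns the lifecycle boilerplate. A toy illustration of that split (plain C, hypothetical names; not the kernel's actual dev_pm_ops machinery):

#include <stdio.h>

struct pm_callbacks {
    void (*suspend)(void);
    void (*resume)(void);
};

/* The driver supplies only the device-specific work... */
static void drv_suspend(void) { puts("drv: detach netdev, quiesce queues"); }
static void drv_resume(void)  { puts("drv: restore queues, reattach netdev"); }

static const struct pm_callbacks drv_pm = { drv_suspend, drv_resume };

/* ...while the core owns the boilerplate every device needs, such as
 * the power-state transitions the legacy hooks had to open-code. */
static void core_suspend(const struct pm_callbacks *ops)
{
    ops->suspend();
    puts("core: save PCI config, drop to low-power state");
}

static void core_resume(const struct pm_callbacks *ops)
{
    puts("core: restore power and PCI config");
    ops->resume();
}

int main(void)
{
    core_suspend(&drv_pm);
    core_resume(&drv_pm);
    return 0;
}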
Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 67 +++++++++------------------- 1 file changed, 22 insertions(+), 45 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 240772ad5d69..aef39909e4a2 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -2264,36 +2264,19 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface) #ifdef CONFIG_PM /** - * fm10k_resume - Restore device to pre-sleep state - * @pdev: PCI device information struct + * fm10k_resume - Generic PM resume hook + * @dev: generic device structure * - * fm10k_resume is called after the system has powered back up from a sleep - * state and is ready to resume operation. This function is meant to restore - * the device back to its pre-sleep state. + * Generic PM hook used when waking the device from a low power state after + * suspend or hibernation. This function does not need to handle lower PCIe + * device state as the stack takes care of that for us. **/ -static int fm10k_resume(struct pci_dev *pdev) +static int fm10k_resume(struct device *dev) { - struct fm10k_intfc *interface = pci_get_drvdata(pdev); + struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev)); struct net_device *netdev = interface->netdev; struct fm10k_hw *hw = &interface->hw; - u32 err; - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - - /* pci_restore_state clears dev->state_saved so call - * pci_save_state to restore it. - */ - pci_save_state(pdev); - - err = pci_enable_device_mem(pdev); - if (err) { - dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); - return err; - } - pci_set_master(pdev); - - pci_wake_from_d3(pdev, false); + int err; /* refresh hw_addr in case it was dropped */ hw->hw_addr = interface->uc_addr; @@ -2308,36 +2291,27 @@ static int fm10k_resume(struct pci_dev *pdev) } /** - * fm10k_suspend - Prepare the device for a system sleep state - * @pdev: PCI device information struct + * fm10k_suspend - Generic PM suspend hook + * @dev: generic device structure * - * fm10k_suspend is meant to shutdown the device prior to the system entering - * a sleep state. The fm10k hardware does not support wake on lan so the - * driver simply needs to shut down the device so it is in a low power state. + * Generic PM hook used when setting the device into a low power state for + * system suspend or hibernation. This function does not need to handle lower + * PCIe device state as the stack takes care of that for us. 
**/ -static int fm10k_suspend(struct pci_dev *pdev, - pm_message_t __always_unused state) +static int fm10k_suspend(struct device *dev) { - struct fm10k_intfc *interface = pci_get_drvdata(pdev); + struct fm10k_intfc *interface = pci_get_drvdata(to_pci_dev(dev)); struct net_device *netdev = interface->netdev; - int err = 0; netif_device_detach(netdev); fm10k_prepare_suspend(interface); - err = pci_save_state(pdev); - if (err) - return err; - - pci_disable_device(pdev); - pci_wake_from_d3(pdev, false); - pci_set_power_state(pdev, PCI_D3hot); - return 0; } #endif /* CONFIG_PM */ + /** * fm10k_io_error_detected - called when PCI error is detected * @pdev: Pointer to PCI device @@ -2447,15 +2421,18 @@ static const struct pci_error_handlers fm10k_err_handler = { .reset_done = fm10k_io_reset_done, }; +static SIMPLE_DEV_PM_OPS(fm10k_pm_ops, fm10k_suspend, fm10k_resume); + static struct pci_driver fm10k_driver = { .name = fm10k_driver_name, .id_table = fm10k_pci_tbl, .probe = fm10k_probe, .remove = fm10k_remove, #ifdef CONFIG_PM - .suspend = fm10k_suspend, - .resume = fm10k_resume, -#endif + .driver = { + .pm = &fm10k_pm_ops, + }, +#endif /* CONFIG_PM */ .sriov_configure = fm10k_iov_configure, .err_handler = &fm10k_err_handler }; -- cgit v1.2.3 From fc9173682dcf73cfe3324267424ef17e854bb444 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 10 Jul 2017 13:23:17 -0700 Subject: fm10k: introduce a message queue for MAC/VLAN messages Under some circumstances, when dealing with a large number of MAC address or VLAN updates at once, the fm10k driver, particularly the VFs can overload the mailbox with too many messages at once. This results in a mailbox timeout, which causes the driver to initiate a reset. During the reset, we re-send all the same messages that originally caused the timeout. This results in a cycle of resets each triggering a future reset. To fix or avoid this, we introduce a workqueue item which monitors a queue of MAC and VLAN requests. These requests are queued to the end of the list, and we process as a FIFO periodically. Initially we only handle requests for the netdev, but we do handle unicast MAC addresses, multicast MAC addresses, and update VLAN requests. A future patch will add support to use this queue for handling MAC update requests from the VF<->PF mailbox. The MAC/VLAN work item will keep checking to make sure that each request does not overflow the mailbox and cause a timeout. If it might, then the work item will reschedule itself a short time later. This avoids any reset cycle, since we never send the message if the mailbox is not ready. As an alternative, we tried increasing the mailbox message FIFO, but this just delays the problem and results in needless memory waste on the system. Our new message queue is dynamically allocated so only uses as much memory as it needs. Additionally, it need not be contiguous like the Tx and Rx FIFOs. Note that this patch chose to only create a queue for MAC and VLAN messages, since these are the only messages sent in a large enough volume to cause the reset loop. Other messages are very unlikely to overflow the mailbox Tx FIFO so easily. 
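At its core the queue is a FIFO of small, heap-allocated requests plus a drain loop that respects the mailbox's capacity. A single-threaded sketch with hypothetical names (the real list is protected by a spinlock and drained from a delayed work item):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* One queued MAC update; requests are allocated on demand, so the queue
 * uses only as much memory as there are outstanding updates. */
struct macvlan_request {
    struct macvlan_request *next;
    unsigned char addr[6];
    unsigned int vid;
    int set;
};

static struct macvlan_request *head, **tail = &head;

static int queue_mac_request(const unsigned char *addr, unsigned int vid,
                             int set)
{
    struct macvlan_request *req = calloc(1, sizeof(*req));

    if (!req)
        return -1;
    memcpy(req->addr, addr, 6);
    req->vid = vid;
    req->set = set;
    *tail = req;    /* FIFO: always append at the tail */
    tail = &req->next;
    return 0;
}

/* Monitor task: drain only while the mailbox has room, else come back. */
static void macvlan_task(int mbx_free_slots)
{
    while (head && mbx_free_slots-- > 0) {
        struct macvlan_request *req = head;

        head = req->next;
        if (!head)
            tail = &head;
        printf("%s %02x:..:%02x vid %u\n", req->set ? "add" : "del",
               req->addr[0], req->addr[5], req->vid);
        free(req);
    }
    if (head)
        printf("mailbox full, rescheduling\n");
}

int main(void)
{
    unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 0x2a };
    unsigned int vid;

    for (vid = 0; vid < 5; vid++)
        queue_mac_request(mac, vid, 1);
    macvlan_task(3);    /* only three fit this round */
    macvlan_task(3);    /* the remainder goes next time */
    return 0;
}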
Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k.h | 39 +++++ drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 199 ++++++++++++++++++----- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 201 ++++++++++++++++++++++++ 3 files changed, 397 insertions(+), 42 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 74542e9d63c7..40856bc0f3b9 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -248,6 +248,29 @@ struct fm10k_udp_port { __be16 port; }; +enum fm10k_macvlan_request_type { + FM10K_UC_MAC_REQUEST, + FM10K_MC_MAC_REQUEST, + FM10K_VLAN_REQUEST +}; + +struct fm10k_macvlan_request { + enum fm10k_macvlan_request_type type; + struct list_head list; + union { + struct fm10k_mac_request { + u8 addr[ETH_ALEN]; + u16 glort; + u16 vid; + } mac; + struct fm10k_vlan_request { + u32 vid; + u8 vsi; + } vlan; + }; + bool set; +}; + /* one work queue for entire driver */ extern struct workqueue_struct *fm10k_workqueue; @@ -276,6 +299,9 @@ enum fm10k_state_t { __FM10K_SERVICE_SCHED, __FM10K_SERVICE_REQUEST, __FM10K_SERVICE_DISABLE, + __FM10K_MACVLAN_SCHED, + __FM10K_MACVLAN_REQUEST, + __FM10K_MACVLAN_DISABLE, __FM10K_LINK_DOWN, __FM10K_UPDATING_STATS, /* This value must be last and determines the BITMAP size */ @@ -368,6 +394,12 @@ struct fm10k_intfc { struct list_head vxlan_port; struct list_head geneve_port; + /* MAC/VLAN update queue */ + struct list_head macvlan_requests; + struct delayed_work macvlan_task; + /* MAC/VLAN update queue lock */ + spinlock_t macvlan_lock; + #ifdef CONFIG_DEBUG_FS struct dentry *dbg_intfc; #endif /* CONFIG_DEBUG_FS */ @@ -487,6 +519,7 @@ void fm10k_up(struct fm10k_intfc *interface); void fm10k_down(struct fm10k_intfc *interface); void fm10k_update_stats(struct fm10k_intfc *interface); void fm10k_service_event_schedule(struct fm10k_intfc *interface); +void fm10k_macvlan_schedule(struct fm10k_intfc *interface); void fm10k_update_rx_drop_en(struct fm10k_intfc *interface); #ifdef CONFIG_NET_POLL_CONTROLLER void fm10k_netpoll(struct net_device *netdev); @@ -507,6 +540,12 @@ void fm10k_reset_rx_state(struct fm10k_intfc *); int fm10k_setup_tc(struct net_device *dev, u8 tc); int fm10k_open(struct net_device *netdev); int fm10k_close(struct net_device *netdev); +int fm10k_queue_vlan_request(struct fm10k_intfc *interface, u32 vid, + u8 vsi, bool set); +int fm10k_queue_mac_request(struct fm10k_intfc *interface, u16 glort, + const unsigned char *addr, u16 vid, bool set); +void fm10k_clear_macvlan_queue(struct fm10k_intfc *interface, + u16 glort, bool vlans); /* Ethtool */ void fm10k_set_ethtool_ops(struct net_device *dev); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 77d495fedced..81e4425f0529 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -758,11 +758,132 @@ static bool fm10k_host_mbx_ready(struct fm10k_intfc *interface) return (hw->mac.type == fm10k_mac_vf || interface->host_ready); } +/** + * fm10k_queue_vlan_request - Queue a VLAN update request + * @interface: the fm10k interface structure + * @vid: the VLAN vid + * @vsi: VSI index number + * @set: whether to set or clear + * + * This function queues up a VLAN update. For VFs, this must be sent to the + * managing PF over the mailbox. 
For PFs, we'll use the same handling so that + * it's similar to the VF. This avoids storming the PF<->VF mailbox with too + * many VLAN updates during reset. + */ +int fm10k_queue_vlan_request(struct fm10k_intfc *interface, + u32 vid, u8 vsi, bool set) +{ + struct fm10k_macvlan_request *request; + unsigned long flags; + + /* This must be atomic since we may be called while the netdev + * addr_list_lock is held + */ + request = kzalloc(sizeof(*request), GFP_ATOMIC); + if (!request) + return -ENOMEM; + + request->type = FM10K_VLAN_REQUEST; + request->vlan.vid = vid; + request->vlan.vsi = vsi; + request->set = set; + + spin_lock_irqsave(&interface->macvlan_lock, flags); + list_add_tail(&request->list, &interface->macvlan_requests); + spin_unlock_irqrestore(&interface->macvlan_lock, flags); + + fm10k_macvlan_schedule(interface); + + return 0; +} + +/** + * fm10k_queue_mac_request - Queue a MAC update request + * @interface: the fm10k interface structure + * @glort: the target glort for this update + * @addr: the address to update + * @vid: the vid to update + * @sync: whether to add or remove + * + * This function queues up a MAC request for sending to the switch manager. + * A separate thread monitors the queue and sends updates to the switch + * manager. Return 0 on success, and negative error code on failure. + **/ +int fm10k_queue_mac_request(struct fm10k_intfc *interface, u16 glort, + const unsigned char *addr, u16 vid, bool set) +{ + struct fm10k_macvlan_request *request; + unsigned long flags; + + /* This must be atomic since we may be called while the netdev + * addr_list_lock is held + */ + request = kzalloc(sizeof(*request), GFP_ATOMIC); + if (!request) + return -ENOMEM; + + if (is_multicast_ether_addr(addr)) + request->type = FM10K_MC_MAC_REQUEST; + else + request->type = FM10K_UC_MAC_REQUEST; + + ether_addr_copy(request->mac.addr, addr); + request->mac.glort = glort; + request->mac.vid = vid; + request->set = set; + + spin_lock_irqsave(&interface->macvlan_lock, flags); + list_add_tail(&request->list, &interface->macvlan_requests); + spin_unlock_irqrestore(&interface->macvlan_lock, flags); + + fm10k_macvlan_schedule(interface); + + return 0; +} + +/** + * fm10k_clear_macvlan_queue - Cancel pending updates for a given glort + * @interface: the fm10k interface structure + * @glort: the target glort to clear + * @vlans: true to clear VLAN messages, false to ignore them + * + * Cancel any outstanding MAC/VLAN requests for a given glort. This is + * expected to be called when a logical port goes down. 
+ **/ +void fm10k_clear_macvlan_queue(struct fm10k_intfc *interface, + u16 glort, bool vlans) + +{ + struct fm10k_macvlan_request *r, *tmp; + unsigned long flags; + + spin_lock_irqsave(&interface->macvlan_lock, flags); + + /* Free any outstanding MAC/VLAN requests for this interface */ + list_for_each_entry_safe(r, tmp, &interface->macvlan_requests, list) { + switch (r->type) { + case FM10K_MC_MAC_REQUEST: + case FM10K_UC_MAC_REQUEST: + /* Don't free requests for other interfaces */ + if (r->mac.glort != glort) + break; + /* fall through */ + case FM10K_VLAN_REQUEST: + if (vlans) { + list_del(&r->list); + kfree(r); + } + break; + } + } + + spin_unlock_irqrestore(&interface->macvlan_lock, flags); +} + static int fm10k_uc_vlan_unsync(struct net_device *netdev, const unsigned char *uc_addr) { struct fm10k_intfc *interface = netdev_priv(netdev); - struct fm10k_hw *hw = &interface->hw; u16 glort = interface->glort; u16 vid = interface->vid; bool set = !!(vid / VLAN_N_VID); @@ -771,10 +892,7 @@ static int fm10k_uc_vlan_unsync(struct net_device *netdev, /* drop any leading bits on the VLAN ID */ vid &= VLAN_N_VID - 1; - if (fm10k_host_mbx_ready(interface)) - err = hw->mac.ops.update_uc_addr(hw, glort, uc_addr, - vid, set, 0); - + err = fm10k_queue_mac_request(interface, glort, uc_addr, vid, set); if (err) return err; @@ -786,7 +904,6 @@ static int fm10k_mc_vlan_unsync(struct net_device *netdev, const unsigned char *mc_addr) { struct fm10k_intfc *interface = netdev_priv(netdev); - struct fm10k_hw *hw = &interface->hw; u16 glort = interface->glort; u16 vid = interface->vid; bool set = !!(vid / VLAN_N_VID); @@ -795,9 +912,7 @@ static int fm10k_mc_vlan_unsync(struct net_device *netdev, /* drop any leading bits on the VLAN ID */ vid &= VLAN_N_VID - 1; - if (fm10k_host_mbx_ready(interface)) - err = hw->mac.ops.update_mc_addr(hw, glort, mc_addr, vid, set); - + err = fm10k_queue_mac_request(interface, glort, mc_addr, vid, set); if (err) return err; @@ -855,18 +970,14 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set) /* only need to update the VLAN if not in promiscuous mode */ if (!(netdev->flags & IFF_PROMISC)) { - err = hw->mac.ops.update_vlan(hw, vid, 0, set); + err = fm10k_queue_vlan_request(interface, vid, 0, set); if (err) goto err_out; } - /* update our base MAC address if host's mailbox is ready */ - if (fm10k_host_mbx_ready(interface)) - err = hw->mac.ops.update_uc_addr(hw, interface->glort, - hw->mac.addr, vid, set, 0); - else - err = -EHOSTDOWN; - + /* Update our base MAC address */ + err = fm10k_queue_mac_request(interface, interface->glort, + hw->mac.addr, vid, set); if (err) goto err_out; @@ -910,7 +1021,6 @@ static u16 fm10k_find_next_vlan(struct fm10k_intfc *interface, u16 vid) static void fm10k_clear_unused_vlans(struct fm10k_intfc *interface) { - struct fm10k_hw *hw = &interface->hw; u32 vid, prev_vid; /* loop through and find any gaps in the table */ @@ -922,7 +1032,7 @@ static void fm10k_clear_unused_vlans(struct fm10k_intfc *interface) /* send request to clear multiple bits at a time */ prev_vid += (vid - prev_vid - 1) << FM10K_VLAN_LENGTH_SHIFT; - hw->mac.ops.update_vlan(hw, prev_vid, 0, false); + fm10k_queue_vlan_request(interface, prev_vid, 0, false); } } @@ -937,15 +1047,11 @@ static int __fm10k_uc_sync(struct net_device *dev, if (!is_valid_ether_addr(addr)) return -EADDRNOTAVAIL; - /* update table with current entries if host's mailbox is ready */ - if (!fm10k_host_mbx_ready(interface)) - return -EHOSTDOWN; - for (vid = hw->mac.default_vid ? 
fm10k_find_next_vlan(interface, 0) : 1; vid < VLAN_N_VID; vid = fm10k_find_next_vlan(interface, vid)) { - err = hw->mac.ops.update_uc_addr(hw, glort, addr, - vid, sync, 0); + err = fm10k_queue_mac_request(interface, glort, + addr, vid, sync); if (err) return err; } @@ -1002,15 +1108,18 @@ static int __fm10k_mc_sync(struct net_device *dev, struct fm10k_intfc *interface = netdev_priv(dev); struct fm10k_hw *hw = &interface->hw; u16 vid, glort = interface->glort; + s32 err; - /* update table with current entries if host's mailbox is ready */ - if (!fm10k_host_mbx_ready(interface)) - return 0; + if (!is_multicast_ether_addr(addr)) + return -EADDRNOTAVAIL; for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1; vid < VLAN_N_VID; vid = fm10k_find_next_vlan(interface, vid)) { - hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync); + err = fm10k_queue_mac_request(interface, glort, + addr, vid, sync); + if (err) + return err; } return 0; @@ -1050,7 +1159,8 @@ static void fm10k_set_rx_mode(struct net_device *dev) if (interface->xcast_mode != xcast_mode) { /* update VLAN table */ if (xcast_mode == FM10K_XCAST_MODE_PROMISC) - hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, 0, true); + fm10k_queue_vlan_request(interface, FM10K_VLAN_ALL, + 0, true); if (interface->xcast_mode == FM10K_XCAST_MODE_PROMISC) fm10k_clear_unused_vlans(interface); @@ -1098,22 +1208,20 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface) interface->glort_count, true); /* update VLAN table */ - hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, 0, - xcast_mode == FM10K_XCAST_MODE_PROMISC); + fm10k_queue_vlan_request(interface, FM10K_VLAN_ALL, 0, + xcast_mode == FM10K_XCAST_MODE_PROMISC); /* Add filter for VLAN 0 */ - hw->mac.ops.update_vlan(hw, 0, 0, true); + fm10k_queue_vlan_request(interface, 0, 0, true); /* update table with current entries */ for (vid = hw->mac.default_vid ? 
fm10k_find_next_vlan(interface, 0) : 1; vid < VLAN_N_VID; vid = fm10k_find_next_vlan(interface, vid)) { - hw->mac.ops.update_vlan(hw, vid, 0, true); + fm10k_queue_vlan_request(interface, vid, 0, true); - /* Update unicast entries if host's mailbox is ready */ - if (fm10k_host_mbx_ready(interface)) - hw->mac.ops.update_uc_addr(hw, glort, hw->mac.addr, - vid, true, 0); + fm10k_queue_mac_request(interface, glort, + hw->mac.addr, vid, true); } /* update xcast mode before synchronizing addresses if host's mailbox @@ -1140,6 +1248,13 @@ void fm10k_reset_rx_state(struct fm10k_intfc *interface) struct net_device *netdev = interface->netdev; struct fm10k_hw *hw = &interface->hw; + /* Wait for MAC/VLAN work to finish */ + while (test_bit(__FM10K_MACVLAN_SCHED, interface->state)) + usleep_range(1000, 2000); + + /* Cancel pending MAC/VLAN requests */ + fm10k_clear_macvlan_queue(interface, interface->glort, true); + fm10k_mbx_lock(interface); /* clear the logical port state on lower device if host's mailbox is @@ -1374,8 +1489,8 @@ static void *fm10k_dfwd_add_station(struct net_device *dev, if (fm10k_host_mbx_ready(interface)) { hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_MULTI); - hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr, - 0, true, 0); + fm10k_queue_mac_request(interface, glort, sdev->dev_addr, + 0, true); } fm10k_mbx_unlock(interface); @@ -1414,8 +1529,8 @@ static void fm10k_dfwd_del_station(struct net_device *dev, void *priv) if (fm10k_host_mbx_ready(interface)) { hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_NONE); - hw->mac.ops.update_uc_addr(hw, glort, sdev->dev_addr, - 0, false, 0); + fm10k_queue_mac_request(interface, glort, sdev->dev_addr, + 0, false); } fm10k_mbx_unlock(interface); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index aef39909e4a2..58538ce997e1 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -91,6 +91,76 @@ static int fm10k_hw_ready(struct fm10k_intfc *interface) return FM10K_REMOVED(hw->hw_addr) ? -ENODEV : 0; } +/** + * fm10k_macvlan_schedule - Schedule MAC/VLAN queue task + * @interface: fm10k private interface structure + * + * Schedule the MAC/VLAN queue monitor task. If the MAC/VLAN task cannot be + * started immediately, request that it be restarted when possible. + */ +void fm10k_macvlan_schedule(struct fm10k_intfc *interface) +{ + /* Avoid processing the MAC/VLAN queue when the service task is + * disabled, or when we're resetting the device. + */ + if (!test_bit(__FM10K_MACVLAN_DISABLE, interface->state) && + !test_and_set_bit(__FM10K_MACVLAN_SCHED, interface->state)) { + clear_bit(__FM10K_MACVLAN_REQUEST, interface->state); + /* We delay the actual start of execution in order to allow + * multiple MAC/VLAN updates to accumulate before handling + * them, and to allow some time to let the mailbox drain + * between runs. + */ + queue_delayed_work(fm10k_workqueue, + &interface->macvlan_task, 10); + } else { + set_bit(__FM10K_MACVLAN_REQUEST, interface->state); + } +} + +/** + * fm10k_stop_macvlan_task - Stop the MAC/VLAN queue monitor + * @interface: fm10k private interface structure + * + * Wait until the MAC/VLAN queue task has stopped, and cancel any future + * requests. 
+ */ +static void fm10k_stop_macvlan_task(struct fm10k_intfc *interface) +{ + /* Disable the MAC/VLAN work item */ + set_bit(__FM10K_MACVLAN_DISABLE, interface->state); + + /* Make sure we waited until any current invocations have stopped */ + cancel_delayed_work_sync(&interface->macvlan_task); + + /* We set the __FM10K_MACVLAN_SCHED bit when we schedule the task. + * However, it may not be unset if the MAC/VLAN task never actually + * got a chance to run. Since we've canceled the task here, and it + * cannot be rescheduled right now, we need to ensure the scheduled bit + * gets unset. + */ + clear_bit(__FM10K_MACVLAN_SCHED, interface->state); +} + +/** + * fm10k_resume_macvlan_task - Restart the MAC/VLAN queue monitor + * @interface: fm10k private interface structure + * + * Clear the __FM10K_MACVLAN_DISABLE bit and, if a request occurred, schedule + * the MAC/VLAN work monitor. + */ +static void fm10k_resume_macvlan_task(struct fm10k_intfc *interface) +{ + /* Re-enable the MAC/VLAN work item */ + clear_bit(__FM10K_MACVLAN_DISABLE, interface->state); + + /* We might have received a MAC/VLAN request while disabled. If so, + * kick off the queue now. + */ + if (test_bit(__FM10K_MACVLAN_REQUEST, interface->state)) + fm10k_macvlan_schedule(interface); +} + void fm10k_service_event_schedule(struct fm10k_intfc *interface) { if (!test_bit(__FM10K_SERVICE_DISABLE, interface->state) && @@ -174,6 +244,12 @@ static bool fm10k_prepare_for_reset(struct fm10k_intfc *interface) if (test_and_set_bit(__FM10K_RESETTING, interface->state)) return false; + /* As the MAC/VLAN task will be accessing registers, it must not be + * running while we reset. Although the task will not be scheduled + * once we start resetting, it may already be running + */ + fm10k_stop_macvlan_task(interface); + rtnl_lock(); fm10k_iov_suspend(interface->pdev); @@ -258,6 +334,8 @@ static int fm10k_handle_reset(struct fm10k_intfc *interface) rtnl_unlock(); + fm10k_resume_macvlan_task(interface); + clear_bit(__FM10K_RESETTING, interface->state); return err; @@ -686,6 +764,112 @@ static void fm10k_service_task(struct work_struct *work) fm10k_service_event_complete(interface); } +/** + * fm10k_macvlan_task - send queued MAC/VLAN requests to switch manager + * @work: pointer to work_struct containing our data + * + * This work item handles sending MAC/VLAN updates to the switch manager. When + * the interface is up, it will attempt to queue mailbox messages to the + * switch manager requesting updates for MAC/VLAN pairs. If the Tx FIFO of the + * mailbox is full, it will reschedule itself to try again in a short while. + * This ensures that the driver does not overload the switch mailbox with too + * many simultaneous requests, causing an unnecessary reset. 
+ **/ +static void fm10k_macvlan_task(struct work_struct *work) +{ + struct fm10k_macvlan_request *item; + struct fm10k_intfc *interface; + struct delayed_work *dwork; + struct list_head *requests; + struct fm10k_hw *hw; + unsigned long flags; + + dwork = to_delayed_work(work); + interface = container_of(dwork, struct fm10k_intfc, macvlan_task); + hw = &interface->hw; + requests = &interface->macvlan_requests; + + do { + /* Pop the first item off the list */ + spin_lock_irqsave(&interface->macvlan_lock, flags); + item = list_first_entry_or_null(requests, + struct fm10k_macvlan_request, + list); + if (item) + list_del_init(&item->list); + + spin_unlock_irqrestore(&interface->macvlan_lock, flags); + + /* We have no more items to process */ + if (!item) + goto done; + + fm10k_mbx_lock(interface); + + /* Check that we have plenty of space to send the message. We + * want to ensure that the mailbox stays low enough to avoid a + * change in the host state, otherwise we may see spurious + * link up / link down notifications. + */ + if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU + 5)) { + hw->mbx.ops.process(hw, &hw->mbx); + set_bit(__FM10K_MACVLAN_REQUEST, interface->state); + fm10k_mbx_unlock(interface); + + /* Put the request back on the list */ + spin_lock_irqsave(&interface->macvlan_lock, flags); + list_add(&item->list, requests); + spin_unlock_irqrestore(&interface->macvlan_lock, flags); + break; + } + + switch (item->type) { + case FM10K_MC_MAC_REQUEST: + hw->mac.ops.update_mc_addr(hw, + item->mac.glort, + item->mac.addr, + item->mac.vid, + item->set); + break; + case FM10K_UC_MAC_REQUEST: + hw->mac.ops.update_uc_addr(hw, + item->mac.glort, + item->mac.addr, + item->mac.vid, + item->set, + 0); + break; + case FM10K_VLAN_REQUEST: + hw->mac.ops.update_vlan(hw, + item->vlan.vid, + item->vlan.vsi, + item->set); + break; + default: + break; + } + + fm10k_mbx_unlock(interface); + + /* Free the item now that we've sent the update */ + kfree(item); + } while (true); + +done: + WARN_ON(!test_bit(__FM10K_MACVLAN_SCHED, interface->state)); + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__FM10K_MACVLAN_SCHED, interface->state); + + /* If a MAC/VLAN request was scheduled since we started, we should + * re-schedule. However, there is no reason to re-schedule if there is + * no work to do. 
+ */ + if (test_bit(__FM10K_MACVLAN_REQUEST, interface->state)) + fm10k_macvlan_schedule(interface); +} + /** * fm10k_configure_tx_ring - Configure Tx ring after Reset * @interface: board private structure @@ -1918,11 +2102,15 @@ static int fm10k_sw_init(struct fm10k_intfc *interface, INIT_LIST_HEAD(&interface->vxlan_port); INIT_LIST_HEAD(&interface->geneve_port); + /* Initialize the MAC/VLAN queue */ + INIT_LIST_HEAD(&interface->macvlan_requests); + netdev_rss_key_fill(rss_key, sizeof(rss_key)); memcpy(interface->rssrk, rss_key, sizeof(rss_key)); /* Initialize the mailbox lock */ spin_lock_init(&interface->mbx_lock); + spin_lock_init(&interface->macvlan_lock); /* Start off interface as being down */ set_bit(__FM10K_DOWN, interface->state); @@ -2131,6 +2319,9 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent) (unsigned long)interface); INIT_WORK(&interface->service_task, fm10k_service_task); + /* Setup the MAC/VLAN queue */ + INIT_DELAYED_WORK(&interface->macvlan_task, fm10k_macvlan_task); + /* kick off service timer now, even when interface is down */ mod_timer(&interface->service_timer, (HZ * 2) + jiffies); @@ -2184,6 +2375,10 @@ static void fm10k_remove(struct pci_dev *pdev) del_timer_sync(&interface->service_timer); fm10k_stop_service_event(interface); + fm10k_stop_macvlan_task(interface); + + /* Remove all pending MAC/VLAN requests */ + fm10k_clear_macvlan_queue(interface, interface->glort, true); /* free netdev, this may bounce the interrupts due to setup_tc */ if (netdev->reg_state == NETREG_REGISTERED) @@ -2220,6 +2415,9 @@ static void fm10k_prepare_suspend(struct fm10k_intfc *interface) * a surprise remove if the PCIe device is disabled while we're * stopped. We stop the watchdog task until after we resume software * activity. + * + * Note that the MAC/VLAN task will be stopped as part of preparing + * for reset, so we don't need to handle it here. */ fm10k_stop_service_event(interface); @@ -2259,6 +2457,9 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface) /* restart the service task */ fm10k_start_service_event(interface); + /* Restart the MAC/VLAN request queue in case of outstanding events */ + fm10k_macvlan_schedule(interface); + return err; } -- cgit v1.2.3 From 1f5c27e52857c9ba8f1ee4ed5093bee1a341f330 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 10 Jul 2017 13:23:18 -0700 Subject: fm10k: use the MAC/VLAN queue for VF<->PF MAC/VLAN requests Now that we have a working MAC/VLAN queue for handling MAC/VLAN messages from the netdev, replace the default handler for the VF<->PF messages. This new handler is very similar to the default code, but uses the MAC/VLAN queue instead of sending the message directly. Unfortunately we can't easily re-use the default code, so we'll just replace the entire function. This ensures that a VF requesting a large number of VLANs or MAC addresses does not start a reset cycle, as explained in the commit which introduced the message queue. 
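To make that mechanism concrete, the pattern the MAC/VLAN queue follows can be sketched in a few lines of C. This is an illustrative sketch only, not code from the patch: the names example_request, example_ctx and example_queue_request() are hypothetical stand-ins for the driver's fm10k_macvlan_request, fm10k_intfc and fm10k_queue_mac_request().

/* Sketch of the deferred-request pattern: allocate atomically (callers
 * may hold the netdev addr_list_lock), append under a spinlock, then
 * kick a delayed work item that drains the list later.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct example_request {
	struct list_head list;
	u32 payload;
};

struct example_ctx {
	spinlock_t lock;		/* protects the requests list */
	struct list_head requests;	/* pending requests, FIFO order */
	struct delayed_work task;	/* drains the requests list */
};

static int example_queue_request(struct example_ctx *ctx, u32 payload)
{
	struct example_request *req;
	unsigned long flags;

	/* GFP_ATOMIC: we may be called from atomic context */
	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->payload = payload;

	spin_lock_irqsave(&ctx->lock, flags);
	list_add_tail(&req->list, &ctx->requests);
	spin_unlock_irqrestore(&ctx->lock, flags);

	/* a short delay lets several requests batch up per work pass */
	schedule_delayed_work(&ctx->task, 10);
	return 0;
}

The short scheduling delay mirrors the driver's choice above: it allows multiple updates to accumulate before the work item runs, and gives the mailbox time to drain between runs.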
Signed-off-by: Jacob Keller Reviewed-by: Ngai-mint Kwan Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 132 ++++++++++++++++++++++++++- drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 2 +- drivers/net/ethernet/intel/fm10k/fm10k_pf.h | 3 +- 3 files changed, 133 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index 03897720bf0b..4a17cc903eed 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -35,10 +35,133 @@ static s32 fm10k_iov_msg_error(struct fm10k_hw *hw, u32 **results, return fm10k_tlv_msg_error(hw, results, mbx); } +/** + * fm10k_iov_msg_queue_mac_vlan - Message handler for MAC/VLAN request from VF + * @hw: Pointer to hardware structure + * @results: Pointer array to message, results[0] is pointer to message + * @mbx: Pointer to mailbox information structure + * + * This function is a custom handler for MAC/VLAN requests from the VF. The + * assumption is that it is acceptable to directly hand off the message from + * the VF to the PF's switch manager. However, we use a MAC/VLAN message + * queue to avoid overloading the mailbox when a large number of requests + * come in. + **/ +static s32 fm10k_iov_msg_queue_mac_vlan(struct fm10k_hw *hw, u32 **results, + struct fm10k_mbx_info *mbx) +{ + struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx; + struct fm10k_intfc *interface = hw->back; + u8 mac[ETH_ALEN]; + u32 *result; + int err = 0; + bool set; + u16 vlan; + u32 vid; + + /* we shouldn't be updating rules on a disabled interface */ + if (!FM10K_VF_FLAG_ENABLED(vf_info)) + err = FM10K_ERR_PARAM; + + if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) { + result = results[FM10K_MAC_VLAN_MSG_VLAN]; + + /* record VLAN id requested */ + err = fm10k_tlv_attr_get_u32(result, &vid); + if (err) + return err; + + set = !(vid & FM10K_VLAN_CLEAR); + vid &= ~FM10K_VLAN_CLEAR; + + /* if the length field has been set, this is a multi-bit + * update request. For multi-bit requests, simply disallow + * them when the pf_vid has been set. In this case, the PF + * should have already cleared the VLAN_TABLE, and if we + * allowed them, it could allow a rogue VF to receive traffic + * on a VLAN it was not assigned. In the single-bit case, we + * need to modify requests for VLAN 0 to use the default PF or + * SW vid when assigned. 
+ */ + + if (vid >> 16) { + /* prevent multi-bit requests when PF has + * administratively set the VLAN for this VF + */ + if (vf_info->pf_vid) + return FM10K_ERR_PARAM; + } else { + err = fm10k_iov_select_vid(vf_info, (u16)vid); + if (err < 0) + return err; + + vid = err; + } + + /* update VSI info for VF in regards to VLAN table */ + err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set); + } + + if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) { + result = results[FM10K_MAC_VLAN_MSG_MAC]; + + /* record unicast MAC address requested */ + err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan); + if (err) + return err; + + /* block attempts to set MAC for a locked device */ + if (is_valid_ether_addr(vf_info->mac) && + !ether_addr_equal(mac, vf_info->mac)) + return FM10K_ERR_PARAM; + + set = !(vlan & FM10K_VLAN_CLEAR); + vlan &= ~FM10K_VLAN_CLEAR; + + err = fm10k_iov_select_vid(vf_info, vlan); + if (err < 0) + return err; + + vlan = (u16)err; + + /* Add this request to the MAC/VLAN queue */ + err = fm10k_queue_mac_request(interface, vf_info->glort, + mac, vlan, set); + } + + if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) { + result = results[FM10K_MAC_VLAN_MSG_MULTICAST]; + + /* record multicast MAC address requested */ + err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan); + if (err) + return err; + + /* verify that the VF is allowed to request multicast */ + if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED)) + return FM10K_ERR_PARAM; + + set = !(vlan & FM10K_VLAN_CLEAR); + vlan &= ~FM10K_VLAN_CLEAR; + + err = fm10k_iov_select_vid(vf_info, vlan); + if (err < 0) + return err; + + vlan = (u16)err; + + /* Add this request to the MAC/VLAN queue */ + err = fm10k_queue_mac_request(interface, vf_info->glort, + mac, vlan, set); + } + + return err; +} + static const struct fm10k_msg_data iov_mbx_data[] = { FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test), FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf), - FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf), + FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_queue_mac_vlan), FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf), FM10K_TLV_MSG_ERROR_HANDLER(fm10k_iov_msg_error), }; @@ -126,8 +249,10 @@ process_mbx: hw->mbx.ops.process(hw, &hw->mbx); /* verify port mapping is valid, if not reset port */ - if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort)) + if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort)) { hw->iov.ops.reset_lport(hw, vf_info); + fm10k_clear_macvlan_queue(interface, glort, false); + } /* reset VFs that have mailbox timed out */ if (!mbx->timeout) { @@ -190,6 +315,7 @@ void fm10k_iov_suspend(struct pci_dev *pdev) hw->iov.ops.reset_resources(hw, vf_info); hw->iov.ops.reset_lport(hw, vf_info); + fm10k_clear_macvlan_queue(interface, vf_info->glort, false); } } @@ -414,6 +540,8 @@ static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface, /* disable LPORT for this VF which clears switch rules */ hw->iov.ops.reset_lport(hw, vf_info); + fm10k_clear_macvlan_queue(interface, vf_info->glort, false); + /* assign new MAC+VLAN for this VF */ hw->iov.ops.assign_default_mac_vlan(hw, vf_info); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c index 9e4fb3a44376..425d814aed4d 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c @@ -1186,7 +1186,7 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results, * Will report an error if the VLAN ID is out of range. 
For VID = 0, it will * return either the pf_vid or sw_vid depending on which one is set. */ -static s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid) +s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid) { if (!vid) return vf_info->pf_vid ? vf_info->pf_vid : vf_info->sw_vid; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h index 3336d3c10760..e04d41f1a532 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h @@ -1,5 +1,5 @@ /* Intel(R) Ethernet Switch Host Interface Driver - * Copyright(c) 2013 - 2016 Intel Corporation. + * Copyright(c) 2013 - 2017 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -114,6 +114,7 @@ extern const struct fm10k_tlv_attr fm10k_err_msg_attr[]; #define FM10K_PF_MSG_ERR_HANDLER(msg, func) \ FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_##msg, fm10k_err_msg_attr, func) +s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid); s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *); -- cgit v1.2.3 From ef57ab791c81b3a83c75a312b15b42f7440bb425 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 10 Jul 2017 13:23:19 -0700 Subject: fm10k: bump version number Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 189d52a8a605..5d56ed5ad7a6 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -28,7 +28,7 @@ #include "fm10k.h" -#define DRV_VERSION "0.21.7-k" +#define DRV_VERSION "0.22.1-k" #define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver" const char fm10k_driver_version[] = DRV_VERSION; char fm10k_driver_name[] = "fm10k"; -- cgit v1.2.3 From c0ad8ef3df091ef179d78dccb810024612dcfa44 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Fri, 11 Aug 2017 09:17:15 -0700 Subject: fm10k: Fix misuse of net_ratelimit() Correct the backward logic using !net_ratelimit() Miscellanea: o Add a blank line before the error return label Signed-off-by: Joe Perches Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 5d56ed5ad7a6..dbd69310f263 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -806,9 +806,10 @@ static int fm10k_tso(struct fm10k_ring *tx_ring, tx_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); return 1; + err_vxlan: tx_ring->netdev->features &= ~NETIF_F_GSO_UDP_TUNNEL; - if (!net_ratelimit()) + if (net_ratelimit()) netdev_err(tx_ring->netdev, "TSO requested for unsupported tunnel, disabling offload\n"); return -1; -- cgit v1.2.3 From 87be98927eb0bfa5484dfbe5ba2f6b7f91dd9187 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 11 Aug 2017 11:14:37 -0700 Subject: fm10k: prefer %s and __func__ for diagnostic prints Don't hard code the function names in the 
diagnostic output when these reset-related routines fail. Instead, use %s and __func__ so that future refactors don't need to change the print outs. Additionally, while we are here, add missing function header comments for the new reset_prepare and reset_done function handlers. Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 58538ce997e1..1e9ae3197b17 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -2588,11 +2588,18 @@ static void fm10k_io_resume(struct pci_dev *pdev) if (err) dev_warn(&pdev->dev, - "fm10k_io_resume failed: %d\n", err); + "%s failed: %d\n", __func__, err); else netif_device_attach(netdev); } +/** + * fm10k_io_reset_prepare - called when PCI function is about to be reset + * @pdev: Pointer to PCI device + * + * This callback is called when the PCI function is about to be reset, + * allowing the device driver to prepare for it. + */ static void fm10k_io_reset_prepare(struct pci_dev *pdev) { /* warn in case we have any active VF devices */ @@ -2602,6 +2609,13 @@ static void fm10k_io_reset_prepare(struct pci_dev *pdev) fm10k_prepare_suspend(pci_get_drvdata(pdev)); } +/** + * fm10k_io_reset_done - called when PCI function has finished resetting + * @pdev: Pointer to PCI device + * + * This callback is called just after the PCI function is reset, such as via + * /sys/class/net//device/reset or similar. + */ static void fm10k_io_reset_done(struct pci_dev *pdev) { struct fm10k_intfc *interface = pci_get_drvdata(pdev); @@ -2609,7 +2623,7 @@ static void fm10k_io_reset_done(struct pci_dev *pdev) if (err) { dev_warn(&pdev->dev, - "fm10k_io_reset_notify failed: %d\n", err); + "%s failed: %d\n", __func__, err); netif_device_detach(interface->netdev); } } -- cgit v1.2.3 From 3e256ac5b1ec307e5dd5a4c99fbdbc651446c738 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 11 Aug 2017 11:14:58 -0700 Subject: fm10k: fix mis-ordered parameters in declaration for .ndo_set_vf_bw We've had support for setting both a minimum and maximum bandwidth via .ndo_set_vf_bw since commit 883a9ccbae56 ("fm10k: Add support for SR-IOV to driver", 2014-09-20). Likely because we do not support minimum rates, the declaration mis-ordered the "unused" parameter, which causes warnings when analyzed with cppcheck. Fix this warning by properly declaring the min_rate and max_rate variables in the declaration and definition (rather than using "unused"). Also rename "rate" to max_rate so as to clarify that we only support setting the maximum rate. 
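For reference, the corrected ordering matches how the networking core passes VF rate limits: minimum rate first, then maximum. A minimal sketch of such a handler follows; it is not the fm10k implementation, and the bounds EXAMPLE_TC_MIN/EXAMPLE_TC_MAX are hypothetical placeholders for the device's real limits.

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/netdevice.h>

#define EXAMPLE_TC_MIN	10	/* placeholder: 10 Mb/s floor */
#define EXAMPLE_TC_MAX	100000	/* placeholder: link-speed ceiling */

static int example_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
				 int __always_unused min_rate, int max_rate)
{
	/* rate limit cannot be less than 10Mbs or greater than link speed */
	if (max_rate &&
	    (max_rate < EXAMPLE_TC_MIN || max_rate > EXAMPLE_TC_MAX))
		return -EINVAL;

	/* store max_rate for vf_idx and program the hardware here */
	return 0;
}

Because only the maximum rate is honored, validating the last argument and marking the minimum as unused is what makes the declaration and definition agree, which in turn silences the cppcheck warning.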
Signed-off-by: Jacob Keller Tested-by: Krishneil Singh Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/fm10k/fm10k.h | 4 ++-- drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 9 +++++---- 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h index 40856bc0f3b9..46973fb234c5 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k.h +++ b/drivers/net/ethernet/intel/fm10k/fm10k.h @@ -562,8 +562,8 @@ s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid); int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac); int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid, u8 qos, __be16 vlan_proto); -int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int rate, - int unused); +int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, + int __always_unused min_rate, int max_rate); int fm10k_ndo_get_vf_config(struct net_device *netdev, int vf_idx, struct ifla_vf_info *ivi); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c index 4a17cc903eed..ea3ab24265ee 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c @@ -613,7 +613,7 @@ int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid, } int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, - int __always_unused unused, int rate) + int __always_unused min_rate, int max_rate) { struct fm10k_intfc *interface = netdev_priv(netdev); struct fm10k_iov_data *iov_data = interface->iov_data; @@ -624,14 +624,15 @@ int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, return -EINVAL; /* rate limit cannot be less than 10Mbs or greater than link speed */ - if (rate && ((rate < FM10K_VF_TC_MIN) || rate > FM10K_VF_TC_MAX)) + if (max_rate && + (max_rate < FM10K_VF_TC_MIN || max_rate > FM10K_VF_TC_MAX)) return -EINVAL; /* store values */ - iov_data->vf_info[vf_idx].rate = rate; + iov_data->vf_info[vf_idx].rate = max_rate; /* update hardware configuration */ - hw->iov.ops.configure_tc(hw, vf_idx, rate); + hw->iov.ops.configure_tc(hw, vf_idx, max_rate); return 0; } -- cgit v1.2.3 From a047fbae23e1d94da28f81fb0f86fab4e473a094 Mon Sep 17 00:00:00 2001 From: Arjun Vynipadath Date: Tue, 3 Oct 2017 11:43:05 +0530 Subject: cxgb4: Update comment for min_mtu We have lost a comment for minimum mtu value set for netdevice with 'commit d894be57ca92 ("ethernet: use net core MTU range checking in more drivers"). Updating it accordingly. Signed-off-by: Arjun Vynipadath Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 13b636b0af5f..fe4cbe22d5d7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -5024,7 +5024,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->priv_flags |= IFF_UNICAST_FLT; /* MTU range: 81 - 9600 */ - netdev->min_mtu = 81; + netdev->min_mtu = 81; /* accommodate SACK */ netdev->max_mtu = MAX_MTU; netdev->netdev_ops = &cxgb4_netdev_ops; -- cgit v1.2.3 From 267872435515185e2e600a721fdddeea90f96ffa Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Tue, 3 Oct 2017 09:58:09 +0200 Subject: mlxsw: acl: Introduce ACL trap and forward action Use trap/discard flex action to implement trap and forward. The action will later be used for multicast routing, as the multicast routing mechanism is done using ACL flexible actions in Spectrum hardware. Using that action, it will be possible to implement a trap-and-forward route. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 17 +++++++++++++++++ .../net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 2 ++ 2 files changed, 19 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c index bc55d0e76705..6a979a09ab72 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c @@ -676,6 +676,7 @@ enum mlxsw_afa_trapdisc_trap_action { MLXSW_ITEM32(afa, trapdisc, trap_action, 0x00, 24, 4); enum mlxsw_afa_trapdisc_forward_action { + MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD = 1, MLXSW_AFA_TRAPDISC_FORWARD_ACTION_DISCARD = 3, }; @@ -729,6 +730,22 @@ int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id) } EXPORT_SYMBOL(mlxsw_afa_block_append_trap); +int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block, + u16 trap_id) +{ + char *act = mlxsw_afa_block_append_action(block, + MLXSW_AFA_TRAPDISC_CODE, + MLXSW_AFA_TRAPDISC_SIZE); + + if (!act) + return -ENOBUFS; + mlxsw_afa_trapdisc_pack(act, MLXSW_AFA_TRAPDISC_TRAP_ACTION_TRAP, + MLXSW_AFA_TRAPDISC_FORWARD_ACTION_FORWARD, + trap_id); + return 0; +} +EXPORT_SYMBOL(mlxsw_afa_block_append_trap_and_forward); + /* Forwarding Action * ----------------- * Forwarding Action can be used to implement Policy Based Switching (PBS) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h index 06b0be432b8f..a8d3314c3a24 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h @@ -61,6 +61,8 @@ int mlxsw_afa_block_continue(struct mlxsw_afa_block *block); int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id); int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block); int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id); +int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block, + u16 trap_id); int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block, u8 local_port, bool in_port); int 
mlxsw_afa_block_append_vlan_modify(struct mlxsw_afa_block *block, -- cgit v1.2.3 From a0040c8c935548e1efb1a28f07f15d7ec7918055 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Tue, 3 Oct 2017 09:58:10 +0200 Subject: mlxsw: spectrum: Add trap for multicast trap-and-forward routes When a multicast route is configured with trap-and-forward action, the packets should be marked with skb->offload_mr_fwd_mark, in order to prevent the packets from being forwarded again by the kernel ipmr module. Due to this, it is not possible to use the already existing multicast trap (MLXSW_TRAP_ID_ACL1) as the packet should be marked differently. Add the MLXSW_TRAP_ID_ACL2 which is for trap-and-forward multicast routes, and set the offload_mr_fwd_mark skb field in its handler. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 13 +++++++++++++ drivers/net/ethernet/mellanox/mlxsw/trap.h | 2 ++ 2 files changed, 15 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index e9b94430afed..3adf237c951a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3312,6 +3312,14 @@ static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); } +static void mlxsw_sp_rx_listener_mr_mark_func(struct sk_buff *skb, + u8 local_port, void *priv) +{ + skb->offload_mr_fwd_mark = 1; + skb->offload_fwd_mark = 1; + return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); +} + static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port, void *priv) { @@ -3355,6 +3363,10 @@ out: MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ _is_ctrl, SP_##_trap_group, DISCARD) +#define MLXSW_SP_RXL_MR_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ + MLXSW_RXL(mlxsw_sp_rx_listener_mr_mark_func, _trap_id, _action, \ + _is_ctrl, SP_##_trap_group, DISCARD) + #define MLXSW_SP_EVENTL(_func, _trap_id) \ MLXSW_EVENTL(_func, _trap_id, SP_EVENT) @@ -3425,6 +3437,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_MARK(IPV4_PIM, TRAP_TO_CPU, PIM, false), MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false), MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), + MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), }; static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index a98103539f6b..ec6cef8267ae 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -93,6 +93,8 @@ enum { MLXSW_TRAP_ID_ACL0 = 0x1C0, /* Multicast trap used for routes with trap action */ MLXSW_TRAP_ID_ACL1 = 0x1C1, + /* Multicast trap used for routes with trap-and-forward action */ + MLXSW_TRAP_ID_ACL2 = 0x1C2, MLXSW_TRAP_ID_MAX = 0x1FF }; -- cgit v1.2.3 From 607feadef89ac806df5a0be983afef77247e1541 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Tue, 3 Oct 2017 09:58:11 +0200 Subject: mlxsw: spectrum: mr_tcam: Add trap-and-forward multicast route In addition to the current multicast route actions, which include trap route action and a forward route action, add the trap-and-forward multicast route action, and implement it in the multicast routing hardware logic. 
To implement that, add a trap-and-forward ACL action as the last action in the route flexible action set. The used trap is the ACL2 trap, which marks the packets with offload_mr_forward_mark, to prevent the packet from being forwarded again by the kernel. Note: At that stage the offloading logic does not support trap-and-forward multicast routes. This patch adds the support only in the hardware logic. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h | 1 + drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c | 8 ++++++++ 2 files changed, 9 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h index c851b237d253..5d26a122af49 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.h @@ -42,6 +42,7 @@ enum mlxsw_sp_mr_route_action { MLXSW_SP_MR_ROUTE_ACTION_FORWARD, MLXSW_SP_MR_ROUTE_ACTION_TRAP, + MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD, }; enum mlxsw_sp_mr_route_prio { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c index cda9e9ad10e3..3ffb28dd4057 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c @@ -253,6 +253,7 @@ mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err; break; + case MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD: case MLXSW_SP_MR_ROUTE_ACTION_FORWARD: /* If we are about to append a multicast router action, commit * the erif_list. @@ -266,6 +267,13 @@ mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp, erif_list->kvdl_index); if (err) goto err; + + if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD) { + err = mlxsw_afa_block_append_trap_and_forward(afa_block, + MLXSW_TRAP_ID_ACL2); + if (err) + goto err; + } break; default: err = -EINVAL; -- cgit v1.2.3 From f60c254998de80feaec8e4122960ab64e8045214 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Tue, 3 Oct 2017 09:58:12 +0200 Subject: mlxsw: spectrum: mr: Support trap-and-forward routes Add the support of trap-and-forward route action in the multicast routing offloading logic. A route will be set to trap-and-forward action if one (or more) of its output interfaces is not offload-able, i.e. does not have a valid Spectrum RIF. This way, a route with mixed output VIFs list, which contains both offload-able and un-offload-able devices can go through partial offloading in hardware, and the rest will be done in the kernel ipmr module. Signed-off-by: Yotam Gigi Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c index 4aaf6ca1be7c..1f84bb8e9135 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c @@ -114,9 +114,9 @@ static bool mlxsw_sp_mr_vif_valid(const struct mlxsw_sp_mr_vif *vif) return mlxsw_sp_mr_vif_regular(vif) && vif->dev && vif->rif; } -static bool mlxsw_sp_mr_vif_rif_invalid(const struct mlxsw_sp_mr_vif *vif) +static bool mlxsw_sp_mr_vif_exists(const struct mlxsw_sp_mr_vif *vif) { - return mlxsw_sp_mr_vif_regular(vif) && vif->dev && !vif->rif; + return vif->dev; } static bool @@ -182,14 +182,13 @@ mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route) if (!mlxsw_sp_mr_route_valid_evifs_num(mr_route)) return MLXSW_SP_MR_ROUTE_ACTION_TRAP; - /* If either one of the eVIFs is not regular (VIF of type pimreg or - * tunnel) or one of the VIFs has no matching RIF, trap the packet. + /* If one of the eVIFs has no RIF, trap-and-forward the route as there + * is some more routing to do in software too. */ - list_for_each_entry(rve, &mr_route->evif_list, route_node) { - if (!mlxsw_sp_mr_vif_regular(rve->mr_vif) || - mlxsw_sp_mr_vif_rif_invalid(rve->mr_vif)) - return MLXSW_SP_MR_ROUTE_ACTION_TRAP; - } + list_for_each_entry(rve, &mr_route->evif_list, route_node) + if (mlxsw_sp_mr_vif_exists(rve->mr_vif) && !rve->mr_vif->rif) + return MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD; + return MLXSW_SP_MR_ROUTE_ACTION_FORWARD; } -- cgit v1.2.3 From b508e0b6e47c85a095ef056f3de6ba9d396c490c Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 3 Oct 2017 13:53:03 +0300 Subject: mlxsw: spectrum: Fix check for IS_ERR() instead of NULL mlxsw_afa_block_create() doesn't return error pointers, it returns NULL on error. Fixes: 0e14c7777acb ("mlxsw: spectrum: Add the multicast routing hardware logic") Signed-off-by: Dan Carpenter Acked-by: Yotam Gigi Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c index 3ffb28dd4057..3a61896ae4d8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c @@ -239,8 +239,8 @@ mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp, int err; afa_block = mlxsw_afa_block_create(mlxsw_sp->afa); - if (IS_ERR(afa_block)) - return afa_block; + if (!afa_block) + return ERR_PTR(-ENOMEM); err = mlxsw_afa_block_append_counter(afa_block, counter_index); if (err) -- cgit v1.2.3 From b5c7d4e54c9ab830e5c03f92377fe15cbae64d0d Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 3 Oct 2017 13:53:41 +0300 Subject: mlxsw: spectrum: Add missing error code on allocation failure We accidentally return success if the kmalloc_array() call fails. Fixes: 0e14c7777acb ("mlxsw: spectrum: Add the multicast routing hardware logic") Signed-off-by: Dan Carpenter Acked-by: Yotam Gigi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c index 3a61896ae4d8..39c21c70ac32 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c @@ -771,8 +771,10 @@ mlxsw_sp_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp, parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1, sizeof(*parman_prios), GFP_KERNEL); - if (!parman_prios) + if (!parman_prios) { + err = -ENOMEM; goto err_parman_prios_alloc; + } mr_tcam_region->parman_prios = parman_prios; for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++) -- cgit v1.2.3 From acd669a8f67ed47f5edd385741486cc7a259a446 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Tue, 3 Oct 2017 11:10:53 +0530 Subject: cxgb4: add new T6 pci device id's Add 0x6085 T6 device id. Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index 37d90d63e4a3..633e9751a25e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -202,6 +202,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x6082), /* Custom T6225-CR SFP28 */ CH_PCI_ID_TABLE_FENTRY(0x6083), /* Custom T62100-CR QSFP28 */ CH_PCI_ID_TABLE_FENTRY(0x6084), /* Custom T64100-CR QSFP28 */ + CH_PCI_ID_TABLE_FENTRY(0x6085), /* Custom T6240-SO */ CH_PCI_DEVICE_ID_TABLE_DEFINE_END; #endif /* __T4_PCI_ID_TBL_H__ */ -- cgit v1.2.3 From 4d86d38186271438ef002c5ae6e04836f01bf8bf Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Wed, 4 Oct 2017 09:54:27 +0200 Subject: ravb: RX checksum offload Add support for RX checksum offload. This is enabled by default and may be disabled and re-enabled using ethtool: # ethtool -K eth0 rx off # ethtool -K eth0 rx on The RAVB provides a simple checksumming scheme which appears to be completely compatible with CHECKSUM_COMPLETE: sum of all packet data after the L2 header is appended to packet data; this may be trivially read by the driver and used to update the skb accordingly. In terms of performance throughput is close to gigabit line-rate both with and without RX checksum offload enabled. Perf output, however, appears to indicate that significantly less time is spent in do_csum(). This is as expected. Test results with RX checksum offload enabled: # /usr/bin/perf_3.16 record -o /run/perf.data -a netperf -t TCP_MAERTS -H 10.4.3.162 MIGRATED TCP MAERTS TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 10.4.3.162 () port 0 AF_INET : demo enable_enobufs failed: getprotobyname Recv Send Send Socket Socket Message Elapsed Size Size Size Time Throughput bytes bytes bytes secs. 
10^6bits/sec 87380 16384 16384 10.00 937.54 Summary of output of perf report: 18.28% ksoftirqd/0 [kernel.kallsyms] [k] _raw_spin_unlock_irqrestore 10.34% ksoftirqd/0 [kernel.kallsyms] [k] __pi_memcpy 9.83% ksoftirqd/0 [kernel.kallsyms] [k] ravb_poll 7.89% ksoftirqd/0 [kernel.kallsyms] [k] skb_put 4.01% ksoftirqd/0 [kernel.kallsyms] [k] dev_gro_receive 3.37% netperf [kernel.kallsyms] [k] __arch_copy_to_user 3.17% swapper [kernel.kallsyms] [k] arch_cpu_idle 2.55% swapper [kernel.kallsyms] [k] tick_nohz_idle_enter 2.04% ksoftirqd/0 [kernel.kallsyms] [k] __pi___inval_dcache_area 2.03% swapper [kernel.kallsyms] [k] _raw_spin_unlock_irq 1.96% ksoftirqd/0 [kernel.kallsyms] [k] __netdev_alloc_skb 1.59% ksoftirqd/0 [kernel.kallsyms] [k] __slab_alloc.isra.83 Test results without RX checksum offload enabled: # /usr/bin/perf_3.16 record -o /run/perf.data -a netperf -t TCP_MAERTS -H 10.4.3.162 MIGRATED TCP MAERTS TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to 10.4.3.162 () port 0 AF_INET : demo enable_enobufs failed: getprotobyname Recv Send Send Socket Socket Message Elapsed Size Size Size Time Throughput bytes bytes bytes secs. 10^6bits/sec 87380 16384 16384 10.00 940.20 Summary of output of perf report: 17.10% ksoftirqd/0 [kernel.kallsyms] [k] _raw_spin_unlock_irqrestore 10.99% ksoftirqd/0 [kernel.kallsyms] [k] __pi_memcpy 8.87% ksoftirqd/0 [kernel.kallsyms] [k] ravb_poll 8.16% ksoftirqd/0 [kernel.kallsyms] [k] skb_put 7.42% ksoftirqd/0 [kernel.kallsyms] [k] do_csum 3.91% ksoftirqd/0 [kernel.kallsyms] [k] dev_gro_receive 2.31% swapper [kernel.kallsyms] [k] arch_cpu_idle 2.16% ksoftirqd/0 [kernel.kallsyms] [k] __pi___inval_dcache_area 2.14% ksoftirqd/0 [kernel.kallsyms] [k] __netdev_alloc_skb 1.93% netperf [kernel.kallsyms] [k] __arch_copy_to_user 1.79% swapper [kernel.kallsyms] [k] tick_nohz_idle_enter 1.63% ksoftirqd/0 [kernel.kallsyms] [k] __slab_alloc.isra.83 Above results collected on an R-Car Gen 3 Salvator-X/r8a7796 ES1.0. Also tested on a R-Car Gen 3 Salvator-X/r8a7795 ES1.0. By inspection this also appears to be compatible with the ravb found on R-Car Gen 2 SoCs, however, this patch is currently untested on such hardware. Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/ravb_main.c | 55 +++++++++++++++++++++++++++++++- 1 file changed, 54 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index fdf30bfa403b..a8822a756e08 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -403,8 +403,9 @@ static void ravb_emac_init(struct net_device *ndev) /* Receive frame limit set register */ ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR); - /* PAUSE prohibition */ + /* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */ ravb_write(ndev, ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) | + (ndev->features & NETIF_F_RXCSUM ? 
ECMR_RCSC : 0) | ECMR_TE | ECMR_RE, ECMR); ravb_set_rate(ndev); @@ -520,6 +521,19 @@ static void ravb_get_tx_tstamp(struct net_device *ndev) } } +static void ravb_rx_csum(struct sk_buff *skb) +{ + u8 *hw_csum; + + /* The hardware checksum is 2 bytes appended to packet data */ + if (unlikely(skb->len < 2)) + return; + hw_csum = skb_tail_pointer(skb) - 2; + skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); + skb->ip_summed = CHECKSUM_COMPLETE; + skb_trim(skb, skb->len - 2); +} + /* Packet receive function for Ethernet AVB */ static bool ravb_rx(struct net_device *ndev, int *quota, int q) { @@ -587,8 +601,11 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) ts.tv_nsec = le32_to_cpu(desc->ts_n); shhwtstamps->hwtstamp = timespec64_to_ktime(ts); } + skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, ndev); + if (ndev->features & NETIF_F_RXCSUM) + ravb_rx_csum(skb); napi_gro_receive(&priv->napi[q], skb); stats->rx_packets++; stats->rx_bytes += pkt_len; @@ -1842,6 +1859,38 @@ static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd) return phy_mii_ioctl(phydev, req, cmd); } +static void ravb_set_rx_csum(struct net_device *ndev, bool enable) +{ + struct ravb_private *priv = netdev_priv(ndev); + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + /* Disable TX and RX */ + ravb_rcv_snd_disable(ndev); + + /* Modify RX Checksum setting */ + ravb_modify(ndev, ECMR, ECMR_RCSC, enable ? ECMR_RCSC : 0); + + /* Enable TX and RX */ + ravb_rcv_snd_enable(ndev); + + spin_unlock_irqrestore(&priv->lock, flags); +} + +static int ravb_set_features(struct net_device *ndev, + netdev_features_t features) +{ + netdev_features_t changed = ndev->features ^ features; + + if (changed & NETIF_F_RXCSUM) + ravb_set_rx_csum(ndev, features & NETIF_F_RXCSUM); + + ndev->features = features; + + return 0; +} + static const struct net_device_ops ravb_netdev_ops = { .ndo_open = ravb_open, .ndo_stop = ravb_close, @@ -1853,6 +1902,7 @@ static const struct net_device_ops ravb_netdev_ops = { .ndo_do_ioctl = ravb_do_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, + .ndo_set_features = ravb_set_features, }; /* MDIO bus init function */ @@ -2004,6 +2054,9 @@ static int ravb_probe(struct platform_device *pdev) if (!ndev) return -ENOMEM; + ndev->features = NETIF_F_RXCSUM; + ndev->hw_features = NETIF_F_RXCSUM; + pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); -- cgit v1.2.3 From ebf6b13142f947be576b40edce214788dfe1d3e3 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 4 Oct 2017 14:20:37 +0100 Subject: cxgb4vf: make a couple of functions static The functions t4vf_link_down_rc_str and t4vf_handle_get_port_info are local to the source and do not need to be in global scope, so make them static. Cleans up sparse warnings: symbol 't4vf_link_down_rc_str' was not declared. Should it be static? symbol 't4vf_handle_get_port_info' was not declared. Should it be static? Signed-off-by: Colin Ian King Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index a8d94963b4d0..67aec59a14e6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -1812,7 +1812,7 @@ int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid) * * Returns a string representation of the Link Down Reason Code. */ -const char *t4vf_link_down_rc_str(unsigned char link_down_rc) +static const char *t4vf_link_down_rc_str(unsigned char link_down_rc) { static const char * const reason[] = { "Link Down", @@ -1838,8 +1838,8 @@ const char *t4vf_link_down_rc_str(unsigned char link_down_rc) * * Processes a GET_PORT_INFO FW reply message. */ -void t4vf_handle_get_port_info(struct port_info *pi, - const struct fw_port_cmd *cmd) +static void t4vf_handle_get_port_info(struct port_info *pi, + const struct fw_port_cmd *cmd) { int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16)); struct adapter *adapter = pi->adapter; -- cgit v1.2.3 From 42ab19ee90292993370a30ad242599d75a3b749e Mon Sep 17 00:00:00 2001 From: David Ahern Date: Wed, 4 Oct 2017 17:48:47 -0700 Subject: net: Add extack to upper device linking Add extack arg to netdev_upper_dev_link and netdev_master_upper_dev_link Signed-off-by: David Ahern Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 7 ++++--- drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 2 +- drivers/net/hyperv/netvsc_drv.c | 2 +- drivers/net/ipvlan/ipvlan_main.c | 2 +- drivers/net/macsec.c | 2 +- drivers/net/macvlan.c | 7 ++++--- drivers/net/macvtap.c | 2 +- drivers/net/team/team.c | 2 +- drivers/net/usb/qmi_wwan.c | 2 +- drivers/net/vrf.c | 7 ++++--- include/linux/if_macvlan.h | 3 ++- include/linux/netdevice.h | 6 ++++-- net/8021q/vlan.c | 6 +++--- net/8021q/vlan.h | 2 +- net/8021q/vlan_netlink.c | 2 +- net/batman-adv/hard-interface.c | 2 +- net/bridge/br_if.c | 2 +- net/core/dev.c | 15 ++++++++++----- net/openvswitch/vport-netdev.c | 3 ++- 19 files changed, 44 insertions(+), 32 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 78feb94a36db..bc92307c2082 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1217,14 +1217,15 @@ static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond) } } -static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave) +static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave, + struct netlink_ext_ack *extack) { struct netdev_lag_upper_info lag_upper_info; int err; lag_upper_info.tx_type = bond_lag_tx_type(bond); err = netdev_master_upper_dev_link(slave->dev, bond->dev, slave, - &lag_upper_info); + &lag_upper_info, extack); if (err) return err; rtmsg_ifinfo(RTM_NEWLINK, slave->dev, IFF_SLAVE, GFP_KERNEL); @@ -1710,7 +1711,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, goto err_detach; } - res = bond_master_upper_dev_link(bond, new_slave); + res = bond_master_upper_dev_link(bond, new_slave, extack); if (res) { netdev_dbg(bond_dev, "Error %d calling bond_master_upper_dev_link\n", res); goto err_unregister; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 98f22551eb45..1af326a60cbb 100644 --- 
a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -178,7 +178,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, if (err) goto err1; - err = netdev_master_upper_dev_link(dev, real_dev, NULL, NULL); + err = netdev_master_upper_dev_link(dev, real_dev, NULL, NULL, extack); if (err) goto err2; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index f300ae61c6c6..dfb986421ec6 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1748,7 +1748,7 @@ static int netvsc_vf_join(struct net_device *vf_netdev, goto rx_handler_failed; } - ret = netdev_upper_dev_link(vf_netdev, ndev); + ret = netdev_upper_dev_link(vf_netdev, ndev, NULL); if (ret != 0) { netdev_err(vf_netdev, "can not set master device %s (err = %d)\n", diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index c74893c1e620..57c3856bab05 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -584,7 +584,7 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev, if (err < 0) goto remove_ida; - err = netdev_upper_dev_link(phy_dev, dev); + err = netdev_upper_dev_link(phy_dev, dev, extack); if (err) { goto unregister_netdev; } diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 98e4deaa3a6a..ccbe4eaffe4d 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -3244,7 +3244,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev, &macsec_netdev_addr_lock_key, macsec_get_nest_level(dev)); - err = netdev_upper_dev_link(real_dev, dev); + err = netdev_upper_dev_link(real_dev, dev, extack); if (err < 0) goto unregister; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 1ffe77e95d46..858bd66511a2 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1344,7 +1344,8 @@ static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode, } int macvlan_common_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[]) + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) { struct macvlan_dev *vlan = netdev_priv(dev); struct macvlan_port *port; @@ -1433,7 +1434,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, goto destroy_macvlan_port; dev->priv_flags |= IFF_MACVLAN; - err = netdev_upper_dev_link(lowerdev, dev); + err = netdev_upper_dev_link(lowerdev, dev, extack); if (err) goto unregister_netdev; @@ -1456,7 +1457,7 @@ static int macvlan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { - return macvlan_common_newlink(src_net, dev, tb, data); + return macvlan_common_newlink(src_net, dev, tb, data, extack); } void macvlan_dellink(struct net_device *dev, struct list_head *head) diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index c2d0ea2fb019..f62aea2fcfa9 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -105,7 +105,7 @@ static int macvtap_newlink(struct net *src_net, struct net_device *dev, /* Don't put anything that may fail after macvlan_common_newlink * because we can't undo what it does. 
*/ - err = macvlan_common_newlink(src_net, dev, tb, data); + err = macvlan_common_newlink(src_net, dev, tb, data, extack); if (err) { netdev_rx_handler_unregister(dev); return err; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 4359d45aa131..a468439969df 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1112,7 +1112,7 @@ static int team_upper_dev_link(struct team *team, struct team_port *port) lag_upper_info.tx_type = team->mode->lag_tx_type; err = netdev_master_upper_dev_link(port->dev, team->dev, NULL, - &lag_upper_info); + &lag_upper_info, NULL); if (err) return err; port->dev->priv_flags |= IFF_TEAM_PORT; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 8c3733608271..db7279d5b250 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -221,7 +221,7 @@ static int qmimux_register_device(struct net_device *real_dev, u8 mux_id) /* Account for reference in struct qmimux_priv_priv */ dev_hold(real_dev); - err = netdev_upper_dev_link(real_dev, new_dev); + err = netdev_upper_dev_link(real_dev, new_dev, NULL); if (err) goto out_unregister_netdev; diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 4a082ef53533..77d0655a0250 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -764,7 +764,8 @@ static void cycle_netdev(struct net_device *dev) } } -static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev) +static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev, + struct netlink_ext_ack *extack) { int ret; @@ -775,7 +776,7 @@ static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev) return -EOPNOTSUPP; port_dev->priv_flags |= IFF_L3MDEV_SLAVE; - ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL); + ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL, extack); if (ret < 0) goto err; @@ -794,7 +795,7 @@ static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev, if (netif_is_l3_master(port_dev) || netif_is_l3_slave(port_dev)) return -EINVAL; - return do_vrf_add_slave(dev, port_dev); + return do_vrf_add_slave(dev, port_dev, extack); } /* inverse of do_vrf_add_slave */ diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index c9ec1343d187..10e319f41fb1 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h @@ -72,7 +72,8 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan, extern void macvlan_common_setup(struct net_device *dev); extern int macvlan_common_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[]); + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack); extern void macvlan_count_rx(const struct macvlan_dev *vlan, unsigned int len, bool success, diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 368a5064a487..31bb3010c69b 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3919,10 +3919,12 @@ void *netdev_adjacent_get_private(struct list_head *adj_list); void *netdev_lower_get_first_private_rcu(struct net_device *dev); struct net_device *netdev_master_upper_dev_get(struct net_device *dev); struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); -int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev); +int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, + struct netlink_ext_ack *extack); int netdev_master_upper_dev_link(struct net_device *dev, struct 
net_device *upper_dev, - void *upper_priv, void *upper_info); + void *upper_priv, void *upper_info, + struct netlink_ext_ack *extack); void netdev_upper_dev_unlink(struct net_device *dev, struct net_device *upper_dev); void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 9649579b5b9f..71c3e045505b 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -138,7 +138,7 @@ int vlan_check_real_dev(struct net_device *real_dev, return 0; } -int register_vlan_dev(struct net_device *dev) +int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct net_device *real_dev = vlan->real_dev; @@ -174,7 +174,7 @@ int register_vlan_dev(struct net_device *dev) if (err < 0) goto out_uninit_mvrp; - err = netdev_upper_dev_link(real_dev, dev); + err = netdev_upper_dev_link(real_dev, dev, extack); if (err) goto out_unregister_netdev; @@ -270,7 +270,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id) vlan->flags = VLAN_FLAG_REORDER_HDR; new_dev->rtnl_link_ops = &vlan_link_ops; - err = register_vlan_dev(new_dev); + err = register_vlan_dev(new_dev, NULL); if (err < 0) goto out_free_newdev; diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h index df8bd65dd370..94f8eed9f9b3 100644 --- a/net/8021q/vlan.h +++ b/net/8021q/vlan.h @@ -107,7 +107,7 @@ void vlan_dev_get_realdev_name(const struct net_device *dev, char *result); int vlan_check_real_dev(struct net_device *real_dev, __be16 protocol, u16 vlan_id); void vlan_setup(struct net_device *dev); -int register_vlan_dev(struct net_device *dev); +int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack); void unregister_vlan_dev(struct net_device *dev, struct list_head *head); bool vlan_dev_inherit_address(struct net_device *dev, struct net_device *real_dev); diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c index 5e831de3103e..6e7c5a6a7930 100644 --- a/net/8021q/vlan_netlink.c +++ b/net/8021q/vlan_netlink.c @@ -160,7 +160,7 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev, if (err < 0) return err; - return register_vlan_dev(dev); + return register_vlan_dev(dev, extack); } static inline size_t vlan_qos_map_size(unsigned int n) diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index e348f76ea8c1..f7b413b9297e 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -738,7 +738,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, bat_priv = netdev_priv(hard_iface->soft_iface); ret = netdev_master_upper_dev_link(hard_iface->net_dev, - soft_iface, NULL, NULL); + soft_iface, NULL, NULL, NULL); if (ret) goto err_dev; diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index f3aef22931ab..0a3fd727048d 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -540,7 +540,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) dev->priv_flags |= IFF_BRIDGE_PORT; - err = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL); + err = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL, NULL); if (err) goto err5; diff --git a/net/core/dev.c b/net/core/dev.c index e27a6bc0ac4d..fcddccb6be41 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6277,11 +6277,13 @@ static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev, static int __netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, bool master, - void *upper_priv, void 
*upper_info) + void *upper_priv, void *upper_info, + struct netlink_ext_ack *extack) { struct netdev_notifier_changeupper_info changeupper_info = { .info = { .dev = dev, + .extack = extack, }, .upper_dev = upper_dev, .master = master, @@ -6341,9 +6343,11 @@ rollback: * returns zero. */ int netdev_upper_dev_link(struct net_device *dev, - struct net_device *upper_dev) + struct net_device *upper_dev, + struct netlink_ext_ack *extack) { - return __netdev_upper_dev_link(dev, upper_dev, false, NULL, NULL); + return __netdev_upper_dev_link(dev, upper_dev, false, + NULL, NULL, extack); } EXPORT_SYMBOL(netdev_upper_dev_link); @@ -6362,10 +6366,11 @@ EXPORT_SYMBOL(netdev_upper_dev_link); */ int netdev_master_upper_dev_link(struct net_device *dev, struct net_device *upper_dev, - void *upper_priv, void *upper_info) + void *upper_priv, void *upper_info, + struct netlink_ext_ack *extack) { return __netdev_upper_dev_link(dev, upper_dev, true, - upper_priv, upper_info); + upper_priv, upper_info, extack); } EXPORT_SYMBOL(netdev_master_upper_dev_link); diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index 0389398fa4ab..2e5e7a41d8ef 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c @@ -108,7 +108,8 @@ struct vport *ovs_netdev_link(struct vport *vport, const char *name) rtnl_lock(); err = netdev_master_upper_dev_link(vport->dev, - get_dpdev(vport->dp), NULL, NULL); + get_dpdev(vport->dp), + NULL, NULL, NULL); if (err) goto error_unlock; -- cgit v1.2.3 From e58376e1df2aaffbf12753959142a50f824c46ea Mon Sep 17 00:00:00 2001 From: David Ahern Date: Wed, 4 Oct 2017 17:48:51 -0700 Subject: mlxsw: spectrum: Add extack messages for enslave failures mlxsw fails device enslavement for a number of reasons. Use the extack facility to return an error message to the user stating why the enslave is failing. Messages are prefixed with "spectrum" so users know it is a constraint imposed by the hardware driver. For example: $ ip li add br0.11 link br0 type vlan id 11 $ ip li set swp11 master br0 Error: spectrum: Enslaving a port to a device that already has an upper device is not supported. Signed-off-by: David Ahern Reviewed-by: Ido Schimmel Tested-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 47 ++++++++++++++++++++------ 1 file changed, 37 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 3adf237c951a..5cd4df08ce97 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -4019,14 +4019,21 @@ static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp, static bool mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev, - struct netdev_lag_upper_info *lag_upper_info) + struct netdev_lag_upper_info *lag_upper_info, + struct netlink_ext_ack *extack) { u16 lag_id; - if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) + if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) { + NL_SET_ERR_MSG(extack, + "spectrum: Exceeded number of supported LAG devices"); return false; - if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) + } + if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { + NL_SET_ERR_MSG(extack, + "spectrum: LAG device using unsupported Tx type"); return false; + } return true; } @@ -4231,6 +4238,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, { struct netdev_notifier_changeupper_info *info; struct mlxsw_sp_port *mlxsw_sp_port; + struct netlink_ext_ack *extack; struct net_device *upper_dev; struct mlxsw_sp *mlxsw_sp; int err = 0; @@ -4238,6 +4246,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, mlxsw_sp_port = netdev_priv(dev); mlxsw_sp = mlxsw_sp_port->mlxsw_sp; info = ptr; + extack = netdev_notifier_info_to_extack(&info->info); switch (event) { case NETDEV_PRECHANGEUPPER: @@ -4245,25 +4254,43 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, if (!is_vlan_dev(upper_dev) && !netif_is_lag_master(upper_dev) && !netif_is_bridge_master(upper_dev) && - !netif_is_ovs_master(upper_dev)) + !netif_is_ovs_master(upper_dev)) { + NL_SET_ERR_MSG(extack, + "spectrum: Unknown upper device type"); return -EINVAL; + } if (!info->linking) break; - if (netdev_has_any_upper_dev(upper_dev)) + if (netdev_has_any_upper_dev(upper_dev)) { + NL_SET_ERR_MSG(extack, + "spectrum: Enslaving a port to a device that already has an upper device is not supported"); return -EINVAL; + } if (netif_is_lag_master(upper_dev) && !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev, - info->upper_info)) + info->upper_info, extack)) return -EINVAL; - if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) + if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) { + NL_SET_ERR_MSG(extack, + "spectrum: Master device is a LAG master and this device has a VLAN"); return -EINVAL; + } if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && - !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) + !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) { + NL_SET_ERR_MSG(extack, + "spectrum: Can not put a VLAN on a LAG port"); return -EINVAL; - if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) + } + if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) { + NL_SET_ERR_MSG(extack, + "spectrum: Master device is an OVS master and this device has a VLAN"); return -EINVAL; - if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) + } + if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) { + NL_SET_ERR_MSG(extack, + "spectrum: Can not put a VLAN on an OVS port"); return -EINVAL; + } break; case NETDEV_CHANGEUPPER: upper_dev = 
info->upper_dev; -- cgit v1.2.3 From d009313c99ba575b65a944fe2c683c6346ea1721 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 5 Oct 2017 10:10:23 +0100 Subject: net: qcom/emac: make function emac_isr static The function emac_isr is local to the source and does not need to be in global scope, so make it static. Cleans up sparse warnings: symbol 'emac_isr' was not declared. Should it be static? Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/qualcomm/emac/emac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 759543512117..f477ba29c569 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -130,7 +130,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *netdev) return emac_mac_tx_buf_send(adpt, &adpt->tx_q, skb); } -irqreturn_t emac_isr(int _irq, void *data) +static irqreturn_t emac_isr(int _irq, void *data) { struct emac_irq *irq = data; struct emac_adapter *adpt = -- cgit v1.2.3 From ec2f25d203aaede93cef64198d93df698913101f Mon Sep 17 00:00:00 2001 From: Rami Rosen Date: Sat, 19 Aug 2017 00:20:31 +0300 Subject: i40e: fix a typo in i40e_pf documentation This patch fixes a typo in i40e_pf object documentation; num_req_vfs refers to the number of VFs requested for the PF. Signed-off-by: Rami Rosen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 439c63cb2a0c..2bc4dd0dbbf1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -350,7 +350,7 @@ struct i40e_pf { u16 num_vmdq_vsis; /* num vmdq vsis this PF has set up */ u16 num_vmdq_qps; /* num queue pairs per vmdq pool */ u16 num_vmdq_msix; /* num queue vectors per vmdq pool */ - u16 num_req_vfs; /* num VFs requested for this VF */ + u16 num_req_vfs; /* num VFs requested for this PF */ u16 num_vf_qps; /* num queue pairs per VF */ u16 num_lan_qps; /* num lan queues this PF has set up */ u16 num_lan_msix; /* num queue vectors for the base PF vsi */ -- cgit v1.2.3 From 64615b5418bac1d3b3a50a83fb2e42091fe299fe Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Tue, 29 Aug 2017 05:32:30 -0400 Subject: i40e: add private flag to control source pruning By default, our devices do source pruning, that is, they drop receive packets that have the source MAC matching one of the receive filters. Unfortunately, this breaks ARP monitoring in channel bonding, as the bonding driver expects devices to receive ARPs containing their own source address. Add an ethtool private flag to control this feature. Also, remove the netif_running() check when we process our private flags. It's OK to reset when the device is closed and in most cases we need the reset to apply these changes.
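As a usage sketch (the interface name here is only an example), the new knob is exercised through the ethtool private-flags interface, using the flag string added to i40e_gstrings_priv_flags in the diff below:

$ ethtool --show-priv-flags eth0
$ ethtool --set-priv-flags eth0 disable-source-pruning on

Toggling the flag triggers the PF reset that applies the new VSI configuration.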
Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 1 + drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 7 +++++-- drivers/net/ethernet/intel/i40e/i40e_main.c | 25 +++++++++++++++++++++++++ 3 files changed, 31 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 2bc4dd0dbbf1..c78448daa7a1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -452,6 +452,7 @@ struct i40e_pf { #define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(55) #define I40E_FLAG_CLIENT_L2_CHANGE BIT_ULL(56) #define I40E_FLAG_LEGACY_RX BIT_ULL(58) +#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT_ULL(59) struct i40e_client_instance *cinst; bool stat_offsets_loaded; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 1136d02e2e95..6203d362438c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -227,6 +227,8 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = { I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0), I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0), I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0), + I40E_PRIV_FLAG("disable-source-pruning", + I40E_FLAG_SOURCE_PRUNING_DISABLED, 0), }; #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags) @@ -4189,8 +4191,9 @@ flags_complete: /* Issue reset to cause things to take effect, as additional bits * are added we will need to create a mask of bits requiring reset */ - if ((changed_flags & I40E_FLAG_VEB_STATS_ENABLED) || - ((changed_flags & I40E_FLAG_LEGACY_RX) && netif_running(dev))) + if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED | + I40E_FLAG_LEGACY_RX | + I40E_FLAG_SOURCE_PRUNING_DISABLED)) i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true); return 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 3f9e89b054ec..b539469f576f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -9903,6 +9903,31 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) enabled_tc = i40e_pf_get_tc_map(pf); + /* Source pruning is enabled by default, so the flag is + * negative logic - if it's set, we need to fiddle with + * the VSI to disable source pruning. 
+ */ + if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) { + memset(&ctxt, 0, sizeof(ctxt)); + ctxt.seid = pf->main_vsi_seid; + ctxt.pf_num = pf->hw.pf_id; + ctxt.vf_num = 0; + ctxt.info.valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id = + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB); + ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "update vsi failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); + ret = -ENOENT; + goto err; + } + } + /* MFP mode setup queue map and update VSI */ if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi)) { /* NIC type PF */ -- cgit v1.2.3 From be664cbefc50977aaefc868ba6a1109ec9b7449d Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 29 Aug 2017 05:32:31 -0400 Subject: i40e/i40evf: spread CPU affinity hints across online CPUs only Currently, when setting up the IRQ for a q_vector, we set an affinity hint based on the v_idx of that q_vector. That is, a loop iterates on v_idx, which is an incremental value, and the cpumask is created based on this value. This is a problem in systems with multiple logical CPUs per core (like in simultaneous multithreading (SMT) scenarios). If we disable some logical CPUs, by turning SMT off for example, we will end up with a sparse cpu_online_mask, i.e., only the first CPU in a core is online, and incremental filling in q_vector cpumask might lead to multiple offline CPUs being assigned to q_vectors. Example: if we have a system with 8 cores, each one containing 8 logical CPUs (SMT == 8 in this case), we have 64 CPUs in total. But if SMT is disabled, only the 1st CPU in each core remains online, so the cpu_online_mask in this case would have only 8 bits set, in a sparse way. In the general case, when SMT is off the cpu_online_mask has only C bits set: 0, 1*N, 2*N, ..., (C-1)*N where C == # of cores; N == # of logical CPUs per core. In our example, only bits 0, 8, 16, 24, 32, 40, 48, 56 would be set. Instead, we should only assign hints for CPUs which are online. Even better, the kernel already provides a function, cpumask_local_spread() which takes an index and returns a CPU, spreading the interrupts across local NUMA nodes first, and then remote ones if necessary. Since we generally have a 1:1 mapping between vectors and CPUs, there is no real advantage to spreading vectors to local CPUs first. In order to avoid a mismatch of the default XPS hints, we'll pass -1 so that it spreads across all CPUs without regard to the node locality. Note that we don't need to change the q_vector->affinity_mask as this is initialized to cpu_possible_mask, until an actual affinity is set and then notified back to us.
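To make the new mapping concrete, here is a short illustration; the numbers are taken from the SMT example above, not from driver code:

/* With SMT off on the 8-core/SMT-8 system above, cpu_online_mask has
 * bits 0, 8, 16, ..., 56 set.  cpumask_local_spread() walks the online
 * mask, so:
 *
 *   cpumask_local_spread(0, -1) -> CPU 0
 *   cpumask_local_spread(1, -1) -> CPU 8
 *   cpumask_local_spread(7, -1) -> CPU 56
 *   cpumask_local_spread(8, -1) -> CPU 0  (the index wraps modulo
 *                                          num_online_cpus())
 *
 * whereas the old get_cpu_mask(q_vector->v_idx) would have pointed
 * vectors 1..7 at the offline CPUs 1..7.
 */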
Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 16 +++++++++++----- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 9 ++++++--- 2 files changed, 17 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index b539469f576f..d2bb4f17c89e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2885,14 +2885,15 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) static void i40e_config_xps_tx_ring(struct i40e_ring *ring) { struct i40e_vsi *vsi = ring->vsi; + int cpu; if (!ring->q_vector || !ring->netdev) return; if ((vsi->tc_config.numtc <= 1) && !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) { - netif_set_xps_queue(ring->netdev, - get_cpu_mask(ring->q_vector->v_idx), + cpu = cpumask_local_spread(ring->q_vector->v_idx, -1); + netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu), ring->queue_index); } @@ -3482,6 +3483,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) int tx_int_idx = 0; int vector, err; int irq_num; + int cpu; for (vector = 0; vector < q_vectors; vector++) { struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; @@ -3517,10 +3519,14 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename) q_vector->affinity_notify.notify = i40e_irq_affinity_notify; q_vector->affinity_notify.release = i40e_irq_affinity_release; irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); - /* get_cpu_mask returns a static constant mask with - * a permanent lifetime so it's ok to use here. + /* Spread affinity hints out across online CPUs. + * + * get_cpu_mask returns a static constant mask with + * a permanent lifetime so it's ok to pass to + * irq_set_affinity_hint without making a copy. */ - irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx)); + cpu = cpumask_local_spread(q_vector->v_idx, -1); + irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); } vsi->irqs_ready = true; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index f2f1e754c2ce..bc76378a71e2 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -515,6 +515,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) unsigned int vector, q_vectors; unsigned int rx_int_idx = 0, tx_int_idx = 0; int irq_num, err; + int cpu; i40evf_irq_disable(adapter); /* Decrement for Other and TCP Timer vectors */ @@ -553,10 +554,12 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) q_vector->affinity_notify.release = i40evf_irq_affinity_release; irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); - /* get_cpu_mask returns a static constant mask with - * a permanent lifetime so it's ok to use here. + /* Spread the IRQ affinity hints across online CPUs. Note that + * get_cpu_mask returns a mask with a permanent lifetime so + * it's safe to use as a hint for irq_set_affinity_hint. 
*/ - irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx)); + cpu = cpumask_local_spread(q_vector->v_idx, -1); + irq_set_affinity_hint(irq_num, get_cpu_mask(cpu)); } return 0; -- cgit v1.2.3 From c3d26b75c22b0487c452bd610338aa015eae517b Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Tue, 29 Aug 2017 05:32:32 -0400 Subject: i40e: re-enable PTP L4 capabilities for XL710 if FW >6.0 Starting with XL710 FW 5.3 PTP L4 was disabled for XL710 due to a bug. The bug has since been resolved in XL710 FW >6.0 and PTP L4 can now be re-enabled on those devices with updated firmware. Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index d2bb4f17c89e..85132eee9f64 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -9074,6 +9074,11 @@ static int i40e_sw_init(struct i40e_pf *pf) (pf->hw.aq.fw_maj_ver >= 5))) pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB; + /* Enable PTP L4 if FW > v6.0 */ + if (pf->hw.mac.type == I40E_MAC_XL710 && + pf->hw.aq.fw_maj_ver >= 6) + pf->hw_features |= I40E_HW_PTP_L4_CAPABLE; + if (pf->hw.func_caps.vmdq) { pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; pf->flags |= I40E_FLAG_VMDQ_ENABLED; -- cgit v1.2.3 From 60518a048919a2781e3192981d2946149240b837 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Tue, 29 Aug 2017 05:32:33 -0400 Subject: i40e: redefine I40E_PHY_TYPE_MAX Since I40E_PHY_TYPE_MAX is used as an iterator, usually combined with some sort of bit-shifting, it should only include actual PHY types and not error cases. Move it up in the enum declaration so that loops only iterate across valid PHY types. Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 2 +- drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 4c85ea9cd89a..50c5a4c630b8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1771,9 +1771,9 @@ enum i40e_aq_phy_type { I40E_PHY_TYPE_25GBASE_CR = 0x20, I40E_PHY_TYPE_25GBASE_SR = 0x21, I40E_PHY_TYPE_25GBASE_LR = 0x22, + I40E_PHY_TYPE_MAX, I40E_PHY_TYPE_EMPTY = 0xFE, I40E_PHY_TYPE_DEFAULT = 0xFF, - I40E_PHY_TYPE_MAX }; #define I40E_LINK_SPEED_100MB_SHIFT 0x1 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index ed5602f4bbcd..dc6fc8b1bc79 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -1767,9 +1767,9 @@ enum i40e_aq_phy_type { I40E_PHY_TYPE_25GBASE_CR = 0x20, I40E_PHY_TYPE_25GBASE_SR = 0x21, I40E_PHY_TYPE_25GBASE_LR = 0x22, + I40E_PHY_TYPE_MAX, I40E_PHY_TYPE_EMPTY = 0xFE, I40E_PHY_TYPE_DEFAULT = 0xFF, - I40E_PHY_TYPE_MAX }; #define I40E_LINK_SPEED_100MB_SHIFT 0x1 -- cgit v1.2.3 From 0a0d9af5bc0b2130edaed9d9c57fc08bd338e3f5 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Tue, 29 Aug 2017 05:32:34 -0400 Subject: i40e: fix incorrect register definition This register was defined incorrectly.
Fix the increment value to 8, and replace the iterator with _i to make the definition consistent with other statistics registers. Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_register.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index 86ca27f72f02..c234758dad15 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -2794,7 +2794,7 @@ #define I40E_GLV_RUPP_MAX_INDEX 383 #define I40E_GLV_RUPP_RUPP_SHIFT 0 #define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT) -#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */ +#define I40E_GLV_TEPC(_i) (0x00344000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */ #define I40E_GLV_TEPC_MAX_INDEX 383 #define I40E_GLV_TEPC_TEPC_SHIFT 0 #define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT) -- cgit v1.2.3 From bd6cd4e6dd38a35215d3f28f12db51213c9aead6 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 29 Aug 2017 05:32:35 -0400 Subject: i40e/i40evf: use DECLARE_BITMAP for state When using set_bit and friends, we should be using actual bitmaps, and fix all the locations where we might access it. Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 8 ++++---- drivers/net/ethernet/intel/i40e/i40e_main.c | 4 ++-- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 3 ++- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 3 ++- 4 files changed, 10 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 8f326f87a815..6f2725fc50a1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -278,8 +278,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) rx_ring->netdev, rx_ring->rx_bi); dev_info(&pf->pdev->dev, - " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", - i, rx_ring->state, + " rx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n", + i, *rx_ring->state, rx_ring->queue_index, rx_ring->reg_idx); dev_info(&pf->pdev->dev, @@ -334,8 +334,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) tx_ring->netdev, tx_ring->tx_bi); dev_info(&pf->pdev->dev, - " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n", - i, tx_ring->state, + " tx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n", + i, *tx_ring->state, tx_ring->queue_index, tx_ring->reg_idx); dev_info(&pf->pdev->dev, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 85132eee9f64..49401be7a2f4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2891,7 +2891,7 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring) return; if ((vsi->tc_config.numtc <= 1) && - !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) { + !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state)) { cpu = cpumask_local_spread(ring->q_vector->v_idx, -1); netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu), ring->queue_index); @@ -3010,7 +3010,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) 
struct i40e_hmc_obj_rxq rx_ctx; i40e_status err = 0; - ring->state = 0; + bitmap_zero(ring->state, __I40E_RING_STATE_NBITS); /* clear the context structure first */ memset(&rx_ctx, 0, sizeof(rx_ctx)); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 2f848bc5e391..a4e3e665a1a1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -342,6 +342,7 @@ struct i40e_rx_queue_stats { enum i40e_ring_state_t { __I40E_TX_FDIR_INIT_DONE, __I40E_TX_XPS_INIT_DONE, + __I40E_RING_STATE_NBITS /* must be last */ }; /* some useful defines for virtchannel interface, which @@ -366,7 +367,7 @@ struct i40e_ring { struct i40e_tx_buffer *tx_bi; struct i40e_rx_buffer *rx_bi; }; - unsigned long state; + DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS); u16 queue_index; /* Queue number of ring */ u8 dcb_tc; /* Traffic class of ring */ u8 __iomem *tail; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 0d9f98bc07bd..d8ca802a71a9 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -325,6 +325,7 @@ struct i40e_rx_queue_stats { enum i40e_ring_state_t { __I40E_TX_FDIR_INIT_DONE, __I40E_TX_XPS_INIT_DONE, + __I40E_RING_STATE_NBITS /* must be last */ }; /* some useful defines for virtchannel interface, which @@ -348,7 +349,7 @@ struct i40e_ring { struct i40e_tx_buffer *tx_bi; struct i40e_rx_buffer *rx_bi; }; - unsigned long state; + DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS); u16 queue_index; /* Queue number of ring */ u8 dcb_tc; /* Traffic class of ring */ u8 __iomem *tail; -- cgit v1.2.3 From 7f6618226360b4aa0230a49525c81bf47d1d53c8 Mon Sep 17 00:00:00 2001 From: Alice Michael Date: Tue, 29 Aug 2017 05:32:36 -0400 Subject: i40e: fix merge error This patch removes some code that was accidentally added to the wrong function by a merge error. Fixes: c53934c6d1b1 ("i40e: fix: do not sleep in netdev_ops") Signed-off-by: Alice Michael Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 49401be7a2f4..628101bb08d4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1776,11 +1776,6 @@ static void i40e_set_rx_mode(struct net_device *netdev) vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; vsi->back->flags |= I40E_FLAG_FILTER_SYNC; } - - /* schedule our worker thread which will take care of - * applying the new filter changes - */ - i40e_service_event_schedule(vsi->back); } /** -- cgit v1.2.3 From 9a858178ef6dfee2b6e6ec65c42b65a129b4df02 Mon Sep 17 00:00:00 2001 From: Filip Sadowski Date: Tue, 29 Aug 2017 05:32:37 -0400 Subject: i40e: Display error message if module does not meet thermal requirements This patch causes an error message to be displayed when the NIC detects insertion of a module that does not meet thermal requirements.
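The resulting policy in i40e_handle_link_event() can be summarized by the sketch below; the field and flag names are the ones used in the diff that follows, while the report_*() helpers are merely placeholders for the dev_err() pairs:

	if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
		/* module trips the thermal check: always report */
		report_thermal_error();
	} else if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
		   !(status->an_info & I40E_AQ_QUALIFIED_MODULE) &&
		   !(status->link_info & I40E_AQ_LINK_UP) &&
		   !(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED)) {
		/* unqualified module with link down: report, unless the
		 * link was deliberately forced down
		 */
		report_unqualified_module();
	}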
Signed-off-by: Filip Sadowski Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 1 + drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 1 + drivers/net/ethernet/intel/i40e/i40e_main.c | 24 +++++++++++++++++----- .../net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 1 + 4 files changed, 22 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index c78448daa7a1..4dc6d43f8812 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -451,6 +451,7 @@ struct i40e_pf { #define I40E_FLAG_CLIENT_RESET BIT_ULL(54) #define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(55) #define I40E_FLAG_CLIENT_L2_CHANGE BIT_ULL(56) +#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT_ULL(57) #define I40E_FLAG_LEGACY_RX BIT_ULL(58) #define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT_ULL(59) diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 50c5a4c630b8..a8f65aed5421 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1772,6 +1772,7 @@ enum i40e_aq_phy_type { I40E_PHY_TYPE_25GBASE_SR = 0x21, I40E_PHY_TYPE_25GBASE_LR = 0x22, I40E_PHY_TYPE_MAX, + I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD, I40E_PHY_TYPE_EMPTY = 0xFE, I40E_PHY_TYPE_DEFAULT = 0xFF, }; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 628101bb08d4..3d6d6a283327 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6558,12 +6558,26 @@ static void i40e_handle_link_event(struct i40e_pf *pf, */ i40e_link_event(pf); - /* check for unqualified module, if link is down */ - if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && - (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && - (!(status->link_info & I40E_AQ_LINK_UP))) + /* Check if module meets thermal requirements */ + if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) { dev_err(&pf->pdev->dev, - "The driver failed to link because an unqualified module was detected.\n"); + "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n"); + dev_err(&pf->pdev->dev, + "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); + } else { + /* check for unqualified module, if link is down, suppress + * the message if link was forced to be down. 
+ */ + if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) && + (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) && + (!(status->link_info & I40E_AQ_LINK_UP)) && + (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) { + dev_err(&pf->pdev->dev, + "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n"); + dev_err(&pf->pdev->dev, + "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n"); + } + } } /** diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index dc6fc8b1bc79..60c892f559b9 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -1768,6 +1768,7 @@ enum i40e_aq_phy_type { I40E_PHY_TYPE_25GBASE_SR = 0x21, I40E_PHY_TYPE_25GBASE_LR = 0x22, I40E_PHY_TYPE_MAX, + I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD, I40E_PHY_TYPE_EMPTY = 0xFE, I40E_PHY_TYPE_DEFAULT = 0xFF, }; -- cgit v1.2.3 From 013df598d61161d356d5545f56422766ed3a3a38 Mon Sep 17 00:00:00 2001 From: Filip Sadowski Date: Tue, 29 Aug 2017 05:32:38 -0400 Subject: i40e: Properly maintain flow director filters list When there is no space for more flow director filters and the user requests adding a new one, it is rejected by firmware and automatically removed from the filter list maintained by the driver. This behaviour is correct. Afterwards, an existing filter can be removed, making a free slot for the new one. This, however, causes the newly added filter to be accepted by firmware but removed from the driver's filter list, resulting in it not showing after issuing 'ethtool -n '. This happened due to not clearing the variable pf->fd_inv, which stores the filter number to be removed from the list when firmware refused to add the requested filter. It caused the filter with this specific ID to be constantly removed once it was added to the list, although it had been accepted by firmware and effectively applied to the NIC. It was fixed by clearing the pf->fd_inv variable after removal of the filter from the list when it was rejected by firmware. Signed-off-by: Filip Sadowski Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 3d6d6a283327..9704cfef2f05 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6232,6 +6232,7 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf) hlist_del(&filter->fdir_node); kfree(filter); pf->fdir_pf_active_filters--; + pf->fd_inv = 0; } } } -- cgit v1.2.3 From 19b7960b2da1db56ec3f8b478c70bab244feb644 Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Tue, 29 Aug 2017 05:32:39 -0400 Subject: i40e: implement split PCI error reset handler This patch implements the PCI error handlers reset_prepare and reset_done. This allows us to handle function level reset (FLR). Without this patch we are unable to perform and recover from an FLR correctly, and this will cause VFs to be unable to recover from an FLR on the PF.
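For context, one way to exercise these callbacks from userspace is the PCI core's sysfs reset attribute, which performs a function reset and ends up in the driver's reset_prepare/reset_done hooks (the device address is only an example):

$ echo 1 > /sys/bus/pci/devices/0000:01:00.0/reset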
Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 9704cfef2f05..60b11fdeca2d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -12045,6 +12045,28 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev) return result; } +/** + * i40e_pci_error_reset_prepare - prepare device driver for pci reset + * @pdev: PCI device information struct + */ +static void i40e_pci_error_reset_prepare(struct pci_dev *pdev) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + + i40e_prep_for_reset(pf, false); +} + +/** + * i40e_pci_error_reset_done - pci reset done, device driver reset can begin + * @pdev: PCI device information struct + */ +static void i40e_pci_error_reset_done(struct pci_dev *pdev) +{ + struct i40e_pf *pf = pci_get_drvdata(pdev); + + i40e_reset_and_rebuild(pf, false, false); +} + /** * i40e_pci_error_resume - restart operations after PCI error recovery * @pdev: PCI device information struct @@ -12235,6 +12257,8 @@ static int i40e_resume(struct device *dev) static const struct pci_error_handlers i40e_err_handler = { .error_detected = i40e_pci_error_detected, .slot_reset = i40e_pci_error_slot_reset, + .reset_prepare = i40e_pci_error_reset_prepare, + .reset_done = i40e_pci_error_reset_done, .resume = i40e_pci_error_resume, }; -- cgit v1.2.3 From 052b93d0c2ecf693f13561b4b100d541ced41af0 Mon Sep 17 00:00:00 2001 From: Mariusz Stachura Date: Tue, 29 Aug 2017 05:32:40 -0400 Subject: i40e: do not enter PHY debug mode while setting LEDs behaviour The previous implementation of the LED set/get functions required entering PHY debug mode, in order to prevent access to the PHY from FW and SW at the same time. A reset of all ports was an unwanted side effect.
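The set/get path in question sits behind ethtool's port-identification operation, so the change is easy to exercise (interface name and blink duration are examples only):

$ ethtool -p eth0 10

With this change, adapters whose LEDs are controlled through the PHY admin-queue API no longer reset every port just to blink an LED.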
Signed-off-by: Mariusz Stachura Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 6203d362438c..de0dfe340494 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2010,7 +2010,9 @@ static int i40e_set_phys_id(struct net_device *netdev, if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) { pf->led_status = i40e_led_get(hw); } else { - i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, NULL); + if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) + i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, + NULL); ret = i40e_led_get_phy(hw, &temp_status, &pf->phy_led_val); pf->led_status = temp_status; @@ -2035,7 +2037,8 @@ static int i40e_set_phys_id(struct net_device *netdev, ret = i40e_led_set_phy(hw, false, pf->led_status, (pf->phy_led_val | I40E_PHY_LED_MODE_ORIG)); - i40e_aq_set_phy_debug(hw, 0, NULL); + if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) + i40e_aq_set_phy_debug(hw, 0, NULL); } break; default: -- cgit v1.2.3 From 0a3b4f702fb1f76b03530d58af9efc5e10392185 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 29 Aug 2017 05:32:41 -0400 Subject: i40evf: enable support for VF VLAN tag stripping control A recent commit 809481484e5d ("i40e/i40evf: support for VF VLAN tag stripping control") added support for VFs to negotiate the control of VLAN tag stripping. This should have allowed VFs to disable the feature. Unfortunately, the flag was set only in netdev->features and not in netdev->hw_features. This ultimately caused the stack to assume that it could not change the flag, so it was unchangeable and marked as [fixed] in the ethtool -k output. Fix this by setting the feature in hw_features first, just as we do for the PF code. This enables ethtool -K to disable the feature correctly, and fully enables user control of the VLAN tag stripping feature.
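With the feature now exposed in hw_features, the standard ethtool toggles work from the VF side (interface name is an example; rxvlan/txvlan are ethtool's short names for the CTAG RX/TX offload bits):

$ ethtool -K eth0 rxvlan off
$ ethtool -k eth0 | grep vlan-offload

Before this fix, the rx-vlan-offload line in the second command would have been reported as [fixed].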
Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index bc76378a71e2..1d2fc898b664 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2423,10 +2423,6 @@ out_err: return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } -#define I40EVF_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_TX |\ - NETIF_F_HW_VLAN_CTAG_RX |\ - NETIF_F_HW_VLAN_CTAG_FILTER) - /** * i40evf_fix_features - fix up the netdev feature bits * @netdev: our net device @@ -2439,9 +2435,11 @@ static netdev_features_t i40evf_fix_features(struct net_device *netdev, { struct i40evf_adapter *adapter = netdev_priv(netdev); - features &= ~I40EVF_VLAN_FEATURES; - if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) - features |= I40EVF_VLAN_FEATURES; + if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)) + features &= ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER); + return features; } @@ -2572,9 +2570,17 @@ int i40evf_process_config(struct i40evf_adapter *adapter) */ hw_features = hw_enc_features; + /* Enable VLAN features if supported */ + if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) + hw_features |= (NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX); + netdev->hw_features |= hw_features; - netdev->features |= hw_features | I40EVF_VLAN_FEATURES; + netdev->features |= hw_features; + + if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; adapter->vsi.id = adapter->vsi_res->vsi_id; -- cgit v1.2.3 From a5340d933e3cd7829a24bacc156dd1e475a1ae2c Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 29 Aug 2017 05:32:42 -0400 Subject: i40e: ignore skb->xmit_more when deciding to set RS bit Since commit 6a7fded776a7 ("i40e: Fix RS bit update in Tx path and disable force WB workaround") we've tried to "optimize" setting the RS bit based around skb->xmit_more. This same logic was refactored in commit 1dc8b538795f ("i40e: Reorder logic for coalescing RS bits"), but ultimately was not functionally changed. Using skb->xmit_more in this way is incorrect, because in certain circumstances we may see a large number of skbs in sequence with xmit_more set. This leads to a performance loss as the hardware does not writeback anything for those packets, which delays the time it takes for us to respond to the stack transmit requests. This significantly impacts UDP performance, especially when layered with multiple devices, such as bonding, VLANs, and vnet setups. This was not noticed until now because it is difficult to create a setup which reproduces the issue. It was discovered in a UDP_STREAM test in a VM, connected using a vnet device to a bridge, which is connected to a bonded pair of X710 ports in active-backup mode with a VLAN. These layered devices seem to compound the number of skbs transmitted at once by the qdisc. Additionally, the problem can be masked by reducing the ITR value. Since the original commit does not provide strong justification for this RS bit "optimization", revert to the previous behavior of setting the RS bit every 4th packet. 
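The surviving descriptor logic reduces to the sketch below, condensed from the diff that follows (the in-tree comment pins WB_STRIDE at 4):

	/* arm RS on every WB_STRIDE-th descriptor, ignoring xmit_more */
	desc_count |= ++tx_ring->packet_stride;
	if (desc_count >= WB_STRIDE) {
		td_cmd |= I40E_TX_DESC_CMD_RS;
		tx_ring->packet_stride = 0;
	}

	/* only the tail bump still honours xmit_more */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more)
		writel(i, tx_ring->tail);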
Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 34 ++++------------------------- 1 file changed, 4 insertions(+), 30 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index d9fdf69bbc6e..3bd176606c09 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -3167,38 +3167,12 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, /* write last descriptor with EOP bit */ td_cmd |= I40E_TX_DESC_CMD_EOP; - /* We can OR these values together as they both are checked against - * 4 below and at this point desc_count will be used as a boolean value - * after this if/else block. + /* We OR these values together to check both against 4 (WB_STRIDE) + * below. This is safe since we don't re-use desc_count afterwards. */ desc_count |= ++tx_ring->packet_stride; - /* Algorithm to optimize tail and RS bit setting: - * if queue is stopped - * mark RS bit - * reset packet counter - * else if xmit_more is supported and is true - * advance packet counter to 4 - * reset desc_count to 0 - * - * if desc_count >= 4 - * mark RS bit - * reset packet counter - * if desc_count > 0 - * update tail - * - * Note: If there are less than 4 descriptors - * pending and interrupts were disabled the service task will - * trigger a force WB. - */ - if (netif_xmit_stopped(txring_txq(tx_ring))) { - goto do_rs; - } else if (skb->xmit_more) { - /* set stride to arm on next packet and reset desc_count */ - tx_ring->packet_stride = WB_STRIDE; - desc_count = 0; - } else if (desc_count >= WB_STRIDE) { -do_rs: + if (desc_count >= WB_STRIDE) { /* write last descriptor with RS bit set */ td_cmd |= I40E_TX_DESC_CMD_RS; tx_ring->packet_stride = 0; @@ -3219,7 +3193,7 @@ do_rs: first->next_to_watch = tx_desc; /* notify HW of packet */ - if (desc_count) { + if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { writel(i, tx_ring->tail); /* we need this if more than one processor can write to our tail -- cgit v1.2.3 From b74f571f59a8a3dae998e3b95e0f88fac39bfef3 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Fri, 1 Sep 2017 13:54:07 -0700 Subject: i40e/i40evf: organize and re-number feature flags Now that we've reduced the number of flags, organize similar flags together and re-number them accordingly. Since we don't yet have more than 32 flags, we'll use a u32 for both the hw_features and flag field. Should we gain more flags in the future, we may need to convert to a u64 or separate flags out into two fields. One alternative approach considered, but not implemented here, was to use an enumeration for the flag variables, and create a macro I40E_FLAG() which used string concatenation to generate BIT_ULL values. This has the advantage of making the actual bit values compile-time dynamic so that we do not need to worry about matching the order to the bit value. However, this does produce a high level of code churn, and makes it more difficult to read a dumped flags value when debugging. 
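For reference, the rejected alternative would look roughly like the sketch below; the names are illustrative, not taken from the driver. Flag positions come from an enum and a token-pasting macro turns a name into its bit at compile time, so the values track declaration order automatically:

	enum i40e_flag_bits {
		I40E_FLAG_BIT_RX_CSUM_ENABLED,
		I40E_FLAG_BIT_MSI_ENABLED,
		I40E_FLAG_BIT_MSIX_ENABLED,
		/* ... one enumerator per flag ... */
		I40E_FLAG_BITS_MAX
	};

	#define I40E_FLAG(name)	BIT_ULL(I40E_FLAG_BIT_##name)

	/* usage: if (pf->flags & I40E_FLAG(MSI_ENABLED)) ... */

As the message above notes, the cost is that the numeric value of each flag is no longer obvious when reading a dumped pf->flags word.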
Change-ID: I8653fff69453cd547d6fe98d29dfa9d8710387d1 Signed-off-by: Jacob Keller Reviewed-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 98 +++++++++++++------------- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 6 +- drivers/net/ethernet/intel/i40evf/i40evf.h | 32 ++++----- 3 files changed, 68 insertions(+), 68 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 4dc6d43f8812..18c453a3e728 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -403,57 +403,57 @@ struct i40e_pf { struct timer_list service_timer; struct work_struct service_task; - u64 hw_features; -#define I40E_HW_RSS_AQ_CAPABLE BIT_ULL(0) -#define I40E_HW_128_QP_RSS_CAPABLE BIT_ULL(1) -#define I40E_HW_ATR_EVICT_CAPABLE BIT_ULL(2) -#define I40E_HW_WB_ON_ITR_CAPABLE BIT_ULL(3) -#define I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT_ULL(4) -#define I40E_HW_NO_PCI_LINK_CHECK BIT_ULL(5) -#define I40E_HW_100M_SGMII_CAPABLE BIT_ULL(6) -#define I40E_HW_NO_DCB_SUPPORT BIT_ULL(7) -#define I40E_HW_USE_SET_LLDP_MIB BIT_ULL(8) -#define I40E_HW_GENEVE_OFFLOAD_CAPABLE BIT_ULL(9) -#define I40E_HW_PTP_L4_CAPABLE BIT_ULL(10) -#define I40E_HW_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(11) -#define I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE BIT_ULL(12) -#define I40E_HW_HAVE_CRT_RETIMER BIT_ULL(13) -#define I40E_HW_OUTER_UDP_CSUM_CAPABLE BIT_ULL(14) -#define I40E_HW_PHY_CONTROLS_LEDS BIT_ULL(15) -#define I40E_HW_STOP_FW_LLDP BIT_ULL(16) -#define I40E_HW_PORT_ID_VALID BIT_ULL(17) -#define I40E_HW_RESTART_AUTONEG BIT_ULL(18) + u32 hw_features; +#define I40E_HW_RSS_AQ_CAPABLE BIT(0) +#define I40E_HW_128_QP_RSS_CAPABLE BIT(1) +#define I40E_HW_ATR_EVICT_CAPABLE BIT(2) +#define I40E_HW_WB_ON_ITR_CAPABLE BIT(3) +#define I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT(4) +#define I40E_HW_NO_PCI_LINK_CHECK BIT(5) +#define I40E_HW_100M_SGMII_CAPABLE BIT(6) +#define I40E_HW_NO_DCB_SUPPORT BIT(7) +#define I40E_HW_USE_SET_LLDP_MIB BIT(8) +#define I40E_HW_GENEVE_OFFLOAD_CAPABLE BIT(9) +#define I40E_HW_PTP_L4_CAPABLE BIT(10) +#define I40E_HW_WOL_MC_MAGIC_PKT_WAKE BIT(11) +#define I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE BIT(12) +#define I40E_HW_HAVE_CRT_RETIMER BIT(13) +#define I40E_HW_OUTER_UDP_CSUM_CAPABLE BIT(14) +#define I40E_HW_PHY_CONTROLS_LEDS BIT(15) +#define I40E_HW_STOP_FW_LLDP BIT(16) +#define I40E_HW_PORT_ID_VALID BIT(17) +#define I40E_HW_RESTART_AUTONEG BIT(18) u64 flags; -#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1) -#define I40E_FLAG_MSI_ENABLED BIT_ULL(2) -#define I40E_FLAG_MSIX_ENABLED BIT_ULL(3) -#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT_ULL(4) -#define I40E_FLAG_RSS_ENABLED BIT_ULL(6) -#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7) -#define I40E_FLAG_IWARP_ENABLED BIT_ULL(10) -#define I40E_FLAG_FILTER_SYNC BIT_ULL(15) -#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(16) -#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(19) -#define I40E_FLAG_DCB_ENABLED BIT_ULL(20) -#define I40E_FLAG_FD_SB_ENABLED BIT_ULL(21) -#define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(22) -#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT_ULL(23) -#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT_ULL(24) -#define I40E_FLAG_PTP BIT_ULL(25) -#define I40E_FLAG_MFP_ENABLED BIT_ULL(26) -#define I40E_FLAG_UDP_FILTER_SYNC BIT_ULL(27) -#define I40E_FLAG_DCB_CAPABLE BIT_ULL(29) -#define I40E_FLAG_VEB_STATS_ENABLED BIT_ULL(37) -#define I40E_FLAG_LINK_POLLING_ENABLED BIT_ULL(39) -#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40) -#define 
I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51) -#define I40E_FLAG_CLIENT_RESET BIT_ULL(54) -#define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(55) -#define I40E_FLAG_CLIENT_L2_CHANGE BIT_ULL(56) -#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT_ULL(57) -#define I40E_FLAG_LEGACY_RX BIT_ULL(58) -#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT_ULL(59) +#define I40E_FLAG_RX_CSUM_ENABLED BIT(0) +#define I40E_FLAG_MSI_ENABLED BIT(1) +#define I40E_FLAG_MSIX_ENABLED BIT(2) +#define I40E_FLAG_RSS_ENABLED BIT(3) +#define I40E_FLAG_VMDQ_ENABLED BIT(4) +#define I40E_FLAG_FILTER_SYNC BIT(5) +#define I40E_FLAG_SRIOV_ENABLED BIT(6) +#define I40E_FLAG_DCB_CAPABLE BIT(7) +#define I40E_FLAG_DCB_ENABLED BIT(8) +#define I40E_FLAG_FD_SB_ENABLED BIT(9) +#define I40E_FLAG_FD_ATR_ENABLED BIT(10) +#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT(11) +#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT(12) +#define I40E_FLAG_MFP_ENABLED BIT(13) +#define I40E_FLAG_UDP_FILTER_SYNC BIT(14) +#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT(15) +#define I40E_FLAG_VEB_MODE_ENABLED BIT(16) +#define I40E_FLAG_VEB_STATS_ENABLED BIT(17) +#define I40E_FLAG_LINK_POLLING_ENABLED BIT(18) +#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT(19) +#define I40E_FLAG_TEMP_LINK_POLLING BIT(20) +#define I40E_FLAG_LEGACY_RX BIT(21) +#define I40E_FLAG_PTP BIT(22) +#define I40E_FLAG_IWARP_ENABLED BIT(23) +#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT(24) +#define I40E_FLAG_CLIENT_L2_CHANGE BIT(25) +#define I40E_FLAG_CLIENT_RESET BIT(26) +#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT(27) +#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT(28) struct i40e_client_instance *cinst; bool stat_offsets_loaded; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index de0dfe340494..afd3ca8d9851 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -4095,7 +4095,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - u64 orig_flags, new_flags, changed_flags; + u32 orig_flags, new_flags, changed_flags; u32 i, j; orig_flags = READ_ONCE(pf->flags); @@ -4147,12 +4147,12 @@ flags_complete: return -EOPNOTSUPP; /* Compare and exchange the new flags into place. If we failed, that - * is if cmpxchg64 returns anything but the old value, this means that + * is if cmpxchg returns anything but the old value, this means that * something else has modified the flags variable since we copied it * originally. We'll just punt with an error and log something in the * message buffer. 
*/ - if (cmpxchg64(&pf->flags, orig_flags, new_flags) != orig_flags) { + if (cmpxchg(&pf->flags, orig_flags, new_flags) != orig_flags) { dev_warn(&pf->pdev->dev, "Unable to update pf->flags as it was modified by another thread...\n"); return -EAGAIN; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 5982362c5643..de0af521d602 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -222,22 +222,22 @@ struct i40evf_adapter { u32 flags; #define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0) -#define I40EVF_FLAG_IMIR_ENABLED BIT(5) -#define I40EVF_FLAG_MQ_CAPABLE BIT(6) -#define I40EVF_FLAG_PF_COMMS_FAILED BIT(8) -#define I40EVF_FLAG_RESET_PENDING BIT(9) -#define I40EVF_FLAG_RESET_NEEDED BIT(10) -#define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(11) -#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12) -#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13) -#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(14) -#define I40EVF_FLAG_CLIENT_NEEDS_OPEN BIT(15) -#define I40EVF_FLAG_CLIENT_NEEDS_CLOSE BIT(16) -#define I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(17) -#define I40EVF_FLAG_PROMISC_ON BIT(18) -#define I40EVF_FLAG_ALLMULTI_ON BIT(19) -#define I40EVF_FLAG_LEGACY_RX BIT(20) -#define I40EVF_FLAG_REINIT_ITR_NEEDED BIT(21) +#define I40EVF_FLAG_IMIR_ENABLED BIT(1) +#define I40EVF_FLAG_MQ_CAPABLE BIT(2) +#define I40EVF_FLAG_PF_COMMS_FAILED BIT(3) +#define I40EVF_FLAG_RESET_PENDING BIT(4) +#define I40EVF_FLAG_RESET_NEEDED BIT(5) +#define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(6) +#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(7) +#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(8) +#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(9) +#define I40EVF_FLAG_CLIENT_NEEDS_OPEN BIT(10) +#define I40EVF_FLAG_CLIENT_NEEDS_CLOSE BIT(11) +#define I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(12) +#define I40EVF_FLAG_PROMISC_ON BIT(13) +#define I40EVF_FLAG_ALLMULTI_ON BIT(14) +#define I40EVF_FLAG_LEGACY_RX BIT(15) +#define I40EVF_FLAG_REINIT_ITR_NEEDED BIT(16) /* duplicates for common code */ #define I40E_FLAG_DCB_ENABLED 0 #define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED -- cgit v1.2.3 From bb055c198d9b2ba7baf292a440c2d24fe87db494 Mon Sep 17 00:00:00 2001 From: Pieter Jansen van Vuuren Date: Fri, 6 Oct 2017 10:21:20 +0200 Subject: nfp: add mpls match offloading support Previously MPLS match offloading was not supported. This patch enables MPLS match offloading support for label, bos and tc fields. Signed-off-by: Pieter Jansen van Vuuren Reviewed-by: Jakub Kicinski Signed-off-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 5 +++++ drivers/net/ethernet/netronome/nfp/flower/match.c | 17 +++++++++++++++-- drivers/net/ethernet/netronome/nfp/flower/offload.c | 6 +----- 3 files changed, 21 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 504ddaa21701..fe4751607b2b 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -57,6 +57,11 @@ #define NFP_FLOWER_MASK_VLAN_CFI BIT(12) #define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0) +#define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12) +#define NFP_FLOWER_MASK_MPLS_TC GENMASK(11, 9) +#define NFP_FLOWER_MASK_MPLS_BOS BIT(8) +#define NFP_FLOWER_MASK_MPLS_Q BIT(0) + #define NFP_FL_SC_ACT_DROP 0x80000000 #define NFP_FL_SC_ACT_USER 0x7D000000 #define NFP_FL_SC_ACT_POPV 0x6A000000 diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 865a815ab92a..e35ade9cd3d5 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -111,8 +111,21 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame, ether_addr_copy(frame->mac_src, &addr->src[0]); } - if (mask_version) - frame->mpls_lse = cpu_to_be32(~0); + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) { + struct flow_dissector_key_mpls *mpls; + u32 t_mpls; + + mpls = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_MPLS, + target); + + t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, mpls->mpls_label) | + FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, mpls->mpls_tc) | + FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, mpls->mpls_bos) | + NFP_FLOWER_MASK_MPLS_Q; + + frame->mpls_lse = cpu_to_be32(t_mpls); + } } static void diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 3d9537ebdea4..a721a00a2bcc 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -57,6 +57,7 @@ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \ BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \ + BIT(FLOW_DISSECTOR_KEY_MPLS) | \ BIT(FLOW_DISSECTOR_KEY_IP)) #define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \ @@ -238,11 +239,6 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, case cpu_to_be16(ETH_P_ARP): return -EOPNOTSUPP; - /* Currently we do not offload MPLS. */ - case cpu_to_be16(ETH_P_MPLS_UC): - case cpu_to_be16(ETH_P_MPLS_MC): - return -EOPNOTSUPP; - /* Will be included in layer 2. */ case cpu_to_be16(ETH_P_8021Q): break; -- cgit v1.2.3 From a1e9203cc6e5247f6e7af897252ca92cdf5edb70 Mon Sep 17 00:00:00 2001 From: Pieter Jansen van Vuuren Date: Fri, 6 Oct 2017 10:21:21 +0200 Subject: nfp: add IPv4 ttl and tos match offloading support Previously, matching on IPv4 ttl and tos fields was not offloaded. This patch enables offloading IPv4 ttl and tos as match fields. Signed-off-by: Pieter Jansen van Vuuren Reviewed-by: Jakub Kicinski Signed-off-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/flower/match.c | 11 ++++++++++- drivers/net/ethernet/netronome/nfp/flower/offload.c | 4 ---- 2 files changed, 10 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index e35ade9cd3d5..d6096b4f1391 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -156,7 +156,6 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame, struct flow_dissector_key_ipv4_addrs *addr; struct flow_dissector_key_basic *basic; - /* Wildcard TOS/TTL for now. */ memset(frame, 0, sizeof(struct nfp_flower_ipv4)); if (dissector_uses_key(flow->dissector, @@ -174,6 +173,16 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame, target); frame->proto = basic->ip_proto; } + + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) { + struct flow_dissector_key_ip *flow_ip; + + flow_ip = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_IP, + target); + frame->tos = flow_ip->tos; + frame->ttl = flow_ip->ttl; + } } static void diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index a721a00a2bcc..3651db5dfb8b 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -216,10 +216,6 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, /* Ethernet type is present in the key. */ switch (key_basic->n_proto) { case cpu_to_be16(ETH_P_IP): - if (mask_ip && mask_ip->tos) - return -EOPNOTSUPP; - if (mask_ip && mask_ip->ttl) - return -EOPNOTSUPP; key_layer |= NFP_FLOWER_LAYER_IPV4; key_size += sizeof(struct nfp_flower_ipv4); break; -- cgit v1.2.3 From fc53b4a7014aab8c260c2b81ae6c24687dff3045 Mon Sep 17 00:00:00 2001 From: Pieter Jansen van Vuuren Date: Fri, 6 Oct 2017 10:21:22 +0200 Subject: nfp: add IPv6 ttl and tos match offloading support Previously, matching on IPv6 ttl and tos fields was not offloaded. This patch enables offloading IPv6 ttl and tos as match fields. Signed-off-by: Pieter Jansen van Vuuren Reviewed-by: Jakub Kicinski Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/match.c | 11 ++++++++++- drivers/net/ethernet/netronome/nfp/flower/offload.c | 10 ---------- 2 files changed, 10 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index d6096b4f1391..60614d4f0e22 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -194,7 +194,6 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame, struct flow_dissector_key_ipv6_addrs *addr; struct flow_dissector_key_basic *basic; - /* Wildcard LABEL/TOS/TTL for now. 
*/ memset(frame, 0, sizeof(struct nfp_flower_ipv6)); if (dissector_uses_key(flow->dissector, @@ -212,6 +211,16 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame, target); frame->proto = basic->ip_proto; } + + if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) { + struct flow_dissector_key_ip *flow_ip; + + flow_ip = skb_flow_dissector_target(flow->dissector, + FLOW_DISSECTOR_KEY_IP, + target); + frame->tos = flow_ip->tos; + frame->ttl = flow_ip->ttl; + } } static void diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 3651db5dfb8b..6f239c27964e 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -135,7 +135,6 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, { struct flow_dissector_key_basic *mask_basic = NULL; struct flow_dissector_key_basic *key_basic = NULL; - struct flow_dissector_key_ip *mask_ip = NULL; u32 key_layer_two; u8 key_layer; int key_size; @@ -207,11 +206,6 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, flow->key); } - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) - mask_ip = skb_flow_dissector_target(flow->dissector, - FLOW_DISSECTOR_KEY_IP, - flow->mask); - if (mask_basic && mask_basic->n_proto) { /* Ethernet type is present in the key. */ switch (key_basic->n_proto) { @@ -221,10 +215,6 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls, break; case cpu_to_be16(ETH_P_IPV6): - if (mask_ip && mask_ip->tos) - return -EOPNOTSUPP; - if (mask_ip && mask_ip->ttl) - return -EOPNOTSUPP; key_layer |= NFP_FLOWER_LAYER_IPV6; key_size += sizeof(struct nfp_flower_ipv6); break; -- cgit v1.2.3 From da83d8fe5889822691384d2b3edf1716fb6debdb Mon Sep 17 00:00:00 2001 From: Pieter Jansen van Vuuren Date: Fri, 6 Oct 2017 10:21:23 +0200 Subject: nfp: add set ethernet header action flower offload Previously we did not have offloading support for set ethernet actions. This patch enables TC flower offload of set ethernet actions. Signed-off-by: Pieter Jansen van Vuuren Reviewed-by: Jakub Kicinski Signed-off-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/flower/action.c | 85 ++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 8 ++ 2 files changed, 93 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 38f3835ae176..631ea4b7d08e 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -223,6 +224,87 @@ nfp_fl_set_vxlan(struct nfp_fl_set_vxlan *set_vxlan, return 0; } +static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask) +{ + u32 oldvalue = get_unaligned((u32 *)p_exact); + u32 oldmask = get_unaligned((u32 *)p_mask); + + value &= mask; + value |= oldvalue & ~mask; + + put_unaligned(oldmask | mask, (u32 *)p_mask); + put_unaligned(value, (u32 *)p_exact); +} + +static int +nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off, + struct nfp_fl_set_eth *set_eth) +{ + u16 tmp_set_eth_op; + u32 exact, mask; + + if (off + 4 > ETH_ALEN * 2) + return -EOPNOTSUPP; + + mask = ~tcf_pedit_mask(action, idx); + exact = tcf_pedit_val(action, idx); + + if (exact & ~mask) + return -EOPNOTSUPP; + + nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off], + &set_eth->eth_addr_mask[off]); + + set_eth->reserved = cpu_to_be16(0); + tmp_set_eth_op = FIELD_PREP(NFP_FL_ACT_LEN_LW, + sizeof(*set_eth) >> NFP_FL_LW_SIZ) | + FIELD_PREP(NFP_FL_ACT_JMP_ID, + NFP_FL_ACTION_OPCODE_SET_ETHERNET); + set_eth->a_op = cpu_to_be16(tmp_set_eth_op); + + return 0; +} + +static int +nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) +{ + struct nfp_fl_set_eth set_eth; + enum pedit_header_type htype; + int idx, nkeys, err; + size_t act_size; + u32 offset, cmd; + + memset(&set_eth, 0, sizeof(set_eth)); + nkeys = tcf_pedit_nkeys(action); + + for (idx = 0; idx < nkeys; idx++) { + cmd = tcf_pedit_cmd(action, idx); + htype = tcf_pedit_htype(action, idx); + offset = tcf_pedit_offset(action, idx); + + if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) + return -EOPNOTSUPP; + + switch (htype) { + case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: + err = nfp_fl_set_eth(action, idx, offset, &set_eth); + break; + default: + return -EOPNOTSUPP; + } + if (err) + return err; + } + + if (set_eth.a_op) { + act_size = sizeof(set_eth); + memcpy(nfp_action, &set_eth, act_size); + *a_len += act_size; + } + + return 0; +} + static int nfp_flower_loop_action(const struct tc_action *a, struct nfp_fl_payload *nfp_fl, int *a_len, @@ -301,6 +383,9 @@ nfp_flower_loop_action(const struct tc_action *a, } else if (is_tcf_tunnel_release(a)) { /* Tunnel decap is handled by default so accept action. */ return 0; + } else if (is_tcf_pedit(a)) { + if (nfp_fl_pedit(a, &nfp_fl->action_data[*a_len], a_len)) + return -EOPNOTSUPP; } else { /* Currently we do not handle any other actions. 
*/ return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index fe4751607b2b..ffeaf85aa420 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -77,6 +77,7 @@ #define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1 #define NFP_FL_ACTION_OPCODE_POP_VLAN 2 #define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6 +#define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7 #define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17 #define NFP_FL_ACTION_OPCODE_NUM 32 @@ -107,6 +108,13 @@ enum nfp_flower_tun_type { NFP_FL_TUNNEL_VXLAN = 2, }; +struct nfp_fl_set_eth { + __be16 a_op; + __be16 reserved; + u8 eth_addr_mask[ETH_ALEN * 2]; + u8 eth_addr_val[ETH_ALEN * 2]; +}; + struct nfp_fl_output { __be16 a_op; __be16 flags; -- cgit v1.2.3 From c0b1bd9a8b8ac81e4e0985aad4a8de869ab6a668 Mon Sep 17 00:00:00 2001 From: Pieter Jansen van Vuuren Date: Fri, 6 Oct 2017 10:21:24 +0200 Subject: nfp: add set ipv4 header action flower offload Previously we did not have offloading support for set IPv4 actions. This patch enables TC flower offload of set IPv4 src and dst address actions. Signed-off-by: Pieter Jansen van Vuuren Reviewed-by: Jakub Kicinski Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/action.c | 46 ++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 10 +++++ 2 files changed, 56 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 631ea4b7d08e..2f886a529ee4 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -265,15 +265,54 @@ nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off, return 0; } +static int +nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off, + struct nfp_fl_set_ip4_addrs *set_ip_addr) +{ + u16 tmp_set_ipv4_op; + __be32 exact, mask; + + /* We are expecting tcf_pedit to return a big endian value */ + mask = (__force __be32)~tcf_pedit_mask(action, idx); + exact = (__force __be32)tcf_pedit_val(action, idx); + + if (exact & ~mask) + return -EOPNOTSUPP; + + switch (off) { + case offsetof(struct iphdr, daddr): + set_ip_addr->ipv4_dst_mask = mask; + set_ip_addr->ipv4_dst = exact; + break; + case offsetof(struct iphdr, saddr): + set_ip_addr->ipv4_src_mask = mask; + set_ip_addr->ipv4_src = exact; + break; + default: + return -EOPNOTSUPP; + } + + set_ip_addr->reserved = cpu_to_be16(0); + tmp_set_ipv4_op = FIELD_PREP(NFP_FL_ACT_LEN_LW, + sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ) | + FIELD_PREP(NFP_FL_ACT_JMP_ID, + NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS); + set_ip_addr->a_op = cpu_to_be16(tmp_set_ipv4_op); + + return 0; +} + static int nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) { + struct nfp_fl_set_ip4_addrs set_ip_addr; struct nfp_fl_set_eth set_eth; enum pedit_header_type htype; int idx, nkeys, err; size_t act_size; u32 offset, cmd; + memset(&set_ip_addr, 0, sizeof(set_ip_addr)); memset(&set_eth, 0, sizeof(set_eth)); nkeys = tcf_pedit_nkeys(action); @@ -289,6 +328,9 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: err = nfp_fl_set_eth(action, idx, offset, &set_eth); break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: + err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr); + break; default: return -EOPNOTSUPP; } @@ -300,6 +342,10 @@ 
nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) act_size = sizeof(set_eth); memcpy(nfp_action, &set_eth, act_size); *a_len += act_size; + } else if (set_ip_addr.a_op) { + act_size = sizeof(set_ip_addr); + memcpy(nfp_action, &set_ip_addr, act_size); + *a_len += act_size; } return 0; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index ffeaf85aa420..7ace557fdf84 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -78,6 +78,7 @@ #define NFP_FL_ACTION_OPCODE_POP_VLAN 2 #define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6 #define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7 +#define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9 #define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17 #define NFP_FL_ACTION_OPCODE_NUM 32 @@ -115,6 +116,15 @@ struct nfp_fl_set_eth { u8 eth_addr_val[ETH_ALEN * 2]; }; +struct nfp_fl_set_ip4_addrs { + __be16 a_op; + __be16 reserved; + __be32 ipv4_src_mask; + __be32 ipv4_src; + __be32 ipv4_dst_mask; + __be32 ipv4_dst; +}; + struct nfp_fl_output { __be16 a_op; __be16 flags; -- cgit v1.2.3 From 354b82bb320e04547e4755d2cc2ebab87a6d8abe Mon Sep 17 00:00:00 2001 From: Pieter Jansen van Vuuren Date: Fri, 6 Oct 2017 10:21:25 +0200 Subject: nfp: add set ipv6 source and destination address Previously we did not have offloading support for set IPv6 actions. This patch enables TC flower offload of set IPv6 src and dst address actions. Signed-off-by: Pieter Jansen van Vuuren Reviewed-by: Jakub Kicinski Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/action.c | 72 ++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 11 ++++ 2 files changed, 83 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 2f886a529ee4..4394e4f15fdb 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -302,9 +302,55 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off, return 0; } +static void +nfp_fl_set_ip6_helper(int opcode_tag, int idx, __be32 exact, __be32 mask, + struct nfp_fl_set_ipv6_addr *ip6) +{ + u16 tmp_set_op; + + ip6->ipv6[idx % 4].mask = mask; + ip6->ipv6[idx % 4].exact = exact; + + ip6->reserved = cpu_to_be16(0); + tmp_set_op = FIELD_PREP(NFP_FL_ACT_LEN_LW, sizeof(*ip6) >> + NFP_FL_LW_SIZ) | + FIELD_PREP(NFP_FL_ACT_JMP_ID, opcode_tag); + ip6->a_op = cpu_to_be16(tmp_set_op); +} + +static int +nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off, + struct nfp_fl_set_ipv6_addr *ip_dst, + struct nfp_fl_set_ipv6_addr *ip_src) +{ + __be32 exact, mask; + + /* We are expecting tcf_pedit to return a big endian value */ + mask = (__force __be32)~tcf_pedit_mask(action, idx); + exact = (__force __be32)tcf_pedit_val(action, idx); + + if (exact & ~mask) + return -EOPNOTSUPP; + + if (off < offsetof(struct ipv6hdr, saddr)) + return -EOPNOTSUPP; + else if (off < offsetof(struct ipv6hdr, daddr)) + nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, idx, + exact, mask, ip_src); + else if (off < offsetof(struct ipv6hdr, daddr) + + sizeof(struct in6_addr)) + nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, idx, + exact, mask, ip_dst); + else + return -EOPNOTSUPP; + + return 0; +} + static int nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) { + struct 
nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src; struct nfp_fl_set_ip4_addrs set_ip_addr; struct nfp_fl_set_eth set_eth; enum pedit_header_type htype; @@ -312,6 +358,8 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) size_t act_size; u32 offset, cmd; + memset(&set_ip6_dst, 0, sizeof(set_ip6_dst)); + memset(&set_ip6_src, 0, sizeof(set_ip6_src)); memset(&set_ip_addr, 0, sizeof(set_ip_addr)); memset(&set_eth, 0, sizeof(set_eth)); nkeys = tcf_pedit_nkeys(action); @@ -331,6 +379,10 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr); break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: + err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst, + &set_ip6_src); + break; default: return -EOPNOTSUPP; } @@ -346,6 +398,26 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) act_size = sizeof(set_ip_addr); memcpy(nfp_action, &set_ip_addr, act_size); *a_len += act_size; + } else if (set_ip6_dst.a_op && set_ip6_src.a_op) { + /* TC compiles set src and dst IPv6 address as a single action, + * the hardware requires this to be 2 separate actions. + */ + act_size = sizeof(set_ip6_src); + memcpy(nfp_action, &set_ip6_src, act_size); + *a_len += act_size; + + act_size = sizeof(set_ip6_dst); + memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst, + act_size); + *a_len += act_size; + } else if (set_ip6_dst.a_op) { + act_size = sizeof(set_ip6_dst); + memcpy(nfp_action, &set_ip6_dst, act_size); + *a_len += act_size; + } else if (set_ip6_src.a_op) { + act_size = sizeof(set_ip6_src); + memcpy(nfp_action, &set_ip6_src, act_size); + *a_len += act_size; } return 0; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 7ace557fdf84..527914e294d7 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -79,6 +79,8 @@ #define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6 #define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7 #define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9 +#define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC 11 +#define NFP_FL_ACTION_OPCODE_SET_IPV6_DST 12 #define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17 #define NFP_FL_ACTION_OPCODE_NUM 32 @@ -125,6 +127,15 @@ struct nfp_fl_set_ip4_addrs { __be32 ipv4_dst; }; +struct nfp_fl_set_ipv6_addr { + __be16 a_op; + __be16 reserved; + struct { + __be32 mask; + __be32 exact; + } ipv6[4]; +}; + struct nfp_fl_output { __be16 a_op; __be16 flags; -- cgit v1.2.3 From f8b7b0a6b113eea5b528e51a2086e6f93f4e4933 Mon Sep 17 00:00:00 2001 From: Pieter Jansen van Vuuren Date: Fri, 6 Oct 2017 10:21:26 +0200 Subject: nfp: add set tcp and udp header action flower offload Previously we did not have offloading support for set TCP/UDP actions. This patch enables TC flower offload of set TCP/UDP sport and dport actions. Signed-off-by: Pieter Jansen van Vuuren Reviewed-by: Jakub Kicinski Signed-off-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/flower/action.c | 42 ++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 9 +++++ 2 files changed, 51 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 4394e4f15fdb..1194c47ef827 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -347,11 +347,40 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off, return 0; } +static int +nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off, + struct nfp_fl_set_tport *set_tport, int opcode) +{ + u32 exact, mask; + u16 tmp_set_op; + + if (off) + return -EOPNOTSUPP; + + mask = ~tcf_pedit_mask(action, idx); + exact = tcf_pedit_val(action, idx); + + if (exact & ~mask) + return -EOPNOTSUPP; + + nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val, + set_tport->tp_port_mask); + + set_tport->reserved = cpu_to_be16(0); + tmp_set_op = FIELD_PREP(NFP_FL_ACT_LEN_LW, + sizeof(*set_tport) >> NFP_FL_LW_SIZ); + tmp_set_op |= FIELD_PREP(NFP_FL_ACT_JMP_ID, opcode); + set_tport->a_op = cpu_to_be16(tmp_set_op); + + return 0; +} + static int nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) { struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src; struct nfp_fl_set_ip4_addrs set_ip_addr; + struct nfp_fl_set_tport set_tport; struct nfp_fl_set_eth set_eth; enum pedit_header_type htype; int idx, nkeys, err; @@ -361,6 +390,7 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) memset(&set_ip6_dst, 0, sizeof(set_ip6_dst)); memset(&set_ip6_src, 0, sizeof(set_ip6_src)); memset(&set_ip_addr, 0, sizeof(set_ip_addr)); + memset(&set_tport, 0, sizeof(set_tport)); memset(&set_eth, 0, sizeof(set_eth)); nkeys = tcf_pedit_nkeys(action); @@ -383,6 +413,14 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst, &set_ip6_src); break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: + err = nfp_fl_set_tport(action, idx, offset, &set_tport, + NFP_FL_ACTION_OPCODE_SET_TCP); + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP: + err = nfp_fl_set_tport(action, idx, offset, &set_tport, + NFP_FL_ACTION_OPCODE_SET_UDP); + break; default: return -EOPNOTSUPP; } @@ -418,6 +456,10 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) act_size = sizeof(set_ip6_src); memcpy(nfp_action, &set_ip6_src, act_size); *a_len += act_size; + } else if (set_tport.a_op) { + act_size = sizeof(set_tport); + memcpy(nfp_action, &set_tport, act_size); + *a_len += act_size; } return 0; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 527914e294d7..f7b7242a22bc 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -81,6 +81,8 @@ #define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9 #define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC 11 #define NFP_FL_ACTION_OPCODE_SET_IPV6_DST 12 +#define NFP_FL_ACTION_OPCODE_SET_UDP 14 +#define NFP_FL_ACTION_OPCODE_SET_TCP 15 #define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17 #define NFP_FL_ACTION_OPCODE_NUM 32 @@ -136,6 +138,13 @@ struct nfp_fl_set_ipv6_addr { } ipv6[4]; }; +struct nfp_fl_set_tport { + __be16 a_op; + __be16 reserved; + u8 tp_port_mask[4]; + u8 tp_port_val[4]; +}; + struct nfp_fl_output { __be16 a_op; __be16 flags; -- cgit v1.2.3 From 
d2746fe5380e9af79807994756672baaf42cb130 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Fri, 6 Oct 2017 06:00:30 -0500 Subject: bnx2x: Use pci_ari_enabled() instead of local copy Use pci_ari_enabled() from the PCI core instead of the identical local copy bnx2x_ari_enabled(). No functional change intended. Signed-off-by: Bjorn Helgaas Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 9ca994d0bab6..3591077a5f6b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1074,11 +1074,6 @@ static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf) } } -static int bnx2x_ari_enabled(struct pci_dev *dev) -{ - return dev->bus->self && dev->bus->self->ari_enabled; -} - static int bnx2x_get_vf_igu_cam_info(struct bnx2x *bp) { @@ -1212,7 +1207,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, err = -EIO; /* verify ari is enabled */ - if (!bnx2x_ari_enabled(bp->pdev)) { + if (!pci_ari_enabled(bp->pdev->bus)) { BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n"); return 0; } -- cgit v1.2.3 From 0d7b70e83642f01c451a52faa3908e7b054ff7c6 Mon Sep 17 00:00:00 2001 From: Jonathan Toppins Date: Fri, 6 Oct 2017 15:48:30 -0400 Subject: bnxt_en: don't consider building bnxt_tc.o if option not enabled Instead of zeroing out bnxt_tc.c with an #ifdef, don't compile the file at all when the option is not enabled. Now make and the preprocessor do not have to waste time compiling a no-op. Signed-off-by: Jonathan Toppins Acked-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/Makefile | 3 ++- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 5 ----- 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile index 4f0cb8e1ffc0..457201f409a7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/Makefile +++ b/drivers/net/ethernet/broadcom/bnxt/Makefile @@ -1,3 +1,4 @@ obj-$(CONFIG_BNXT) += bnxt_en.o -bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_tc.o +bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o +bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 7dd3d131043a..4730c048ed9b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -23,8 +23,6 @@ #include "bnxt_tc.h" #include "bnxt_vfr.h" -#ifdef CONFIG_BNXT_FLOWER_OFFLOAD - #define BNXT_FID_INVALID 0xffff #define VLAN_TCI(vid, prio) ((vid) | ((prio) << VLAN_PRIO_SHIFT)) @@ -833,6 +831,3 @@ void bnxt_shutdown_tc(struct bnxt *bp) rhashtable_destroy(&tc_info->flow_table); rhashtable_destroy(&tc_info->l2_table); } - -#else -#endif -- cgit v1.2.3 From c1f2c6d025d365d961dcdf555ecf0f60066f38a3 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sun, 8 Oct 2017 11:57:55 +0200 Subject: mlxsw: spectrum: Add extack for VLAN enslavements Similar to physical ports, enslavement of VLAN devices can also fail. Use extack to indicate why the enslavement failed. 
$ ip link add link enp1s0np1 name enp1s0np1.10 type vlan id 10 $ ip link add name bond0 type bond mode 802.3ad $ ip link set dev enp1s0np1.10 master bond0 Error: spectrum: VLAN devices only support bridge and VRF uppers. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Acked-by: David Ahern Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 5cd4df08ce97..5ab4fd74a325 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -53,6 +53,7 @@ #include #include #include +#include #include #include #include @@ -4389,18 +4390,25 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); struct netdev_notifier_changeupper_info *info = ptr; + struct netlink_ext_ack *extack; struct net_device *upper_dev; int err = 0; + extack = netdev_notifier_info_to_extack(&info->info); + switch (event) { case NETDEV_PRECHANGEUPPER: upper_dev = info->upper_dev; - if (!netif_is_bridge_master(upper_dev)) + if (!netif_is_bridge_master(upper_dev)) { + NL_SET_ERR_MSG(extack, "spectrum: VLAN devices only support bridge and VRF uppers"); return -EINVAL; + } if (!info->linking) break; - if (netdev_has_any_upper_dev(upper_dev)) + if (netdev_has_any_upper_dev(upper_dev)) { + NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported"); return -EINVAL; + } break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; -- cgit v1.2.3 From 9b63ef88d3a16d67afbe7916625289650e9f30d9 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sun, 8 Oct 2017 11:57:56 +0200 Subject: mlxsw: spectrum: Propagate extack further for bridge enslavements The code that actually takes care of bridge offload introduces a few more non-trivial constraints with regards to bridge enslavements. Propagate extack there to indicate the reason. $ ip link add link enp1s0np1 name enp1s0np1.10 type vlan id 10 $ ip link add link enp1s0np1 name enp1s0np1.20 type vlan id 20 $ ip link add name br0 type bridge $ ip link set dev enp1s0np1.10 master br0 $ ip link set dev enp1s0np1.20 master br0 Error: spectrum: Can not bridge VLAN uppers of the same port. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Acked-by: David Ahern Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 6 ++++-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 3 ++- .../ethernet/mellanox/mlxsw/spectrum_switchdev.c | 25 +++++++++++++++------- 3 files changed, 23 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 5ab4fd74a325..321988ac57cc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -4299,7 +4299,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, if (info->linking) err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, lower_dev, - upper_dev); + upper_dev, + extack); else mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lower_dev, @@ -4416,7 +4417,8 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev, if (info->linking) err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, vlan_dev, - upper_dev); + upper_dev, + extack); else mlxsw_sp_port_bridge_leave(mlxsw_sp_port, vlan_dev, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index ae67e6046098..8e45183dc9bb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -326,7 +326,8 @@ void mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *brport_dev, - struct net_device *br_dev); + struct net_device *br_dev, + struct netlink_ext_ack *extack); void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *brport_dev, struct net_device *br_dev); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 0f9eac5f4ebf..2cfdf22a145f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include "spectrum.h" @@ -107,7 +108,8 @@ struct mlxsw_sp_bridge_vlan { struct mlxsw_sp_bridge_ops { int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device, struct mlxsw_sp_bridge_port *bridge_port, - struct mlxsw_sp_port *mlxsw_sp_port); + struct mlxsw_sp_port *mlxsw_sp_port, + struct netlink_ext_ack *extack); void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device, struct mlxsw_sp_bridge_port *bridge_port, struct mlxsw_sp_port *mlxsw_sp_port); @@ -1735,12 +1737,15 @@ static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = { static int mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device, struct mlxsw_sp_bridge_port *bridge_port, - struct mlxsw_sp_port *mlxsw_sp_port) + struct mlxsw_sp_port *mlxsw_sp_port, + struct netlink_ext_ack *extack) { struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; - if (is_vlan_dev(bridge_port->dev)) + if (is_vlan_dev(bridge_port->dev)) { + NL_SET_ERR_MSG(extack, "spectrum: Can not enslave a VLAN device to a VLAN-aware bridge"); return -EINVAL; + } mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1); if (WARN_ON(!mlxsw_sp_port_vlan)) @@ -1797,13 +1802,16 @@ mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port, static int mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device, struct mlxsw_sp_bridge_port *bridge_port, - struct mlxsw_sp_port *mlxsw_sp_port) + struct mlxsw_sp_port 
*mlxsw_sp_port, + struct netlink_ext_ack *extack) { struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; u16 vid; - if (!is_vlan_dev(bridge_port->dev)) + if (!is_vlan_dev(bridge_port->dev)) { + NL_SET_ERR_MSG(extack, "spectrum: Only VLAN devices can be enslaved to a VLAN-unaware bridge"); return -EINVAL; + } vid = vlan_dev_vlan_id(bridge_port->dev); mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); @@ -1811,7 +1819,7 @@ mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device, return -EINVAL; if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) { - netdev_err(mlxsw_sp_port->dev, "Can't bridge VLAN uppers of the same port\n"); + NL_SET_ERR_MSG(extack, "spectrum: Can not bridge VLAN uppers of the same port"); return -EINVAL; } @@ -1854,7 +1862,8 @@ static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = { int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *brport_dev, - struct net_device *br_dev) + struct net_device *br_dev, + struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct mlxsw_sp_bridge_device *bridge_device; @@ -1867,7 +1876,7 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port, bridge_device = bridge_port->bridge_device; err = bridge_device->ops->port_join(bridge_device, bridge_port, - mlxsw_sp_port); + mlxsw_sp_port, extack); if (err) goto err_port_join; -- cgit v1.2.3 From c778c32118167adcfe6b40063c49bfeac6bc1cf1 Mon Sep 17 00:00:00 2001 From: Christos Gkekas Date: Sun, 8 Oct 2017 20:13:49 +0100 Subject: net: ethernet: stmmac: Clean up dead code Many macros in dwmac-ipq806x are unused and should be removed. Moreover gmac->id is an unsigned variable and therefore checking whether it is less than zero is redundant. Signed-off-by: Christos Gkekas Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 866444b6c82f..2c6d7c69c8f7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -51,15 +51,11 @@ #define NSS_COMMON_CLK_SRC_CTRL_RGMII(x) 1 #define NSS_COMMON_CLK_SRC_CTRL_SGMII(x) ((x >= 2) ? 
1 : 0) -#define NSS_COMMON_MACSEC_CTL 0x28 -#define NSS_COMMON_MACSEC_CTL_EXT_BYPASS_EN(x) (1 << x) - #define NSS_COMMON_GMAC_CTL(x) (0x30 + (x * 4)) #define NSS_COMMON_GMAC_CTL_CSYS_REQ BIT(19) #define NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL BIT(16) #define NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET 8 #define NSS_COMMON_GMAC_CTL_IFG_OFFSET 0 -#define NSS_COMMON_GMAC_CTL_IFG_MASK 0x3f #define NSS_COMMON_CLK_DIV_RGMII_1000 1 #define NSS_COMMON_CLK_DIV_RGMII_100 9 @@ -68,9 +64,6 @@ #define NSS_COMMON_CLK_DIV_SGMII_100 4 #define NSS_COMMON_CLK_DIV_SGMII_10 49 -#define QSGMII_PCS_MODE_CTL 0x68 -#define QSGMII_PCS_MODE_CTL_AUTONEG_EN(x) BIT((x * 8) + 7) - #define QSGMII_PCS_CAL_LCKDT_CTL 0x120 #define QSGMII_PCS_CAL_LCKDT_CTL_RST BIT(19) @@ -83,15 +76,10 @@ #define QSGMII_PHY_TX_DRIVER_EN BIT(3) #define QSGMII_PHY_QSGMII_EN BIT(7) #define QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET 12 -#define QSGMII_PHY_PHASE_LOOP_GAIN_MASK 0x7 #define QSGMII_PHY_RX_DC_BIAS_OFFSET 18 -#define QSGMII_PHY_RX_DC_BIAS_MASK 0x3 #define QSGMII_PHY_RX_INPUT_EQU_OFFSET 20 -#define QSGMII_PHY_RX_INPUT_EQU_MASK 0x3 #define QSGMII_PHY_CDR_PI_SLEW_OFFSET 22 -#define QSGMII_PHY_CDR_PI_SLEW_MASK 0x3 #define QSGMII_PHY_TX_DRV_AMP_OFFSET 28 -#define QSGMII_PHY_TX_DRV_AMP_MASK 0xf struct ipq806x_gmac { struct platform_device *pdev; @@ -217,7 +205,7 @@ static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac) * code and keep it consistent with the Linux convention, we'll number * them from 0 to 3 here. */ - if (gmac->id < 0 || gmac->id > 3) { + if (gmac->id > 3) { dev_err(dev, "invalid gmac id\n"); return -EINVAL; } -- cgit v1.2.3 From c49c777f9c87749b73bc888f097f8a4178382449 Mon Sep 17 00:00:00 2001 From: Christos Gkekas Date: Sun, 8 Oct 2017 23:46:47 +0100 Subject: qed: Delete redundant check on dcb_app priority dcb_app priority is unsigned, thus checking whether it is less than zero is redundant. Signed-off-by: Christos Gkekas Acked-By: Tomer Tayar Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index 8f6ccc0c39e5..6e15d3c10ebf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -2308,7 +2308,7 @@ static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app) DP_VERBOSE(hwfn, QED_MSG_DCB, "selector = %d protocol = %d pri = %d\n", app->selector, app->protocol, app->priority); - if (app->priority < 0 || app->priority >= QED_MAX_PFC_PRIORITIES) { + if (app->priority >= QED_MAX_PFC_PRIORITIES) { DP_INFO(hwfn, "Invalid priority %d\n", app->priority); return -EINVAL; } -- cgit v1.2.3 From 2e22a75c55c1e70f36e21d4fa4e165af7f356aa9 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Wed, 5 Jul 2017 15:57:30 -0700 Subject: ixgbe: Clear SWFW_SYNC register during init Added clearing of SW resource bits in the SW/FW synchronization register to ixgbe_init_swfw_sync_X540(). Updated the ixgbe_acquire_swfw_sync_X540 SW Manageability host interface resource bit error case to match the error handling of the other SW resource bits, which is to release the SW resource bits if SW times out while attempting to acquire the resource. This allows the driver to load in cases where the semaphore bits could be stuck after a reset or a crash. 
Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 6ea0d6a5fb90..b8c5fd2a2115 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -619,12 +619,6 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) usleep_range(5000, 10000); } - /* Failed to get SW only semaphore */ - if (swmask == IXGBE_GSSR_SW_MNG_SM) { - hw_dbg(hw, "Failed to get SW only semaphore\n"); - return IXGBE_ERR_SWFW_SYNC; - } - /* If the resource is not released by the FW/HW the SW can assume that * the FW/HW malfunctions. In that case the SW should set the SW bit(s) * of the requested resource(s) while ignoring the corresponding FW/HW @@ -647,7 +641,8 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) */ if (swfw_sync & swmask) { u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM | - IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM; + IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM | + IXGBE_GSSR_SW_MNG_SM; if (swi2c_mask) rmask |= IXGBE_GSSR_I2C_MASK; @@ -763,6 +758,8 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) **/ void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw) { + u32 rmask; + /* First try to grab the semaphore but we don't need to bother * looking to see whether we got the lock or not since we do * the same thing regardless of whether we got the lock or not. @@ -771,6 +768,14 @@ void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw) */ ixgbe_get_swfw_sync_semaphore(hw); ixgbe_release_swfw_sync_semaphore(hw); + + /* Acquire and release all software resources. */ + rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM | + IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM | + IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_I2C_MASK; + + ixgbe_acquire_swfw_sync_X540(hw, rmask); + ixgbe_release_swfw_sync_X540(hw, rmask); } /** -- cgit v1.2.3 From 2e033eace7557a4b67ba1cb2746bd87d6ca2620b Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Mon, 17 Jul 2017 20:31:18 +0530 Subject: ixgbe: declare ixgbe_mac_operations structures as const Declare ixgbe_mac_operations structures as const as they are only stored in the mac_ops field of ixgbe_info structure. This field is of type const and therefore ixgbe_mac_operations structure can be made const too. 
Signed-off-by: Bhumika Goyal Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 19fbb2f28ea4..933c5070f1b6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -3884,7 +3884,7 @@ static const struct ixgbe_mac_operations mac_ops_X550EM_x_fw = { .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550, }; -static struct ixgbe_mac_operations mac_ops_x550em_a = { +static const struct ixgbe_mac_operations mac_ops_x550em_a = { X550_COMMON_MAC .led_on = ixgbe_led_on_t_x550em, .led_off = ixgbe_led_off_t_x550em, @@ -3905,7 +3905,7 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = { .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a, }; -static struct ixgbe_mac_operations mac_ops_x550em_a_fw = { +static const struct ixgbe_mac_operations mac_ops_x550em_a_fw = { X550_COMMON_MAC .led_on = ixgbe_led_on_generic, .led_off = ixgbe_led_off_generic, -- cgit v1.2.3 From f5a71caa1763cc8fd1b108234689d6a7e4fe9d2f Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 15 Aug 2017 08:59:54 -0700 Subject: ixgbe: restore normal RSS after last macvlan offload is removed Just like when the last VF is removed, we need to restore normal operations after the last macvlan offload is removed, else we get stuck in single queue operations. To test: ethtool -l eth1 # note the number of queues in use, ~= cpus ethtool -K eth1 l2-fwd-offload on ip link add mv1 link eth1 type macvlan mode bridge ip link set dev mv1 up ip link del mv1 ethtool -l eth1 # are we back to the same # of queues, or stuck on 1? Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 3942c6208745..d83cc9d34de3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9758,6 +9758,17 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv) limit = find_last_bit(&adapter->fwd_bitmask, 32); adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter); + + /* go back to full RSS if we're done with our VMQs */ + if (adapter->ring_feature[RING_F_VMDQ].limit == 1) { + int rss = min_t(int, ixgbe_max_rss_indices(adapter), + num_online_cpus()); + + adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; + adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; + adapter->ring_feature[RING_F_RSS].limit = rss; + } + ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev)); netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", fwd_adapter->pool, adapter->num_rx_pools, -- cgit v1.2.3 From c69be946d687a99dbc891ebc66539c1c2f082c1d Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Fri, 18 Aug 2017 15:48:02 -0700 Subject: ixgbe: add error checks when initializing the PHY Ignoring errors when attempting to identify the PHY can lead to a crash, specifically in the case of FW-controlled PHYs where the PHY read/write operations are set to NULL. Removed redundant comment. 
Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 933c5070f1b6..8cea53b62e1b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -3192,6 +3192,9 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) /* Identify the PHY or SFP module */ ret_val = phy->ops.identify(hw); + if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED || + ret_val == IXGBE_ERR_PHY_ADDR_INVALID) + return ret_val; /* Setup function pointers based on detected hardware */ ixgbe_init_mac_link_ops_X550em(hw); @@ -3394,9 +3397,10 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) ixgbe_clear_tx_pending(hw); /* PHY ops must be identified and initialized prior to reset */ - - /* Identify PHY and related function pointers */ status = hw->phy.ops.init(hw); + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED || + status == IXGBE_ERR_PHY_ADDR_INVALID) + return status; /* start the external PHY */ if (hw->phy.type == ixgbe_phy_x550em_ext_t) { -- cgit v1.2.3 From 761c2a48c70d871b0622dccaa20ccad024101a51 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Tue, 29 Aug 2017 12:21:48 -0700 Subject: ixgbe: split Tx/Rx ring clearing for ethtool loopback test Commit: fed21bcee7a5 ("ixgbe: Don't bother clearing buffer memory for descriptor rings") exposed some issues with the logic in the current implementation of ixgbe_clean_test_rings() that are being addressed in this patch: - Split the clearing of the Tx and Rx rings into separate loops. Previously both Tx and Rx rings were cleared in a rx_desc->wb.upper.length based loop, which could lead to issues if for whatever reason packets were received outside of the frames transmitted for the loopback test. - Add a check for IXGBE_TXD_STAT_DD to avoid clearing the rings if the transmits have not completed by the time we enter ixgbe_clean_test_rings(). - Exit early on ixgbe_check_lbtest_frame() failure. This change fixes a crash during ethtool diagnostic (ethtool -t). 
Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 53 +++++++++++++++--------- 1 file changed, 34 insertions(+), 19 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 72c565712a5f..6d89f28cae06 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1916,8 +1916,6 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, unsigned int size) { union ixgbe_adv_rx_desc *rx_desc; - struct ixgbe_rx_buffer *rx_buffer; - struct ixgbe_tx_buffer *tx_buffer; u16 rx_ntc, tx_ntc, count = 0; /* initialize next to clean and descriptor values */ @@ -1925,7 +1923,38 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, tx_ntc = tx_ring->next_to_clean; rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); + while (tx_ntc != tx_ring->next_to_use) { + union ixgbe_adv_tx_desc *tx_desc; + struct ixgbe_tx_buffer *tx_buffer; + + tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc); + + /* if DD is not set transmit has not completed */ + if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) + return count; + + /* unmap buffer on Tx side */ + tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; + + /* Free all the Tx ring sk_buffs */ + dev_kfree_skb_any(tx_buffer->skb); + + /* unmap skb header data */ + dma_unmap_single(tx_ring->dev, + dma_unmap_addr(tx_buffer, dma), + dma_unmap_len(tx_buffer, len), + DMA_TO_DEVICE); + dma_unmap_len_set(tx_buffer, len, 0); + + /* increment Tx next to clean counter */ + tx_ntc++; + if (tx_ntc == tx_ring->count) + tx_ntc = 0; + } + while (rx_desc->wb.upper.length) { + struct ixgbe_rx_buffer *rx_buffer; + /* check Rx buffer */ rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; @@ -1938,6 +1967,8 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, /* verify contents of skb */ if (ixgbe_check_lbtest_frame(rx_buffer, size)) count++; + else + break; /* sync Rx buffer for device write */ dma_sync_single_for_device(rx_ring->dev, @@ -1945,26 +1976,10 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, ixgbe_rx_bufsz(rx_ring), DMA_FROM_DEVICE); - /* unmap buffer on Tx side */ - tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; - - /* Free all the Tx ring sk_buffs */ - dev_kfree_skb_any(tx_buffer->skb); - - /* unmap skb header data */ - dma_unmap_single(tx_ring->dev, - dma_unmap_addr(tx_buffer, dma), - dma_unmap_len(tx_buffer, len), - DMA_TO_DEVICE); - dma_unmap_len_set(tx_buffer, len, 0); - - /* increment Rx/Tx next to clean counters */ + /* increment Rx next to clean counter */ rx_ntc++; if (rx_ntc == rx_ring->count) rx_ntc = 0; - tx_ntc++; - if (tx_ntc == tx_ring->count) - tx_ntc = 0; /* fetch next descriptor */ rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); -- cgit v1.2.3 From 5bca3b94dfbf9259d972a5be91333dda6eb9f350 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Mon, 9 Oct 2017 15:43:55 +0800 Subject: net: hns3: Cleanup for shifting true in hns3 driver This patch fixes a shifting true in hclge_main module. Signed-off-by: Yunsheng Lin Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 1a13614af3de..5c1bf12beade 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1687,7 +1687,7 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, req->buf_num[i] = cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); req->buf_num[i] |= - cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B); + cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B); } req->shared_buf = -- cgit v1.2.3 From 9780cb97afd868fb11500f38826b7f30d554535c Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Mon, 9 Oct 2017 15:43:56 +0800 Subject: net: hns3: Add hns3_get_handle macro in hns3 driver There are many places that will need to get the handle of netdev, so add a macro to get the handle of netdev. Signed-off-by: Yunsheng Lin Signed-off-by: David S. Miller --- .../ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c | 18 ++++------ .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 39 ++++++++-------------- .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h | 3 ++ .../ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c | 38 ++++++++------------- 4 files changed, 35 insertions(+), 63 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c index 9832172bfb08..925619a7c50a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c @@ -13,8 +13,7 @@ static int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets) { - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(ndev); if (h->kinfo.dcb_ops->ieee_getets) return h->kinfo.dcb_ops->ieee_getets(h, ets); @@ -25,8 +24,7 @@ int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets) static int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets) { - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(ndev); if (h->kinfo.dcb_ops->ieee_setets) return h->kinfo.dcb_ops->ieee_setets(h, ets); @@ -37,8 +35,7 @@ int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets) static int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc) { - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(ndev); if (h->kinfo.dcb_ops->ieee_getpfc) return h->kinfo.dcb_ops->ieee_getpfc(h, pfc); @@ -49,8 +46,7 @@ int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc) static int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc) { - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(ndev); if (h->kinfo.dcb_ops->ieee_setpfc) return h->kinfo.dcb_ops->ieee_setpfc(h, pfc); @@ -61,8 +57,7 @@ int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc) /* DCBX configuration */ static u8 hns3_dcbnl_getdcbx(struct net_device *ndev) { - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hnae3_handle *h = priv->ae_handle; + 
struct hnae3_handle *h = hns3_get_handle(ndev); if (h->kinfo.dcb_ops->getdcbx) return h->kinfo.dcb_ops->getdcbx(h); @@ -73,8 +68,7 @@ static u8 hns3_dcbnl_getdcbx(struct net_device *ndev) /* return 0 if successful, otherwise fail */ static u8 hns3_dcbnl_setdcbx(struct net_device *ndev, u8 mode) { - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(ndev); if (h->kinfo.dcb_ops->setdcbx) return h->kinfo.dcb_ops->setdcbx(h, mode); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index c31506514e5d..c2a0537c649f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -198,8 +198,7 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector) static int hns3_nic_set_real_num_queue(struct net_device *netdev) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_knic_private_info *kinfo = &h->kinfo; unsigned int queue_size = kinfo->rss_size * kinfo->num_tc; int ret; @@ -307,8 +306,7 @@ static int hns3_nic_net_stop(struct net_device *netdev) void hns3_set_multicast_list(struct net_device *netdev) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); struct netdev_hw_addr *ha = NULL; if (h->ae_algo->ops->set_mc_addr) { @@ -321,8 +319,7 @@ void hns3_set_multicast_list(struct net_device *netdev) static int hns3_nic_uc_sync(struct net_device *netdev, const unsigned char *addr) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); if (h->ae_algo->ops->add_uc_addr) return h->ae_algo->ops->add_uc_addr(h, addr); @@ -333,8 +330,7 @@ static int hns3_nic_uc_sync(struct net_device *netdev, static int hns3_nic_uc_unsync(struct net_device *netdev, const unsigned char *addr) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); if (h->ae_algo->ops->rm_uc_addr) return h->ae_algo->ops->rm_uc_addr(h, addr); @@ -345,8 +341,7 @@ static int hns3_nic_uc_unsync(struct net_device *netdev, static int hns3_nic_mc_sync(struct net_device *netdev, const unsigned char *addr) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); if (h->ae_algo->ops->add_mc_addr) return h->ae_algo->ops->add_mc_addr(h, addr); @@ -357,8 +352,7 @@ static int hns3_nic_mc_sync(struct net_device *netdev, static int hns3_nic_mc_unsync(struct net_device *netdev, const unsigned char *addr) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); if (h->ae_algo->ops->rm_mc_addr) return h->ae_algo->ops->rm_mc_addr(h, addr); @@ -368,8 +362,7 @@ static int hns3_nic_mc_unsync(struct net_device *netdev, void hns3_nic_set_rx_mode(struct net_device *netdev) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); if (h->ae_algo->ops->set_promisc_mode) { if (netdev->flags & IFF_PROMISC) @@ -1025,8 +1018,7 @@ out_net_tx_busy: static int 
hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); struct sockaddr *mac_addr = p; int ret; @@ -1208,8 +1200,7 @@ static void hns3_nic_udp_tunnel_del(struct net_device *netdev, static int hns3_setup_tc(struct net_device *netdev, u8 tc) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_knic_private_info *kinfo = &h->kinfo; unsigned int i; int ret; @@ -1259,8 +1250,7 @@ static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, static int hns3_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); int ret = -EIO; if (h->ae_algo->ops->set_vlan_filter) @@ -1272,8 +1262,7 @@ static int hns3_vlan_rx_add_vid(struct net_device *netdev, static int hns3_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); int ret = -EIO; if (h->ae_algo->ops->set_vlan_filter) @@ -1285,8 +1274,7 @@ static int hns3_vlan_rx_kill_vid(struct net_device *netdev, static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, __be16 vlan_proto) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); int ret = -EIO; if (h->ae_algo->ops->set_vf_vlan_filter) @@ -1298,8 +1286,7 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); bool if_running = netif_running(netdev); int ret; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h index 481eada73e2d..dd8d40ca1dcc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h @@ -587,6 +587,9 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) #define hns3_for_each_ring(pos, head) \ for (pos = (head).ring; pos; pos = pos->next) +#define hns3_get_handle(ndev) \ + (((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle) + void hns3_ethtool_set_ops(struct net_device *netdev); int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c index d636399232fb..a892a157f346 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c @@ -102,8 +102,7 @@ static void hns3_driv_to_eth_caps(u32 caps, struct ethtool_link_ksettings *cmd, static int hns3_get_sset_count(struct net_device *netdev, int stringset) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); const struct hnae3_ae_ops *ops = h->ae_algo->ops; if (!ops->get_sset_count) @@ -164,8 +163,7 @@ static u8 
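The hns3_get_handle() macro introduced in hns3_enet.h above is a typed accessor around netdev_priv(), and every hunk in this patch is the same two-statements-to-one conversion. A freestanding sketch of the pattern with simplified stand-in types (none of these definitions are the real driver or kernel ones):

    struct hnae3_handle { int id; };
    struct hns3_nic_priv { struct hnae3_handle *ae_handle; };
    struct net_device { struct hns3_nic_priv priv; };

    /* Stand-in for the kernel's netdev_priv(), which returns the
     * driver-private area attached to a struct net_device.
     */
    static void *netdev_priv(struct net_device *ndev) { return &ndev->priv; }

    #define hns3_get_handle(ndev) \
            (((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)

    static struct hnae3_handle *example(struct net_device *ndev)
    {
            /* before: struct hns3_nic_priv *priv = netdev_priv(ndev);
             *         struct hnae3_handle *h = priv->ae_handle;
             */
            return hns3_get_handle(ndev);
    }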
*hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data) static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); const struct hnae3_ae_ops *ops = h->ae_algo->ops; char *buff = (char *)data; @@ -220,8 +218,7 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) void hns3_get_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); u64 *p = data; if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) { @@ -262,10 +259,7 @@ static void hns3_get_drvinfo(struct net_device *netdev, static u32 hns3_get_link(struct net_device *netdev) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h; - - h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_status) return h->ae_algo->ops->get_status(h); @@ -277,7 +271,8 @@ static void hns3_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *param) { struct hns3_nic_priv *priv = netdev_priv(netdev); - int queue_num = priv->ae_handle->kinfo.num_tqps; + struct hnae3_handle *h = priv->ae_handle; + int queue_num = h->kinfo.num_tqps; param->tx_max_pending = HNS3_RING_MAX_PENDING; param->rx_max_pending = HNS3_RING_MAX_PENDING; @@ -289,8 +284,7 @@ static void hns3_get_ringparam(struct net_device *netdev, static void hns3_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *param) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_pauseparam) h->ae_algo->ops->get_pauseparam(h, &param->autoneg, @@ -300,8 +294,7 @@ static void hns3_get_pauseparam(struct net_device *netdev, static int hns3_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); u32 supported_caps; u32 advertised_caps; u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN; @@ -392,8 +385,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev, static u32 hns3_get_rss_key_size(struct net_device *netdev) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss_key_size) @@ -404,8 +396,7 @@ static u32 hns3_get_rss_key_size(struct net_device *netdev) static u32 hns3_get_rss_indir_size(struct net_device *netdev) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss_indir_size) @@ -417,8 +408,7 @@ static u32 hns3_get_rss_indir_size(struct net_device *netdev) static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss) return -EOPNOTSUPP; @@
-429,8 +419,7 @@ static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key, static int hns3_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key, const u8 hfunc) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss) return -EOPNOTSUPP; @@ -454,8 +443,7 @@ static int hns3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) { - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; + struct hnae3_handle *h = hns3_get_handle(netdev); if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_tc_size) return -EOPNOTSUPP; -- cgit v1.2.3 From 56cf68c73019ec3f04b1ae69f76f524918fb22cb Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Mon, 9 Oct 2017 15:43:57 +0800 Subject: net: hns3: Cleanup indentation for Kconfig in the hisilicon folder This patch fixes a few indentation issues in the Kconfig file in the hisilicon folder. Signed-off-by: Yunsheng Lin Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/Kconfig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index 9d7cb0387bf7..30000b6aa7b8 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -78,7 +78,7 @@ config HNS_ENET config HNS3 tristate "Hisilicon Network Subsystem Support HNS3 (Framework)" - depends on PCI + depends on PCI ---help--- This selects the framework support for Hisilicon Network Subsystem 3. This layer facilitates clients like ENET, RoCE and user-space ethernet @@ -87,7 +87,7 @@ config HNS3 config HNS3_HCLGE tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support" - depends on PCI_MSI + depends on PCI_MSI depends on HNS3 ---help--- This selects the HNS3_HCLGE network acceleration engine & its hardware @@ -96,7 +96,7 @@ config HNS3_HCLGE config HNS3_ENET tristate "Hisilicon HNS3 Ethernet Device Support" - depends on 64BIT && PCI + depends on 64BIT && PCI depends on HNS3 && HNS3_HCLGE ---help--- This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08 -- cgit v1.2.3 From 5392902d332b85a93e3be2755f7f6df183e5cafc Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Mon, 9 Oct 2017 15:43:58 +0800 Subject: net: hns3: Consistently using GENMASK in hns3 driver This patch uses GENMASK to generate bit masks whenever possible in the hns3 driver. Signed-off-by: Yunsheng Lin Signed-off-by: David S.
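GENMASK(h, l) expands to a mask with bits h through l set, so a field's mask stays visibly consistent with its _S shift macro instead of encoding the width twice. A self-contained sketch of the idiom, using the same bit range as the HCLGE_TQP_ID_M conversion below (the GENMASK here is a 32-bit stand-in for the kernel's <linux/bits.h> version):

    /* 32-bit stand-in for the kernel's GENMASK() */
    #define GENMASK(h, l) ((~0U >> (31 - (h))) & (~0U << (l)))

    #define HCLGE_TQP_ID_S 2
    #define HCLGE_TQP_ID_M GENMASK(12, 2) /* same bits as (0x7ff << 2) */

    static unsigned int get_tqp_id(unsigned int word)
    {
            return (word & HCLGE_TQP_ID_M) >> HCLGE_TQP_ID_S; /* extract */
    }

    static unsigned int set_tqp_id(unsigned int word, unsigned int id)
    {
            return (word & ~HCLGE_TQP_ID_M) |
                   ((id << HCLGE_TQP_ID_S) & HCLGE_TQP_ID_M); /* insert */
    }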
Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 48 +++++++++++----------- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 4 +- 2 files changed, 26 insertions(+), 26 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 6b6d28eff664..9cff7dbca5dd 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -250,11 +250,11 @@ struct hclge_ctrl_vector_chain { u8 int_vector_id; u8 int_cause_num; #define HCLGE_INT_TYPE_S 0 -#define HCLGE_INT_TYPE_M 0x3 +#define HCLGE_INT_TYPE_M GENMASK(1, 0) #define HCLGE_TQP_ID_S 2 -#define HCLGE_TQP_ID_M (0x7ff << HCLGE_TQP_ID_S) +#define HCLGE_TQP_ID_M GENMASK(12, 2) #define HCLGE_INT_GL_IDX_S 13 -#define HCLGE_INT_GL_IDX_M (0x3 << HCLGE_INT_GL_IDX_S) +#define HCLGE_INT_GL_IDX_M GENMASK(14, 13) __le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD]; u8 vfid; u8 rsv; @@ -372,28 +372,28 @@ struct hclge_pf_res { }; #define HCLGE_CFG_OFFSET_S 0 -#define HCLGE_CFG_OFFSET_M 0xfffff /* Byte (8-10.3) */ +#define HCLGE_CFG_OFFSET_M GENMASK(19, 0) #define HCLGE_CFG_RD_LEN_S 24 -#define HCLGE_CFG_RD_LEN_M (0xf << HCLGE_CFG_RD_LEN_S) +#define HCLGE_CFG_RD_LEN_M GENMASK(27, 24) #define HCLGE_CFG_RD_LEN_BYTES 16 #define HCLGE_CFG_RD_LEN_UNIT 4 #define HCLGE_CFG_VMDQ_S 0 -#define HCLGE_CFG_VMDQ_M (0xff << HCLGE_CFG_VMDQ_S) +#define HCLGE_CFG_VMDQ_M GENMASK(7, 0) #define HCLGE_CFG_TC_NUM_S 8 -#define HCLGE_CFG_TC_NUM_M (0xff << HCLGE_CFG_TC_NUM_S) +#define HCLGE_CFG_TC_NUM_M GENMASK(15, 8) #define HCLGE_CFG_TQP_DESC_N_S 16 -#define HCLGE_CFG_TQP_DESC_N_M (0xffff << HCLGE_CFG_TQP_DESC_N_S) +#define HCLGE_CFG_TQP_DESC_N_M GENMASK(31, 16) #define HCLGE_CFG_PHY_ADDR_S 0 -#define HCLGE_CFG_PHY_ADDR_M (0x1f << HCLGE_CFG_PHY_ADDR_S) +#define HCLGE_CFG_PHY_ADDR_M GENMASK(4, 0) #define HCLGE_CFG_MEDIA_TP_S 8 -#define HCLGE_CFG_MEDIA_TP_M (0xff << HCLGE_CFG_MEDIA_TP_S) +#define HCLGE_CFG_MEDIA_TP_M GENMASK(15, 8) #define HCLGE_CFG_RX_BUF_LEN_S 16 -#define HCLGE_CFG_RX_BUF_LEN_M (0xffff << HCLGE_CFG_RX_BUF_LEN_S) +#define HCLGE_CFG_RX_BUF_LEN_M GENMASK(31, 16) #define HCLGE_CFG_MAC_ADDR_H_S 0 -#define HCLGE_CFG_MAC_ADDR_H_M (0xffff << HCLGE_CFG_MAC_ADDR_H_S) +#define HCLGE_CFG_MAC_ADDR_H_M GENMASK(15, 0) #define HCLGE_CFG_DEFAULT_SPEED_S 16 -#define HCLGE_CFG_DEFAULT_SPEED_M (0xff << HCLGE_CFG_DEFAULT_SPEED_S) +#define HCLGE_CFG_DEFAULT_SPEED_M GENMASK(23, 16) struct hclge_cfg_param { __le32 offset; @@ -441,9 +441,9 @@ struct hclge_rss_indirection_table { }; #define HCLGE_RSS_TC_OFFSET_S 0 -#define HCLGE_RSS_TC_OFFSET_M (0x3ff << HCLGE_RSS_TC_OFFSET_S) +#define HCLGE_RSS_TC_OFFSET_M GENMASK(9, 0) #define HCLGE_RSS_TC_SIZE_S 12 -#define HCLGE_RSS_TC_SIZE_M (0x7 << HCLGE_RSS_TC_SIZE_S) +#define HCLGE_RSS_TC_SIZE_M GENMASK(14, 12) #define HCLGE_RSS_TC_VALID_B 15 struct hclge_rss_tc_mode { u16 rss_tc_mode[HCLGE_MAX_TC_NUM]; @@ -501,7 +501,7 @@ struct hclge_config_mac_mode { }; #define HCLGE_CFG_SPEED_S 0 -#define HCLGE_CFG_SPEED_M (0x3f << HCLGE_CFG_SPEED_S) +#define HCLGE_CFG_SPEED_M GENMASK(5, 0) #define HCLGE_CFG_DUPLEX_B 7 #define HCLGE_CFG_DUPLEX_M BIT(HCLGE_CFG_DUPLEX_B) @@ -518,7 +518,7 @@ struct hclge_config_mac_speed_dup { #define HCLGE_QUERY_AN_B 0 #define HCLGE_QUERY_DUPLEX_B 2 -#define HCLGE_QUERY_SPEED_M (0x1f << HCLGE_QUERY_SPEED_S) +#define HCLGE_QUERY_SPEED_M GENMASK(4, 0) #define HCLGE_QUERY_AN_M BIT(HCLGE_QUERY_AN_B) #define HCLGE_QUERY_DUPLEX_M 
BIT(HCLGE_QUERY_DUPLEX_B) @@ -528,7 +528,7 @@ struct hclge_query_an_speed_dup { u8 rsv[23]; }; -#define HCLGE_RING_ID_MASK 0x3ff +#define HCLGE_RING_ID_MASK GENMASK(9, 0) #define HCLGE_TQP_ENABLE_B 0 #define HCLGE_MAC_CFG_AN_EN_B 0 @@ -565,9 +565,9 @@ enum hclge_mac_vlan_tbl_opcode { #define HCLGE_MAC_EPORT_SW_EN_B 0xc #define HCLGE_MAC_EPORT_TYPE_B 0xb #define HCLGE_MAC_EPORT_VFID_S 0x3 -#define HCLGE_MAC_EPORT_VFID_M (0xff << HCLGE_MAC_EPORT_VFID_S) +#define HCLGE_MAC_EPORT_VFID_M GENMASK(10, 3) #define HCLGE_MAC_EPORT_PFID_S 0x0 -#define HCLGE_MAC_EPORT_PFID_M (0x7 << HCLGE_MAC_EPORT_PFID_S) +#define HCLGE_MAC_EPORT_PFID_M GENMASK(2, 0) struct hclge_mac_vlan_tbl_entry { u8 flags; u8 resp_code; @@ -583,7 +583,7 @@ struct hclge_mac_vlan_tbl_entry { }; #define HCLGE_CFG_MTA_MAC_SEL_S 0x0 -#define HCLGE_CFG_MTA_MAC_SEL_M (0x3 << HCLGE_CFG_MTA_MAC_SEL_S) +#define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0) #define HCLGE_CFG_MTA_MAC_EN_B 0x7 struct hclge_mta_filter_mode { u8 dmac_sel_en; /* Use lowest 2 bit as sel_mode, bit 7 as enable */ u8 rsv[23]; }; @@ -599,7 +599,7 @@ struct hclge_cfg_func_mta_filter { #define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0x0 #define HCLGE_CFG_MTA_ITEM_IDX_S 0x0 -#define HCLGE_CFG_MTA_ITEM_IDX_M (0xfff << HCLGE_CFG_MTA_ITEM_IDX_S) +#define HCLGE_CFG_MTA_ITEM_IDX_M GENMASK(11, 0) struct hclge_cfg_func_mta_item { u16 item_idx; /* Only used lowest 12 bit */ u8 accept; /* Only used lowest 1 bit */ @@ -670,10 +670,10 @@ struct hclge_cfg_tx_queue_pointer { }; #define HCLGE_TSO_MSS_MIN_S 0 -#define HCLGE_TSO_MSS_MIN_M (0x3FFF << HCLGE_TSO_MSS_MIN_S) +#define HCLGE_TSO_MSS_MIN_M GENMASK(13, 0) #define HCLGE_TSO_MSS_MAX_S 16 -#define HCLGE_TSO_MSS_MAX_M (0x3FFF << HCLGE_TSO_MSS_MAX_S) +#define HCLGE_TSO_MSS_MAX_M GENMASK(29, 16) struct hclge_cfg_tso_status { __le16 tso_mss_min; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 7c66c00e8a3e..79c1a06cb941 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -32,7 +32,7 @@ #define HCLGE_VECTOR_VF_OFFSET 0x100000 #define HCLGE_RSS_IND_TBL_SIZE 512 -#define HCLGE_RSS_SET_BITMAP_MSK 0xffff +#define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0) #define HCLGE_RSS_KEY_SIZE 40 #define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0 #define HCLGE_RSS_HASH_ALGO_SIMPLE 1 @@ -65,7 +65,7 @@ #define HCLGE_PHY_CSS_REG 17 #define HCLGE_PHY_MDIX_CTRL_S (5) -#define HCLGE_PHY_MDIX_CTRL_M (3 << HCLGE_PHY_MDIX_CTRL_S) +#define HCLGE_PHY_MDIX_CTRL_M GENMASK(6, 5) #define HCLGE_PHY_MDIX_STATUS_B (6) #define HCLGE_PHY_SPEED_DUP_RESOLVE_B (11) -- cgit v1.2.3 From d44f9b631fbc137b1b16a22318ceb136f934db48 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Mon, 9 Oct 2017 15:43:59 +0800 Subject: net: hns3: Cleanup for struct that used to send cmd to firmware The hclge_tm module has already added a _cmd suffix to the structs that are used to send commands to the firmware; this helps with finding endian issues. This patch adds the _cmd suffix to the corresponding structs in the hclge_main module. Signed-off-by: Yunsheng Lin Signed-off-by: David S.
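All of the structs renamed here play the same role: they are overlaid on the data area of a command descriptor before the descriptor is sent to firmware, which is precisely where sparse needs to see endian-annotated fields. A minimal sketch of that overlay pattern (descriptor layout and names are simplified stand-ins, not the real hclge definitions):

    #include <stdint.h>

    typedef uint16_t __le16; /* in the kernel this type carries sparse annotations */

    struct desc_sketch {
            __le16  opcode;
            uint8_t data[24]; /* payload; its layout depends on the opcode */
    };

    /* The _cmd suffix marks structs whose layout maps onto desc.data. */
    struct tqp_map_cmd_sketch {
            __le16  tqp_id;
            uint8_t rsv[22];
    };

    static void fill_tqp_map(struct desc_sketch *desc, uint16_t tqp_id)
    {
            struct tqp_map_cmd_sketch *req =
                    (struct tqp_map_cmd_sketch *)desc->data;

            req->tqp_id = tqp_id; /* in the kernel: cpu_to_le16(tqp_id) */
    }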
Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 4 +- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 72 ++++----- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 163 +++++++++++---------- 3 files changed, 120 insertions(+), 119 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 8b511e6e0ce9..648b64bc363a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -279,12 +279,12 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) enum hclge_cmd_status hclge_cmd_query_firmware_version(struct hclge_hw *hw, u32 *version) { - struct hclge_query_version *resp; + struct hclge_query_version_cmd *resp; struct hclge_desc desc; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1); - resp = (struct hclge_query_version *)desc.data; + resp = (struct hclge_query_version_cmd *)desc.data; ret = hclge_cmd_send(hw, &desc, 1); if (!ret) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 9cff7dbca5dd..d2ff0e37f9f5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -221,12 +221,12 @@ enum hclge_opcode_type { #define HCLGE_RCB_INIT_QUERY_TIMEOUT 10 #define HCLGE_RCB_INIT_FLAG_EN_B 0 #define HCLGE_RCB_INIT_FLAG_FINI_B 8 -struct hclge_config_rcb_init { +struct hclge_config_rcb_init_cmd { __le16 rcb_init_flag; u8 rsv[22]; }; -struct hclge_tqp_map { +struct hclge_tqp_map_cmd { __le16 tqp_id; /* Absolute tqp id for in this pf */ u8 tqp_vf; /* VF id */ #define HCLGE_TQP_MAP_TYPE_PF 0 @@ -246,7 +246,7 @@ enum hclge_int_type { HCLGE_INT_EVENT, }; -struct hclge_ctrl_vector_chain { +struct hclge_ctrl_vector_chain_cmd { u8 int_vector_id; u8 int_cause_num; #define HCLGE_INT_TYPE_S 0 @@ -263,18 +263,18 @@ struct hclge_ctrl_vector_chain { #define HCLGE_TC_NUM 8 #define HCLGE_TC0_PRI_BUF_EN_B 15 /* Bit 15 indicate enable or not */ #define HCLGE_BUF_UNIT_S 7 /* Buf size is united by 128 bytes */ -struct hclge_tx_buff_alloc { +struct hclge_tx_buff_alloc_cmd { __le16 tx_pkt_buff[HCLGE_TC_NUM]; u8 tx_buff_rsv[8]; }; -struct hclge_rx_priv_buff { +struct hclge_rx_priv_buff_cmd { __le16 buf_num[HCLGE_TC_NUM]; __le16 shared_buf; u8 rsv[6]; }; -struct hclge_query_version { +struct hclge_query_version_cmd { __le32 firmware; __le32 firmware_rsv[5]; }; @@ -328,14 +328,14 @@ struct hclge_pkt_buf_alloc { }; #define HCLGE_RX_COM_WL_EN_B 15 -struct hclge_rx_com_wl_buf { +struct hclge_rx_com_wl_buf_cmd { __le16 high_wl; __le16 low_wl; u8 rsv[20]; }; #define HCLGE_RX_PKT_EN_B 15 -struct hclge_rx_pkt_buf { +struct hclge_rx_pkt_buf_cmd { __le16 high_pkt; __le16 low_pkt; u8 rsv[20]; @@ -348,7 +348,7 @@ struct hclge_rx_pkt_buf { #define HCLGE_PF_MAC_NUM_MASK 0x3 #define HCLGE_PF_STATE_MAIN BIT(HCLGE_PF_STATE_MAIN_B) #define HCLGE_PF_STATE_DONE BIT(HCLGE_PF_STATE_DONE_B) -struct hclge_func_status { +struct hclge_func_status_cmd { __le32 vf_rst_state[4]; u8 pf_state; u8 mac_id; @@ -359,7 +359,7 @@ struct hclge_func_status { u8 rsv[2]; }; -struct hclge_pf_res { +struct hclge_pf_res_cmd { __le16 tqp_num; __le16 buf_size; __le16 msixcap_localid_ba_nic; @@ -395,7 +395,7 @@ struct hclge_pf_res { #define HCLGE_CFG_DEFAULT_SPEED_S 16 #define HCLGE_CFG_DEFAULT_SPEED_M GENMASK(23, 16) -struct hclge_cfg_param { +struct hclge_cfg_param_cmd { 
__le32 offset; __le32 rsv; __le32 param[4]; @@ -405,7 +405,7 @@ struct hclge_cfg_param { #define HCLGE_DESC_NUM 0x40 #define HCLGE_ALLOC_VALID_B 0 -struct hclge_vf_num { +struct hclge_vf_num_cmd { u8 alloc_valid; u8 rsv[23]; }; @@ -413,13 +413,13 @@ struct hclge_vf_num { #define HCLGE_RSS_DEFAULT_OUTPORT_B 4 #define HCLGE_RSS_HASH_KEY_OFFSET_B 4 #define HCLGE_RSS_HASH_KEY_NUM 16 -struct hclge_rss_config { +struct hclge_rss_config_cmd { u8 hash_config; u8 rsv[7]; u8 hash_key[HCLGE_RSS_HASH_KEY_NUM]; }; -struct hclge_rss_input_tuple { +struct hclge_rss_input_tuple_cmd { u8 ipv4_tcp_en; u8 ipv4_udp_en; u8 ipv4_sctp_en; @@ -433,7 +433,7 @@ struct hclge_rss_input_tuple { #define HCLGE_RSS_CFG_TBL_SIZE 16 -struct hclge_rss_indirection_table { +struct hclge_rss_indirection_table_cmd { u16 start_table_index; u16 rss_set_bitmap; u8 rsv[4]; @@ -445,14 +445,14 @@ struct hclge_rss_indirection_table { #define HCLGE_RSS_TC_SIZE_S 12 #define HCLGE_RSS_TC_SIZE_M GENMASK(14, 12) #define HCLGE_RSS_TC_VALID_B 15 -struct hclge_rss_tc_mode { +struct hclge_rss_tc_mode_cmd { u16 rss_tc_mode[HCLGE_MAX_TC_NUM]; u8 rsv[8]; }; #define HCLGE_LINK_STS_B 0 #define HCLGE_LINK_STATUS BIT(HCLGE_LINK_STS_B) -struct hclge_link_status { +struct hclge_link_status_cmd { u8 status; u8 rsv[23]; }; @@ -467,7 +467,7 @@ struct hclge_promisc_param { #define HCLGE_PROMISC_EN_UC 0x1 #define HCLGE_PROMISC_EN_MC 0x2 #define HCLGE_PROMISC_EN_BC 0x4 -struct hclge_promisc_cfg { +struct hclge_promisc_cfg_cmd { u8 flag; u8 vf_id; __le16 rsv0; @@ -495,7 +495,7 @@ enum hclge_promisc_type { #define HCLGE_MAC_TX_UNDER_MIN_ERR_B 21 #define HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B 22 -struct hclge_config_mac_mode { +struct hclge_config_mac_mode_cmd { __le32 txrx_pad_fcs_loop_en; u8 rsv[20]; }; @@ -506,7 +506,7 @@ struct hclge_config_mac_mode { #define HCLGE_CFG_DUPLEX_B 7 #define HCLGE_CFG_DUPLEX_M BIT(HCLGE_CFG_DUPLEX_B) -struct hclge_config_mac_speed_dup { +struct hclge_config_mac_speed_dup_cmd { u8 speed_dup; #define HCLGE_CFG_MAC_SPEED_CHANGE_EN_B 0 @@ -522,7 +522,7 @@ struct hclge_config_mac_speed_dup { #define HCLGE_QUERY_AN_M BIT(HCLGE_QUERY_AN_B) #define HCLGE_QUERY_DUPLEX_M BIT(HCLGE_QUERY_DUPLEX_B) -struct hclge_query_an_speed_dup { +struct hclge_query_an_speed_dup_cmd { u8 an_syn_dup_speed; u8 pause; u8 rsv[23]; @@ -539,7 +539,7 @@ struct hclge_query_an_speed_dup { #define HCLGE_MAC_CFG_AN_EN BIT(HCLGE_MAC_CFG_AN_EN_B) -struct hclge_config_auto_neg { +struct hclge_config_auto_neg_cmd { __le32 cfg_an_cmd_flag; u8 rsv[20]; }; @@ -548,7 +548,7 @@ struct hclge_config_auto_neg { #define HCLGE_MAC_MAX_MTU 9728 #define HCLGE_MAC_UPLINK_PORT 0x100 -struct hclge_config_max_frm_size { +struct hclge_config_max_frm_size_cmd { __le16 max_frm_size; u8 rsv[22]; }; @@ -568,7 +568,7 @@ enum hclge_mac_vlan_tbl_opcode { #define HCLGE_MAC_EPORT_VFID_M GENMASK(10, 3) #define HCLGE_MAC_EPORT_PFID_S 0x0 #define HCLGE_MAC_EPORT_PFID_M GENMASK(2, 0) -struct hclge_mac_vlan_tbl_entry { +struct hclge_mac_vlan_tbl_entry_cmd { u8 flags; u8 resp_code; __le16 vlan_tag; @@ -585,13 +585,13 @@ struct hclge_mac_vlan_tbl_entry { #define HCLGE_CFG_MTA_MAC_SEL_S 0x0 #define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0) #define HCLGE_CFG_MTA_MAC_EN_B 0x7 -struct hclge_mta_filter_mode { +struct hclge_mta_filter_mode_cmd { u8 dmac_sel_en; /* Use lowest 2 bit as sel_mode, bit 7 as enable */ u8 rsv[23]; }; #define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0x0 -struct hclge_cfg_func_mta_filter { +struct hclge_cfg_func_mta_filter_cmd { u8 accept; /* Only used lowest 1 bit */ u8 function_id; u8 rsv[22]; @@ 
-600,13 +600,13 @@ struct hclge_cfg_func_mta_filter { #define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0x0 #define HCLGE_CFG_MTA_ITEM_IDX_S 0x0 #define HCLGE_CFG_MTA_ITEM_IDX_M GENMASK(11, 0) -struct hclge_cfg_func_mta_item { +struct hclge_cfg_func_mta_item_cmd { u16 item_idx; /* Only used lowest 12 bit */ u8 accept; /* Only used lowest 1 bit */ u8 rsv[21]; }; -struct hclge_mac_vlan_add { +struct hclge_mac_vlan_add_cmd { __le16 flags; __le16 mac_addr_hi16; __le32 mac_addr_lo32; @@ -619,7 +619,7 @@ struct hclge_mac_vlan_add { }; #define HNS3_MAC_VLAN_CFG_FLAG_BIT 0 -struct hclge_mac_vlan_remove { +struct hclge_mac_vlan_remove_cmd { __le16 flags; __le16 mac_addr_hi16; __le32 mac_addr_lo32; @@ -631,20 +631,20 @@ struct hclge_mac_vlan_remove { u8 rsv[4]; }; -struct hclge_vlan_filter_ctrl { +struct hclge_vlan_filter_ctrl_cmd { u8 vlan_type; u8 vlan_fe; u8 rsv[22]; }; -struct hclge_vlan_filter_pf_cfg { +struct hclge_vlan_filter_pf_cfg_cmd { u8 vlan_offset; u8 vlan_cfg; u8 rsv[2]; u8 vlan_offset_bitmap[20]; }; -struct hclge_vlan_filter_vf_cfg { +struct hclge_vlan_filter_vf_cfg_cmd { u16 vlan_id; u8 resp_code; u8 rsv; @@ -653,14 +653,14 @@ struct hclge_vlan_filter_vf_cfg { u8 vf_bitmap[16]; }; -struct hclge_cfg_com_tqp_queue { +struct hclge_cfg_com_tqp_queue_cmd { __le16 tqp_id; __le16 stream_id; u8 enable; u8 rsv[19]; }; -struct hclge_cfg_tx_queue_pointer { +struct hclge_cfg_tx_queue_pointer_cmd { __le16 tqp_id; __le16 tx_tail; __le16 tx_head; @@ -675,7 +675,7 @@ struct hclge_cfg_tx_queue_pointer { #define HCLGE_TSO_MSS_MAX_S 16 #define HCLGE_TSO_MSS_MAX_M GENMASK(29, 16) -struct hclge_cfg_tso_status { +struct hclge_cfg_tso_status_cmd { __le16 tso_mss_min; __le16 tso_mss_max; u8 rsv[20]; @@ -685,7 +685,7 @@ struct hclge_cfg_tso_status { #define HCLGE_TSO_MSS_MAX 9668 #define HCLGE_TQP_RESET_B 0 -struct hclge_reset_tqp_queue { +struct hclge_reset_tqp_queue_cmd { __le16 tqp_id; u8 reset_req; u8 ready_to_reset; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 5c1bf12beade..5103f234d1c3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -820,7 +820,7 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) } static int hclge_parse_func_status(struct hclge_dev *hdev, - struct hclge_func_status *status) + struct hclge_func_status_cmd *status) { if (!(status->pf_state & HCLGE_PF_STATE_DONE)) return -EINVAL; @@ -837,13 +837,13 @@ static int hclge_parse_func_status(struct hclge_dev *hdev, static int hclge_query_function_status(struct hclge_dev *hdev) { - struct hclge_func_status *req; + struct hclge_func_status_cmd *req; struct hclge_desc desc; int timeout = 0; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true); - req = (struct hclge_func_status *)desc.data; + req = (struct hclge_func_status_cmd *)desc.data; do { ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -868,7 +868,7 @@ static int hclge_query_function_status(struct hclge_dev *hdev) static int hclge_query_pf_resource(struct hclge_dev *hdev) { - struct hclge_pf_res *req; + struct hclge_pf_res_cmd *req; struct hclge_desc desc; int ret; @@ -880,7 +880,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) return ret; } - req = (struct hclge_pf_res *)desc.data; + req = (struct hclge_pf_res_cmd *)desc.data; hdev->num_tqps = __le16_to_cpu(req->tqp_num); hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; @@ -938,12 +938,12 @@ static int 
hclge_parse_speed(int speed_cmd, int *speed) static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) { - struct hclge_cfg_param *req; + struct hclge_cfg_param_cmd *req; u64 mac_addr_tmp_high; u64 mac_addr_tmp; int i; - req = (struct hclge_cfg_param *)desc[0].data; + req = (struct hclge_cfg_param_cmd *)desc[0].data; /* get the configuration */ cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]), @@ -978,7 +978,7 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) for (i = 0; i < ETH_ALEN; i++) cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; - req = (struct hclge_cfg_param *)desc[1].data; + req = (struct hclge_cfg_param_cmd *)desc[1].data; cfg->numa_node_map = __le32_to_cpu(req->param[0]); } @@ -989,11 +989,11 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) { struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM]; - struct hclge_cfg_param *req; + struct hclge_cfg_param_cmd *req; int i, ret; for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) { - req = (struct hclge_cfg_param *)desc[i].data; + req = (struct hclge_cfg_param_cmd *)desc[i].data; hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM, true); hnae_set_field(req->offset, HCLGE_CFG_OFFSET_M, @@ -1099,12 +1099,12 @@ static int hclge_configure(struct hclge_dev *hdev) static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min, int tso_mss_max) { - struct hclge_cfg_tso_status *req; + struct hclge_cfg_tso_status_cmd *req; struct hclge_desc desc; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false); - req = (struct hclge_cfg_tso_status *)desc.data; + req = (struct hclge_cfg_tso_status_cmd *)desc.data; hnae_set_field(req->tso_mss_min, HCLGE_TSO_MSS_MIN_M, HCLGE_TSO_MSS_MIN_S, tso_mss_min); hnae_set_field(req->tso_mss_max, HCLGE_TSO_MSS_MIN_M, @@ -1144,13 +1144,13 @@ static int hclge_alloc_tqps(struct hclge_dev *hdev) static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, u16 tqp_pid, u16 tqp_vid, bool is_pf) { - struct hclge_tqp_map *req; + struct hclge_tqp_map_cmd *req; struct hclge_desc desc; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false); - req = (struct hclge_tqp_map *)desc.data; + req = (struct hclge_tqp_map_cmd *)desc.data; req->tqp_id = cpu_to_le16(tqp_pid); req->tqp_vf = cpu_to_le16(func_id); req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B | @@ -1340,12 +1340,12 @@ static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, /* TX buffer size is unit by 128 byte */ #define HCLGE_BUF_SIZE_UNIT_SHIFT 7 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15) - struct hclge_tx_buff_alloc *req; + struct hclge_tx_buff_alloc_cmd *req; struct hclge_desc desc; int ret; u8 i; - req = (struct hclge_tx_buff_alloc *)desc.data; + req = (struct hclge_tx_buff_alloc_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); for (i = 0; i < HCLGE_TC_NUM; i++) { @@ -1672,13 +1672,13 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev, struct hclge_pkt_buf_alloc *buf_alloc) { - struct hclge_rx_priv_buff *req; + struct hclge_rx_priv_buff_cmd *req; struct hclge_desc desc; int ret; int i; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false); - req = (struct hclge_rx_priv_buff *)desc.data; + req = (struct hclge_rx_priv_buff_cmd *)desc.data; /* Alloc private buffer TCs */ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { @@ -2000,11 +2000,11 @@ static void 
hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed) int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) { - struct hclge_config_mac_speed_dup *req; + struct hclge_config_mac_speed_dup_cmd *req; struct hclge_desc desc; int ret; - req = (struct hclge_config_mac_speed_dup *)desc.data; + req = (struct hclge_config_mac_speed_dup_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); @@ -2075,12 +2075,12 @@ static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, u8 *duplex) { - struct hclge_query_an_speed_dup *req; + struct hclge_query_an_speed_dup_cmd *req; struct hclge_desc desc; int speed_tmp; int ret; - req = (struct hclge_query_an_speed_dup *)desc.data; + req = (struct hclge_query_an_speed_dup_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -2108,11 +2108,11 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, static int hclge_query_autoneg_result(struct hclge_dev *hdev) { struct hclge_mac *mac = &hdev->hw.mac; - struct hclge_query_an_speed_dup *req; + struct hclge_query_an_speed_dup_cmd *req; struct hclge_desc desc; int ret; - req = (struct hclge_query_an_speed_dup *)desc.data; + req = (struct hclge_query_an_speed_dup_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -2129,13 +2129,13 @@ static int hclge_query_autoneg_result(struct hclge_dev *hdev) static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) { - struct hclge_config_auto_neg *req; + struct hclge_config_auto_neg_cmd *req; struct hclge_desc desc; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); - req = (struct hclge_config_auto_neg *)desc.data; + req = (struct hclge_config_auto_neg_cmd *)desc.data; hnae_set_bit(req->cfg_an_cmd_flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -2214,7 +2214,7 @@ static void hclge_task_schedule(struct hclge_dev *hdev) static int hclge_get_mac_link_status(struct hclge_dev *hdev) { - struct hclge_link_status *req; + struct hclge_link_status_cmd *req; struct hclge_desc desc; int link_status; int ret; @@ -2227,7 +2227,7 @@ static int hclge_get_mac_link_status(struct hclge_dev *hdev) return ret; } - req = (struct hclge_link_status *)desc.data; + req = (struct hclge_link_status_cmd *)desc.data; link_status = req->status & HCLGE_LINK_STATUS; return !!link_status; @@ -2451,7 +2451,7 @@ static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle) static int hclge_get_rss_algo(struct hclge_dev *hdev) { - struct hclge_rss_config *req; + struct hclge_rss_config_cmd *req; struct hclge_desc desc; int rss_hash_algo; int ret; @@ -2465,7 +2465,7 @@ static int hclge_get_rss_algo(struct hclge_dev *hdev) return ret; } - req = (struct hclge_rss_config *)desc.data; + req = (struct hclge_rss_config_cmd *)desc.data; rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK); if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ) @@ -2477,13 +2477,13 @@ static int hclge_get_rss_algo(struct hclge_dev *hdev) static int hclge_set_rss_algo_key(struct hclge_dev *hdev, const u8 hfunc, const u8 *key) { - struct hclge_rss_config *req; + struct hclge_rss_config_cmd *req; struct hclge_desc desc; int key_offset; int key_size; int ret; - req = (struct hclge_rss_config *)desc.data; + req = (struct 
hclge_rss_config_cmd *)desc.data; for (key_offset = 0; key_offset < 3; key_offset++) { hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, @@ -2514,12 +2514,12 @@ static int hclge_set_rss_algo_key(struct hclge_dev *hdev, static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir) { - struct hclge_rss_indirection_table *req; + struct hclge_rss_indirection_table_cmd *req; struct hclge_desc desc; int i, j; int ret; - req = (struct hclge_rss_indirection_table *)desc.data; + req = (struct hclge_rss_indirection_table_cmd *)desc.data; for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) { hclge_cmd_setup_basic_desc @@ -2546,13 +2546,13 @@ static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir) static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, u16 *tc_size, u16 *tc_offset) { - struct hclge_rss_tc_mode *req; + struct hclge_rss_tc_mode_cmd *req; struct hclge_desc desc; int ret; int i; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false); - req = (struct hclge_rss_tc_mode *)desc.data; + req = (struct hclge_rss_tc_mode_cmd *)desc.data; for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { hnae_set_bit(req->rss_tc_mode[i], HCLGE_RSS_TC_VALID_B, @@ -2577,13 +2577,13 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) { #define HCLGE_RSS_INPUT_TUPLE_OTHER 0xf #define HCLGE_RSS_INPUT_TUPLE_SCTP 0x1f - struct hclge_rss_input_tuple *req; + struct hclge_rss_input_tuple_cmd *req; struct hclge_desc desc; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); - req = (struct hclge_rss_input_tuple *)desc.data; + req = (struct hclge_rss_input_tuple_cmd *)desc.data; req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP; @@ -2750,7 +2750,7 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id, struct hnae3_ring_chain_node *ring_chain) { struct hclge_dev *hdev = vport->back; - struct hclge_ctrl_vector_chain *req; + struct hclge_ctrl_vector_chain_cmd *req; struct hnae3_ring_chain_node *node; struct hclge_desc desc; int ret; @@ -2758,7 +2758,7 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id, hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false); - req = (struct hclge_ctrl_vector_chain *)desc.data; + req = (struct hclge_ctrl_vector_chain_cmd *)desc.data; req->int_vector_id = vector_id; i = 0; @@ -2831,7 +2831,7 @@ static int hclge_unmap_ring_from_vector( { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - struct hclge_ctrl_vector_chain *req; + struct hclge_ctrl_vector_chain_cmd *req; struct hnae3_ring_chain_node *node; struct hclge_desc desc; int i, vector_id; @@ -2846,7 +2846,7 @@ static int hclge_unmap_ring_from_vector( hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false); - req = (struct hclge_ctrl_vector_chain *)desc.data; + req = (struct hclge_ctrl_vector_chain_cmd *)desc.data; req->int_vector_id = vector_id; i = 0; @@ -2898,13 +2898,13 @@ static int hclge_unmap_ring_from_vector( int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, struct hclge_promisc_param *param) { - struct hclge_promisc_cfg *req; + struct hclge_promisc_cfg_cmd *req; struct hclge_desc desc; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); - req = (struct hclge_promisc_cfg *)desc.data; + req = (struct hclge_promisc_cfg_cmd *)desc.data; req->vf_id = param->vf_id; req->flag = (param->enable << 
HCLGE_PROMISC_EN_B); @@ -2946,8 +2946,8 @@ static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en) static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) { struct hclge_desc desc; - struct hclge_config_mac_mode *req = - (struct hclge_config_mac_mode *)desc.data; + struct hclge_config_mac_mode_cmd *req = + (struct hclge_config_mac_mode_cmd *)desc.data; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); @@ -2980,8 +2980,8 @@ static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, int stream_id, bool enable) { struct hclge_desc desc; - struct hclge_cfg_com_tqp_queue *req = - (struct hclge_cfg_com_tqp_queue *)desc.data; + struct hclge_cfg_com_tqp_queue_cmd *req = + (struct hclge_cfg_com_tqp_queue_cmd *)desc.data; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); @@ -3174,7 +3174,7 @@ static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) return true; } -static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry *new_req, +static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, const u8 *addr) { const unsigned char *mac_addr = addr; @@ -3201,11 +3201,11 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, enum hclge_mta_dmac_sel_type mta_mac_sel, bool enable) { - struct hclge_mta_filter_mode *req; + struct hclge_mta_filter_mode_cmd *req; struct hclge_desc desc; int ret; - req = (struct hclge_mta_filter_mode *)desc.data; + req = (struct hclge_mta_filter_mode_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false); hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, @@ -3228,11 +3228,11 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, u8 func_id, bool enable) { - struct hclge_cfg_func_mta_filter *req; + struct hclge_cfg_func_mta_filter_cmd *req; struct hclge_desc desc; int ret; - req = (struct hclge_cfg_func_mta_filter *)desc.data; + req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false); hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, @@ -3255,11 +3255,11 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport, bool enable) { struct hclge_dev *hdev = vport->back; - struct hclge_cfg_func_mta_item *req; + struct hclge_cfg_func_mta_item_cmd *req; struct hclge_desc desc; int ret; - req = (struct hclge_cfg_func_mta_item *)desc.data; + req = (struct hclge_cfg_func_mta_item_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false); hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); @@ -3279,7 +3279,7 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport, } static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, - struct hclge_mac_vlan_tbl_entry *req) + struct hclge_mac_vlan_tbl_entry_cmd *req) { struct hclge_dev *hdev = vport->back; struct hclge_desc desc; @@ -3288,7 +3288,7 @@ static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); - memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry)); + memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { @@ -3304,7 +3304,7 @@ static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, } static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, - struct hclge_mac_vlan_tbl_entry *req, + struct hclge_mac_vlan_tbl_entry_cmd *req, struct hclge_desc *desc, bool is_mc) { @@ 
-3317,7 +3317,7 @@ static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); memcpy(desc[0].data, req, - sizeof(struct hclge_mac_vlan_tbl_entry)); + sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_MAC_VLAN_ADD, true); @@ -3329,7 +3329,7 @@ static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, } else { memcpy(desc[0].data, req, - sizeof(struct hclge_mac_vlan_tbl_entry)); + sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); ret = hclge_cmd_send(&hdev->hw, desc, 1); } if (ret) { @@ -3345,7 +3345,7 @@ static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, } static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, - struct hclge_mac_vlan_tbl_entry *req, + struct hclge_mac_vlan_tbl_entry_cmd *req, struct hclge_desc *mc_desc) { struct hclge_dev *hdev = vport->back; @@ -3359,7 +3359,8 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ADD, false); - memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry)); + memcpy(desc.data, req, + sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); ret = hclge_cmd_send(&hdev->hw, &desc, 1); resp_code = (desc.data[0] >> 8) & 0xff; cfg_status = hclge_get_mac_vlan_cmd_status(vport, desc.retval, @@ -3373,7 +3374,7 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); memcpy(mc_desc[0].data, req, - sizeof(struct hclge_mac_vlan_tbl_entry)); + sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); resp_code = (mc_desc[0].data[0] >> 8) & 0xff; cfg_status = hclge_get_mac_vlan_cmd_status(vport, @@ -3404,7 +3405,7 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, const unsigned char *addr) { struct hclge_dev *hdev = vport->back; - struct hclge_mac_vlan_tbl_entry req; + struct hclge_mac_vlan_tbl_entry_cmd req; enum hclge_cmd_status status; /* mac addr check */ @@ -3454,7 +3455,7 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport, const unsigned char *addr) { struct hclge_dev *hdev = vport->back; - struct hclge_mac_vlan_tbl_entry req; + struct hclge_mac_vlan_tbl_entry_cmd req; enum hclge_cmd_status status; /* mac addr check */ @@ -3488,7 +3489,7 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport, const unsigned char *addr) { struct hclge_dev *hdev = vport->back; - struct hclge_mac_vlan_tbl_entry req; + struct hclge_mac_vlan_tbl_entry_cmd req; struct hclge_desc desc[3]; u16 tbl_idx; int status; @@ -3539,7 +3540,7 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, const unsigned char *addr) { struct hclge_dev *hdev = vport->back; - struct hclge_mac_vlan_tbl_entry req; + struct hclge_mac_vlan_tbl_entry_cmd req; enum hclge_cmd_status status; struct hclge_desc desc[3]; u16 tbl_idx; @@ -3622,13 +3623,13 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p) static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, bool filter_en) { - struct hclge_vlan_filter_ctrl *req; + struct hclge_vlan_filter_ctrl_cmd *req; struct hclge_desc desc; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false); - req = (struct hclge_vlan_filter_ctrl *)desc.data; + req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data; req->vlan_type = vlan_type; req->vlan_fe = filter_en; @@ -3646,8 +3647,8 @@ int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, bool is_kill, u16 vlan, u8 qos, 
__be16 proto) { #define HCLGE_MAX_VF_BYTES 16 - struct hclge_vlan_filter_vf_cfg *req0; - struct hclge_vlan_filter_vf_cfg *req1; + struct hclge_vlan_filter_vf_cfg_cmd *req0; + struct hclge_vlan_filter_vf_cfg_cmd *req1; struct hclge_desc desc[2]; u8 vf_byte_val; u8 vf_byte_off; @@ -3663,8 +3664,8 @@ int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, vf_byte_off = vfid / 8; vf_byte_val = 1 << (vfid % 8); - req0 = (struct hclge_vlan_filter_vf_cfg *)desc[0].data; - req1 = (struct hclge_vlan_filter_vf_cfg *)desc[1].data; + req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; + req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; req0->vlan_id = vlan; req0->vlan_cfg = is_kill; @@ -3707,7 +3708,7 @@ static int hclge_set_port_vlan_filter(struct hnae3_handle *handle, { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - struct hclge_vlan_filter_pf_cfg *req; + struct hclge_vlan_filter_pf_cfg_cmd *req; struct hclge_desc desc; u8 vlan_offset_byte_val; u8 vlan_offset_byte; @@ -3720,7 +3721,7 @@ static int hclge_set_port_vlan_filter(struct hnae3_handle *handle, vlan_offset_byte = (vlan_id % 160) / 8; vlan_offset_byte_val = 1 << (vlan_id % 8); - req = (struct hclge_vlan_filter_pf_cfg *)desc.data; + req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data; req->vlan_offset = vlan_offset_160; req->vlan_cfg = is_kill; req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; @@ -3782,7 +3783,7 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) { struct hclge_vport *vport = hclge_get_vport(handle); - struct hclge_config_max_frm_size *req; + struct hclge_config_max_frm_size_cmd *req; struct hclge_dev *hdev = vport->back; struct hclge_desc desc; int ret; @@ -3793,7 +3794,7 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) hdev->mps = new_mtu; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); - req = (struct hclge_config_max_frm_size *)desc.data; + req = (struct hclge_config_max_frm_size_cmd *)desc.data; req->max_frm_size = cpu_to_le16(new_mtu); ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -3808,13 +3809,13 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, bool enable) { - struct hclge_reset_tqp_queue *req; + struct hclge_reset_tqp_queue_cmd *req; struct hclge_desc desc; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); - req = (struct hclge_reset_tqp_queue *)desc.data; + req = (struct hclge_reset_tqp_queue_cmd *)desc.data; req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); @@ -3830,13 +3831,13 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) { - struct hclge_reset_tqp_queue *req; + struct hclge_reset_tqp_queue_cmd *req; struct hclge_desc desc; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); - req = (struct hclge_reset_tqp_queue *)desc.data; + req = (struct hclge_reset_tqp_queue_cmd *)desc.data; req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); ret = hclge_cmd_send(&hdev->hw, &desc, 1); -- cgit v1.2.3 From a90bb9a5ea1db7f10096e7573b7d45295ad4eada Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Mon, 9 Oct 2017 15:44:00 +0800 Subject: net: hns3: Cleanup for endian issue in hns3 driver This patch fixes a 
lot of endian issues detected by sparse. Signed-off-by: Yunsheng Lin Signed-off-by: David S. Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 8 +- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 10 +- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 206 ++++++++++++--------- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 26 ++- .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 4 +- 5 files changed, 145 insertions(+), 109 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 648b64bc363a..05985d81dda0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -208,7 +208,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) * which will be use for hardware to write back */ ntc = hw->cmq.csq.next_to_use; - opcode = desc[0].opcode; + opcode = le16_to_cpu(desc[0].opcode); while (handle < num) { desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use]; *desc_to_use = desc[handle]; @@ -225,7 +225,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) * If the command is sync, wait for the firmware to write back, * if multi descriptors to be sent, use the first one to check */ - if (HCLGE_SEND_SYNC(desc->flag)) { + if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) { do { if (hclge_cmd_csq_done(hw)) break; @@ -244,9 +244,9 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) pr_debug("Get cmd desc:\n"); if (likely(!hclge_is_special_opcode(opcode))) - desc_ret = desc[handle].retval; + desc_ret = le16_to_cpu(desc[handle].retval); else - desc_ret = desc[0].retval; + desc_ret = le16_to_cpu(desc[0].retval); if ((enum hclge_cmd_return_status)desc_ret == HCLGE_CMD_EXEC_SUCCESS) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index d2ff0e37f9f5..8f3ba02aea3c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -434,8 +434,8 @@ struct hclge_rss_input_tuple_cmd { #define HCLGE_RSS_CFG_TBL_SIZE 16 struct hclge_rss_indirection_table_cmd { - u16 start_table_index; - u16 rss_set_bitmap; + __le16 start_table_index; + __le16 rss_set_bitmap; u8 rsv[4]; u8 rss_result[HCLGE_RSS_CFG_TBL_SIZE]; }; @@ -446,7 +446,7 @@ struct hclge_rss_indirection_table_cmd { #define HCLGE_RSS_TC_SIZE_M GENMASK(14, 12) #define HCLGE_RSS_TC_VALID_B 15 struct hclge_rss_tc_mode_cmd { - u16 rss_tc_mode[HCLGE_MAX_TC_NUM]; + __le16 rss_tc_mode[HCLGE_MAX_TC_NUM]; u8 rsv[8]; }; @@ -601,7 +601,7 @@ struct hclge_cfg_func_mta_filter_cmd { #define HCLGE_CFG_MTA_ITEM_IDX_S 0x0 #define HCLGE_CFG_MTA_ITEM_IDX_M GENMASK(11, 0) struct hclge_cfg_func_mta_item_cmd { - u16 item_idx; /* Only used lowest 12 bit */ + __le16 item_idx; /* Only used lowest 12 bit */ u8 accept; /* Only used lowest 1 bit */ u8 rsv[21]; }; @@ -645,7 +645,7 @@ struct hclge_vlan_filter_pf_cfg_cmd { }; struct hclge_vlan_filter_vf_cfg_cmd { - u16 vlan_id; + __le16 vlan_id; u8 resp_code; u8 rsv; u8 vlan_cfg; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 5103f234d1c3..6115c2f730ce 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -362,7 +362,7 @@ static int 
hclge_64_bit_update_stats(struct hclge_dev *hdev) #define HCLGE_64_BIT_RTN_DATANUM 4 u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats); struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM]; - u64 *desc_data; + __le64 *desc_data; int i, k, n; int ret; @@ -376,14 +376,14 @@ static int hclge_64_bit_update_stats(struct hclge_dev *hdev) for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) { if (unlikely(i == 0)) { - desc_data = (u64 *)(&desc[i].data[0]); + desc_data = (__le64 *)(&desc[i].data[0]); n = HCLGE_64_BIT_RTN_DATANUM - 1; } else { - desc_data = (u64 *)(&desc[i]); + desc_data = (__le64 *)(&desc[i]); n = HCLGE_64_BIT_RTN_DATANUM; } for (k = 0; k < n; k++) { - *data++ += cpu_to_le64(*desc_data); + *data++ += le64_to_cpu(*desc_data); desc_data++; } } @@ -411,7 +411,7 @@ static int hclge_32_bit_update_stats(struct hclge_dev *hdev) struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM]; struct hclge_32_bit_stats *all_32_bit_stats; - u32 *desc_data; + __le32 *desc_data; int i, k, n; u64 *data; int ret; @@ -431,21 +431,27 @@ static int hclge_32_bit_update_stats(struct hclge_dev *hdev) hclge_reset_partial_32bit_counter(all_32_bit_stats); for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) { if (unlikely(i == 0)) { + __le16 *desc_data_16bit; + all_32_bit_stats->igu_rx_err_pkt += - cpu_to_le32(desc[i].data[0]); + le32_to_cpu(desc[i].data[0]); + + desc_data_16bit = (__le16 *)&desc[i].data[1]; all_32_bit_stats->igu_rx_no_eof_pkt += - cpu_to_le32(desc[i].data[1] & 0xffff); + le16_to_cpu(*desc_data_16bit); + + desc_data_16bit++; all_32_bit_stats->igu_rx_no_sof_pkt += - cpu_to_le32((desc[i].data[1] >> 16) & 0xffff); + le16_to_cpu(*desc_data_16bit); - desc_data = (u32 *)(&desc[i].data[2]); + desc_data = &desc[i].data[2]; n = HCLGE_32_BIT_RTN_DATANUM - 4; } else { - desc_data = (u32 *)(&desc[i]); + desc_data = (__le32 *)&desc[i]; n = HCLGE_32_BIT_RTN_DATANUM; } for (k = 0; k < n; k++) { - *data++ += cpu_to_le32(*desc_data); + *data++ += le32_to_cpu(*desc_data); desc_data++; } } @@ -460,7 +466,7 @@ static int hclge_mac_update_stats(struct hclge_dev *hdev) u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); struct hclge_desc desc[HCLGE_MAC_CMD_NUM]; - u64 *desc_data; + __le64 *desc_data; int i, k, n; int ret; @@ -475,14 +481,14 @@ static int hclge_mac_update_stats(struct hclge_dev *hdev) for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) { if (unlikely(i == 0)) { - desc_data = (u64 *)(&desc[i].data[0]); + desc_data = (__le64 *)(&desc[i].data[0]); n = HCLGE_RTN_DATA_NUM - 2; } else { - desc_data = (u64 *)(&desc[i]); + desc_data = (__le64 *)(&desc[i]); n = HCLGE_RTN_DATA_NUM; } for (k = 0; k < n; k++) { - *data++ += cpu_to_le64(*desc_data); + *data++ += le64_to_cpu(*desc_data); desc_data++; } } @@ -508,7 +514,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle) HCLGE_OPC_QUERY_RX_STATUS, true); - desc[0].data[0] = (tqp->index & 0x1ff); + desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); ret = hclge_cmd_send(&hdev->hw, desc, 1); if (ret) { dev_err(&hdev->pdev->dev, @@ -517,7 +523,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle) return ret; } tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += - cpu_to_le32(desc[0].data[4]); + le32_to_cpu(desc[0].data[4]); } for (i = 0; i < kinfo->num_tqps; i++) { @@ -528,7 +534,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle) HCLGE_OPC_QUERY_TX_STATUS, true); - desc[0].data[0] = (tqp->index & 0x1ff); + desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff)); ret = hclge_cmd_send(&hdev->hw, desc, 1); if (ret) { dev_err(&hdev->pdev->dev, @@ -537,7 +543,7 @@ static int 
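A recurring shape in the hunks around this point: rather than doing bit-field read-modify-write directly on a __le16/__le32 struct member (which mixes byte orders), the value is assembled in a host-endian local and converted exactly once at the final store, as in the hclge_config_tso() change above. A freestanding sketch of the before/after with trivial stand-ins for hnae_set_field() and cpu_to_le16() (little-endian host assumed; names illustrative):

    #include <stdint.h>

    #define MSS_MIN_S 0
    #define MSS_MIN_M 0x3fffu

    /* stand-in for hnae_set_field(origin, mask, shift, val) */
    #define set_field(origin, mask, shift, val) \
            ((origin) = (((origin) & ~(mask)) | (((val) << (shift)) & (mask))))

    /* identity on a little-endian host; a byte swap on big-endian */
    static uint16_t cpu_to_le16_sketch(uint16_t v) { return v; }

    static void fill_mss_min(uint16_t *le_field, uint16_t mss)
    {
            uint16_t tso_mss = 0; /* host-endian scratch value */

            /* wrong: set_field(*le_field, ...) would shift swapped bytes */
            set_field(tso_mss, MSS_MIN_M, MSS_MIN_S, mss);
            *le_field = cpu_to_le16_sketch(tso_mss); /* convert once, at the store */
    }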
hclge_tqps_update_stats(struct hnae3_handle *handle) return ret; } tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += - cpu_to_le32(desc[0].data[4]); + le32_to_cpu(desc[0].data[4]); } return 0; @@ -552,12 +558,12 @@ static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data) for (i = 0; i < kinfo->num_tqps; i++) { tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); - *buff++ = cpu_to_le64(tqp->tqp_stats.rcb_tx_ring_pktnum_rcd); + *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd; } for (i = 0; i < kinfo->num_tqps; i++) { tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); - *buff++ = cpu_to_le64(tqp->tqp_stats.rcb_rx_ring_pktnum_rcd); + *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd; } return buff; @@ -993,16 +999,17 @@ static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) int i, ret; for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) { + u32 offset = 0; + req = (struct hclge_cfg_param_cmd *)desc[i].data; hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM, true); - hnae_set_field(req->offset, HCLGE_CFG_OFFSET_M, + hnae_set_field(offset, HCLGE_CFG_OFFSET_M, HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES); /* Len should be united by 4 bytes when send to hardware */ - hnae_set_field(req->offset, HCLGE_CFG_RD_LEN_M, - HCLGE_CFG_RD_LEN_S, + hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S, HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT); - req->offset = cpu_to_le32(req->offset); + req->offset = cpu_to_le32(offset); } ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM); @@ -1101,14 +1108,21 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min, { struct hclge_cfg_tso_status_cmd *req; struct hclge_desc desc; + u16 tso_mss; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false); req = (struct hclge_cfg_tso_status_cmd *)desc.data; - hnae_set_field(req->tso_mss_min, HCLGE_TSO_MSS_MIN_M, + + tso_mss = 0; + hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, HCLGE_TSO_MSS_MIN_S, tso_mss_min); - hnae_set_field(req->tso_mss_max, HCLGE_TSO_MSS_MIN_M, + req->tso_mss_min = cpu_to_le16(tso_mss); + + tso_mss = 0; + hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M, HCLGE_TSO_MSS_MIN_S, tso_mss_max); + req->tso_mss_max = cpu_to_le16(tso_mss); return hclge_cmd_send(&hdev->hw, &desc, 1); } @@ -1152,7 +1166,7 @@ static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, req = (struct hclge_tqp_map_cmd *)desc.data; req->tqp_id = cpu_to_le16(tqp_pid); - req->tqp_vf = cpu_to_le16(func_id); + req->tqp_vf = func_id; req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B | 1 << HCLGE_TQP_MAP_EN_B; req->tqp_vid = cpu_to_le16(tqp_vid); @@ -2131,12 +2145,14 @@ static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) { struct hclge_config_auto_neg_cmd *req; struct hclge_desc desc; + u32 flag = 0; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); req = (struct hclge_config_auto_neg_cmd *)desc.data; - hnae_set_bit(req->cfg_an_cmd_flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); + hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); + req->cfg_an_cmd_flag = cpu_to_le32(flag); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { @@ -2525,8 +2541,9 @@ static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir) hclge_cmd_setup_basic_desc (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false); - req->start_table_index = i * HCLGE_RSS_CFG_TBL_SIZE; - req->rss_set_bitmap = HCLGE_RSS_SET_BITMAP_MSK; + req->start_table_index = + cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE); + req->rss_set_bitmap = 
cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK); for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) req->rss_result[j] = @@ -2555,12 +2572,15 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, req = (struct hclge_rss_tc_mode_cmd *)desc.data; for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { - hnae_set_bit(req->rss_tc_mode[i], HCLGE_RSS_TC_VALID_B, - (tc_valid[i] & 0x1)); - hnae_set_field(req->rss_tc_mode[i], HCLGE_RSS_TC_SIZE_M, + u16 mode = 0; + + hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1)); + hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M, HCLGE_RSS_TC_SIZE_S, tc_size[i]); - hnae_set_field(req->rss_tc_mode[i], HCLGE_RSS_TC_OFFSET_M, + hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M, HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); + + req->rss_tc_mode[i] = cpu_to_le16(mode); } ret = hclge_cmd_send(&hdev->hw, &desc, 1); @@ -2763,15 +2783,16 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id, i = 0; for (node = ring_chain; node; node = node->next) { - hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_TYPE_M, - HCLGE_INT_TYPE_S, + u16 type_and_id = 0; + + hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S, hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); - hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M, - HCLGE_TQP_ID_S, node->tqp_index); - hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M, + hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S, + node->tqp_index); + hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M, HCLGE_INT_GL_IDX_S, hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); - req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]); + req->tqp_type_and_id[i] = cpu_to_le16(type_and_id); req->vfid = vport->vport_id; if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { @@ -2851,16 +2872,17 @@ static int hclge_unmap_ring_from_vector( i = 0; for (node = ring_chain; node; node = node->next) { - hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_TYPE_M, - HCLGE_INT_TYPE_S, + u16 type_and_id = 0; + + hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S, hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); - hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M, - HCLGE_TQP_ID_S, node->tqp_index); - hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M, + hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S, + node->tqp_index); + hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M, HCLGE_INT_GL_IDX_S, hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); - req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]); + req->tqp_type_and_id[i] = cpu_to_le16(type_and_id); req->vfid = vport->vport_id; if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { @@ -2948,27 +2970,25 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) struct hclge_desc desc; struct hclge_config_mac_mode_cmd *req = (struct hclge_config_mac_mode_cmd *)desc.data; + u32 loop_en = 0; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); - hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_TX_EN_B, enable); - hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_RX_EN_B, enable); - hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_PAD_TX_B, enable); - hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_PAD_RX_B, enable); - hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_1588_TX_B, 0); - hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_1588_RX_B, 0); - hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_APP_LP_B, 0); - hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_LINE_LP_B, 0); - hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_FCS_TX_B, 
enable); - hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_RX_FCS_B, enable); - hnae_set_bit(req->txrx_pad_fcs_loop_en, - HCLGE_MAC_RX_FCS_STRIP_B, enable); - hnae_set_bit(req->txrx_pad_fcs_loop_en, - HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); - hnae_set_bit(req->txrx_pad_fcs_loop_en, - HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); - hnae_set_bit(req->txrx_pad_fcs_loop_en, - HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); + hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable); + hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable); + hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable); + hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable); + hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0); + hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0); + hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); + hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0); + hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable); + hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable); + hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable); + hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); + hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); + hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); + req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) @@ -3145,16 +3165,16 @@ static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) word_num = vfid / 32; bit_num = vfid % 32; if (clr) - desc[1].data[word_num] &= ~(1 << bit_num); + desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num)); else - desc[1].data[word_num] |= (1 << bit_num); + desc[1].data[word_num] |= cpu_to_le32(1 << bit_num); } else { word_num = (vfid - 192) / 32; bit_num = vfid % 32; if (clr) - desc[2].data[word_num] &= ~(1 << bit_num); + desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num)); else - desc[2].data[word_num] |= (1 << bit_num); + desc[2].data[word_num] |= cpu_to_le32(1 << bit_num); } return 0; @@ -3257,15 +3277,16 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport, struct hclge_dev *hdev = vport->back; struct hclge_cfg_func_mta_item_cmd *req; struct hclge_desc desc; + u16 item_idx = 0; int ret; req = (struct hclge_cfg_func_mta_item_cmd *)desc.data; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false); hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); - hnae_set_field(req->item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, + hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, HCLGE_CFG_MTA_ITEM_IDX_S, idx); - req->item_idx = cpu_to_le16(req->item_idx); + req->item_idx = cpu_to_le16(item_idx); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { @@ -3284,6 +3305,7 @@ static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, struct hclge_dev *hdev = vport->back; struct hclge_desc desc; u8 resp_code; + u16 retval; int ret; hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); @@ -3297,9 +3319,10 @@ static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, ret); return ret; } - resp_code = (desc.data[0] >> 8) & 0xff; + resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; + retval = le16_to_cpu(desc.retval); - return hclge_get_mac_vlan_cmd_status(vport, desc.retval, resp_code, + return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, HCLGE_MAC_VLAN_REMOVE); } @@ -3310,6 +3333,7 @@ static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, { struct hclge_dev *hdev = vport->back; u8 resp_code; + u16 retval; int ret; hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); @@ -3338,9 +3362,10 @@ static int 
hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, ret); return ret; } - resp_code = (desc[0].data[0] >> 8) & 0xff; + resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff; + retval = le16_to_cpu(desc[0].retval); - return hclge_get_mac_vlan_cmd_status(vport, desc[0].retval, resp_code, + return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, HCLGE_MAC_VLAN_LKUP); } @@ -3351,6 +3376,7 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, struct hclge_dev *hdev = vport->back; int cfg_status; u8 resp_code; + u16 retval; int ret; if (!mc_desc) { @@ -3362,8 +3388,10 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); ret = hclge_cmd_send(&hdev->hw, &desc, 1); - resp_code = (desc.data[0] >> 8) & 0xff; - cfg_status = hclge_get_mac_vlan_cmd_status(vport, desc.retval, + resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff; + retval = le16_to_cpu(desc.retval); + + cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, HCLGE_MAC_VLAN_ADD); } else { @@ -3376,9 +3404,10 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, memcpy(mc_desc[0].data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); - resp_code = (mc_desc[0].data[0] >> 8) & 0xff; - cfg_status = hclge_get_mac_vlan_cmd_status(vport, - mc_desc[0].retval, + resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff; + retval = le16_to_cpu(mc_desc[0].retval); + + cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval, resp_code, HCLGE_MAC_VLAN_ADD); } @@ -3407,6 +3436,7 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, struct hclge_dev *hdev = vport->back; struct hclge_mac_vlan_tbl_entry_cmd req; enum hclge_cmd_status status; + u16 egress_port = 0; /* mac addr check */ if (is_zero_ether_addr(addr) || @@ -3426,15 +3456,15 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport, hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0); hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); - hnae_set_bit(req.egress_port, - HCLGE_MAC_EPORT_SW_EN_B, 0); - hnae_set_bit(req.egress_port, - HCLGE_MAC_EPORT_TYPE_B, 0); - hnae_set_field(req.egress_port, HCLGE_MAC_EPORT_VFID_M, + + hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0); + hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0); + hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M, HCLGE_MAC_EPORT_VFID_S, vport->vport_id); - hnae_set_field(req.egress_port, HCLGE_MAC_EPORT_PFID_M, + hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M, HCLGE_MAC_EPORT_PFID_S, 0); - req.egress_port = cpu_to_le16(req.egress_port); + + req.egress_port = cpu_to_le16(egress_port); hclge_prepare_mac_addr(&req, addr); @@ -3667,7 +3697,7 @@ int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data; req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data; - req0->vlan_id = vlan; + req0->vlan_id = cpu_to_le16(vlan); req0->vlan_cfg = is_kill; if (vf_byte_off < HCLGE_MAX_VF_BYTES) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 359ee670d1e1..1ae6eae82eb3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -283,6 +283,7 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, struct hclge_pg_shapping_cmd *shap_cfg_cmd; enum hclge_opcode_type opcode; struct 
hclge_desc desc; + u32 shapping_para = 0; opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING : HCLGE_OPC_TM_PG_C_SHAPPING; @@ -292,11 +293,13 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, shap_cfg_cmd->pg_id = pg_id; - hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b); - hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u); - hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s); - hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b); - hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s); + hclge_tm_set_field(shapping_para, IR_B, ir_b); + hclge_tm_set_field(shapping_para, IR_U, ir_u); + hclge_tm_set_field(shapping_para, IR_S, ir_s); + hclge_tm_set_field(shapping_para, BS_B, bs_b); + hclge_tm_set_field(shapping_para, BS_S, bs_s); + + shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para); return hclge_cmd_send(&hdev->hw, &desc, 1); } @@ -337,6 +340,7 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev, struct hclge_pri_shapping_cmd *shap_cfg_cmd; enum hclge_opcode_type opcode; struct hclge_desc desc; + u32 shapping_para = 0; opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING : HCLGE_OPC_TM_PRI_C_SHAPPING; @@ -347,11 +351,13 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev, shap_cfg_cmd->pri_id = pri_id; - hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b); - hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u); - hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s); - hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b); - hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s); + hclge_tm_set_field(shapping_para, IR_B, ir_b); + hclge_tm_set_field(shapping_para, IR_U, ir_u); + hclge_tm_set_field(shapping_para, IR_S, ir_s); + hclge_tm_set_field(shapping_para, BS_B, bs_b); + hclge_tm_set_field(shapping_para, BS_S, bs_s); + + shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para); return hclge_cmd_send(&hdev->hw, &desc, 1); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index c2a0537c649f..aa73855366b3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -761,7 +761,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, if (type == DESC_TYPE_SKB) { skb = (struct sk_buff *)priv; - paylen = cpu_to_le16(skb->len); + paylen = skb->len; if (skb->ip_summed == CHECKSUM_PARTIAL) { skb_reset_mac_len(skb); @@ -795,7 +795,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, cpu_to_le32(ol_type_vlan_len_msec); desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso); - desc->tx.paylen = cpu_to_le16(paylen); + desc->tx.paylen = cpu_to_le32(paylen); desc->tx.mss = cpu_to_le16(mss); } -- cgit v1.2.3 From 1db9b1bf82b98cb265ed227dec3732aafae0eb0c Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Mon, 9 Oct 2017 15:44:01 +0800 Subject: net: hns3: Cleanup for non-static function in hns3 driver This patch fixes the following warning from sparse: warning: symbol 'hns3_set_multicast_list' was not declared. Should it be static. hns3_set_multicast_list turns out to be not used, so delete it. Signed-off-by: Yunsheng Lin Signed-off-by: David S. 
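Both hns3 patches here are mechanical sparse cleanups: the endianness patch above consistently assembles each descriptor field in a CPU-byte-order local variable and converts it exactly once when storing it into the little-endian command descriptor, while the patch below simply gives file-local symbols static linkage. A minimal sketch of the endian idiom follows; the struct, field, and mask are invented purely for illustration:

/* Illustrative only: ex_desc and the ex_* names are invented. */
#include <linux/types.h>
#include <asm/byteorder.h>

struct ex_desc {
	__le16 index;			/* device reads little endian */
};

static void ex_fill_desc(struct ex_desc *desc, u16 idx)
{
	u16 index = 0;			/* assemble in CPU byte order */

	index |= idx & 0xfff;		/* e.g. only 12 bits are valid */
	desc->index = cpu_to_le16(index);	/* convert exactly once */
}

Working on a plain u16 and converting on the final store is what lets sparse verify that no __le16 value is ever used in host-order arithmetic.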
Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 4 ++-- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 14 +++++++------- .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 20 ++++---------------- .../ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c | 4 ++-- 4 files changed, 15 insertions(+), 27 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 05985d81dda0..8ecd80744767 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -276,8 +276,8 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) return retval; } -enum hclge_cmd_status hclge_cmd_query_firmware_version(struct hclge_hw *hw, - u32 *version) +static enum hclge_cmd_status hclge_cmd_query_firmware_version( + struct hclge_hw *hw, u32 *version) { struct hclge_query_version_cmd *resp; struct hclge_desc desc; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 6115c2f730ce..c91c779aeeed 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1550,8 +1550,8 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev, * @buf_alloc: pointer to buffer calculation data * @return: 0: calculate sucessful, negative: fail */ -int hclge_rx_buffer_calc(struct hclge_dev *hdev, - struct hclge_pkt_buf_alloc *buf_alloc) +static int hclge_rx_buffer_calc(struct hclge_dev *hdev, + struct hclge_pkt_buf_alloc *buf_alloc) { u32 rx_all = hdev->pkt_buf_size; int no_pfc_priv_num, pfc_priv_num; @@ -2828,9 +2828,9 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id, return 0; } -int hclge_map_handle_ring_to_vector(struct hnae3_handle *handle, - int vector, - struct hnae3_ring_chain_node *ring_chain) +static int hclge_map_handle_ring_to_vector( + struct hnae3_handle *handle, int vector, + struct hnae3_ring_chain_node *ring_chain) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; @@ -3206,8 +3206,8 @@ static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req, new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); } -u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport, - const u8 *addr) +static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport, + const u8 *addr) { u16 high_val = addr[1] | (addr[0] << 8); struct hclge_dev *hdev = vport->back; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index aa73855366b3..26bbc91add65 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -24,7 +24,7 @@ #include "hnae3.h" #include "hns3_enet.h" -const char hns3_driver_name[] = "hns3"; +static const char hns3_driver_name[] = "hns3"; const char hns3_driver_version[] = VERMAGIC_STRING; static const char hns3_driver_string[] = "Hisilicon Ethernet Network Driver for Hip08 Family"; @@ -304,18 +304,6 @@ static int hns3_nic_net_stop(struct net_device *netdev) return 0; } -void hns3_set_multicast_list(struct net_device *netdev) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - struct netdev_hw_addr *ha = NULL; - - if (h->ae_algo->ops->set_mc_addr) { - netdev_for_each_mc_addr(ha, netdev) - if 
(h->ae_algo->ops->set_mc_addr(h, ha->addr)) - netdev_err(netdev, "set multicast fail\n"); - } -} - static int hns3_nic_uc_sync(struct net_device *netdev, const unsigned char *addr) { @@ -360,7 +348,7 @@ static int hns3_nic_mc_unsync(struct net_device *netdev, return 0; } -void hns3_nic_set_rx_mode(struct net_device *netdev) +static void hns3_nic_set_rx_mode(struct net_device *netdev) { struct hnae3_handle *h = hns3_get_handle(netdev); @@ -2596,7 +2584,7 @@ static void hns3_fini_ring(struct hns3_enet_ring *ring) ring->next_to_use = 0; } -int hns3_buf_size2type(u32 buf_size) +static int hns3_buf_size2type(u32 buf_size) { int bd_size_type; @@ -2908,7 +2896,7 @@ err_out: return ret; } -const struct hnae3_client_ops client_ops = { +static const struct hnae3_client_ops client_ops = { .init_instance = hns3_client_init, .uninit_instance = hns3_client_uninit, .link_status_change = hns3_link_status_change, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c index a892a157f346..060bacebf86a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c @@ -215,8 +215,8 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) * @stats: statistics info. * @data: statistics data. */ -void hns3_get_stats(struct net_device *netdev, struct ethtool_stats *stats, - u64 *data) +static void hns3_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) { struct hnae3_handle *h = hns3_get_handle(netdev); u64 *p = data; -- cgit v1.2.3 From 3a4b0129bf33caca5743891906393f17a2224d44 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sun, 8 Oct 2017 21:04:03 -0700 Subject: nfp: output control messages to trace_devlink_hwmsg() Use standard devlink trace point to allow tracing of control messages. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_app.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index af640b5c2108..857bb33020ba 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -36,6 +36,8 @@ #include +#include + #include "nfp_net_repr.h" struct bpf_prog; @@ -271,11 +273,17 @@ static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn, static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb) { + trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0, + skb->data, skb->len); + return nfp_ctrl_tx(app->ctrl, skb); } static inline void nfp_app_ctrl_rx(struct nfp_app *app, struct sk_buff *skb) { + trace_devlink_hwmsg(priv_to_devlink(app->pf), true, 0, + skb->data, skb->len); + app->type->ctrl_msg_rx(app, skb); } -- cgit v1.2.3 From a52b35c39ec6f33592df634ef2d1afae23401fdd Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sun, 8 Oct 2017 21:04:04 -0700 Subject: nfp: bpf: lift the single-port limitation Limiting the eBPF offload to a single port was a workaround required for the PoC application FW which has not been released externally. It's not necessary any more. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/bpf/main.c | 8 -------- 1 file changed, 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index be2cf10a2cd7..074726980994 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -89,14 +89,6 @@ nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) struct nfp_net_bpf_priv *priv; int ret; - /* Limit to single port, otherwise it's just a NIC */ - if (id > 0) { - nfp_warn(app->cpp, - "BPF NIC doesn't support more than one port right now\n"); - nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev); - return PTR_ERR_OR_ZERO(nn->port); - } - priv = kmalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; -- cgit v1.2.3 From b3f868df3c8904e964d7b257b47d7d90d93375e0 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sun, 8 Oct 2017 21:04:05 -0700 Subject: nfp: bpf: use the power of sparse to check we encode registers right Define a new __bitwise type for software representation of registers. This will allow us to catch incorrect parameter types using sparse. Accessors we define also allow us to return correct enum type and therefore ensure all switches handle all register types. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 99 ++++++++++++++------------- drivers/net/ethernet/netronome/nfp/bpf/main.h | 24 +------ drivers/net/ethernet/netronome/nfp/nfp_asm.h | 45 ++++++++++++ 3 files changed, 99 insertions(+), 69 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 239dfbe8a0a1..7e8cdfb39607 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -128,11 +128,11 @@ struct nfp_insn_re_regs { bool i8; }; -static u16 nfp_swreg_to_unreg(u32 swreg, bool is_dst) +static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst) { - u16 val = FIELD_GET(NN_REG_VAL, swreg); + u16 val = swreg_value(reg); - switch (FIELD_GET(NN_REG_TYPE, swreg)) { + switch (swreg_type(reg)) { case NN_REG_GPR_A: case NN_REG_GPR_B: case NN_REG_GPR_BOTH: @@ -149,33 +149,34 @@ static u16 nfp_swreg_to_unreg(u32 swreg, bool is_dst) return UR_REG_IMM_encode(val); case NN_REG_NONE: return is_dst ? 
UR_REG_NO_DST : REG_NONE; - default: - pr_err("unrecognized reg encoding %08x\n", swreg); - return 0; } + + pr_err("unrecognized reg encoding %08x\n", reg); + return 0; } static int -swreg_to_unrestricted(u32 dst, u32 lreg, u32 rreg, struct nfp_insn_ur_regs *reg) +swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg, + struct nfp_insn_ur_regs *reg) { memset(reg, 0, sizeof(*reg)); /* Decode destination */ - if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM) + if (swreg_type(dst) == NN_REG_IMM) return -EFAULT; - if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_B) + if (swreg_type(dst) == NN_REG_GPR_B) reg->dst_ab = ALU_DST_B; - if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_BOTH) + if (swreg_type(dst) == NN_REG_GPR_BOTH) reg->wr_both = true; reg->dst = nfp_swreg_to_unreg(dst, true); /* Decode source operands */ - if (FIELD_GET(NN_REG_TYPE, lreg) == FIELD_GET(NN_REG_TYPE, rreg)) + if (swreg_type(lreg) == swreg_type(rreg)) return -EFAULT; - if (FIELD_GET(NN_REG_TYPE, lreg) == NN_REG_GPR_B || - FIELD_GET(NN_REG_TYPE, rreg) == NN_REG_GPR_A) { + if (swreg_type(lreg) == NN_REG_GPR_B || + swreg_type(rreg) == NN_REG_GPR_A) { reg->areg = nfp_swreg_to_unreg(rreg, false); reg->breg = nfp_swreg_to_unreg(lreg, false); reg->swap = true; @@ -187,11 +188,11 @@ swreg_to_unrestricted(u32 dst, u32 lreg, u32 rreg, struct nfp_insn_ur_regs *reg) return 0; } -static u16 nfp_swreg_to_rereg(u32 swreg, bool is_dst, bool has_imm8, bool *i8) +static u16 nfp_swreg_to_rereg(swreg reg, bool is_dst, bool has_imm8, bool *i8) { - u16 val = FIELD_GET(NN_REG_VAL, swreg); + u16 val = swreg_value(reg); - switch (FIELD_GET(NN_REG_TYPE, swreg)) { + switch (swreg_type(reg)) { case NN_REG_GPR_A: case NN_REG_GPR_B: case NN_REG_GPR_BOTH: @@ -207,34 +208,37 @@ static u16 nfp_swreg_to_rereg(u32 swreg, bool is_dst, bool has_imm8, bool *i8) return RE_REG_IMM_encode(val & 0x7f); case NN_REG_NONE: return is_dst ? 
RE_REG_NO_DST : REG_NONE; - default: - pr_err("unrecognized reg encoding\n"); + case NN_REG_NNR: + pr_err("NNRs used with restricted encoding\n"); return 0; } + + pr_err("unrecognized reg encoding\n"); + return 0; } static int -swreg_to_restricted(u32 dst, u32 lreg, u32 rreg, struct nfp_insn_re_regs *reg, - bool has_imm8) +swreg_to_restricted(swreg dst, swreg lreg, swreg rreg, + struct nfp_insn_re_regs *reg, bool has_imm8) { memset(reg, 0, sizeof(*reg)); /* Decode destination */ - if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM) + if (swreg_type(dst) == NN_REG_IMM) return -EFAULT; - if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_B) + if (swreg_type(dst) == NN_REG_GPR_B) reg->dst_ab = ALU_DST_B; - if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_BOTH) + if (swreg_type(dst) == NN_REG_GPR_BOTH) reg->wr_both = true; reg->dst = nfp_swreg_to_rereg(dst, true, false, NULL); /* Decode source operands */ - if (FIELD_GET(NN_REG_TYPE, lreg) == FIELD_GET(NN_REG_TYPE, rreg)) + if (swreg_type(lreg) == swreg_type(rreg)) return -EFAULT; - if (FIELD_GET(NN_REG_TYPE, lreg) == NN_REG_GPR_B || - FIELD_GET(NN_REG_TYPE, rreg) == NN_REG_GPR_A) { + if (swreg_type(lreg) == NN_REG_GPR_B || + swreg_type(rreg) == NN_REG_GPR_A) { reg->areg = nfp_swreg_to_rereg(rreg, false, has_imm8, ®->i8); reg->breg = nfp_swreg_to_rereg(lreg, false, has_imm8, ®->i8); reg->swap = true; @@ -281,7 +285,7 @@ __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, static void emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, - u8 mode, u8 xfer, u32 lreg, u32 rreg, u8 size, bool sync) + u8 mode, u8 xfer, swreg lreg, swreg rreg, u8 size, bool sync) { struct nfp_insn_re_regs reg; int err; @@ -364,7 +368,7 @@ __emit_br_byte(struct nfp_prog *nfp_prog, u8 areg, u8 breg, bool imm8, static void emit_br_byte_neq(struct nfp_prog *nfp_prog, - u32 dst, u8 imm, u8 byte, u16 addr, u8 defer) + swreg dst, u8 imm, u8 byte, u16 addr, u8 defer) { struct nfp_insn_re_regs reg; int err; @@ -399,13 +403,13 @@ __emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi, } static void -emit_immed(struct nfp_prog *nfp_prog, u32 dst, u16 imm, +emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm, enum immed_width width, bool invert, enum immed_shift shift) { struct nfp_insn_ur_regs reg; int err; - if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM) { + if (swreg_type(dst) == NN_REG_IMM) { nfp_prog->error = -EFAULT; return; } @@ -451,8 +455,8 @@ __emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab, } static void -emit_shf(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum shf_op op, u32 rreg, - enum shf_sc sc, u8 shift) +emit_shf(struct nfp_prog *nfp_prog, swreg dst, + swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift) { struct nfp_insn_re_regs reg; int err; @@ -486,7 +490,8 @@ __emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab, } static void -emit_alu(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum alu_op op, u32 rreg) +emit_alu(struct nfp_prog *nfp_prog, swreg dst, + swreg lreg, enum alu_op op, swreg rreg) { struct nfp_insn_ur_regs reg; int err; @@ -524,7 +529,7 @@ __emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc, static void emit_ld_field_any(struct nfp_prog *nfp_prog, enum shf_sc sc, u8 shift, - u32 dst, u8 bmask, u32 src, bool zero) + swreg dst, u8 bmask, swreg src, bool zero) { struct nfp_insn_re_regs reg; int err; @@ -540,7 +545,7 @@ emit_ld_field_any(struct nfp_prog *nfp_prog, enum shf_sc sc, u8 shift, } static void -emit_ld_field(struct nfp_prog *nfp_prog, u32 dst, u8 bmask, u32 
src, +emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src, enum shf_sc sc, u8 shift) { emit_ld_field_any(nfp_prog, sc, shift, dst, bmask, src, false); @@ -565,7 +570,7 @@ static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift) return true; } -static void wrp_immed(struct nfp_prog *nfp_prog, u32 dst, u32 imm) +static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm) { enum immed_shift shift; u16 val; @@ -586,7 +591,7 @@ static void wrp_immed(struct nfp_prog *nfp_prog, u32 dst, u32 imm) * If the @imm is small enough encode it directly in operand and return * otherwise load @imm to a spare register and return its encoding. */ -static u32 ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg) +static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg) { if (FIELD_FIT(UR_REG_IMM_MAX, imm)) return reg_imm(imm); @@ -599,7 +604,7 @@ static u32 ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg) * If the @imm is small enough encode it directly in operand and return * otherwise load @imm to a spare register and return its encoding. */ -static u32 re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg) +static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg) { if (FIELD_FIT(RE_REG_IMM_MAX, imm)) return reg_imm(imm); @@ -629,7 +634,7 @@ construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, { unsigned int i; u16 shift, sz; - u32 tmp_reg; + swreg tmp_reg; /* We load the value from the address indicated in @offset and then * shift out the data we don't need. Note: this is big endian! @@ -697,7 +702,7 @@ static int wrp_set_mark(struct nfp_prog *nfp_prog, u8 src) static void wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm) { - u32 tmp_reg; + swreg tmp_reg; if (alu_op == ALU_OP_AND) { if (!imm) @@ -815,7 +820,7 @@ wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, const struct bpf_insn *insn = &meta->insn; u64 imm = insn->imm; /* sign extend */ u8 reg = insn->dst_reg * 2; - u32 tmp_reg; + swreg tmp_reg; if (insn->off < 0) /* TODO */ return -EOPNOTSUPP; @@ -1139,7 +1144,7 @@ static int mem_ldx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int mem_ldx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - u32 dst = reg_both(meta->insn.dst_reg * 2); + swreg dst = reg_both(meta->insn.dst_reg * 2); if (meta->insn.off != offsetof(struct xdp_md, data) && meta->insn.off != offsetof(struct xdp_md, data_end)) @@ -1202,8 +1207,10 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; u64 imm = insn->imm; /* sign extend */ - u32 or1 = reg_a(insn->dst_reg * 2), or2 = reg_b(insn->dst_reg * 2 + 1); - u32 tmp_reg; + swreg or1, or2, tmp_reg; + + or1 = reg_a(insn->dst_reg * 2); + or2 = reg_b(insn->dst_reg * 2 + 1); if (insn->off < 0) /* TODO */ return -EOPNOTSUPP; @@ -1252,7 +1259,7 @@ static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; u64 imm = insn->imm; /* sign extend */ - u32 tmp_reg; + swreg tmp_reg; if (insn->off < 0) /* TODO */ return -EOPNOTSUPP; @@ -1283,7 +1290,7 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; u64 imm = insn->imm; /* sign extend */ - u32 tmp_reg; + swreg tmp_reg; if (insn->off < 0) /* TODO */ return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h 
b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 4051e943f363..ccc3dbea25f6 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -39,6 +39,7 @@ #include #include +#include "../nfp_asm.h" #include "../nfp_net.h" /* For branch fixup logic use up-most byte of branch instruction as scratch @@ -65,29 +66,6 @@ enum nfp_bpf_action_type { NN_ACT_XDP, }; -/* Software register representation, hardware encoding in asm.h */ -#define NN_REG_TYPE GENMASK(31, 24) -#define NN_REG_VAL GENMASK(7, 0) - -enum nfp_bpf_reg_type { - NN_REG_GPR_A = BIT(0), - NN_REG_GPR_B = BIT(1), - NN_REG_NNR = BIT(2), - NN_REG_XFER = BIT(3), - NN_REG_IMM = BIT(4), - NN_REG_NONE = BIT(5), -}; - -#define NN_REG_GPR_BOTH (NN_REG_GPR_A | NN_REG_GPR_B) - -#define reg_both(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_BOTH)) -#define reg_a(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_A)) -#define reg_b(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_B)) -#define reg_nnr(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_NNR)) -#define reg_xfer(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_XFER)) -#define reg_imm(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_IMM)) -#define reg_none() (FIELD_PREP(NN_REG_TYPE, NN_REG_NONE)) - #define pkt_reg(np) reg_a((np)->regs_per_thread - STATIC_REG_PKT) #define imm_a(np) reg_a((np)->regs_per_thread - STATIC_REG_IMM) #define imm_b(np) reg_b((np)->regs_per_thread - STATIC_REG_IMM) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index d2b535739d2b..9b9d5d18ee20 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -34,6 +34,7 @@ #ifndef __NFP_ASM_H__ #define __NFP_ASM_H__ 1 +#include #include #define REG_NONE 0 @@ -230,4 +231,48 @@ enum lcsr_wr_src { #define OP_CARB_BASE 0x0e000000000ULL #define OP_CARB_OR 0x00000010000ULL +/* Software register representation, independent of operand type */ +#define NN_REG_TYPE GENMASK(31, 24) +#define NN_REG_VAL GENMASK(7, 0) + +enum nfp_bpf_reg_type { + NN_REG_GPR_A = BIT(0), + NN_REG_GPR_B = BIT(1), + NN_REG_GPR_BOTH = NN_REG_GPR_A | NN_REG_GPR_B, + NN_REG_NNR = BIT(2), + NN_REG_XFER = BIT(3), + NN_REG_IMM = BIT(4), + NN_REG_NONE = BIT(5), +}; + +#define reg_both(x) __enc_swreg((x), NN_REG_GPR_BOTH) +#define reg_a(x) __enc_swreg((x), NN_REG_GPR_A) +#define reg_b(x) __enc_swreg((x), NN_REG_GPR_B) +#define reg_nnr(x) __enc_swreg((x), NN_REG_NNR) +#define reg_xfer(x) __enc_swreg((x), NN_REG_XFER) +#define reg_imm(x) __enc_swreg((x), NN_REG_IMM) +#define reg_none() __enc_swreg(0, NN_REG_NONE) + +typedef __u32 __bitwise swreg; + +static inline swreg __enc_swreg(u16 id, u8 type) +{ + return (__force swreg)(id | FIELD_PREP(NN_REG_TYPE, type)); +} + +static inline u32 swreg_raw(swreg reg) +{ + return (__force u32)reg; +} + +static inline enum nfp_bpf_reg_type swreg_type(swreg reg) +{ + return FIELD_GET(NN_REG_TYPE, swreg_raw(reg)); +} + +static inline u16 swreg_value(swreg reg) +{ + return FIELD_GET(NN_REG_VAL, swreg_raw(reg)); +} + #endif -- cgit v1.2.3 From 2a15bb1aba2bfca0a69cdbb113def57afd5666ab Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sun, 8 Oct 2017 21:04:06 -0700 Subject: nfp: bpf: move software reg helpers and cmd table out of translator Move the software reg helpers and some static data to nfp_asm.c. They are related to the previous patch, but move is done in a separate commit for ease of review. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
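Before the helpers move, the __bitwise trick from the previous patch is worth spelling out, since declaring a dedicated type and forcing all conversions through accessors is a generic technique rather than anything NFP-specific. A minimal self-contained sketch, with invented ex_* names (run sparse, e.g. "make C=1", to have it checked):

/* Illustrative only: ex_reg and the EX_* masks are invented names. */
#include <linux/types.h>
#include <linux/bitfield.h>

#define EX_REG_TYPE	GENMASK(31, 24)
#define EX_REG_VAL	GENMASK(7, 0)

typedef __u32 __bitwise ex_reg;		/* distinct from u32 for sparse */

static inline ex_reg ex_enc(u16 val, u8 type)
{
	return (__force ex_reg)(val | FIELD_PREP(EX_REG_TYPE, type));
}

static inline u16 ex_val(ex_reg reg)
{
	return FIELD_GET(EX_REG_VAL, (__force u32)reg);
}

Passing a plain u32 where an ex_reg is expected (or the reverse) now draws a sparse warning, which is exactly how the swreg conversion above catches mis-encoded registers.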
Miller --- drivers/net/ethernet/netronome/nfp/Makefile | 1 + drivers/net/ethernet/netronome/nfp/bpf/jit.c | 147 ----------------------- drivers/net/ethernet/netronome/nfp/nfp_asm.c | 167 +++++++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/nfp_asm.h | 24 ++++ 4 files changed, 192 insertions(+), 147 deletions(-) create mode 100644 drivers/net/ethernet/netronome/nfp/nfp_asm.c (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index becaacf1554d..bd3b2bd408bc 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -14,6 +14,7 @@ nfp-objs := \ nfpcore/nfp_resource.o \ nfpcore/nfp_rtsym.o \ nfpcore/nfp_target.o \ + nfp_asm.o \ nfp_app.o \ nfp_app_nic.o \ nfp_devlink.o \ diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 7e8cdfb39607..7bcff007075c 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -110,154 +110,7 @@ nfp_prog_offset_to_index(struct nfp_prog *nfp_prog, unsigned int offset) return offset - nfp_prog->start_off; } -/* --- SW reg --- */ -struct nfp_insn_ur_regs { - enum alu_dst_ab dst_ab; - u16 dst; - u16 areg, breg; - bool swap; - bool wr_both; -}; - -struct nfp_insn_re_regs { - enum alu_dst_ab dst_ab; - u8 dst; - u8 areg, breg; - bool swap; - bool wr_both; - bool i8; -}; - -static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst) -{ - u16 val = swreg_value(reg); - - switch (swreg_type(reg)) { - case NN_REG_GPR_A: - case NN_REG_GPR_B: - case NN_REG_GPR_BOTH: - return val; - case NN_REG_NNR: - return UR_REG_NN | val; - case NN_REG_XFER: - return UR_REG_XFR | val; - case NN_REG_IMM: - if (val & ~0xff) { - pr_err("immediate too large\n"); - return 0; - } - return UR_REG_IMM_encode(val); - case NN_REG_NONE: - return is_dst ? UR_REG_NO_DST : REG_NONE; - } - - pr_err("unrecognized reg encoding %08x\n", reg); - return 0; -} - -static int -swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg, - struct nfp_insn_ur_regs *reg) -{ - memset(reg, 0, sizeof(*reg)); - - /* Decode destination */ - if (swreg_type(dst) == NN_REG_IMM) - return -EFAULT; - - if (swreg_type(dst) == NN_REG_GPR_B) - reg->dst_ab = ALU_DST_B; - if (swreg_type(dst) == NN_REG_GPR_BOTH) - reg->wr_both = true; - reg->dst = nfp_swreg_to_unreg(dst, true); - - /* Decode source operands */ - if (swreg_type(lreg) == swreg_type(rreg)) - return -EFAULT; - - if (swreg_type(lreg) == NN_REG_GPR_B || - swreg_type(rreg) == NN_REG_GPR_A) { - reg->areg = nfp_swreg_to_unreg(rreg, false); - reg->breg = nfp_swreg_to_unreg(lreg, false); - reg->swap = true; - } else { - reg->areg = nfp_swreg_to_unreg(lreg, false); - reg->breg = nfp_swreg_to_unreg(rreg, false); - } - - return 0; -} - -static u16 nfp_swreg_to_rereg(swreg reg, bool is_dst, bool has_imm8, bool *i8) -{ - u16 val = swreg_value(reg); - - switch (swreg_type(reg)) { - case NN_REG_GPR_A: - case NN_REG_GPR_B: - case NN_REG_GPR_BOTH: - return val; - case NN_REG_XFER: - return RE_REG_XFR | val; - case NN_REG_IMM: - if (val & ~(0x7f | has_imm8 << 7)) { - pr_err("immediate too large\n"); - return 0; - } - *i8 = val & 0x80; - return RE_REG_IMM_encode(val & 0x7f); - case NN_REG_NONE: - return is_dst ? 
RE_REG_NO_DST : REG_NONE; - case NN_REG_NNR: - pr_err("NNRs used with restricted encoding\n"); - return 0; - } - - pr_err("unrecognized reg encoding\n"); - return 0; -} - -static int -swreg_to_restricted(swreg dst, swreg lreg, swreg rreg, - struct nfp_insn_re_regs *reg, bool has_imm8) -{ - memset(reg, 0, sizeof(*reg)); - - /* Decode destination */ - if (swreg_type(dst) == NN_REG_IMM) - return -EFAULT; - - if (swreg_type(dst) == NN_REG_GPR_B) - reg->dst_ab = ALU_DST_B; - if (swreg_type(dst) == NN_REG_GPR_BOTH) - reg->wr_both = true; - reg->dst = nfp_swreg_to_rereg(dst, true, false, NULL); - - /* Decode source operands */ - if (swreg_type(lreg) == swreg_type(rreg)) - return -EFAULT; - - if (swreg_type(lreg) == NN_REG_GPR_B || - swreg_type(rreg) == NN_REG_GPR_A) { - reg->areg = nfp_swreg_to_rereg(rreg, false, has_imm8, ®->i8); - reg->breg = nfp_swreg_to_rereg(lreg, false, has_imm8, ®->i8); - reg->swap = true; - } else { - reg->areg = nfp_swreg_to_rereg(lreg, false, has_imm8, ®->i8); - reg->breg = nfp_swreg_to_rereg(rreg, false, has_imm8, ®->i8); - } - - return 0; -} - /* --- Emitters --- */ -static const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = { - [CMD_TGT_WRITE8] = { 0x00, 0x42 }, - [CMD_TGT_READ8] = { 0x01, 0x43 }, - [CMD_TGT_READ_LE] = { 0x01, 0x40 }, - [CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 }, -}; - static void __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c new file mode 100644 index 000000000000..4c9201bf9331 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c @@ -0,0 +1,167 @@ +/* + * Copyright (C) 2016-2017 Netronome Systems, Inc. + * + * This software is dual licensed under the GNU General License Version 2, + * June 1991 as shown in the file COPYING in the top-level directory of this + * source tree or the BSD 2-Clause License provided below. You have the + * option to license this software under the complete terms of either license. + * + * The BSD 2-Clause License: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include + +#include "nfp_asm.h" + +const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = { + [CMD_TGT_WRITE8] = { 0x00, 0x42 }, + [CMD_TGT_READ8] = { 0x01, 0x43 }, + [CMD_TGT_READ_LE] = { 0x01, 0x40 }, + [CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 }, +}; + +static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst) +{ + u16 val = swreg_value(reg); + + switch (swreg_type(reg)) { + case NN_REG_GPR_A: + case NN_REG_GPR_B: + case NN_REG_GPR_BOTH: + return val; + case NN_REG_NNR: + return UR_REG_NN | val; + case NN_REG_XFER: + return UR_REG_XFR | val; + case NN_REG_IMM: + if (val & ~0xff) { + pr_err("immediate too large\n"); + return 0; + } + return UR_REG_IMM_encode(val); + case NN_REG_NONE: + return is_dst ? UR_REG_NO_DST : REG_NONE; + } + + pr_err("unrecognized reg encoding %08x\n", reg); + return 0; +} + +int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg, + struct nfp_insn_ur_regs *reg) +{ + memset(reg, 0, sizeof(*reg)); + + /* Decode destination */ + if (swreg_type(dst) == NN_REG_IMM) + return -EFAULT; + + if (swreg_type(dst) == NN_REG_GPR_B) + reg->dst_ab = ALU_DST_B; + if (swreg_type(dst) == NN_REG_GPR_BOTH) + reg->wr_both = true; + reg->dst = nfp_swreg_to_unreg(dst, true); + + /* Decode source operands */ + if (swreg_type(lreg) == swreg_type(rreg)) + return -EFAULT; + + if (swreg_type(lreg) == NN_REG_GPR_B || + swreg_type(rreg) == NN_REG_GPR_A) { + reg->areg = nfp_swreg_to_unreg(rreg, false); + reg->breg = nfp_swreg_to_unreg(lreg, false); + reg->swap = true; + } else { + reg->areg = nfp_swreg_to_unreg(lreg, false); + reg->breg = nfp_swreg_to_unreg(rreg, false); + } + + return 0; +} + +static u16 nfp_swreg_to_rereg(swreg reg, bool is_dst, bool has_imm8, bool *i8) +{ + u16 val = swreg_value(reg); + + switch (swreg_type(reg)) { + case NN_REG_GPR_A: + case NN_REG_GPR_B: + case NN_REG_GPR_BOTH: + return val; + case NN_REG_XFER: + return RE_REG_XFR | val; + case NN_REG_IMM: + if (val & ~(0x7f | has_imm8 << 7)) { + pr_err("immediate too large\n"); + return 0; + } + *i8 = val & 0x80; + return RE_REG_IMM_encode(val & 0x7f); + case NN_REG_NONE: + return is_dst ? 
RE_REG_NO_DST : REG_NONE; + case NN_REG_NNR: + pr_err("NNRs used with restricted encoding\n"); + return 0; + } + + pr_err("unrecognized reg encoding\n"); + return 0; +} + +int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg, + struct nfp_insn_re_regs *reg, bool has_imm8) +{ + memset(reg, 0, sizeof(*reg)); + + /* Decode destination */ + if (swreg_type(dst) == NN_REG_IMM) + return -EFAULT; + + if (swreg_type(dst) == NN_REG_GPR_B) + reg->dst_ab = ALU_DST_B; + if (swreg_type(dst) == NN_REG_GPR_BOTH) + reg->wr_both = true; + reg->dst = nfp_swreg_to_rereg(dst, true, false, NULL); + + /* Decode source operands */ + if (swreg_type(lreg) == swreg_type(rreg)) + return -EFAULT; + + if (swreg_type(lreg) == NN_REG_GPR_B || + swreg_type(rreg) == NN_REG_GPR_A) { + reg->areg = nfp_swreg_to_rereg(rreg, false, has_imm8, ®->i8); + reg->breg = nfp_swreg_to_rereg(lreg, false, has_imm8, ®->i8); + reg->swap = true; + } else { + reg->areg = nfp_swreg_to_rereg(lreg, false, has_imm8, ®->i8); + reg->breg = nfp_swreg_to_rereg(rreg, false, has_imm8, ®->i8); + } + + return 0; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index 9b9d5d18ee20..8e87c0676c30 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -205,6 +205,8 @@ enum cmd_tgt_map { __CMD_TGT_MAP_SIZE, }; +extern const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE]; + enum cmd_mode { CMD_MODE_40b_AB = 0, CMD_MODE_40b_BA = 1, @@ -275,4 +277,26 @@ static inline u16 swreg_value(swreg reg) return FIELD_GET(NN_REG_VAL, swreg_raw(reg)); } +struct nfp_insn_ur_regs { + enum alu_dst_ab dst_ab; + u16 dst; + u16 areg, breg; + bool swap; + bool wr_both; +}; + +struct nfp_insn_re_regs { + enum alu_dst_ab dst_ab; + u8 dst; + u8 areg, breg; + bool swap; + bool wr_both; + bool i8; +}; + +int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg, + struct nfp_insn_ur_regs *reg); +int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg, + struct nfp_insn_re_regs *reg, bool has_imm8); + #endif -- cgit v1.2.3 From 3cae13193381fd4cb87791174d4c9fdf5b7025ff Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sun, 8 Oct 2017 21:04:07 -0700 Subject: nfp: bpf: encode all 64bit shifts Add encodings of all 64bit shift operations. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
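What the new encodings below compute is easiest to see in plain C. The sketch that follows is a reference model, not driver code: hi and lo stand for the two 32-bit halves of one 64-bit eBPF register, imm is assumed to be in the 0..63 range BPF guarantees, and the imm < 32 cases correspond to the double-shift (DSHF) operations in the patch, which funnel the bits crossing the 32-bit boundary from one half into the other:

/* Reference model of the 64-bit immediate shifts emitted below. */
#include <stdint.h>

static void shl64(uint32_t *hi, uint32_t *lo, unsigned int imm)
{
	if (imm == 0)		/* avoid the UB of a 32-bit shift by 32 */
		return;
	if (imm < 32) {
		*hi = (*hi << imm) | (*lo >> (32 - imm));
		*lo <<= imm;
	} else {
		*hi = *lo << (imm - 32);	/* imm == 32 is a pure move */
		*lo = 0;
	}
}

static void shr64(uint32_t *hi, uint32_t *lo, unsigned int imm)
{
	if (imm == 0)
		return;
	if (imm < 32) {
		*lo = (*lo >> imm) | (*hi << (32 - imm));
		*hi >>= imm;
	} else {
		*lo = *hi >> (imm - 32);	/* imm == 32 is a pure move */
		*hi = 0;
	}
}

Note the operation counts match the patch: one shift per half while imm < 32, and a move (or a single shift) plus a clear once imm reaches 32.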
Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 48 +++++++++++++++++++++------- 1 file changed, 36 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 7bcff007075c..095cf50e8450 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -825,12 +825,24 @@ static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; - - if (insn->imm != 32) - return 1; /* TODO */ - - wrp_reg_mov(nfp_prog, insn->dst_reg * 2 + 1, insn->dst_reg * 2); - wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), 0); + u8 dst = insn->dst_reg * 2; + + if (insn->imm < 32) { + emit_shf(nfp_prog, reg_both(dst + 1), + reg_a(dst + 1), SHF_OP_NONE, reg_b(dst), + SHF_SC_R_DSHF, 32 - insn->imm); + emit_shf(nfp_prog, reg_both(dst), + reg_none(), SHF_OP_NONE, reg_b(dst), + SHF_SC_L_SHF, insn->imm); + } else if (insn->imm == 32) { + wrp_reg_mov(nfp_prog, dst + 1, dst); + wrp_immed(nfp_prog, reg_both(dst), 0); + } else if (insn->imm > 32) { + emit_shf(nfp_prog, reg_both(dst + 1), + reg_none(), SHF_OP_NONE, reg_b(dst), + SHF_SC_L_SHF, insn->imm - 32); + wrp_immed(nfp_prog, reg_both(dst), 0); + } return 0; } @@ -838,12 +850,24 @@ static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; - - if (insn->imm != 32) - return 1; /* TODO */ - - wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->dst_reg * 2 + 1); - wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); + u8 dst = insn->dst_reg * 2; + + if (insn->imm < 32) { + emit_shf(nfp_prog, reg_both(dst), + reg_a(dst + 1), SHF_OP_NONE, reg_b(dst), + SHF_SC_R_DSHF, insn->imm); + emit_shf(nfp_prog, reg_both(dst + 1), + reg_none(), SHF_OP_NONE, reg_b(dst + 1), + SHF_SC_R_SHF, insn->imm); + } else if (insn->imm == 32) { + wrp_reg_mov(nfp_prog, dst, dst + 1); + wrp_immed(nfp_prog, reg_both(dst + 1), 0); + } else if (insn->imm > 32) { + emit_shf(nfp_prog, reg_both(dst), + reg_none(), SHF_OP_NONE, reg_b(dst + 1), + SHF_SC_R_SHF, insn->imm - 32); + wrp_immed(nfp_prog, reg_both(dst + 1), 0); + } return 0; } -- cgit v1.2.3 From 226e0e94ce3575bd9ca85f90957516ac1dff5bf3 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sun, 8 Oct 2017 21:04:08 -0700 Subject: nfp: bpf: remove register rename Remove the register renumbering optimization. To implement calling map and other helpers we need a stricter register layout. We can't freely reassign register numbers. This will have the effect of running in 4 context/thread mode, which should be OK since we are moving towards integrating BPF more closely with the FW app datapath anyway, and the target datapath itself runs in 4 context mode. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S.
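The layout this commits to is the one already implied by the dst_reg * 2 arithmetic visible throughout the JIT: each 64-bit eBPF register Rn permanently occupies the pair of 32-bit GPRs (2n, 2n + 1). Two hypothetical helpers, written here only to make the convention explicit:

/* Illustrative helpers, not driver code. */
static inline unsigned int ex_bpf_reg_lo(unsigned int bpf_reg)
{
	return bpf_reg * 2;		/* low 32 bits of R(bpf_reg) */
}

static inline unsigned int ex_bpf_reg_hi(unsigned int bpf_reg)
{
	return bpf_reg * 2 + 1;		/* high 32 bits of R(bpf_reg) */
}

With MAX_BPF_REG registers (11 at the time) this needs 22 GPRs, which fits within the 32 registers per thread available in 4-context mode; hence the fixed num_regs and regs_per_thread values in the diff below.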
Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 46 ++-------------------------- 1 file changed, 3 insertions(+), 43 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 095cf50e8450..469dc8a055f2 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1621,38 +1621,6 @@ static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog) } } -/* Try to rename registers so that program uses only low ones */ -static int nfp_bpf_opt_reg_rename(struct nfp_prog *nfp_prog) -{ - bool reg_used[MAX_BPF_REG] = {}; - u8 tgt_reg[MAX_BPF_REG] = {}; - struct nfp_insn_meta *meta; - unsigned int i, j; - - list_for_each_entry(meta, &nfp_prog->insns, l) { - if (meta->skip) - continue; - - reg_used[meta->insn.src_reg] = true; - reg_used[meta->insn.dst_reg] = true; - } - - for (i = 0, j = 0; i < ARRAY_SIZE(tgt_reg); i++) { - if (!reg_used[i]) - continue; - - tgt_reg[i] = j++; - } - nfp_prog->num_regs = j; - - list_for_each_entry(meta, &nfp_prog->insns, l) { - meta->insn.src_reg = tgt_reg[meta->insn.src_reg]; - meta->insn.dst_reg = tgt_reg[meta->insn.dst_reg]; - } - - return 0; -} - /* Remove masking after load since our load guarantees this is not needed */ static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog) { @@ -1729,14 +1697,8 @@ static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog) static int nfp_bpf_optimize(struct nfp_prog *nfp_prog) { - int ret; - nfp_bpf_opt_reg_init(nfp_prog); - ret = nfp_bpf_opt_reg_rename(nfp_prog); - if (ret) - return ret; - nfp_bpf_opt_ld_mask(nfp_prog); nfp_bpf_opt_ld_shift(nfp_prog); @@ -1783,10 +1745,8 @@ nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem, if (ret) goto out; - if (nfp_prog->num_regs <= 7) - nfp_prog->regs_per_thread = 16; - else - nfp_prog->regs_per_thread = 32; + nfp_prog->num_regs = MAX_BPF_REG; + nfp_prog->regs_per_thread = 32; nfp_prog->prog = prog_mem; nfp_prog->__prog_alloc_len = prog_sz; @@ -1799,7 +1759,7 @@ nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem, } res->n_instr = nfp_prog->prog_len; - res->dense_mode = nfp_prog->num_regs <= 7; + res->dense_mode = false; out: nfp_prog_free(nfp_prog); -- cgit v1.2.3 From 509144e25049831ffe94160b1f03cf1b900aaa3c Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sun, 8 Oct 2017 21:04:09 -0700 Subject: nfp: bpf: remove packet marking support Temporarily drop support for skb->mark. We are primarily focusing on XDP offload, and implementing skb->mark on the new datapath has lower priority. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 13 ------------- drivers/net/ethernet/netronome/nfp/bpf/main.h | 3 +-- 2 files changed, 1 insertion(+), 15 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 469dc8a055f2..4fa220f710d2 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -542,16 +542,6 @@ static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size) return construct_data_ind_ld(nfp_prog, offset, 0, false, size); } -static int wrp_set_mark(struct nfp_prog *nfp_prog, u8 src) -{ - emit_alu(nfp_prog, NFP_BPF_ABI_MARK, - reg_none(), ALU_OP_NONE, reg_b(src)); - emit_alu(nfp_prog, NFP_BPF_ABI_FLAGS, - NFP_BPF_ABI_FLAGS, ALU_OP_OR, reg_imm(NFP_BPF_ABI_FLAG_MARK)); - - return 0; -} - static void wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm) { @@ -1053,9 +1043,6 @@ static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int mem_stx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - if (meta->insn.off == offsetof(struct sk_buff, mark)) - return wrp_set_mark(nfp_prog, meta->insn.src_reg * 2); - return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index ccc3dbea25f6..7d959757a51a 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -71,9 +71,8 @@ enum nfp_bpf_action_type { #define imm_b(np) reg_b((np)->regs_per_thread - STATIC_REG_IMM) #define imm_both(np) reg_both((np)->regs_per_thread - STATIC_REG_IMM) -#define NFP_BPF_ABI_FLAGS reg_nnr(0) +#define NFP_BPF_ABI_FLAGS reg_imm(0) #define NFP_BPF_ABI_FLAG_MARK 1 -#define NFP_BPF_ABI_MARK reg_nnr(1) #define NFP_BPF_ABI_PKT reg_nnr(2) #define NFP_BPF_ABI_LEN reg_nnr(3) -- cgit v1.2.3 From 8afd9c961e95b1529cbc2b2b9c063a488659b337 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sun, 8 Oct 2017 21:04:10 -0700 Subject: nfp: add more white space to the instruction defines We need to add longer OP_* defines, move the values away. Purely whitespace commit. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfp_asm.h | 156 +++++++++++++-------------- 1 file changed, 78 insertions(+), 78 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index 8e87c0676c30..63cfd07da34e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -53,14 +53,14 @@ #define UR_REG_IMM_encode(x) (UR_REG_IMM | (x)) #define UR_REG_IMM_MAX 0x0ffULL -#define OP_BR_BASE 0x0d800000020ULL -#define OP_BR_BASE_MASK 0x0f8000c3ce0ULL -#define OP_BR_MASK 0x0000000001fULL -#define OP_BR_EV_PIP 0x00000000300ULL -#define OP_BR_CSS 0x0000003c000ULL -#define OP_BR_DEFBR 0x00000300000ULL -#define OP_BR_ADDR_LO 0x007ffc00000ULL -#define OP_BR_ADDR_HI 0x10000000000ULL +#define OP_BR_BASE 0x0d800000020ULL +#define OP_BR_BASE_MASK 0x0f8000c3ce0ULL +#define OP_BR_MASK 0x0000000001fULL +#define OP_BR_EV_PIP 0x00000000300ULL +#define OP_BR_CSS 0x0000003c000ULL +#define OP_BR_DEFBR 0x00000300000ULL +#define OP_BR_ADDR_LO 0x007ffc00000ULL +#define OP_BR_ADDR_HI 0x10000000000ULL #define nfp_is_br(_insn) \ (((_insn) & OP_BR_BASE_MASK) == OP_BR_BASE) @@ -83,30 +83,30 @@ enum br_ctx_signal_state { BR_CSS_NONE = 2, }; -#define OP_BBYTE_BASE 0x0c800000000ULL -#define OP_BB_A_SRC 0x000000000ffULL -#define OP_BB_BYTE 0x00000000300ULL -#define OP_BB_B_SRC 0x0000003fc00ULL -#define OP_BB_I8 0x00000040000ULL -#define OP_BB_EQ 0x00000080000ULL -#define OP_BB_DEFBR 0x00000300000ULL -#define OP_BB_ADDR_LO 0x007ffc00000ULL -#define OP_BB_ADDR_HI 0x10000000000ULL - -#define OP_BALU_BASE 0x0e800000000ULL -#define OP_BA_A_SRC 0x000000003ffULL -#define OP_BA_B_SRC 0x000000ffc00ULL -#define OP_BA_DEFBR 0x00000300000ULL -#define OP_BA_ADDR_HI 0x0007fc00000ULL - -#define OP_IMMED_A_SRC 0x000000003ffULL -#define OP_IMMED_B_SRC 0x000000ffc00ULL -#define OP_IMMED_IMM 0x0000ff00000ULL -#define OP_IMMED_WIDTH 0x00060000000ULL -#define OP_IMMED_INV 0x00080000000ULL -#define OP_IMMED_SHIFT 0x00600000000ULL -#define OP_IMMED_BASE 0x0f000000000ULL -#define OP_IMMED_WR_AB 0x20000000000ULL +#define OP_BBYTE_BASE 0x0c800000000ULL +#define OP_BB_A_SRC 0x000000000ffULL +#define OP_BB_BYTE 0x00000000300ULL +#define OP_BB_B_SRC 0x0000003fc00ULL +#define OP_BB_I8 0x00000040000ULL +#define OP_BB_EQ 0x00000080000ULL +#define OP_BB_DEFBR 0x00000300000ULL +#define OP_BB_ADDR_LO 0x007ffc00000ULL +#define OP_BB_ADDR_HI 0x10000000000ULL + +#define OP_BALU_BASE 0x0e800000000ULL +#define OP_BA_A_SRC 0x000000003ffULL +#define OP_BA_B_SRC 0x000000ffc00ULL +#define OP_BA_DEFBR 0x00000300000ULL +#define OP_BA_ADDR_HI 0x0007fc00000ULL + +#define OP_IMMED_A_SRC 0x000000003ffULL +#define OP_IMMED_B_SRC 0x000000ffc00ULL +#define OP_IMMED_IMM 0x0000ff00000ULL +#define OP_IMMED_WIDTH 0x00060000000ULL +#define OP_IMMED_INV 0x00080000000ULL +#define OP_IMMED_SHIFT 0x00600000000ULL +#define OP_IMMED_BASE 0x0f000000000ULL +#define OP_IMMED_WR_AB 0x20000000000ULL enum immed_width { IMMED_WIDTH_ALL = 0, @@ -120,17 +120,17 @@ enum immed_shift { IMMED_SHIFT_2B = 2, }; -#define OP_SHF_BASE 0x08000000000ULL -#define OP_SHF_A_SRC 0x000000000ffULL -#define OP_SHF_SC 0x00000000300ULL -#define OP_SHF_B_SRC 0x0000003fc00ULL -#define OP_SHF_I8 0x00000040000ULL -#define OP_SHF_SW 0x00000080000ULL -#define OP_SHF_DST 0x0000ff00000ULL -#define OP_SHF_SHIFT 0x001f0000000ULL -#define OP_SHF_OP 0x00e00000000ULL -#define OP_SHF_DST_AB 0x01000000000ULL -#define OP_SHF_WR_AB 0x20000000000ULL +#define OP_SHF_BASE 
0x08000000000ULL +#define OP_SHF_A_SRC 0x000000000ffULL +#define OP_SHF_SC 0x00000000300ULL +#define OP_SHF_B_SRC 0x0000003fc00ULL +#define OP_SHF_I8 0x00000040000ULL +#define OP_SHF_SW 0x00000080000ULL +#define OP_SHF_DST 0x0000ff00000ULL +#define OP_SHF_SHIFT 0x001f0000000ULL +#define OP_SHF_OP 0x00e00000000ULL +#define OP_SHF_DST_AB 0x01000000000ULL +#define OP_SHF_WR_AB 0x20000000000ULL enum shf_op { SHF_OP_NONE = 0, @@ -145,14 +145,14 @@ enum shf_sc { SHF_SC_R_DSHF = 3, }; -#define OP_ALU_A_SRC 0x000000003ffULL -#define OP_ALU_B_SRC 0x000000ffc00ULL -#define OP_ALU_DST 0x0003ff00000ULL -#define OP_ALU_SW 0x00040000000ULL -#define OP_ALU_OP 0x00f80000000ULL -#define OP_ALU_DST_AB 0x01000000000ULL -#define OP_ALU_BASE 0x0a000000000ULL -#define OP_ALU_WR_AB 0x20000000000ULL +#define OP_ALU_A_SRC 0x000000003ffULL +#define OP_ALU_B_SRC 0x000000ffc00ULL +#define OP_ALU_DST 0x0003ff00000ULL +#define OP_ALU_SW 0x00040000000ULL +#define OP_ALU_OP 0x00f80000000ULL +#define OP_ALU_DST_AB 0x01000000000ULL +#define OP_ALU_BASE 0x0a000000000ULL +#define OP_ALU_WR_AB 0x20000000000ULL enum alu_op { ALU_OP_NONE = 0x00, @@ -171,26 +171,26 @@ enum alu_dst_ab { ALU_DST_B = 1, }; -#define OP_LDF_BASE 0x0c000000000ULL -#define OP_LDF_A_SRC 0x000000000ffULL -#define OP_LDF_SC 0x00000000300ULL -#define OP_LDF_B_SRC 0x0000003fc00ULL -#define OP_LDF_I8 0x00000040000ULL -#define OP_LDF_SW 0x00000080000ULL -#define OP_LDF_ZF 0x00000100000ULL -#define OP_LDF_BMASK 0x0000f000000ULL -#define OP_LDF_SHF 0x001f0000000ULL -#define OP_LDF_WR_AB 0x20000000000ULL - -#define OP_CMD_A_SRC 0x000000000ffULL -#define OP_CMD_CTX 0x00000000300ULL -#define OP_CMD_B_SRC 0x0000003fc00ULL -#define OP_CMD_TOKEN 0x000000c0000ULL -#define OP_CMD_XFER 0x00001f00000ULL -#define OP_CMD_CNT 0x0000e000000ULL -#define OP_CMD_SIG 0x000f0000000ULL -#define OP_CMD_TGT_CMD 0x07f00000000ULL -#define OP_CMD_MODE 0x1c0000000000ULL +#define OP_LDF_BASE 0x0c000000000ULL +#define OP_LDF_A_SRC 0x000000000ffULL +#define OP_LDF_SC 0x00000000300ULL +#define OP_LDF_B_SRC 0x0000003fc00ULL +#define OP_LDF_I8 0x00000040000ULL +#define OP_LDF_SW 0x00000080000ULL +#define OP_LDF_ZF 0x00000100000ULL +#define OP_LDF_BMASK 0x0000f000000ULL +#define OP_LDF_SHF 0x001f0000000ULL +#define OP_LDF_WR_AB 0x20000000000ULL + +#define OP_CMD_A_SRC 0x000000000ffULL +#define OP_CMD_CTX 0x00000000300ULL +#define OP_CMD_B_SRC 0x0000003fc00ULL +#define OP_CMD_TOKEN 0x000000c0000ULL +#define OP_CMD_XFER 0x00001f00000ULL +#define OP_CMD_CNT 0x0000e000000ULL +#define OP_CMD_SIG 0x000f0000000ULL +#define OP_CMD_TGT_CMD 0x07f00000000ULL +#define OP_CMD_MODE 0x1c0000000000ULL struct cmd_tgt_act { u8 token; @@ -218,11 +218,11 @@ enum cmd_ctx_swap { CMD_CTX_NO_SWAP = 3, }; -#define OP_LCSR_BASE 0x0fc00000000ULL -#define OP_LCSR_A_SRC 0x000000003ffULL -#define OP_LCSR_B_SRC 0x000000ffc00ULL -#define OP_LCSR_WRITE 0x00000200000ULL -#define OP_LCSR_ADDR 0x001ffc00000ULL +#define OP_LCSR_BASE 0x0fc00000000ULL +#define OP_LCSR_A_SRC 0x000000003ffULL +#define OP_LCSR_B_SRC 0x000000ffc00ULL +#define OP_LCSR_WRITE 0x00000200000ULL +#define OP_LCSR_ADDR 0x001ffc00000ULL enum lcsr_wr_src { LCSR_WR_AREG, @@ -230,8 +230,8 @@ enum lcsr_wr_src { LCSR_WR_IMM, }; -#define OP_CARB_BASE 0x0e000000000ULL -#define OP_CARB_OR 0x00000010000ULL +#define OP_CARB_BASE 0x0e000000000ULL +#define OP_CARB_OR 0x00000010000ULL /* Software register representation, independent of operand type */ #define NN_REG_TYPE GENMASK(31, 24) -- cgit v1.2.3 From 9f15d0f438372986b0f9de36f805fe2dd83f9c27 Mon Sep 17 00:00:00 2001 
From: Jakub Kicinski Date: Sun, 8 Oct 2017 21:04:11 -0700 Subject: nfp: bpf: encode LMEM accesses NFP LMEM is a large, indirectly accessed register file. There are two basic indirect access registers. Each access operation may either use offset (up to 8 or 16 words) or perform post decrement/increment. Add encodings of LMEM indexes as instruction operands. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_asm.c | 44 ++++++++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/nfp_asm.h | 41 ++++++++++++++++++++++++++ 2 files changed, 85 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c index 4c9201bf9331..4bcab43da16d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c @@ -48,6 +48,7 @@ const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = { static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst) { + bool lm_id, lm_dec = false; u16 val = swreg_value(reg); switch (swreg_type(reg)) { @@ -59,6 +60,33 @@ static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst) return UR_REG_NN | val; case NN_REG_XFER: return UR_REG_XFR | val; + case NN_REG_LMEM: + lm_id = swreg_lm_idx(reg); + + switch (swreg_lm_mode(reg)) { + case NN_LM_MOD_NONE: + if (val & ~UR_REG_LM_IDX_MAX) { + pr_err("LM offset too large\n"); + return 0; + } + return UR_REG_LM | FIELD_PREP(UR_REG_LM_IDX, lm_id) | + val; + case NN_LM_MOD_DEC: + lm_dec = true; + /* fall through */ + case NN_LM_MOD_INC: + if (val) { + pr_err("LM offset in inc/dev mode\n"); + return 0; + } + return UR_REG_LM | UR_REG_LM_POST_MOD | + FIELD_PREP(UR_REG_LM_IDX, lm_id) | + FIELD_PREP(UR_REG_LM_POST_MOD_DEC, lm_dec); + default: + pr_err("bad LM mode for unrestricted operands %d\n", + swreg_lm_mode(reg)); + return 0; + } case NN_REG_IMM: if (val & ~0xff) { pr_err("immediate too large\n"); @@ -108,6 +136,7 @@ int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg, static u16 nfp_swreg_to_rereg(swreg reg, bool is_dst, bool has_imm8, bool *i8) { u16 val = swreg_value(reg); + bool lm_id; switch (swreg_type(reg)) { case NN_REG_GPR_A: @@ -116,6 +145,21 @@ static u16 nfp_swreg_to_rereg(swreg reg, bool is_dst, bool has_imm8, bool *i8) return val; case NN_REG_XFER: return RE_REG_XFR | val; + case NN_REG_LMEM: + lm_id = swreg_lm_idx(reg); + + if (swreg_lm_mode(reg) != NN_LM_MOD_NONE) { + pr_err("bad LM mode for restricted operands %d\n", + swreg_lm_mode(reg)); + return 0; + } + + if (val & ~RE_REG_LM_IDX_MAX) { + pr_err("LM offset too large\n"); + return 0; + } + + return RE_REG_LM | FIELD_PREP(RE_REG_LM_IDX, lm_id) | val; case NN_REG_IMM: if (val & ~(0x7f | has_imm8 << 7)) { pr_err("immediate too large\n"); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index 63cfd07da34e..d722f6878bd8 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -44,9 +44,17 @@ #define RE_REG_IMM_encode(x) \ (RE_REG_IMM | ((x) & 0x1f) | (((x) & 0x60) << 1)) #define RE_REG_IMM_MAX 0x07fULL +#define RE_REG_LM 0x050 +#define RE_REG_LM_IDX 0x008 +#define RE_REG_LM_IDX_MAX 0x7 #define RE_REG_XFR 0x080 #define UR_REG_XFR 0x180 +#define UR_REG_LM 0x200 +#define UR_REG_LM_IDX 0x020 +#define UR_REG_LM_POST_MOD 0x010 +#define UR_REG_LM_POST_MOD_DEC 0x001 +#define UR_REG_LM_IDX_MAX 0xf #define UR_REG_NN 0x280 #define UR_REG_NO_DST 0x300 #define 
UR_REG_IMM UR_REG_NO_DST @@ -235,6 +243,8 @@ enum lcsr_wr_src { /* Software register representation, independent of operand type */ #define NN_REG_TYPE GENMASK(31, 24) +#define NN_REG_LM_IDX BIT(22) +#define NN_REG_LM_MOD GENMASK(21, 20) #define NN_REG_VAL GENMASK(7, 0) enum nfp_bpf_reg_type { @@ -245,6 +255,13 @@ enum nfp_bpf_reg_type { NN_REG_XFER = BIT(3), NN_REG_IMM = BIT(4), NN_REG_NONE = BIT(5), + NN_REG_LMEM = BIT(6), +}; + +enum nfp_bpf_lm_mode { + NN_LM_MOD_NONE = 0, + NN_LM_MOD_INC, + NN_LM_MOD_DEC, }; #define reg_both(x) __enc_swreg((x), NN_REG_GPR_BOTH) @@ -254,6 +271,10 @@ enum nfp_bpf_reg_type { #define reg_xfer(x) __enc_swreg((x), NN_REG_XFER) #define reg_imm(x) __enc_swreg((x), NN_REG_IMM) #define reg_none() __enc_swreg(0, NN_REG_NONE) +#define reg_lm(x, off) __enc_swreg_lm((x), NN_LM_MOD_NONE, (off)) +#define reg_lm_inc(x) __enc_swreg_lm((x), NN_LM_MOD_INC, 0) +#define reg_lm_dec(x) __enc_swreg_lm((x), NN_LM_MOD_DEC, 0) +#define __reg_lm(x, mod, off) __enc_swreg_lm((x), (mod), (off)) typedef __u32 __bitwise swreg; @@ -262,6 +283,16 @@ static inline swreg __enc_swreg(u16 id, u8 type) return (__force swreg)(id | FIELD_PREP(NN_REG_TYPE, type)); } +static inline swreg __enc_swreg_lm(u8 id, enum nfp_bpf_lm_mode mode, u8 off) +{ + WARN_ON(id > 1 || (off && mode != NN_LM_MOD_NONE)); + + return (__force swreg)(FIELD_PREP(NN_REG_TYPE, NN_REG_LMEM) | + FIELD_PREP(NN_REG_LM_IDX, id) | + FIELD_PREP(NN_REG_LM_MOD, mode) | + off); +} + static inline u32 swreg_raw(swreg reg) { return (__force u32)reg; @@ -277,6 +308,16 @@ static inline u16 swreg_value(swreg reg) return FIELD_GET(NN_REG_VAL, swreg_raw(reg)); } +static inline bool swreg_lm_idx(swreg reg) +{ + return FIELD_GET(NN_REG_LM_IDX, swreg_raw(reg)); +} + +static inline enum nfp_bpf_lm_mode swreg_lm_mode(swreg reg) +{ + return FIELD_GET(NN_REG_LM_MOD, swreg_raw(reg)); +} + struct nfp_insn_ur_regs { enum alu_dst_ab dst_ab; u16 dst; -- cgit v1.2.3 From 995e101ffa71eff6ae5f5d5bf1ca8ec757b4ed21 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sun, 8 Oct 2017 21:04:12 -0700 Subject: nfp: bpf: encode extended LM pointer operands Most instructions have special fields which allow switching between base and extended Local Memory pointers. Introduce those to register encoding, we will use the extra LM pointers to access high addresses of the stack. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 56 +++++++++++++++++++--------- drivers/net/ethernet/netronome/nfp/nfp_asm.c | 6 +++ drivers/net/ethernet/netronome/nfp/nfp_asm.h | 28 ++++++++++++-- 3 files changed, 70 insertions(+), 20 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 4fa220f710d2..d7dc19feba8d 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -153,6 +153,11 @@ emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, nfp_prog->error = -EFAULT; return; } + if (reg.dst_lmextn || reg.src_lmextn) { + pr_err("cmd can't use LMextn\n"); + nfp_prog->error = -EFAULT; + return; + } __emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync); } @@ -198,7 +203,7 @@ emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer) static void __emit_br_byte(struct nfp_prog *nfp_prog, u8 areg, u8 breg, bool imm8, - u8 byte, bool equal, u16 addr, u8 defer) + u8 byte, bool equal, u16 addr, u8 defer, bool src_lmextn) { u16 addr_lo, addr_hi; u64 insn; @@ -214,32 +219,34 @@ __emit_br_byte(struct nfp_prog *nfp_prog, u8 areg, u8 breg, bool imm8, FIELD_PREP(OP_BB_EQ, equal) | FIELD_PREP(OP_BB_DEFBR, defer) | FIELD_PREP(OP_BB_ADDR_LO, addr_lo) | - FIELD_PREP(OP_BB_ADDR_HI, addr_hi); + FIELD_PREP(OP_BB_ADDR_HI, addr_hi) | + FIELD_PREP(OP_BB_SRC_LMEXTN, src_lmextn); nfp_prog_push(nfp_prog, insn); } static void emit_br_byte_neq(struct nfp_prog *nfp_prog, - swreg dst, u8 imm, u8 byte, u16 addr, u8 defer) + swreg src, u8 imm, u8 byte, u16 addr, u8 defer) { struct nfp_insn_re_regs reg; int err; - err = swreg_to_restricted(reg_none(), dst, reg_imm(imm), ®, true); + err = swreg_to_restricted(reg_none(), src, reg_imm(imm), ®, true); if (err) { nfp_prog->error = err; return; } __emit_br_byte(nfp_prog, reg.areg, reg.breg, reg.i8, byte, false, addr, - defer); + defer, reg.src_lmextn); } static void __emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi, enum immed_width width, bool invert, - enum immed_shift shift, bool wr_both) + enum immed_shift shift, bool wr_both, + bool dst_lmextn, bool src_lmextn) { u64 insn; @@ -250,7 +257,9 @@ __emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi, FIELD_PREP(OP_IMMED_WIDTH, width) | FIELD_PREP(OP_IMMED_INV, invert) | FIELD_PREP(OP_IMMED_SHIFT, shift) | - FIELD_PREP(OP_IMMED_WR_AB, wr_both); + FIELD_PREP(OP_IMMED_WR_AB, wr_both) | + FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) | + FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn); nfp_prog_push(nfp_prog, insn); } @@ -274,13 +283,15 @@ emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm, } __emit_immed(nfp_prog, reg.areg, reg.breg, imm >> 8, width, - invert, shift, reg.wr_both); + invert, shift, reg.wr_both, + reg.dst_lmextn, reg.src_lmextn); } static void __emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab, enum shf_sc sc, u8 shift, - u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both) + u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both, + bool dst_lmextn, bool src_lmextn) { u64 insn; @@ -302,7 +313,9 @@ __emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab, FIELD_PREP(OP_SHF_SHIFT, shift) | FIELD_PREP(OP_SHF_OP, op) | FIELD_PREP(OP_SHF_DST_AB, dst_ab) | - FIELD_PREP(OP_SHF_WR_AB, wr_both); + FIELD_PREP(OP_SHF_WR_AB, wr_both) | + FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) | + FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn); 
nfp_prog_push(nfp_prog, insn); } @@ -321,12 +334,14 @@ emit_shf(struct nfp_prog *nfp_prog, swreg dst, } __emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift, - reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both); + reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both, + reg.dst_lmextn, reg.src_lmextn); } static void __emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab, - u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both) + u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both, + bool dst_lmextn, bool src_lmextn) { u64 insn; @@ -337,7 +352,9 @@ __emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab, FIELD_PREP(OP_ALU_SW, swap) | FIELD_PREP(OP_ALU_OP, op) | FIELD_PREP(OP_ALU_DST_AB, dst_ab) | - FIELD_PREP(OP_ALU_WR_AB, wr_both); + FIELD_PREP(OP_ALU_WR_AB, wr_both) | + FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) | + FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn); nfp_prog_push(nfp_prog, insn); } @@ -356,13 +373,15 @@ emit_alu(struct nfp_prog *nfp_prog, swreg dst, } __emit_alu(nfp_prog, reg.dst, reg.dst_ab, - reg.areg, op, reg.breg, reg.swap, reg.wr_both); + reg.areg, op, reg.breg, reg.swap, reg.wr_both, + reg.dst_lmextn, reg.src_lmextn); } static void __emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc, u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8, - bool zero, bool swap, bool wr_both) + bool zero, bool swap, bool wr_both, + bool dst_lmextn, bool src_lmextn) { u64 insn; @@ -375,7 +394,9 @@ __emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc, FIELD_PREP(OP_LDF_ZF, zero) | FIELD_PREP(OP_LDF_BMASK, bmask) | FIELD_PREP(OP_LDF_SHF, shift) | - FIELD_PREP(OP_LDF_WR_AB, wr_both); + FIELD_PREP(OP_LDF_WR_AB, wr_both) | + FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) | + FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn); nfp_prog_push(nfp_prog, insn); } @@ -394,7 +415,8 @@ emit_ld_field_any(struct nfp_prog *nfp_prog, enum shf_sc sc, u8 shift, } __emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift, - reg.i8, zero, reg.swap, reg.wr_both); + reg.i8, zero, reg.swap, reg.wr_both, + reg.dst_lmextn, reg.src_lmextn); } static void diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c index 4bcab43da16d..1decc638ea6f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c @@ -130,6 +130,9 @@ int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg, reg->breg = nfp_swreg_to_unreg(rreg, false); } + reg->dst_lmextn = swreg_lmextn(dst); + reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg); + return 0; } @@ -207,5 +210,8 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg, reg->breg = nfp_swreg_to_rereg(rreg, false, has_imm8, ®->i8); } + reg->dst_lmextn = swreg_lmextn(dst); + reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg); + return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index d722f6878bd8..40a51a45afd7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -100,6 +100,7 @@ enum br_ctx_signal_state { #define OP_BB_DEFBR 0x00000300000ULL #define OP_BB_ADDR_LO 0x007ffc00000ULL #define OP_BB_ADDR_HI 0x10000000000ULL +#define OP_BB_SRC_LMEXTN 0x40000000000ULL #define OP_BALU_BASE 0x0e800000000ULL #define OP_BA_A_SRC 0x000000003ffULL @@ -115,6 +116,8 @@ enum br_ctx_signal_state { #define OP_IMMED_SHIFT 0x00600000000ULL #define OP_IMMED_BASE 0x0f000000000ULL #define OP_IMMED_WR_AB 0x20000000000ULL +#define 
OP_IMMED_SRC_LMEXTN 0x40000000000ULL +#define OP_IMMED_DST_LMEXTN 0x80000000000ULL enum immed_width { IMMED_WIDTH_ALL = 0, @@ -139,6 +142,8 @@ enum immed_shift { #define OP_SHF_OP 0x00e00000000ULL #define OP_SHF_DST_AB 0x01000000000ULL #define OP_SHF_WR_AB 0x20000000000ULL +#define OP_SHF_SRC_LMEXTN 0x40000000000ULL +#define OP_SHF_DST_LMEXTN 0x80000000000ULL enum shf_op { SHF_OP_NONE = 0, @@ -161,6 +166,8 @@ enum shf_sc { #define OP_ALU_DST_AB 0x01000000000ULL #define OP_ALU_BASE 0x0a000000000ULL #define OP_ALU_WR_AB 0x20000000000ULL +#define OP_ALU_SRC_LMEXTN 0x40000000000ULL +#define OP_ALU_DST_LMEXTN 0x80000000000ULL enum alu_op { ALU_OP_NONE = 0x00, @@ -189,6 +196,8 @@ enum alu_dst_ab { #define OP_LDF_BMASK 0x0000f000000ULL #define OP_LDF_SHF 0x001f0000000ULL #define OP_LDF_WR_AB 0x20000000000ULL +#define OP_LDF_SRC_LMEXTN 0x40000000000ULL +#define OP_LDF_DST_LMEXTN 0x80000000000ULL #define OP_CMD_A_SRC 0x000000000ffULL #define OP_CMD_CTX 0x00000000300ULL @@ -231,6 +240,8 @@ enum cmd_ctx_swap { #define OP_LCSR_B_SRC 0x000000ffc00ULL #define OP_LCSR_WRITE 0x00000200000ULL #define OP_LCSR_ADDR 0x001ffc00000ULL +#define OP_LCSR_SRC_LMEXTN 0x40000000000ULL +#define OP_LCSR_DST_LMEXTN 0x80000000000ULL enum lcsr_wr_src { LCSR_WR_AREG, @@ -243,7 +254,9 @@ enum lcsr_wr_src { /* Software register representation, independent of operand type */ #define NN_REG_TYPE GENMASK(31, 24) -#define NN_REG_LM_IDX BIT(22) +#define NN_REG_LM_IDX GENMASK(23, 22) +#define NN_REG_LM_IDX_HI BIT(23) +#define NN_REG_LM_IDX_LO BIT(22) #define NN_REG_LM_MOD GENMASK(21, 20) #define NN_REG_VAL GENMASK(7, 0) @@ -285,7 +298,7 @@ static inline swreg __enc_swreg(u16 id, u8 type) static inline swreg __enc_swreg_lm(u8 id, enum nfp_bpf_lm_mode mode, u8 off) { - WARN_ON(id > 1 || (off && mode != NN_LM_MOD_NONE)); + WARN_ON(id > 3 || (off && mode != NN_LM_MOD_NONE)); return (__force swreg)(FIELD_PREP(NN_REG_TYPE, NN_REG_LMEM) | FIELD_PREP(NN_REG_LM_IDX, id) | @@ -310,7 +323,12 @@ static inline u16 swreg_value(swreg reg) static inline bool swreg_lm_idx(swreg reg) { - return FIELD_GET(NN_REG_LM_IDX, swreg_raw(reg)); + return FIELD_GET(NN_REG_LM_IDX_LO, swreg_raw(reg)); +} + +static inline bool swreg_lmextn(swreg reg) +{ + return FIELD_GET(NN_REG_LM_IDX_HI, swreg_raw(reg)); } static inline enum nfp_bpf_lm_mode swreg_lm_mode(swreg reg) @@ -324,6 +342,8 @@ struct nfp_insn_ur_regs { u16 areg, breg; bool swap; bool wr_both; + bool dst_lmextn; + bool src_lmextn; }; struct nfp_insn_re_regs { @@ -333,6 +353,8 @@ struct nfp_insn_re_regs { bool swap; bool wr_both; bool i8; + bool dst_lmextn; + bool src_lmextn; }; int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg, -- cgit v1.2.3 From 18e53b6cb9ac157f4b2c7db698d4adc064df2fa0 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sun, 8 Oct 2017 21:04:13 -0700 Subject: nfp: bpf: move to datapath ABI version 2 Datapath ABI version 2 stores the packet information in LMEM instead of NNRs. We also have strict restrictions on which GPRs we can use. Only GPRs 0-23 are reserved for BPF. Adjust the static register locations and "ABI" registers. Note that packet length is packed with other info so we have to extract it into one of the scratch registers, OTOH since LMEM can be used in restricted operands we don't have to extract packet pointer. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 19 +++++++++--------- drivers/net/ethernet/netronome/nfp/bpf/main.h | 24 ++++++++++++++--------- drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 2 +- 3 files changed, 26 insertions(+), 19 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index d7dc19feba8d..9b6c98ccebfe 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -526,22 +526,22 @@ construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, emit_alu(nfp_prog, imm_a(nfp_prog), imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size)); emit_alu(nfp_prog, reg_none(), - NFP_BPF_ABI_LEN, ALU_OP_SUB, imm_a(nfp_prog)); + plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog)); wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT); /* Load data */ emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0, - pkt_reg(nfp_prog), imm_b(nfp_prog), sz - 1, true); + pptr_reg(nfp_prog), imm_b(nfp_prog), sz - 1, true); } else { /* Check packet length */ tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog)); emit_alu(nfp_prog, reg_none(), - NFP_BPF_ABI_LEN, ALU_OP_SUB, tmp_reg); + plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg); wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT); /* Load data */ tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog)); emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0, - pkt_reg(nfp_prog), tmp_reg, sz - 1, true); + pptr_reg(nfp_prog), tmp_reg, sz - 1, true); } i = 0; @@ -1024,7 +1024,7 @@ static int mem_ldx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { if (meta->insn.off == offsetof(struct sk_buff, len)) emit_alu(nfp_prog, reg_both(meta->insn.dst_reg * 2), - reg_none(), ALU_OP_NONE, NFP_BPF_ABI_LEN); + reg_none(), ALU_OP_NONE, plen_reg(nfp_prog)); else return -EOPNOTSUPP; @@ -1039,12 +1039,12 @@ static int mem_ldx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) meta->insn.off != offsetof(struct xdp_md, data_end)) return -EOPNOTSUPP; - emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, NFP_BPF_ABI_PKT); + emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, pptr_reg(nfp_prog)); if (meta->insn.off == offsetof(struct xdp_md, data)) return 0; - emit_alu(nfp_prog, dst, dst, ALU_OP_ADD, NFP_BPF_ABI_LEN); + emit_alu(nfp_prog, dst, dst, ALU_OP_ADD, plen_reg(nfp_prog)); return 0; } @@ -1403,8 +1403,9 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog) static void nfp_intro(struct nfp_prog *nfp_prog) { - emit_alu(nfp_prog, pkt_reg(nfp_prog), - reg_none(), ALU_OP_NONE, NFP_BPF_ABI_PKT); + wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0)); + emit_alu(nfp_prog, plen_reg(nfp_prog), + plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog)); } static void nfp_outro_tc_legacy(struct nfp_prog *nfp_prog) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 7d959757a51a..b7a112acbdb7 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -54,9 +54,13 @@ enum br_special { }; enum static_regs { - STATIC_REG_PKT = 1, -#define REG_PKT_BANK ALU_DST_A - STATIC_REG_IMM = 2, /* Bank AB */ + STATIC_REG_IMM = 21, /* Bank AB */ + STATIC_REG_PKT_LEN = 22, /* Bank B */ +}; + +enum pkt_vec { + PKT_VEC_PKT_LEN = 0, + PKT_VEC_PKT_PTR = 2, }; enum nfp_bpf_action_type { @@ -66,15 +70,17 @@ enum nfp_bpf_action_type { NN_ACT_XDP, }; -#define pkt_reg(np) reg_a((np)->regs_per_thread - STATIC_REG_PKT) -#define 
imm_a(np) reg_a((np)->regs_per_thread - STATIC_REG_IMM) -#define imm_b(np) reg_b((np)->regs_per_thread - STATIC_REG_IMM) -#define imm_both(np) reg_both((np)->regs_per_thread - STATIC_REG_IMM) +#define pv_len(np) reg_lm(1, PKT_VEC_PKT_LEN) +#define pv_ctm_ptr(np) reg_lm(1, PKT_VEC_PKT_PTR) + +#define plen_reg(np) reg_b(STATIC_REG_PKT_LEN) +#define pptr_reg(np) pv_ctm_ptr(np) +#define imm_a(np) reg_a(STATIC_REG_IMM) +#define imm_b(np) reg_b(STATIC_REG_IMM) +#define imm_both(np) reg_both(STATIC_REG_IMM) #define NFP_BPF_ABI_FLAGS reg_imm(0) #define NFP_BPF_ABI_FLAG_MARK 1 -#define NFP_BPF_ABI_PKT reg_nnr(2) -#define NFP_BPF_ABI_LEN reg_nnr(3) struct nfp_prog; struct nfp_insn_meta; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index b0a452ba9039..782d452e0fc2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -255,7 +255,7 @@ * @NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code */ #define NFP_NET_CFG_BPF_ABI 0x0080 -#define NFP_NET_BPF_ABI 1 +#define NFP_NET_BPF_ABI 2 #define NFP_NET_CFG_BPF_CAP 0x0081 #define NFP_NET_BPF_CAP_RELO (1 << 0) /* seamless reload */ #define NFP_NET_CFG_BPF_MAX_LEN 0x0082 -- cgit v1.2.3 From fd068ddc888355dccd90ad610104e4addf23b7a3 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sun, 8 Oct 2017 21:04:14 -0700 Subject: nfp: bpf: calculate code store ECC In the initial PoC firmware I simply disabled ECC on the instruction store. Do the ECC calculation for generated instructions in the driver. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 20 +++++++++++++++ drivers/net/ethernet/netronome/nfp/nfp_asm.c | 37 ++++++++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/nfp_asm.h | 3 +++ 3 files changed, 60 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 9b6c98ccebfe..f4aedc89bfc8 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1715,6 +1715,23 @@ static int nfp_bpf_optimize(struct nfp_prog *nfp_prog) return 0; } +static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog) +{ + int i; + + for (i = 0; i < nfp_prog->prog_len; i++) { + int err; + + err = nfp_ustore_check_valid_no_ecc(nfp_prog->prog[i]); + if (err) + return err; + + nfp_prog->prog[i] = nfp_ustore_calc_ecc_insn(nfp_prog->prog[i]); + } + + return 0; +} + /** * nfp_bpf_jit() - translate BPF code into NFP assembly * @filter: kernel BPF filter struct @@ -1766,8 +1783,11 @@ nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem, pr_err("Translation failed with error %d (translated: %u)\n", ret, nfp_prog->n_translated); ret = -EINVAL; + goto out; } + ret = nfp_bpf_ustore_calc(nfp_prog); + res->n_instr = nfp_prog->prog_len; res->dense_mode = false; out: diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c index 1decc638ea6f..de76e7444fc2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c @@ -215,3 +215,40 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg, return 0; } + +#define NFP_USTORE_ECC_POLY_WORDS 7 +#define NFP_USTORE_OP_BITS 45 + +static const u64 nfp_ustore_ecc_polynomials[NFP_USTORE_ECC_POLY_WORDS] = { + 0x0ff800007fffULL, + 0x11f801ff801fULL, + 0x1e387e0781e1ULL, + 
0x17cb8e388e22ULL, + 0x1af5b2c93244ULL, + 0x1f56d5525488ULL, + 0x0daf69a46910ULL, +}; + +static bool parity(u64 value) +{ + return hweight64(value) & 1; +} + +int nfp_ustore_check_valid_no_ecc(u64 insn) +{ + if (insn & ~GENMASK_ULL(NFP_USTORE_OP_BITS, 0)) + return -EINVAL; + + return 0; +} + +u64 nfp_ustore_calc_ecc_insn(u64 insn) +{ + u8 ecc = 0; + int i; + + for (i = 0; i < NFP_USTORE_ECC_POLY_WORDS; i++) + ecc |= parity(nfp_ustore_ecc_polynomials[i] & insn) << i; + + return insn | (u64)ecc << NFP_USTORE_OP_BITS; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index 40a51a45afd7..d95087e5fb73 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -362,4 +362,7 @@ int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg, int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg, struct nfp_insn_re_regs *reg, bool has_imm8); +int nfp_ustore_check_valid_no_ecc(u64 insn); +u64 nfp_ustore_calc_ecc_insn(u64 insn); + #endif -- cgit v1.2.3 From 1c03e03f9b5278701d4a0e3444b2de3b9ddc244b Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sun, 8 Oct 2017 21:04:15 -0700 Subject: nfp: bpf: pad code with valid nops We need to append up to 8 nops after last instruction to make sure the CPU will not fetch garbage instructions with invalid ECC if the code store was not initialized. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 12 +++++++++++- drivers/net/ethernet/netronome/nfp/nfp_asm.h | 2 ++ 2 files changed, 13 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index f4aedc89bfc8..e0600d037773 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -426,6 +426,11 @@ emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src, emit_ld_field_any(nfp_prog, sc, shift, dst, bmask, src, false); } +static void emit_nop(struct nfp_prog *nfp_prog) +{ + __emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0); +} + /* --- Wrappers --- */ static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift) { @@ -1550,7 +1555,7 @@ static void nfp_outro(struct nfp_prog *nfp_prog) static int nfp_translate(struct nfp_prog *nfp_prog) { struct nfp_insn_meta *meta; - int err; + int i, err; nfp_intro(nfp_prog); if (nfp_prog->error) @@ -1582,6 +1587,11 @@ static int nfp_translate(struct nfp_prog *nfp_prog) if (nfp_prog->error) return nfp_prog->error; + for (i = 0; i < NFP_USTORE_PREFETCH_WINDOW; i++) + emit_nop(nfp_prog); + if (nfp_prog->error) + return nfp_prog->error; + return nfp_fixup_branches(nfp_prog); } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index d95087e5fb73..c4c18dd5630a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -362,6 +362,8 @@ int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg, int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg, struct nfp_insn_re_regs *reg, bool has_imm8); +#define NFP_USTORE_PREFETCH_WINDOW 8 + int nfp_ustore_check_valid_no_ecc(u64 insn); u64 nfp_ustore_calc_ecc_insn(u64 insn); -- cgit v1.2.3 From 2e85d3884f25a0419a941676d1a7c25779884be2 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sun, 8 Oct 2017 21:04:16 -0700 Subject: nfp: bpf: byte 
swap the instructions Device expects the instructions in little endian. Make sure we byte swap on big endian hosts. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index e0600d037773..f68052367db7 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1725,7 +1725,7 @@ static int nfp_bpf_optimize(struct nfp_prog *nfp_prog) return 0; } -static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog) +static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore) { int i; @@ -1737,6 +1737,8 @@ static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog) return err; nfp_prog->prog[i] = nfp_ustore_calc_ecc_insn(nfp_prog->prog[i]); + + ustore[i] = cpu_to_le64(nfp_prog->prog[i]); } return 0; @@ -1796,7 +1798,7 @@ nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem, goto out; } - ret = nfp_bpf_ustore_calc(nfp_prog); + ret = nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)prog_mem); res->n_instr = nfp_prog->prog_len; res->dense_mode = false; -- cgit v1.2.3 From 2de1be1db25d3285f514920230790be20db92887 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sun, 8 Oct 2017 21:04:17 -0700 Subject: nfp: bpf: pass dst register to ld_field instruction ld_field instruction is a bit special because the encoding uses two source registers and one of them becomes the output. We do need to pass the dst register to our encoding helpers though, otherwise the "write both banks" flag will not be observed. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index f68052367db7..13148f30fc4c 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -408,7 +408,8 @@ emit_ld_field_any(struct nfp_prog *nfp_prog, enum shf_sc sc, u8 shift, struct nfp_insn_re_regs reg; int err; - err = swreg_to_restricted(reg_none(), dst, src, ®, true); + /* Note: ld_field is special as it uses one of the src regs as dst */ + err = swreg_to_restricted(dst, dst, src, ®, true); if (err) { nfp_prog->error = err; return; -- cgit v1.2.3 From 86e23494222f358138e3d2c337f57577b0893797 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Mon, 4 Sep 2017 20:40:22 +0200 Subject: ixgbe: add counter for times Rx pages gets allocated, not recycled The ixgbe driver have page recycle scheme based around the RX-ring queue, where a RX page is shared between two packets. Based on the refcnt, the driver can determine if the RX-page is currently only used by a single packet, if so it can then directly refill/recycle the RX-slot by with the opposite "side" of the page. While this is a clever trick, it is hard to determine when this recycling is successful and when it fails. Adding a counter, which is available via ethtool --statistics as 'alloc_rx_page'. Which counts the number of times the recycle fails and the real page allocator is invoked. When interpreting the stats, do remember that every alloc will serve two packets. 
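As a rough standalone illustration (not driver code; the sample numbers below are invented), the recycle hit rate can be estimated from 'alloc_rx_page' and the packet count, using the two-packets-per-allocation rule above:

#include <stdio.h>

/* Standalone sketch, not driver code: estimate the page-recycle hit
 * rate from the ethtool counters. Per the description above, every
 * page allocation serves two packets, so roughly rx_packets/2 page
 * "uses" occur, of which alloc_rx_page hit the real page allocator.
 */
int main(void)
{
	unsigned long long rx_packets = 1000000ULL; /* sampled via ethtool -S */
	unsigned long long alloc_rx_page = 5000ULL; /* sampled via ethtool -S */
	double pages_used = rx_packets / 2.0;       /* two packets per page */
	double hit_rate = 1.0 - (double)alloc_rx_page / pages_used;

	printf("page recycle hit rate: %.2f%%\n", 100.0 * hit_rate);
	return 0;
}
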
The counter is collected per rx_ring, but is summed and ethtool exported as 'alloc_rx_page'. It would be relevant to know what rx_ring that cannot keep up, but that can be exported later if someone experience a need for this. Signed-off-by: Jesper Dangaard Brouer Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2 ++ drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 1 + drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4 ++++ 3 files changed, 7 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index dd5578756ae0..008d0085e01f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -275,6 +275,7 @@ struct ixgbe_rx_queue_stats { u64 rsc_count; u64 rsc_flush; u64 non_eop_descs; + u64 alloc_rx_page; u64 alloc_rx_page_failed; u64 alloc_rx_buff_failed; u64 csum_err; @@ -655,6 +656,7 @@ struct ixgbe_adapter { u64 rsc_total_count; u64 rsc_total_flush; u64 non_eop_descs; + u32 alloc_rx_page; u32 alloc_rx_page_failed; u32 alloc_rx_buff_failed; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 6d89f28cae06..de5704c7dd1b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -104,6 +104,7 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = { {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)}, {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)}, {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)}, + {"alloc_rx_page", IXGBE_STAT(alloc_rx_page)}, {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)}, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index d83cc9d34de3..211074934d5b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1620,6 +1620,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, bi->page = page; bi->page_offset = ixgbe_rx_offset(rx_ring); bi->pagecnt_bias = 1; + rx_ring->rx_stats.alloc_rx_page++; return true; } @@ -6794,6 +6795,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0; u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0; + u64 alloc_rx_page = 0; u64 bytes = 0, packets = 0, hw_csum_rx_error = 0; if (test_bit(__IXGBE_DOWN, &adapter->state) || @@ -6814,6 +6816,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) for (i = 0; i < adapter->num_rx_queues; i++) { struct ixgbe_ring *rx_ring = adapter->rx_ring[i]; non_eop_descs += rx_ring->rx_stats.non_eop_descs; + alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; hw_csum_rx_error += rx_ring->rx_stats.csum_err; @@ -6821,6 +6824,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) packets += rx_ring->stats.packets; } adapter->non_eop_descs = non_eop_descs; + adapter->alloc_rx_page = alloc_rx_page; adapter->alloc_rx_page_failed = alloc_rx_page_failed; adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; adapter->hw_csum_rx_error = hw_csum_rx_error; -- cgit v1.2.3 From 
dcfd6b839c998bc9838e2a47f44f37afbdf3099c Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Mon, 11 Sep 2017 14:21:31 -0700 Subject: ixgbe: fix use of uninitialized padding This patch is resolving Coverity hits where padding in a structure could be used uninitialized. - Initialize fwd_cmd.pad/2 before ixgbe_calculate_checksum() - Initialize buffer.pad2/3 before ixgbe_hic_unlocked() Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 4 ++-- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 2c19070d2a0b..041940c4bb2b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -3800,10 +3800,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, fw_cmd.ver_build = build; fw_cmd.ver_sub = sub; fw_cmd.hdr.checksum = 0; - fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, - (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); fw_cmd.pad = 0; fw_cmd.pad2 = 0; + fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { ret_val = ixgbe_host_interface_command(hw, &fw_cmd, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 8cea53b62e1b..cb7da5f9c4da 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -900,6 +900,8 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, /* convert offset from words to bytes */ buffer.address = cpu_to_be32((offset + current_word) * 2); buffer.length = cpu_to_be16(words_to_read * 2); + buffer.pad2 = 0; + buffer.pad3 = 0; status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), IXGBE_HI_COMMAND_TIMEOUT); -- cgit v1.2.3 From c3aec05dfe2cb1bc1fdf3593da82bc987211338a Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Mon, 11 Sep 2017 14:21:36 -0700 Subject: ixgbe: fix the FWSM.PT check in ixgbe_mng_present() Bits other than FWSM.PT can be set in IXGBE_SWFW_MODE_MASK making the previous check invalid. Change the check for MNG present to be only based on FWSM.PT bit. Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 041940c4bb2b..4e5c92dea869 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -4100,8 +4100,8 @@ bool ixgbe_mng_present(struct ixgbe_hw *hw) return false; fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); - fwsm &= IXGBE_FWSM_MODE_MASK; - return fwsm == IXGBE_FWSM_FW_MODE_PT; + + return !!(fwsm & IXGBE_FWSM_FW_MODE_PT); } /** -- cgit v1.2.3 From b4ded8327fea82b53fcec39e0845011246d020f4 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 25 Sep 2017 14:55:36 -0700 Subject: ixgbe: Update adaptive ITR algorithm The following change is meant to update the adaptive ITR algorithm to better support the needs of the network. 
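As a quick standalone sanity check (not driver code; it omits the round-up and the per-link-speed scaling the driver applies), the piecewise-linear approximation in the diff further below tracks the exact formula from the code comment, (170 * (size + 24)) / (size + 640), until it deliberately plateaus at 126 usecs:

#include <stdio.h>

/* Standalone sketch comparing the exact ITR formula from the driver's
 * comment with its piecewise-linear approximation. Both values are
 * scaled by 256 in the driver; the >> 8 below undoes that.
 */
static unsigned int itr_exact(unsigned int size)
{
	return (170 * (size + 24)) / (size + 640);
}

static unsigned int itr_approx(unsigned int size)
{
	unsigned int v;

	if (size <= 60)
		v = 5120;              /* ~50K ints/sec floor */
	else if (size <= 316)
		v = size * 40 + 2720;  /* 50K to 16K ints/sec */
	else if (size <= 1084)
		v = size * 15 + 11452; /* 16K to 9.2K ints/sec */
	else if (size <= 1980)
		v = size * 5 + 22420;  /* 9.2K to 8K ints/sec */
	else
		v = 32256;             /* plateau at 8K ints/sec */
	return v >> 8;
}

int main(void)
{
	static const unsigned int sizes[] = { 60, 316, 1084, 1980, 9000 };
	unsigned int i;

	/* e.g. size 316: exact 60 usecs, approx 60 usecs; at 9000 the
	 * approximation caps at 126 usecs on purpose
	 */
	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %4u: exact %3u us, approx %3u us\n",
		       sizes[i], itr_exact(sizes[i]), itr_approx(sizes[i]));
	return 0;
}
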
Specifically with this change what I have done is make it so that our ITR algorithm will try to prevent either starving a socket buffer for memory in the case of Tx, or overrunning an Rx socket buffer on receive. In addition a side effect of the calculations used is that we should function better with new features such as XDP which can handle small packets at high rates without needing to lock us into NAPI polling mode. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 7 + drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 11 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 215 +++++++++++++++++++------- 3 files changed, 178 insertions(+), 55 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 008d0085e01f..468c3555a629 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -435,8 +435,15 @@ static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring) } #define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring)) +#define IXGBE_ITR_ADAPTIVE_MIN_INC 2 +#define IXGBE_ITR_ADAPTIVE_MIN_USECS 10 +#define IXGBE_ITR_ADAPTIVE_MAX_USECS 126 +#define IXGBE_ITR_ADAPTIVE_LATENCY 0x80 +#define IXGBE_ITR_ADAPTIVE_BULK 0x00 + struct ixgbe_ring_container { struct ixgbe_ring *ring; /* pointer to linked list of rings */ + unsigned long next_update; /* jiffies value of last update */ unsigned int total_bytes; /* total bytes processed this int */ unsigned int total_packets; /* total packets processed this int */ u16 work_limit; /* total work allowed per interrupt */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index f1bfae0c41d0..8e2a957aca18 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -806,6 +806,7 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring, ring->next = head->ring; head->ring = ring; head->count++; + head->next_update = jiffies + 1; } /** @@ -879,8 +880,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, /* initialize work limits */ q_vector->tx.work_limit = adapter->tx_work_limit; - /* initialize pointer to rings */ - ring = q_vector->ring; + /* Initialize setting for adaptive ITR */ + q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS | + IXGBE_ITR_ADAPTIVE_LATENCY; + q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS | + IXGBE_ITR_ADAPTIVE_LATENCY; /* intialize ITR */ if (txr_count && !rxr_count) { @@ -897,6 +901,9 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, q_vector->itr = adapter->rx_itr_setting; } + /* initialize pointer to rings */ + ring = q_vector->ring; + while (txr_count) { /* assign generic ring traits */ ring->dev = &adapter->pdev->dev; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 211074934d5b..5e2686d106db 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2540,50 +2540,174 @@ enum latency_range { static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, struct ixgbe_ring_container *ring_container) { - int bytes = ring_container->total_bytes; - int packets = ring_container->total_packets; - u32 timepassed_us; - u64 bytes_perint; - u8 itr_setting = ring_container->itr; + unsigned int itr = IXGBE_ITR_ADAPTIVE_MIN_USECS | + IXGBE_ITR_ADAPTIVE_LATENCY; 
+ unsigned int avg_wire_size, packets, bytes; + unsigned long next_update = jiffies; - if (packets == 0) + /* If we don't have any rings just leave ourselves set for maximum + * possible latency so we take ourselves out of the equation. + */ + if (!ring_container->ring) return; - /* simple throttlerate management - * 0-10MB/s lowest (100000 ints/s) - * 10-20MB/s low (20000 ints/s) - * 20-1249MB/s bulk (12000 ints/s) + /* If we didn't update within up to 1 - 2 jiffies we can assume + * that either packets are coming in so slow there hasn't been + * any work, or that there is so much work that NAPI is dealing + * with interrupt moderation and we don't need to do anything. */ - /* what was last interrupt timeslice? */ - timepassed_us = q_vector->itr >> 2; - if (timepassed_us == 0) - return; + if (time_after(next_update, ring_container->next_update)) + goto clear_counts; - bytes_perint = bytes / timepassed_us; /* bytes/usec */ + packets = ring_container->total_packets; - switch (itr_setting) { - case lowest_latency: - if (bytes_perint > 10) - itr_setting = low_latency; - break; - case low_latency: - if (bytes_perint > 20) - itr_setting = bulk_latency; - else if (bytes_perint <= 10) - itr_setting = lowest_latency; + /* We have no packets to actually measure against. This means + * either one of the other queues on this vector is active or + * we are a Tx queue doing TSO with too high of an interrupt rate. + * + * When this occurs just tick up our delay by the minimum value + * and hope that this extra delay will prevent us from being called + * without any work on our queue. + */ + if (!packets) { + itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC; + if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS) + itr = IXGBE_ITR_ADAPTIVE_MAX_USECS; + itr += ring_container->itr & IXGBE_ITR_ADAPTIVE_LATENCY; + goto clear_counts; + } + + bytes = ring_container->total_bytes; + + /* If packets are less than 4 or bytes are less than 9000 assume + * insufficient data to use bulk rate limiting approach. We are + * likely latency driven. + */ + if (packets < 4 && bytes < 9000) { + itr = IXGBE_ITR_ADAPTIVE_LATENCY; + goto adjust_by_size; + } + + /* Between 4 and 48 we can assume that our current interrupt delay + * is only slightly too low. As such we should increase it by a small + * fixed amount. + */ + if (packets < 48) { + itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC; + if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS) + itr = IXGBE_ITR_ADAPTIVE_MAX_USECS; + goto clear_counts; + } + + /* Between 48 and 96 is our "goldilocks" zone where we are working + * out "just right". Just report that our current ITR is good for us. + */ + if (packets < 96) { + itr = q_vector->itr >> 2; + goto clear_counts; + } + + /* If packet count is 96 or greater we are likely looking at a slight + * overrun of the delay we want. Try halving our delay to see if that + * will cut the number of packets in half per interrupt. + */ + if (packets < 256) { + itr = q_vector->itr >> 3; + if (itr < IXGBE_ITR_ADAPTIVE_MIN_USECS) + itr = IXGBE_ITR_ADAPTIVE_MIN_USECS; + goto clear_counts; + } + + /* The paths below assume we are dealing with a bulk ITR since number + * of packets is 256 or greater. We are just going to have to compute + * a value and try to bring the count under control, though for smaller + * packet sizes there isn't much we can do as NAPI polling will likely + * be kicking in sooner rather than later. 
+ */ + itr = IXGBE_ITR_ADAPTIVE_BULK; + +adjust_by_size: + /* If packet counts are 256 or greater we can assume we have a gross + * overestimation of what the rate should be. Instead of trying to fine + * tune it just use the formula below to try and dial in an exact value + * give the current packet size of the frame. + */ + avg_wire_size = bytes / packets; + + /* The following is a crude approximation of: + * wmem_default / (size + overhead) = desired_pkts_per_int + * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate + * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value + * + * Assuming wmem_default is 212992 and overhead is 640 bytes per + * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the + * formula down to + * + * (170 * (size + 24)) / (size + 640) = ITR + * + * We first do some math on the packet size and then finally bitshift + * by 8 after rounding up. We also have to account for PCIe link speed + * difference as ITR scales based on this. + */ + if (avg_wire_size <= 60) { + /* Start at 50k ints/sec */ + avg_wire_size = 5120; + } else if (avg_wire_size <= 316) { + /* 50K ints/sec to 16K ints/sec */ + avg_wire_size *= 40; + avg_wire_size += 2720; + } else if (avg_wire_size <= 1084) { + /* 16K ints/sec to 9.2K ints/sec */ + avg_wire_size *= 15; + avg_wire_size += 11452; + } else if (avg_wire_size <= 1980) { + /* 9.2K ints/sec to 8K ints/sec */ + avg_wire_size *= 5; + avg_wire_size += 22420; + } else { + /* plateau at a limit of 8K ints/sec */ + avg_wire_size = 32256; + } + + /* If we are in low latency mode half our delay which doubles the rate + * to somewhere between 100K to 16K ints/sec + */ + if (itr & IXGBE_ITR_ADAPTIVE_LATENCY) + avg_wire_size >>= 1; + + /* Resultant value is 256 times larger than it needs to be. This + * gives us room to adjust the value as needed to either increase + * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc. + * + * Use addition as we have already recorded the new latency flag + * for the ITR value. 
+ */ + switch (q_vector->adapter->link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + case IXGBE_LINK_SPEED_100_FULL: + default: + itr += DIV_ROUND_UP(avg_wire_size, + IXGBE_ITR_ADAPTIVE_MIN_INC * 256) * + IXGBE_ITR_ADAPTIVE_MIN_INC; break; - case bulk_latency: - if (bytes_perint <= 20) - itr_setting = low_latency; + case IXGBE_LINK_SPEED_2_5GB_FULL: + case IXGBE_LINK_SPEED_1GB_FULL: + case IXGBE_LINK_SPEED_10_FULL: + itr += DIV_ROUND_UP(avg_wire_size, + IXGBE_ITR_ADAPTIVE_MIN_INC * 64) * + IXGBE_ITR_ADAPTIVE_MIN_INC; break; } - /* clear work counters since we have the values we need */ +clear_counts: + /* write back value */ + ring_container->itr = itr; + + /* next update should occur within next jiffy */ + ring_container->next_update = next_update + 1; + ring_container->total_bytes = 0; ring_container->total_packets = 0; - - /* write updated itr to ring container */ - ring_container->itr = itr_setting; } /** @@ -2625,34 +2749,19 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector) { - u32 new_itr = q_vector->itr; - u8 current_itr; + u32 new_itr; ixgbe_update_itr(q_vector, &q_vector->tx); ixgbe_update_itr(q_vector, &q_vector->rx); - current_itr = max(q_vector->rx.itr, q_vector->tx.itr); + /* use the smallest value of new ITR delay calculations */ + new_itr = min(q_vector->rx.itr, q_vector->tx.itr); - switch (current_itr) { - /* counts and packets in update_itr are dependent on these numbers */ - case lowest_latency: - new_itr = IXGBE_100K_ITR; - break; - case low_latency: - new_itr = IXGBE_20K_ITR; - break; - case bulk_latency: - new_itr = IXGBE_12K_ITR; - break; - default: - break; - } + /* Clear latency flag if set, shift into correct position */ + new_itr &= ~IXGBE_ITR_ADAPTIVE_LATENCY; + new_itr <<= 2; if (new_itr != q_vector->itr) { - /* do an exponential smoothing */ - new_itr = (10 * new_itr * q_vector->itr) / - ((9 * new_itr) + q_vector->itr); - /* save the algorithm value here */ q_vector->itr = new_itr; -- cgit v1.2.3 From b64666ae00327efe53613fc180df6ffbeee1d8d1 Mon Sep 17 00:00:00 2001 From: Emil Tantilov Date: Fri, 29 Sep 2017 10:55:42 -0700 Subject: ixgbe: fix crash when injecting AER after failed reset In case where AER recovery fails the device is left in a down state. Consecutive AER error injection can lead to a double IRQ free. Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 5e2686d106db..c6f9da7990c7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -10861,6 +10861,9 @@ skip_bad_vf_detection: if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state)) return PCI_ERS_RESULT_DISCONNECT; + if (!netif_device_present(netdev)) + return PCI_ERS_RESULT_DISCONNECT; + rtnl_lock(); netif_device_detach(netdev); -- cgit v1.2.3 From b35750f19102271485af9092b800863164dd4be0 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Mon, 9 Oct 2017 11:15:33 +0200 Subject: mlxsw: spectrum: router: Export the mlxsw_sp_router_port function In Spectrum hardware, the router port is a virtual port that is the gateway to the routing mechanism. Hence, in order for a packet to be L3 forwarded, it must first be L2 forwarded to the router port inside the hardware. 
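As a rough sketch (not a verbatim excerpt; only the helper names are taken from the diffs below), including the router port in a multicast flood set is a single extra SMID bit, because the router port is just an egress index one past the last physical port:

/* Schematic only: the helper names come from the diffs below, the
 * surrounding function is invented for illustration.
 */
static void smid_flood_to_router_sketch(struct mlxsw_sp *mlxsw_sp,
					char *smid_pl, bool mrouter)
{
	u8 rtr_port = mlxsw_sp_router_port(mlxsw_sp); /* max ports + 1 */

	/* unmask the router port bit, then set it per the mrouter state */
	mlxsw_reg_smid_port_mask_set(smid_pl, rtr_port, 1);
	mlxsw_reg_smid_port_set(smid_pl, rtr_port, mrouter);
}
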
Further patches in this patchset are going to introduce support for a bridge device used as an mrouter port. In this case, the router port index will be needed in order to update the MDB entries to include the router port. Thus, export the mlxsw_sp_router_port function, which returns the index of the Spectrum router port. Signed-off-by: Yotam Gigi Reviewed-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2 +- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index e0f8ea4ed7af..a072903f2c4e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -5957,7 +5957,7 @@ static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); } -static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp) +u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp) { return mlxsw_core_max_ports(mlxsw_sp->core) + 1; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 3d449180b035..3f2d840cb285 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -70,6 +70,7 @@ u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif); u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif); u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif); int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif); +u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp); const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif); int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif, -- cgit v1.2.3 From c4db953f00f09003519ac1fb078f3b5f57b32e3c Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Mon, 9 Oct 2017 11:15:34 +0200 Subject: mlxsw: spectrum_switchdev: Add support for router port in SMID entries In Spectrum, MDB entries point to MID entries that indicate which ports a packet should be forwarded to. Add support for creating MID entries that forward the packet to the Spectrum router port. This will later be used to handle the bridge mrouter port switchdev notifications. Signed-off-by: Yotam Gigi Reviewed-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S.
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 2cfdf22a145f..4b4584f8522a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -49,6 +49,7 @@ #include #include +#include "spectrum_router.h" #include "spectrum.h" #include "core.h" #include "reg.h" @@ -1243,7 +1244,8 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr, } static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx, - long *ports_bitmap) + long *ports_bitmap, + bool set_router_port) { char *smid_pl; int err, i; @@ -1258,9 +1260,15 @@ static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx, mlxsw_reg_smid_port_mask_set(smid_pl, i, 1); } + mlxsw_reg_smid_port_mask_set(smid_pl, + mlxsw_sp_router_port(mlxsw_sp), 1); + for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core)) mlxsw_reg_smid_port_set(smid_pl, i, 1); + mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp), + set_router_port); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); kfree(smid_pl); return err; @@ -1364,7 +1372,8 @@ mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp); mid->mid = mid_idx; - err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap); + err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap, + false); kfree(flood_bitmap); if (err) return false; -- cgit v1.2.3 From 593bc28ae211b864e7e0720ffc65a14cc5cff101 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Mon, 9 Oct 2017 11:15:35 +0200 Subject: mlxsw: spectrum_switchdev: Support bridge mrouter notifications Support the SWITCHDEV_ATTR_ID_BRIDGE_MROUTER port attribute switchdev notification. To do that, add the mrouter flag to struct mlxsw_sp_bridge_device, which indicates whether the bridge device was set to be an mrouter port. This field is set when: - A new bridge is created, where the value is taken from the kernel bridge value. - A switchdev SWITCHDEV_ATTR_ID_BRIDGE_MROUTER notification is sent. In addition, change the bridge MID entries to include the router port when the bridge device is configured to be an mrouter port. The MID entries are updated in the following cases: - When a new MID entry is created, update the router port according to the bridge mrouter state. - When a SWITCHDEV_ATTR_ID_BRIDGE_MROUTER notification is sent, update all the bridge's MID entries. This is aligned with the case where a bridge slave is configured to be an mrouter port. Signed-off-by: Yotam Gigi Reviewed-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S.
Miller --- .../ethernet/mellanox/mlxsw/spectrum_switchdev.c | 65 +++++++++++++++++++++- 1 file changed, 63 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 4b4584f8522a..7b8548e25ae7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -80,7 +80,8 @@ struct mlxsw_sp_bridge_device { struct list_head ports_list; struct list_head mids_list; u8 vlan_enabled:1, - multicast_enabled:1; + multicast_enabled:1, + mrouter:1; const struct mlxsw_sp_bridge_ops *ops; }; @@ -171,6 +172,7 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge, bridge_device->dev = br_dev; bridge_device->vlan_enabled = vlan_enabled; bridge_device->multicast_enabled = br_multicast_enabled(br_dev); + bridge_device->mrouter = br_multicast_router(br_dev); INIT_LIST_HEAD(&bridge_device->ports_list); if (vlan_enabled) { bridge->vlan_enabled_exists = true; @@ -813,6 +815,60 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port, return 0; } +static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp, + u16 mid_idx, bool add) +{ + char *smid_pl; + int err; + + smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL); + if (!smid_pl) + return -ENOMEM; + + mlxsw_reg_smid_pack(smid_pl, mid_idx, + mlxsw_sp_router_port(mlxsw_sp), add); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl); + kfree(smid_pl); + return err; +} + +static void +mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_bridge_device *bridge_device, + bool add) +{ + struct mlxsw_sp_mid *mid; + + list_for_each_entry(mid, &bridge_device->mids_list, list) + mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add); +} + +static int +mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port, + struct switchdev_trans *trans, + struct net_device *orig_dev, + bool is_mrouter) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_bridge_device *bridge_device; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + /* It's possible we failed to enslave the port, yet this + * operation is executed due to it being deferred. 
+ */ + bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev); + if (!bridge_device) + return 0; + + if (bridge_device->mrouter != is_mrouter) + mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device, + is_mrouter); + bridge_device->mrouter = is_mrouter; + return 0; +} + static int mlxsw_sp_port_attr_set(struct net_device *dev, const struct switchdev_attr *attr, struct switchdev_trans *trans) @@ -850,6 +906,11 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, attr->orig_dev, attr->u.mc_disabled); break; + case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER: + err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans, + attr->orig_dev, + attr->u.mrouter); + break; default: err = -EOPNOTSUPP; break; @@ -1373,7 +1434,7 @@ mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp, mid->mid = mid_idx; err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap, - false); + bridge_device->mrouter); kfree(flood_bitmap); if (err) return false; -- cgit v1.2.3 From f5823fe6897c444265ef3919d8684b647eef904f Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Mon, 9 Oct 2017 12:37:43 +0300 Subject: qed: Add ll2 option to limit the number of bds per packet iWARP uses 3 ll2 connections; the maximum number of bds is known during connection setup. This patch modifies the static array in the ll2_tx_packet descriptor to be a flexible array and significantly reduces memory size. In addition, some redundant fields in the ll2_tx_packet were removed, which also contributed to decreasing the descriptor size. Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 29 +++++++++++++++++++++-------- drivers/net/ethernet/qlogic/qed/qed_ll2.h | 9 +++------ 2 files changed, 24 insertions(+), 14 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 250afa5486cf..75af40a7690a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -1105,6 +1105,7 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_info) { struct qed_ll2_tx_packet *p_descq; + u32 desc_size; u32 capacity; int rc = 0; @@ -1122,13 +1123,17 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn, goto out; capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain); - p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet), - GFP_KERNEL); + /* First element is part of the packet, rest are flexibly added */ + desc_size = (sizeof(*p_descq) + + (p_ll2_info->input.tx_max_bds_per_packet - 1) * + sizeof(p_descq->bds_set)); + + p_descq = kcalloc(capacity, desc_size, GFP_KERNEL); if (!p_descq) { rc = -ENOMEM; goto out; } - p_ll2_info->tx_queue.descq_array = p_descq; + p_ll2_info->tx_queue.descq_mem = p_descq; DP_VERBOSE(p_hwfn, QED_MSG_LL2, "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n", @@ -1359,11 +1364,13 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle) { struct qed_hwfn *p_hwfn = cxt; struct qed_ll2_info *p_ll2_conn; + struct qed_ll2_tx_packet *p_pkt; struct qed_ll2_rx_queue *p_rx; struct qed_ll2_tx_queue *p_tx; struct qed_ptt *p_ptt; int rc = -EINVAL; u32 i, capacity; + u32 desc_size; u8 qid; p_ptt = qed_ptt_acquire(p_hwfn); @@ -1397,9 +1404,15 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle) INIT_LIST_HEAD(&p_tx->sending_descq); spin_lock_init(&p_tx->lock); capacity = 
qed_chain_get_capacity(&p_tx->txq_chain); - for (i = 0; i < capacity; i++) - list_add_tail(&p_tx->descq_array[i].list_entry, - &p_tx->free_descq); + /* First element is part of the packet, rest are flexibly added */ + desc_size = (sizeof(*p_pkt) + + (p_ll2_conn->input.tx_max_bds_per_packet - 1) * + sizeof(p_pkt->bds_set)); + + for (i = 0; i < capacity; i++) { + p_pkt = p_tx->descq_mem + desc_size * i; + list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); + } p_tx->cur_completing_bd_idx = 0; p_tx->bds_idx = 0; p_tx->b_completing_packet = false; @@ -1698,7 +1711,7 @@ int qed_ll2_prepare_tx_packet(void *cxt, p_tx = &p_ll2_conn->tx_queue; p_tx_chain = &p_tx->txq_chain; - if (pkt->num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET) + if (pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet) return -EIO; spin_lock_irqsave(&p_tx->lock, flags); @@ -1858,7 +1871,7 @@ void qed_ll2_release_connection(void *cxt, u8 connection_handle) qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index); } - kfree(p_ll2_conn->tx_queue.descq_array); + kfree(p_ll2_conn->tx_queue.descq_mem); qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain); kfree(p_ll2_conn->rx_queue.descq_array); diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index a822528e9c63..9bdd08f15c79 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -63,17 +63,14 @@ struct qed_ll2_rx_packet { struct qed_ll2_tx_packet { struct list_head list_entry; u16 bd_used; - u16 vlan; - u16 l4_hdr_offset_w; - u8 bd_flags; bool notify_fw; void *cookie; - + /* Flexible Array of bds_set determined by max_bds_per_packet */ struct { struct core_tx_bd *txq_bd; dma_addr_t tx_frag; u16 frag_len; - } bds_set[ETH_TX_MAX_BDS_PER_NON_LSO_PACKET]; + } bds_set[1]; }; struct qed_ll2_rx_queue { @@ -101,7 +98,7 @@ struct qed_ll2_tx_queue { struct list_head active_descq; struct list_head free_descq; struct list_head sending_descq; - struct qed_ll2_tx_packet *descq_array; + void *descq_mem; /* memory for variable sized qed_ll2_tx_packet*/ struct qed_ll2_tx_packet *cur_send_packet; struct qed_ll2_tx_packet cur_completing_packet; u16 cur_completing_bd_idx; -- cgit v1.2.3 From ed468ebee04ffba0231a8f50616bdb250752a891 Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Mon, 9 Oct 2017 12:37:44 +0300 Subject: qed: Add ll2 ability of opening a secondary queue When more than one ll2 queue is opened (that is not an OOO queue), the ll2 code does not have enough information to determine whether the queue is the main one or not, so a new field is added to the acquire input data that lets the caller state whether the queue is the main queue or a secondary queue. Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: David S.
Miller --- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 7 ++++++- drivers/net/ethernet/qlogic/qed/qed_ll2.h | 1 + include/linux/qed/qed_ll2_if.h | 1 + 3 files changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 75af40a7690a..3c695da890df 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -894,7 +894,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg; p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en; p_ramrod->queue_id = p_ll2_conn->queue_id; - p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_OOO) ? 0 : 1; + p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0; if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) && p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE) && @@ -1265,6 +1265,11 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data) p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ? CORE_TX_DEST_NW : CORE_TX_DEST_LB; + if (data->input.conn_type == QED_LL2_TYPE_OOO || + data->input.secondary_queue) + p_ll2_info->main_func_queue = false; + else + p_ll2_info->main_func_queue = true; /* Correct maximum number of Tx BDs */ p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet; diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 9bdd08f15c79..f65817012e97 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -121,6 +121,7 @@ struct qed_ll2_info { bool b_active; enum core_tx_dest tx_dest; u8 tx_stats_en; + bool main_func_queue; struct qed_ll2_rx_queue rx_queue; struct qed_ll2_tx_queue tx_queue; struct qed_ll2_cbs cbs; diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index 89fa0bbd54f3..d7cca590b743 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -171,6 +171,7 @@ struct qed_ll2_acquire_data_inputs { enum qed_ll2_tx_dest tx_dest; enum qed_ll2_error_handle ai_err_packet_too_big; enum qed_ll2_error_handle ai_err_no_buf; + bool secondary_queue; u8 gsi_enable; }; -- cgit v1.2.3 From 77caa792f5d8e4ecc88eb1cf4b9c478c07e0ec57 Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Mon, 9 Oct 2017 12:37:45 +0300 Subject: qed: Add ll2 option for dropping a tx packet The option of sending a packet on the ll2 and dropping it exists in hardware and was not used until now, thus not exposed. The iWARP unaligned MPA flow requires this functionality for flushing the tx queue. Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 16 ++++++++++++++-- include/linux/qed/qed_ll2_if.h | 1 + 2 files changed, 15 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 3c695da890df..ad67d36956e8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -1597,8 +1597,20 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE : CORE_RROCE; - tx_dest = (pkt->tx_dest == QED_LL2_TX_DEST_NW) ? 
CORE_TX_DEST_NW - : CORE_TX_DEST_LB; + switch (pkt->tx_dest) { + case QED_LL2_TX_DEST_NW: + tx_dest = CORE_TX_DEST_NW; + break; + case QED_LL2_TX_DEST_LB: + tx_dest = CORE_TX_DEST_LB; + break; + case QED_LL2_TX_DEST_DROP: + tx_dest = CORE_TX_DEST_DROP; + break; + default: + tx_dest = CORE_TX_DEST_LB; + break; + } start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan); diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index d7cca590b743..95fdf02a3bbe 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -64,6 +64,7 @@ enum qed_ll2_roce_flavor_type { enum qed_ll2_tx_dest { QED_LL2_TX_DEST_NW, /* Light L2 TX Destination to the Network */ QED_LL2_TX_DEST_LB, /* Light L2 TX Destination to the Loopback */ + QED_LL2_TX_DEST_DROP, /* Light L2 Drop the TX packet */ QED_LL2_TX_DEST_MAX }; -- cgit v1.2.3 From 6df60fe703c348a507b0030b92c2947e68e1c589 Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Mon, 9 Oct 2017 12:37:46 +0300 Subject: qed: Fix initialization of ll2 offload feature enable_ip_cksum, enable_l4_cksum, calc_ip_len were added in the commit stated below but not passed through to FW. This was OK until now as it wasn't used, but is required for the iWARP unaligned flow. Fixes: 7c7973b2ae27 ("qed: LL2 to use packed information for tx") Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index ad67d36956e8..6d144747111a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -1621,6 +1621,9 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1); SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds); SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor); + SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum)); + SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum)); + SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len)); start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data); DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag); start_bd->nbytes = cpu_to_le16(pkt->first_frag_len); -- cgit v1.2.3 From 89d65113097072de7936a2aea2f819818a7c987a Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Mon, 9 Oct 2017 12:37:47 +0300 Subject: qed: Add the source of a packet sent on an iWARP ll2 connection When a packet is sent back to iWARP FW via the tx ll2 connection, the FW needs to know the source of the packet: whether it is OOO or unaligned MPA related. Since OOO is implemented entirely inside the ll2 code (and shared with iSCSI), packets are marked as IN_ORDER inside the ll2 code. For unaligned mpa the value will be determined in the iWARP code and sent on the pkt->vlan field. Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: David S.
Miller --- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 6d144747111a..8eb9645c880d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -1613,7 +1613,12 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, } start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); - start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan); + if (QED_IS_IWARP_PERSONALITY(p_hwfn) && + p_ll2->input.conn_type == QED_LL2_TYPE_OOO) + start_bd->nw_vlan_or_lb_echo = + cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE); + else + start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan); SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, cpu_to_le16(pkt->l4_hdr_offset_w)); SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest); -- cgit v1.2.3 From 6f34a284f36399501fcc034dc4522a2d8d9fa6c9 Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Mon, 9 Oct 2017 12:37:48 +0300 Subject: qed: Add LL2 slowpath handling For iWARP unaligned MPA flow, a slowpath event of flushing an MPA connection that entered an unaligned state is required. The flush ramrod is received on the ll2 queue, and a pre-registered callback function is called to handle the flush event. Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_ll2.c | 40 +++++++++++++++++++++++++++++-- include/linux/qed/qed_ll2_if.h | 5 ++++ 2 files changed, 43 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 8eb9645c880d..047f556ca62e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -422,6 +422,41 @@ static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn, data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset; } +static int +qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn, + union core_rx_cqe_union *p_cqe, + unsigned long *p_lock_flags) +{ + struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; + struct core_rx_slow_path_cqe *sp_cqe; + + sp_cqe = &p_cqe->rx_cqe_sp; + if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) { + DP_NOTICE(p_hwfn, + "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n", + sp_cqe->ramrod_cmd_id); + return -EINVAL; + } + + if (!p_ll2_conn->cbs.slowpath_cb) { + DP_NOTICE(p_hwfn, + "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n"); + return -EINVAL; + } + + spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags); + + p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie, + p_ll2_conn->my_id, + le32_to_cpu(sp_cqe->opaque_data.data[0]), + le32_to_cpu(sp_cqe->opaque_data.data[1])); + + spin_lock_irqsave(&p_rx->lock, *p_lock_flags); + + return 0; +} + static int qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn, @@ -495,8 +530,8 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie) switch (cqe->rx_cqe_sp.type) { case CORE_RX_CQE_TYPE_SLOW_PATH: - DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n"); - rc = -EINVAL; + rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn, + cqe, &flags); break; case CORE_RX_CQE_TYPE_GSI_OFFLOAD: case CORE_RX_CQE_TYPE_REGULAR: @@ -1214,6 +1249,7 @@ qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct 
qed_ll2_cbs *cbs) p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb; p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb; p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb; + p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb; p_ll2_info->cbs.cookie = cbs->cookie; return 0; diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index 95fdf02a3bbe..e755954d85fd 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -151,11 +151,16 @@ void (*qed_ll2_release_tx_packet_cb)(void *cxt, dma_addr_t first_frag_addr, bool b_last_fragment, bool b_last_packet); +typedef +void (*qed_ll2_slowpath_cb)(void *cxt, u8 connection_handle, + u32 opaque_data_0, u32 opaque_data_1); + struct qed_ll2_cbs { qed_ll2_complete_rx_packet_cb rx_comp_cb; qed_ll2_release_rx_packet_cb rx_release_cb; qed_ll2_complete_tx_packet_cb tx_comp_cb; qed_ll2_release_tx_packet_cb tx_release_cb; + qed_ll2_slowpath_cb slowpath_cb; void *cookie; }; -- cgit v1.2.3 From ae3488ff37dc4f21985111f442d26a8805e56d45 Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Mon, 9 Oct 2017 12:37:49 +0300 Subject: qed: Add ll2 connection for processing unaligned MPA packets This patch adds only the establishment and termination of the ll2 connection that handles unaligned MPA packets. Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 65 +++++++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_iwarp.h | 1 + 2 files changed, 66 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 8fc9c811f6e3..f413621a67b0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1713,6 +1713,19 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, return 0; } +/* fpdu can be fragmented over maximum 3 bds: header, partial mpa, unaligned */ +#define QED_IWARP_MAX_BDS_PER_FPDU 3 +static void +qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) +{ + struct qed_iwarp_info *iwarp_info; + struct qed_hwfn *p_hwfn = cxt; + + iwarp_info = &p_hwfn->p_rdma_info->iwarp; + qed_iwarp_ll2_post_rx(p_hwfn, data->cookie, + iwarp_info->ll2_mpa_handle); +} + static void qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) { @@ -1877,6 +1890,13 @@ static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle, kfree(buffer); } +void +qed_iwarp_ll2_slowpath(void *cxt, + u8 connection_handle, + u32 opaque_data_0, u32 opaque_data_1) +{ +} + static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; @@ -1902,6 +1922,16 @@ static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL; } + if (iwarp_info->ll2_mpa_handle != QED_IWARP_HANDLE_INVAL) { + rc = qed_ll2_terminate_connection(p_hwfn, + iwarp_info->ll2_mpa_handle); + if (rc) + DP_INFO(p_hwfn, "Failed to terminate mpa connection\n"); + + qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_mpa_handle); + iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL; + } + qed_llh_remove_mac_filter(p_hwfn, p_ptt, p_hwfn->p_rdma_info->iwarp.mac_addr); return rc; @@ -1953,12 +1983,14 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, struct qed_iwarp_info *iwarp_info; struct qed_ll2_acquire_data data; struct qed_ll2_cbs cbs; + u32 mpa_buff_size; u16 n_ooo_bufs; int rc = 
0; iwarp_info = &p_hwfn->p_rdma_info->iwarp; iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL; iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL; + iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL; iwarp_info->max_mtu = params->max_mtu; @@ -2029,6 +2061,39 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, if (rc) goto err; + /* Start Unaligned MPA connection */ + cbs.rx_comp_cb = qed_iwarp_ll2_comp_mpa_pkt; + cbs.slowpath_cb = qed_iwarp_ll2_slowpath; + + memset(&data, 0, sizeof(data)); + data.input.conn_type = QED_LL2_TYPE_IWARP; + data.input.mtu = params->max_mtu; + /* FW requires that once a packet arrives OOO, it must have at + * least 2 rx buffers available on the unaligned connection + * for handling the case that it is a partial fpdu. + */ + data.input.rx_num_desc = n_ooo_bufs * 2; + data.input.tx_num_desc = data.input.rx_num_desc; + data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU; + data.p_connection_handle = &iwarp_info->ll2_mpa_handle; + data.input.secondary_queue = true; + data.cbs = &cbs; + + rc = qed_ll2_acquire_connection(p_hwfn, &data); + if (rc) + goto err; + + rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle); + if (rc) + goto err; + + mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); + rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, + data.input.rx_num_desc, + mpa_buff_size, + iwarp_info->ll2_mpa_handle); + if (rc) + goto err; return rc; err: qed_iwarp_ll2_stop(p_hwfn, p_ptt); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index 9e2bfde894df..9d33a1fa1758 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -73,6 +73,7 @@ struct qed_iwarp_info { u8 tcp_flags; u8 ll2_syn_handle; u8 ll2_ooo_handle; + u8 ll2_mpa_handle; u8 peer2peer; enum mpa_negotiation_mode mpa_rev; enum mpa_rtr_type rtr_type; -- cgit v1.2.3 From fcb39f6c10b24d2d16d4c2bdb4c256bc21b8a131 Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Mon, 9 Oct 2017 12:37:50 +0300 Subject: qed: Add mpa buffer descriptors for storing and processing mpa fpdus The mpa buff is a descriptor for iwarp ll2 buffers that contains additional information required for aligning fpdus. In some cases, an additional packet will arrive which will complete the alignment of an fpdu, but we won't be able to post the fpdu due to insufficient room on the tx ring. In this case we can't lose the data and require storing it for later. Processing is therefore done in two places: during rx completion, where we initialize an mpa buffer descriptor and add it to the pending list, and during tx completion, since freeing an entry in the tx chain lets us process any pending mpa packets. The mpa buff descriptors are pre-allocated since we have to ensure that we won't reach a state where we can't store an incoming unaligned packet. All packets received on the ll2 MUST be processed by the driver at some stage. Since they are preallocated, we hold a free list. Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: David S.
Miller --- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 116 ++++++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_iwarp.h | 11 +++ 2 files changed, 127 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index f413621a67b0..efd4861c72e2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1415,7 +1415,10 @@ int qed_iwarp_alloc(struct qed_hwfn *p_hwfn) void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn) { + struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1); + kfree(iwarp_info->mpa_bufs); } int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams) @@ -1715,13 +1718,104 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, /* fpdu can be fragmented over maximum 3 bds: header, partial mpa, unaligned */ #define QED_IWARP_MAX_BDS_PER_FPDU 3 +static void +qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn, + struct unaligned_opaque_data *curr_pkt, + u32 opaque_data0, u32 opaque_data1) +{ + u64 opaque_data; + + opaque_data = HILO_64(opaque_data1, opaque_data0); + *curr_pkt = *((struct unaligned_opaque_data *)&opaque_data); + + curr_pkt->first_mpa_offset = curr_pkt->tcp_payload_offset + + le16_to_cpu(curr_pkt->first_mpa_offset); + curr_pkt->cid = le32_to_cpu(curr_pkt->cid); +} + +/* This function is called when an unaligned or incomplete MPA packet arrives + * driver needs to align the packet, perhaps using previous data and send + * it down to FW once it is aligned. + */ +static int +qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ll2_mpa_buf *mpa_buf) +{ + struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf; + int rc = -EINVAL; + + qed_iwarp_ll2_post_rx(p_hwfn, + buf, + p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle); + return rc; +} + +static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn) +{ + struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; + struct qed_iwarp_ll2_mpa_buf *mpa_buf = NULL; + int rc; + + while (!list_empty(&iwarp_info->mpa_buf_pending_list)) { + mpa_buf = list_first_entry(&iwarp_info->mpa_buf_pending_list, + struct qed_iwarp_ll2_mpa_buf, + list_entry); + + rc = qed_iwarp_process_mpa_pkt(p_hwfn, mpa_buf); + + /* busy means break and continue processing later, don't + * remove the buf from the pending list. 
+ */ + if (rc == -EBUSY) + break; + + list_del(&mpa_buf->list_entry); + list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_list); + + if (rc) { /* different error, don't continue */ + DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n", rc); + break; + } + } +} + static void qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) { + struct qed_iwarp_ll2_mpa_buf *mpa_buf; struct qed_iwarp_info *iwarp_info; struct qed_hwfn *p_hwfn = cxt; iwarp_info = &p_hwfn->p_rdma_info->iwarp; + mpa_buf = list_first_entry(&iwarp_info->mpa_buf_list, + struct qed_iwarp_ll2_mpa_buf, list_entry); + if (!mpa_buf) { + DP_ERR(p_hwfn, "No free mpa buf\n"); + goto err; + } + + list_del(&mpa_buf->list_entry); + qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data, + data->opaque_data_0, data->opaque_data_1); + + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n", + data->length.packet_length, mpa_buf->data.first_mpa_offset, + mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags, + mpa_buf->data.cid); + + mpa_buf->ll2_buf = data->cookie; + mpa_buf->tcp_payload_len = data->length.packet_length - + mpa_buf->data.first_mpa_offset; + mpa_buf->data.first_mpa_offset += data->u.placement_offset; + mpa_buf->placement_offset = data->u.placement_offset; + + list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list); + + qed_iwarp_process_pending_pkts(p_hwfn); + return; +err: qed_iwarp_ll2_post_rx(p_hwfn, data->cookie, iwarp_info->ll2_mpa_handle); } @@ -1872,6 +1966,11 @@ static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle, /* this was originally an rx packet, post it back */ qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle); + + if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle) + qed_iwarp_process_pending_pkts(p_hwfn); + + return; } static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle, @@ -1986,6 +2085,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, u32 mpa_buff_size; u16 n_ooo_bufs; int rc = 0; + int i; iwarp_info = &p_hwfn->p_rdma_info->iwarp; iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL; @@ -2094,6 +2194,22 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, iwarp_info->ll2_mpa_handle); if (rc) goto err; + /* The mpa_bufs array serves for pending RX packets received on the + * mpa ll2 that don't have place on the tx ring and require later + * processing. 
We can't fail on allocation of such a struct, therefore + * we allocate enough to take care of all rx packets + */ + iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc, + sizeof(*iwarp_info->mpa_bufs), + GFP_KERNEL); + if (!iwarp_info->mpa_bufs) + goto err; + + INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list); + INIT_LIST_HEAD(&iwarp_info->mpa_buf_list); + for (i = 0; i < data.input.rx_num_desc; i++) + list_add_tail(&iwarp_info->mpa_bufs[i].list_entry, + &iwarp_info->mpa_buf_list); return rc; err: qed_iwarp_ll2_stop(p_hwfn, p_ptt); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index 9d33a1fa1758..2c53fe46345c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -60,10 +60,20 @@ struct qed_iwarp_ll2_buff { u32 buff_size; }; +struct qed_iwarp_ll2_mpa_buf { + struct list_head list_entry; + struct qed_iwarp_ll2_buff *ll2_buf; + struct unaligned_opaque_data data; + u16 tcp_payload_len; + u8 placement_offset; +}; + struct qed_iwarp_info { struct list_head listen_list; /* qed_iwarp_listener */ struct list_head ep_list; /* qed_iwarp_ep */ struct list_head ep_free_list; /* pre-allocated ep's */ + struct list_head mpa_buf_list; /* list of mpa_bufs */ + struct list_head mpa_buf_pending_list; spinlock_t iw_lock; /* for iwarp resources */ spinlock_t qp_lock; /* for teardown races */ u32 rcv_wnd_scale; @@ -77,6 +87,7 @@ struct qed_iwarp_info { u8 peer2peer; enum mpa_negotiation_mode mpa_rev; enum mpa_rtr_type rtr_type; + struct qed_iwarp_ll2_mpa_buf *mpa_bufs; }; enum qed_iwarp_ep_state { -- cgit v1.2.3 From 469981b17a4f8ddac91837bd74ebc98578f2ddbf Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Mon, 9 Oct 2017 12:37:51 +0300 Subject: qed: Add unaligned and packed packet processing The fpdu data structure is preallocated per connection. Each connection stores the current status of the connection: either nothing pending, or there is a partial fpdu that is waiting for the rest of the fpdu (incomplete bytes != 0). The same structure is also used for splitting a packet when there are packed fpdus. The structure is initialized with all data required for sending the fpdu back to the FW. An fpdu will always be spanned across a maximum of 3 tx bds. One for the header, one for the partial fpdu received and one for the remainder (unaligned) packet. In the case of packed fpdus, two fragments are used, one for the header and one for the data. Corner cases are not handled in the patch for clarity, and will be added as a separate patch. Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: David S.
Miller --- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 257 ++++++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_iwarp.h | 13 ++ 2 files changed, 270 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index efd4861c72e2..83b147fdacde 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1419,6 +1419,7 @@ void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn) qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1); kfree(iwarp_info->mpa_bufs); + kfree(iwarp_info->partial_fpdus); } int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams) @@ -1716,8 +1717,170 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, return 0; } +static struct qed_iwarp_fpdu *qed_iwarp_get_curr_fpdu(struct qed_hwfn *p_hwfn, + u16 cid) +{ + struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; + struct qed_iwarp_fpdu *partial_fpdu; + u32 idx; + + idx = cid - qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP); + if (idx >= iwarp_info->max_num_partial_fpdus) { + DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n", cid, + iwarp_info->max_num_partial_fpdus); + return NULL; + } + + partial_fpdu = &iwarp_info->partial_fpdus[idx]; + + return partial_fpdu; +} + +enum qed_iwarp_mpa_pkt_type { + QED_IWARP_MPA_PKT_PACKED, + QED_IWARP_MPA_PKT_PARTIAL, + QED_IWARP_MPA_PKT_UNALIGNED +}; + +#define QED_IWARP_MPA_FPDU_LENGTH_SIZE (2) +#define QED_IWARP_MPA_CRC32_DIGEST_SIZE (4) + +/* Pad to multiple of 4 */ +#define QED_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) ALIGN(data_len, 4) +#define QED_IWARP_FPDU_LEN_WITH_PAD(_mpa_len) \ + (QED_IWARP_PDU_DATA_LEN_WITH_PAD((_mpa_len) + \ + QED_IWARP_MPA_FPDU_LENGTH_SIZE) + \ + QED_IWARP_MPA_CRC32_DIGEST_SIZE) + /* fpdu can be fragmented over maximum 3 bds: header, partial mpa, unaligned */ #define QED_IWARP_MAX_BDS_PER_FPDU 3 + +char *pkt_type_str[] = { + "QED_IWARP_MPA_PKT_PACKED", + "QED_IWARP_MPA_PKT_PARTIAL", + "QED_IWARP_MPA_PKT_UNALIGNED" +}; + +static enum qed_iwarp_mpa_pkt_type +qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn, + struct qed_iwarp_fpdu *fpdu, + u16 tcp_payload_len, u8 *mpa_data) +{ + enum qed_iwarp_mpa_pkt_type pkt_type; + u16 mpa_len; + + if (fpdu->incomplete_bytes) { + pkt_type = QED_IWARP_MPA_PKT_UNALIGNED; + goto out; + } + + mpa_len = ntohs(*((u16 *)(mpa_data))); + fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len); + + if (fpdu->fpdu_length <= tcp_payload_len) + pkt_type = QED_IWARP_MPA_PKT_PACKED; + else + pkt_type = QED_IWARP_MPA_PKT_PARTIAL; + +out: + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n", + pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len); + + return pkt_type; +} + +static void +qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf, + struct qed_iwarp_fpdu *fpdu, + struct unaligned_opaque_data *pkt_data, + u16 tcp_payload_size, u8 placement_offset) +{ + fpdu->mpa_buf = buf; + fpdu->pkt_hdr = buf->data_phys_addr + placement_offset; + fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset; + fpdu->mpa_frag = buf->data_phys_addr + pkt_data->first_mpa_offset; + fpdu->mpa_frag_virt = (u8 *)(buf->data) + pkt_data->first_mpa_offset; + + if (tcp_payload_size < fpdu->fpdu_length) + fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size; + else + fpdu->incomplete_bytes = 0; /* complete fpdu */ + + fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes; +} + +static int 
+qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn, + struct qed_iwarp_fpdu *fpdu, + struct unaligned_opaque_data *curr_pkt, + struct qed_iwarp_ll2_buff *buf, + u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type) +{ + struct qed_ll2_tx_pkt_info tx_pkt; + u8 ll2_handle; + int rc; + + memset(&tx_pkt, 0, sizeof(tx_pkt)); + + /* An unaligned packet means it's split over two tcp segments. So the + * complete packet requires 3 bds, one for the header, one for the + * part of the fpdu of the first tcp segment, and the last fragment + * will point to the remainder of the fpdu. A packed pdu, requires only + * two bds, one for the header and one for the data. + */ + tx_pkt.num_of_bds = (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2; + tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; + tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; /* offset in words */ + + /* Send the mpa_buf only with the last fpdu (in case of packed) */ + if (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED || + tcp_payload_size <= fpdu->fpdu_length) + tx_pkt.cookie = fpdu->mpa_buf; + + tx_pkt.first_frag = fpdu->pkt_hdr; + tx_pkt.first_frag_len = fpdu->pkt_hdr_size; + tx_pkt.enable_ip_cksum = true; + tx_pkt.enable_l4_cksum = true; + tx_pkt.calc_ip_len = true; + /* vlan overload with enum iwarp_ll2_tx_queues */ + tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE; + + ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle; + + /* Set first fragment to header */ + rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true); + if (rc) + goto out; + + /* Set second fragment to first part of packet */ + rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle, + fpdu->mpa_frag, + fpdu->mpa_frag_len); + if (rc) + goto out; + + if (!fpdu->incomplete_bytes) + goto out; + + /* Set third fragment to second part of the packet */ + rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, + ll2_handle, + buf->data_phys_addr + + curr_pkt->first_mpa_offset, + fpdu->incomplete_bytes); +out: + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "MPA_ALIGN: Sent FPDU num_bds=%d first_frag_len=%x, mpa_frag_len=0x%x, incomplete_bytes:0x%x rc=%d\n", + tx_pkt.num_of_bds, + tx_pkt.first_frag_len, + fpdu->mpa_frag_len, + fpdu->incomplete_bytes, rc); + + return rc; +} + static void qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn, struct unaligned_opaque_data *curr_pkt, @@ -1741,9 +1904,79 @@ static int qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn, struct qed_iwarp_ll2_mpa_buf *mpa_buf) { + struct unaligned_opaque_data *curr_pkt = &mpa_buf->data; struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf; + enum qed_iwarp_mpa_pkt_type pkt_type; + struct qed_iwarp_fpdu *fpdu; int rc = -EINVAL; + u8 *mpa_data; + + fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, curr_pkt->cid & 0xffff); + if (!fpdu) { /* something corrupt with cid, post rx back */ + DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n", + curr_pkt->cid); + goto err; + } + do { + mpa_data = ((u8 *)(buf->data) + curr_pkt->first_mpa_offset); + + pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu, + mpa_buf->tcp_payload_len, + mpa_data); + + switch (pkt_type) { + case QED_IWARP_MPA_PKT_PARTIAL: + qed_iwarp_init_fpdu(buf, fpdu, + curr_pkt, + mpa_buf->tcp_payload_len, + mpa_buf->placement_offset); + + mpa_buf->tcp_payload_len = 0; + break; + case QED_IWARP_MPA_PKT_PACKED: + qed_iwarp_init_fpdu(buf, fpdu, + curr_pkt, + mpa_buf->tcp_payload_len, + mpa_buf->placement_offset); + + rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf, + mpa_buf->tcp_payload_len, + pkt_type); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Can't send FPDU:reset 
rc=%d\n", rc); + memset(fpdu, 0, sizeof(*fpdu)); + break; + } + + mpa_buf->tcp_payload_len -= fpdu->fpdu_length; + curr_pkt->first_mpa_offset += fpdu->fpdu_length; + break; + case QED_IWARP_MPA_PKT_UNALIGNED: + rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf, + mpa_buf->tcp_payload_len, + pkt_type); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Can't send FPDU:delay rc=%d\n", rc); + /* don't reset fpdu -> we need it for next + * classify + */ + break; + } + + mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes; + curr_pkt->first_mpa_offset += fpdu->incomplete_bytes; + /* The framed PDU was sent - no more incomplete bytes */ + fpdu->incomplete_bytes = 0; + break; + } + } while (mpa_buf->tcp_payload_len && !rc); + + return rc; + +err: qed_iwarp_ll2_post_rx(p_hwfn, buf, p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle); @@ -1989,11 +2222,27 @@ static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle, kfree(buffer); } +/* The only slowpath for iwarp ll2 is unalign flush. When this completion + * is received, need to reset the FPDU. + */ void qed_iwarp_ll2_slowpath(void *cxt, u8 connection_handle, u32 opaque_data_0, u32 opaque_data_1) { + struct unaligned_opaque_data unalign_data; + struct qed_hwfn *p_hwfn = cxt; + struct qed_iwarp_fpdu *fpdu; + + qed_iwarp_mpa_get_data(p_hwfn, &unalign_data, + opaque_data_0, opaque_data_1); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n", + unalign_data.cid); + + fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)unalign_data.cid); + if (fpdu) + memset(fpdu, 0, sizeof(*fpdu)); } static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) @@ -2194,6 +2443,14 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, iwarp_info->ll2_mpa_handle); if (rc) goto err; + + iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps, + sizeof(*iwarp_info->partial_fpdus), + GFP_KERNEL); + if (!iwarp_info->partial_fpdus) + goto err; + + iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps; /* The mpa_bufs array serves for pending RX packets received on the * mpa ll2 that don't have place on the tx ring and require later * processing. We can't fail on allocation of such a struct therefore diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index 2c53fe46345c..858755cafd2b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -68,6 +68,17 @@ struct qed_iwarp_ll2_mpa_buf { u8 placement_offset; }; +struct qed_iwarp_fpdu { + struct qed_iwarp_ll2_buff *mpa_buf; + void *mpa_frag_virt; + dma_addr_t mpa_frag; + dma_addr_t pkt_hdr; + u16 mpa_frag_len; + u16 fpdu_length; + u16 incomplete_bytes; + u8 pkt_hdr_size; +}; + struct qed_iwarp_info { struct list_head listen_list; /* qed_iwarp_listener */ struct list_head ep_list; /* qed_iwarp_ep */ @@ -87,7 +98,9 @@ struct qed_iwarp_info { u8 peer2peer; enum mpa_negotiation_mode mpa_rev; enum mpa_rtr_type rtr_type; + struct qed_iwarp_fpdu *partial_fpdus; struct qed_iwarp_ll2_mpa_buf *mpa_bufs; + u16 max_num_partial_fpdus; }; enum qed_iwarp_ep_state { -- cgit v1.2.3 From d531038eeb6dd25dbf88402f932bf0ea524de82e Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Mon, 9 Oct 2017 12:37:52 +0300 Subject: qed: Add support for freeing two ll2 buffers for corner cases When posting a packet on the ll2 tx, we can provide a cookie that will be returned upon tx completion. This cookie is the ll2 iwarp buffer which is then reposted to the rx ring. 
Part of the unaligned mpa flow is determining when a buffer can be reposted. Each buffer needs to be sent only once as a cookie on the tx ring. In the packed fpdu case, only the last packet will be sent with the buffer, meaning we need to handle the case that a cookie can be NULL on tx complete. In addition, when an fpdu splits over two buffers, but there are no more fpdus on the second buffer, two buffers need to be provided as a cookie. To avoid changing the ll2 interface to provide two cookies, we introduce a piggy buf pointer, relevant for iWARP only, that holds a pointer to a second buffer that needs to be released during tx completion. Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 25 +++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_iwarp.h | 1 + 2 files changed, 26 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 83b147fdacde..8b17369af9ef 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1846,6 +1846,12 @@ qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn, /* vlan overload with enum iwarp_ll2_tx_queues */ tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE; + /* special case of unaligned packet and not packed, need to send + * both buffers as cookie to release. + */ + if (tcp_payload_size == fpdu->incomplete_bytes) + fpdu->mpa_buf->piggy_buf = buf; + ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle; /* Set first fragment to header */ @@ -2195,9 +2201,19 @@ static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle, bool b_last_fragment, bool b_last_packet) { struct qed_iwarp_ll2_buff *buffer = cookie; + struct qed_iwarp_ll2_buff *piggy; struct qed_hwfn *p_hwfn = cxt; + if (!buffer) /* can happen in packed mpa unaligned... */ + return; + /* this was originally an rx packet, post it back */ + piggy = buffer->piggy_buf; + if (piggy) { + buffer->piggy_buf = NULL; + qed_iwarp_ll2_post_rx(p_hwfn, piggy, connection_handle); + } + qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle); if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle) @@ -2216,6 +2232,15 @@ static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle, if (!buffer) return; + if (buffer->piggy_buf) { + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + buffer->piggy_buf->buff_size, + buffer->piggy_buf->data, + buffer->piggy_buf->data_phys_addr); + + kfree(buffer->piggy_buf); + } + dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size, buffer->data, buffer->data_phys_addr); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index 858755cafd2b..58db51af26bd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -55,6 +55,7 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state); #define QED_IWARP_HANDLE_INVAL (0xff) struct qed_iwarp_ll2_buff { + struct qed_iwarp_ll2_buff *piggy_buf; void *data; dma_addr_t data_phys_addr; u32 buff_size; -- cgit v1.2.3 From c7d1d839999476aac0d7e16732722285a9c30cce Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Mon, 9 Oct 2017 12:37:53 +0300 Subject: qed: Add support for MPA header being split over two tcp packets There is a special case where an MPA header is split over two tcp packets; in this case we need to wait for the next packet to get the fpdu length.
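Since the MPA length field is two bytes, big-endian on the wire, the reassembly is plain byte arithmetic. A minimal sketch (hypothetical helper name), assuming QED_IWARP_FPDU_LEN_WITH_PAD() as defined earlier in this series, i.e. data plus the 2-byte length field, padded to a multiple of 4, plus the 4-byte CRC digest:

/* Illustration only: joining a 2-byte MPA length whose bytes arrive
 * in two different tcp segments.
 */
static u16 example_join_mpa_len(u8 high_byte_seg1, u8 low_byte_seg2)
{
	u16 mpa_len;

	mpa_len = high_byte_seg1 << BITS_PER_BYTE;	/* first segment */
	mpa_len |= low_byte_seg2;			/* next segment */

	return QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
}

For example, an MPA length of 28 yields ALIGN(28 + 2, 4) + 4 = 36 bytes of fpdu on the wire.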
We use the incomplete_bytes to mark this fpdu as a "special" one which requires updating the length with the next packet Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 36 ++++++++++++++++++++++++++++- drivers/net/ethernet/qlogic/qed/qed_iwarp.h | 6 +++++ 2 files changed, 41 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 8b17369af9ef..299494225f44 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1742,6 +1742,7 @@ enum qed_iwarp_mpa_pkt_type { QED_IWARP_MPA_PKT_UNALIGNED }; +#define QED_IWARP_INVALID_FPDU_LENGTH 0xffff #define QED_IWARP_MPA_FPDU_LENGTH_SIZE (2) #define QED_IWARP_MPA_CRC32_DIGEST_SIZE (4) @@ -1774,6 +1775,15 @@ qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn, goto out; } + /* special case of one byte remaining... + * lower byte will be read next packet + */ + if (tcp_payload_len == 1) { + fpdu->fpdu_length = *mpa_data << BITS_PER_BYTE; + pkt_type = QED_IWARP_MPA_PKT_PARTIAL; + goto out; + } + mpa_len = ntohs(*((u16 *)(mpa_data))); fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len); @@ -1802,7 +1812,9 @@ qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf, fpdu->mpa_frag = buf->data_phys_addr + pkt_data->first_mpa_offset; fpdu->mpa_frag_virt = (u8 *)(buf->data) + pkt_data->first_mpa_offset; - if (tcp_payload_size < fpdu->fpdu_length) + if (tcp_payload_size == 1) + fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH; + else if (tcp_payload_size < fpdu->fpdu_length) fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size; else fpdu->incomplete_bytes = 0; /* complete fpdu */ @@ -1810,6 +1822,27 @@ qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf, fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes; } +static void +qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn, + struct qed_iwarp_fpdu *fpdu, u8 *mpa_data) +{ + u16 mpa_len; + + /* Update incomplete packets if needed */ + if (fpdu->incomplete_bytes == QED_IWARP_INVALID_FPDU_LENGTH) { + /* Missing lower byte is now available */ + mpa_len = fpdu->fpdu_length | *mpa_data; + fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len); + fpdu->mpa_frag_len = fpdu->fpdu_length; + /* one byte of hdr */ + fpdu->incomplete_bytes = fpdu->fpdu_length - 1; + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n", + mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes); + } +} + static int qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu, @@ -1960,6 +1993,7 @@ qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn, curr_pkt->first_mpa_offset += fpdu->fpdu_length; break; case QED_IWARP_MPA_PKT_UNALIGNED: + qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data); rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf, mpa_buf->tcp_payload_len, pkt_type); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index 58db51af26bd..c58793a47774 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -69,6 +69,12 @@ struct qed_iwarp_ll2_mpa_buf { u8 placement_offset; }; +/* In some cases a fpdu will arrive with only one byte of the header, in this + * case the fpdu_length will be partial (contain only higher byte and + * incomplete bytes will contain the invalid value + */ 
+#define QED_IWARP_INVALID_INCOMPLETE_BYTES 0xffff + struct qed_iwarp_fpdu { struct qed_iwarp_ll2_buff *mpa_buf; void *mpa_frag_virt; -- cgit v1.2.3 From 1e28eaad07ea1e2d6537586529e87cbc1d698ffd Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Mon, 9 Oct 2017 12:37:54 +0300 Subject: qed: Add iWARP support for fpdu spanned over more than two tcp packets We continue to maintain a maximum of three buffers per fpdu, to ensure that there are enough buffers for additional unaligned mpa packets. To support this, if a fpdu is split over more than two tcp packets, we use an intermediate buffer to copy the data to the previous buffer, then we can release the data. We need an intermediate buffer as the initial buffer partial packet could be located at the end of the packet, not leaving room for additional data. This is a corner case, and will usually not be the case. Signed-off-by: Michal Kalderon Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 193 ++++++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_iwarp.h | 1 + 2 files changed, 194 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index 299494225f44..b2b1f87864ef 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1420,6 +1420,7 @@ void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn) qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1); kfree(iwarp_info->mpa_bufs); kfree(iwarp_info->partial_fpdus); + kfree(iwarp_info->mpa_intermediate_buf); } int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams) @@ -1762,6 +1763,11 @@ char *pkt_type_str[] = { "QED_IWARP_MPA_PKT_UNALIGNED" }; +static int +qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn, + struct qed_iwarp_fpdu *fpdu, + struct qed_iwarp_ll2_buff *buf); + static enum qed_iwarp_mpa_pkt_type qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu, @@ -1822,6 +1828,68 @@ qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf, fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes; } +static int +qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn, + struct qed_iwarp_fpdu *fpdu, + struct unaligned_opaque_data *pkt_data, + struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size) +{ + u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf; + int rc; + + /* need to copy the data from the partial packet stored in fpdu + * to the new buf, for this we also need to move the data currently + * placed on the buf. 
The assumption is that the buffer is big enough + * since fpdu_length <= mss, we use an intermediate buffer since + * we may need to copy the new data to an overlapping location + */ + if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) { + DP_ERR(p_hwfn, + "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n", + buf->buff_size, fpdu->mpa_frag_len, + tcp_payload_size, fpdu->incomplete_bytes); + return -EINVAL; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n", + fpdu->mpa_frag_virt, fpdu->mpa_frag_len, + (u8 *)(buf->data) + pkt_data->first_mpa_offset, + tcp_payload_size); + + memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len); + memcpy(tmp_buf + fpdu->mpa_frag_len, + (u8 *)(buf->data) + pkt_data->first_mpa_offset, + tcp_payload_size); + + rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf); + if (rc) + return rc; + + /* If we managed to post the buffer copy the data to the new buffer + * o/w this will occur in the next round... + */ + memcpy((u8 *)(buf->data), tmp_buf, + fpdu->mpa_frag_len + tcp_payload_size); + + fpdu->mpa_buf = buf; + /* fpdu->pkt_hdr remains as is */ + /* fpdu->mpa_frag is overridden with new buf */ + fpdu->mpa_frag = buf->data_phys_addr; + fpdu->mpa_frag_virt = buf->data; + fpdu->mpa_frag_len += tcp_payload_size; + + fpdu->incomplete_bytes -= tcp_payload_size; + + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n", + buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size, + fpdu->incomplete_bytes); + + return 0; +} + static void qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu, u8 *mpa_data) @@ -1843,6 +1911,90 @@ qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn, } } +#define QED_IWARP_IS_RIGHT_EDGE(_curr_pkt) \ + (GET_FIELD((_curr_pkt)->flags, \ + UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE)) + +/* This function is used to recycle a buffer using the ll2 drop option. It + * uses the mechanism to ensure that all buffers posted to tx before this one + * were completed. The buffer sent here will be sent as a cookie in the tx + * completion function and can then be reposted to rx chain when done. The flow + * that requires this is the flow where a FPDU splits over more than 3 tcp + * segments. In this case the driver needs to re-post a rx buffer instead of + * the one received, but driver can't simply repost a buffer it copied from + * as there is a case where the buffer was originally a packed FPDU, and is + * partially posted to FW. Driver needs to ensure FW is done with it. 
+ */ +static int +qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn, + struct qed_iwarp_fpdu *fpdu, + struct qed_iwarp_ll2_buff *buf) +{ + struct qed_ll2_tx_pkt_info tx_pkt; + u8 ll2_handle; + int rc; + + memset(&tx_pkt, 0, sizeof(tx_pkt)); + tx_pkt.num_of_bds = 1; + tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP; + tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; + tx_pkt.first_frag = fpdu->pkt_hdr; + tx_pkt.first_frag_len = fpdu->pkt_hdr_size; + buf->piggy_buf = NULL; + tx_pkt.cookie = buf; + + ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle; + + rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true); + if (rc) + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Can't drop packet rc=%d\n", rc); + + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n", + (unsigned long int)tx_pkt.first_frag, + tx_pkt.first_frag_len, buf, rc); + + return rc; +} + +static int +qed_iwarp_win_right_edge(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu) +{ + struct qed_ll2_tx_pkt_info tx_pkt; + u8 ll2_handle; + int rc; + + memset(&tx_pkt, 0, sizeof(tx_pkt)); + tx_pkt.num_of_bds = 1; + tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; + tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; + + tx_pkt.first_frag = fpdu->pkt_hdr; + tx_pkt.first_frag_len = fpdu->pkt_hdr_size; + tx_pkt.enable_ip_cksum = true; + tx_pkt.enable_l4_cksum = true; + tx_pkt.calc_ip_len = true; + /* vlan overload with enum iwarp_ll2_tx_queues */ + tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE; + + ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle; + + rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true); + if (rc) + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Can't send right edge rc=%d\n", rc); + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n", + tx_pkt.num_of_bds, + (unsigned long int)tx_pkt.first_frag, + tx_pkt.first_frag_len, rc); + + return rc; +} + static int qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu, @@ -1971,6 +2123,20 @@ qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn, mpa_buf->tcp_payload_len, mpa_buf->placement_offset); + if (!QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) { + mpa_buf->tcp_payload_len = 0; + break; + } + + rc = qed_iwarp_win_right_edge(p_hwfn, fpdu); + + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Can't send FPDU:reset rc=%d\n", rc); + memset(fpdu, 0, sizeof(*fpdu)); + break; + } + mpa_buf->tcp_payload_len = 0; break; case QED_IWARP_MPA_PKT_PACKED: @@ -1994,6 +2160,28 @@ qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn, break; case QED_IWARP_MPA_PKT_UNALIGNED: qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data); + if (mpa_buf->tcp_payload_len < fpdu->incomplete_bytes) { + /* special handling of fpdu split over more + * than 2 segments + */ + if (QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) { + rc = qed_iwarp_win_right_edge(p_hwfn, + fpdu); + /* packet will be re-processed later */ + if (rc) + return rc; + } + + rc = qed_iwarp_cp_pkt(p_hwfn, fpdu, curr_pkt, + buf, + mpa_buf->tcp_payload_len); + if (rc) /* packet will be re-processed later */ + return rc; + + mpa_buf->tcp_payload_len = 0; + break; + } + rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf, mpa_buf->tcp_payload_len, pkt_type); @@ -2510,6 +2698,11 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, goto err; iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps; + + iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL); + if (!iwarp_info->mpa_intermediate_buf) + goto err; + /* The mpa_bufs array serves for 
pending RX packets received on the * mpa ll2 that don't have place on the tx ring and require later * processing. We can't fail on allocation of such a struct therefore diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index c58793a47774..c1ecd743305f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -107,6 +107,7 @@ struct qed_iwarp_info { enum mpa_rtr_type rtr_type; struct qed_iwarp_fpdu *partial_fpdus; struct qed_iwarp_ll2_mpa_buf *mpa_bufs; + u8 *mpa_intermediate_buf; u16 max_num_partial_fpdus; }; -- cgit v1.2.3 From bb428a5c4df5f50acdce89449e476faa0b295e95 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 9 Oct 2017 16:59:48 +0300 Subject: net/mlx4: Fix endianness issue in qp context params Should take care of the endianness before assigning to params2 field. Fixes: 53f33ae295a5 ("net/mlx4_core: Port aggregation upper layer interface") Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_resources.c | 2 +- drivers/net/ethernet/mellanox/mlx4/qp.c | 2 +- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c index 5a47f9669621..6883ac75d37f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c @@ -53,7 +53,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, if (is_tx) { context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP) - context->params2 |= MLX4_QP_BIT_FPP; + context->params2 |= cpu_to_be32(MLX4_QP_BIT_FPP); } else { context->sq_size_stride = ilog2(TXBB_SIZE) - 4; diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 728a2fb1f5c0..203320923340 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -925,7 +925,7 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, context->flags &= cpu_to_be32(~(0xf << 28)); context->flags |= cpu_to_be32(states[i + 1] << 28); if (states[i + 1] != MLX4_QP_STATE_RTR) - context->params2 &= ~MLX4_QP_BIT_FPP; + context->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP); err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1], context, 0, 0, qp); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index fabb53379727..04304dd894c6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -3185,7 +3185,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev, optpar = be32_to_cpu(*(__be32 *) inbox->buf); if (slave != mlx4_master_func_num(dev)) { - qp_ctx->params2 &= ~MLX4_QP_BIT_FPP; + qp_ctx->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP); /* setting QP rate-limit is disallowed for VFs */ if (qp_ctx->rate_limit_params) return -EPERM; -- cgit v1.2.3 From b71322d9db924e1ae33b9fd00a2e37aee09df81f Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 9 Oct 2017 16:59:49 +0300 Subject: net/mlx4_core: Fix cast warning in fw.c Fix the following SPARSE warning, in MLX4_GET() macro: drivers/net/ethernet/mellanox/mlx4/fw.c:233:9: warning: cast to restricted __be64 Fixes: 17d5ceb6e43e 
("net/mlx4_core: Fix unaligned accesses") Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/fw.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 16c09949afd5..634f603f941c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -57,12 +57,12 @@ MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)"); #define MLX4_GET(dest, source, offset) \ do { \ void *__p = (char *) (source) + (offset); \ - u64 val; \ - switch (sizeof(dest)) { \ + __be64 val; \ + switch (sizeof(dest)) { \ case 1: (dest) = *(u8 *) __p; break; \ case 2: (dest) = be16_to_cpup(__p); break; \ case 4: (dest) = be32_to_cpup(__p); break; \ - case 8: val = get_unaligned((u64 *)__p); \ + case 8: val = get_unaligned((__be64 *)__p); \ (dest) = be64_to_cpu(val); break; \ default: __buggy_use_of_MLX4_GET(); \ } \ -- cgit v1.2.3 From 7ba5e7bd64a5715f624d39815f567842178afb72 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 9 Oct 2017 16:59:50 +0300 Subject: net/mlx4_en: Use __force to fix a sparse warning in TX datapath In TX data-path, we intentionally do not byte-swap, as documented in code and in the cited commit log. This fixes sparse warning: en_tx.c:720:23: warning: incorrect type in argument 1 (different base types) en_tx.c:720:23: expected unsigned int [unsigned] [usertype] en_tx.c:720:23: got restricted __be32 [usertype] doorbell_qpn Fixes: 492f5add4be8 ("net/mlx4_en: Doorbell is byteswapped in Little Endian archs") Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 8a32a8f7f9c0..2cc82dc07397 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -718,7 +718,7 @@ void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring) #else iowrite32be( #endif - ring->doorbell_qpn, + (__force u32)ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); } -- cgit v1.2.3 From b48be9978e4b21b28b7349f57574dae21378ddd5 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 7 Sep 2017 15:19:12 -0700 Subject: i40e: fix flags declaration Since we don't yet have more than 32 flags, we'll use a u32 for both the hw_features and flag field. Should we gain more flags in the future, we may need to convert to a u64 or separate flags out into two fields. This was overlooked in the previous commit 2781de2134c4 ("i40e/i40evf: organize and re-number feature flags"), where the feature flag was not converted form u64 to u32. 
Signed-off-by: Jacob Keller Reviewed-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 18c453a3e728..7baf6d8a84dd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -424,7 +424,7 @@ struct i40e_pf { #define I40E_HW_PORT_ID_VALID BIT(17) #define I40E_HW_RESTART_AUTONEG BIT(18) - u64 flags; + u32 flags; #define I40E_FLAG_RX_CSUM_ENABLED BIT(0) #define I40E_FLAG_MSI_ENABLED BIT(1) #define I40E_FLAG_MSIX_ENABLED BIT(2) -- cgit v1.2.3 From 784548c40d6f43eff2297220ad7800dc04be03c6 Mon Sep 17 00:00:00 2001 From: Lihong Yang Date: Thu, 7 Sep 2017 08:05:46 -0400 Subject: i40e: use the safe hash table iterator when deleting mac filters This patch replaces hash_for_each function with hash_for_each_safe when calling __i40e_del_filter. The hash_for_each_safe function is the right one to use when iterating over a hash table to safely remove a hash entry. Otherwise, incorrect values may be read from freed memory. Detected by CoverityScan, CID 1402048 Read from pointer after free Signed-off-by: Lihong Yang Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 04568137e029..c062d74d21f3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2883,6 +2883,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) struct i40e_mac_filter *f; struct i40e_vf *vf; int ret = 0; + struct hlist_node *h; int bkt; /* validate the request */ @@ -2921,7 +2922,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) /* Delete all the filters for this VSI - we're going to kill it * anyway. */ - hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) + hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) __i40e_del_filter(vsi, f); spin_unlock_bh(&vsi->mac_filter_hash_lock); -- cgit v1.2.3 From c766b9af9abe897eb5480ef9ef3914fc07b815c5 Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Thu, 7 Sep 2017 08:05:47 -0400 Subject: i40evf: fix mac filter removal timing issue Due to the asynchronous nature in which mac filters are added and deleted, there exists a bug in which filters are erroneously removed if removed then added again quickly. The events are as such: - filter marked for removal - same filter is re-added before watchdog that cleans up filters - we skip re-adding the filter because we have it already in the list - watchdog filter cleanup kicks off and filter is removed So when we were re-adding the same filter, it didn't actually get added because it already existed in the list, but was marked for removal and had yet to actually be removed. This patch fixes the issue by making sure that when adding a filter, if we find it already existing in our list, make sure it is not marked to be removed. 
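In outline, the fixed add path behaves as follows (a simplified sketch of the hunk below; locking and error handling elided):

	f = i40evf_find_filter(adapter, macaddr);
	if (!f) {
		/* not found: allocate it, link it in, and mark it
		 * for addition on the next watchdog pass
		 */
	} else {
		/* found again before the watchdog ran: cancel any
		 * pending delete, or the filter would still be removed
		 */
		f->remove = false;
	}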
Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 1d2fc898b664..f62d9565c7b5 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -880,6 +880,8 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter, list_add_tail(&f->list, &adapter->mac_filter_list); f->add = true; adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER; + } else { + f->remove = false; } clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); -- cgit v1.2.3 From 427025592955d245997b12923111e85f07850d5f Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 7 Sep 2017 08:05:48 -0400 Subject: i40e/i40evf: fix incorrect default ITR values on driver load The ITR register expects to be programmed in units of 2 microseconds. Because of this, all of the drivers I40E_ITR_* constants are in terms of this 2 microsecond register. Unfortunately, the rx_itr_default value is expected to be programmed in microseconds. Effectively the driver defaults to an ITR value of half the expected value (in terms of minimum microseconds between interrupts). Fix this by changing the default values to be calculated using ITR_REG_TO_USEC macro which indicates that we're converting from the register units into microseconds. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 4 ++-- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 6 ++++-- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 6 ++++-- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 4 ++-- 4 files changed, 12 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 60b11fdeca2d..d4b0cc36afb1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8983,8 +8983,8 @@ static int i40e_sw_init(struct i40e_pf *pf) I40E_FLAG_MSIX_ENABLED; /* Set default ITR */ - pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF; - pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF; + pf->rx_itr_default = I40E_ITR_RX_DEF; + pf->tx_itr_default = I40E_ITR_TX_DEF; /* Depending on PF configurations, it is possible that the RSS * maximum might end up larger than the available queues diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index a4e3e665a1a1..c3156aa3f709 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -38,8 +38,10 @@ #define I40E_ITR_8K 0x003E #define I40E_ITR_4K 0x007A #define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */ -#define I40E_ITR_RX_DEF I40E_ITR_20K -#define I40E_ITR_TX_DEF I40E_ITR_20K +#define I40E_ITR_RX_DEF (ITR_REG_TO_USEC(I40E_ITR_20K) | \ + I40E_ITR_DYNAMIC) +#define I40E_ITR_TX_DEF (ITR_REG_TO_USEC(I40E_ITR_20K) | \ + I40E_ITR_DYNAMIC) #define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ #define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */ #define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index d8ca802a71a9..8f9830d7649a 
100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -38,8 +38,10 @@ #define I40E_ITR_8K 0x003E #define I40E_ITR_4K 0x007A #define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */ -#define I40E_ITR_RX_DEF I40E_ITR_20K -#define I40E_ITR_TX_DEF I40E_ITR_20K +#define I40E_ITR_RX_DEF (ITR_REG_TO_USEC(I40E_ITR_20K) | \ + I40E_ITR_DYNAMIC) +#define I40E_ITR_TX_DEF (ITR_REG_TO_USEC(I40E_ITR_20K) | \ + I40E_ITR_DYNAMIC) #define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */ #define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */ #define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index f62d9565c7b5..5bcbd46e2f6c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1223,7 +1223,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) tx_ring->netdev = adapter->netdev; tx_ring->dev = &adapter->pdev->dev; tx_ring->count = adapter->tx_desc_count; - tx_ring->tx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF); + tx_ring->tx_itr_setting = I40E_ITR_TX_DEF; if (adapter->flags & I40EVF_FLAG_WB_ON_ITR_CAPABLE) tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR; @@ -1232,7 +1232,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) rx_ring->netdev = adapter->netdev; rx_ring->dev = &adapter->pdev->dev; rx_ring->count = adapter->rx_desc_count; - rx_ring->rx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF); + rx_ring->rx_itr_setting = I40E_ITR_RX_DEF; } adapter->num_active_queues = num_active_queues; -- cgit v1.2.3 From dbadbbe235f82f13224c85d29e65cf859afaa18c Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 7 Sep 2017 08:05:49 -0400 Subject: i40e/i40evf: always set the CLEARPBA flag when re-enabling interrupts In the past we changed driver behavior to not clear the PBA when re-enabling interrupts. This change was motivated by the flawed belief that clearing the PBA would cause a lost interrupt if a receive interrupt occurred while interrupts were disabled. According to empirical testing this isn't the case. Additionally, the data sheet specifically says that we should set the CLEARPBA bit when re-enabling interrupts in a polling setup. 
This reverts commit 40d72a509862 ("i40e/i40evf: don't lose interrupts") Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 5 +---- drivers/net/ethernet/intel/i40e/i40e_main.c | 11 +++++------ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 6 ++---- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 2 +- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 4 +--- 5 files changed, 10 insertions(+), 18 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 7baf6d8a84dd..8139b4ee1dc3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -949,9 +949,6 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) struct i40e_hw *hw = &pf->hw; u32 val; - /* definitely clear the PBA here, as this function is meant to - * clean out all previous interrupts AND enable the interrupt - */ val = I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); @@ -960,7 +957,7 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) } void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf); -void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba); +void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf); int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); int i40e_open(struct net_device *netdev); int i40e_close(struct net_device *netdev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index d4b0cc36afb1..00a83afb02e9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3403,15 +3403,14 @@ void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) /** * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 * @pf: board private structure - * @clearpba: true when all pending interrupt events should be cleared **/ -void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba) +void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) { struct i40e_hw *hw = &pf->hw; u32 val; val = I40E_PFINT_DYN_CTL0_INTENA_MASK | - (clearpba ? 
I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) | + I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); wr32(hw, I40E_PFINT_DYN_CTL0, val); @@ -3597,7 +3596,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi) for (i = 0; i < vsi->num_q_vectors; i++) i40e_irq_dynamic_enable(vsi, i); } else { - i40e_irq_dynamic_enable_icr0(pf, true); + i40e_irq_dynamic_enable_icr0(pf); } i40e_flush(&pf->hw); @@ -3746,7 +3745,7 @@ enable_intr: wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); if (!test_bit(__I40E_DOWN, pf->state)) { i40e_service_event_schedule(pf); - i40e_irq_dynamic_enable_icr0(pf, false); + i40e_irq_dynamic_enable_icr0(pf); } return ret; @@ -8455,7 +8454,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) i40e_flush(hw); - i40e_irq_dynamic_enable_icr0(pf, true); + i40e_irq_dynamic_enable_icr0(pf); return err; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 3bd176606c09..616abf79253e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2202,9 +2202,7 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr) u32 val; val = I40E_PFINT_DYN_CTLN_INTENA_MASK | - /* Don't clear PBA because that can cause lost interrupts that - * came in while we were cleaning/polling - */ + I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT); @@ -2241,7 +2239,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, /* If we don't have MSIX, then we only need to re-enable icr0 */ if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) { - i40e_irq_dynamic_enable_icr0(vsi->back, false); + i40e_irq_dynamic_enable_icr0(vsi->back); return; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index c062d74d21f3..10298956a81b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1358,7 +1358,7 @@ err_alloc: i40e_free_vfs(pf); err_iov: /* Re-enable interrupt 0. */ - i40e_irq_dynamic_enable_icr0(pf, false); + i40e_irq_dynamic_enable_icr0(pf); return ret; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 37e1de886d48..fe817e2b6fef 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1409,9 +1409,7 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr) u32 val; val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - /* Don't clear PBA because that can cause lost interrupts that - * came in while we were cleaning/polling - */ + I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | (itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT); -- cgit v1.2.3 From 7362be9eeed01980bfa03cf49737703a0e43fe50 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 7 Sep 2017 08:05:50 -0400 Subject: i40e: reduce lrxqthresh from 2 to 1 The lrxq thresh value tells hardware to immediately interrupt when there are fewer than N*64 packets left in the ring. Counter intuitively, empirical testing has shown that decreasing this value from 2 to 1, and thus changing from an immediate interrupt at fewer than 128 descriptors down to 64 descriptors causes a small increase in the maximum total packets per second we can receive. 
This increase occurs even when we're polling with interrupts masked, as the hardware must still handle interrupts internally even if we've disabled them in software. Also reduce the value for any VFs we allocate. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 00a83afb02e9..74875ddaeb33 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3030,7 +3030,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) if (hw->revision_id == 0) rx_ctx.lrxqthresh = 0; else - rx_ctx.lrxqthresh = 2; + rx_ctx.lrxqthresh = 1; rx_ctx.crcstrip = 1; rx_ctx.l2tsel = 1; /* this controls whether VLAN is stripped from inner headers */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 10298956a81b..83727906a386 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -639,7 +639,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, rx_ctx.dsize = 1; /* default values */ - rx_ctx.lrxqthresh = 2; + rx_ctx.lrxqthresh = 1; rx_ctx.crcstrip = 1; rx_ctx.prefena = 1; rx_ctx.l2tsel = 1; -- cgit v1.2.3 From 11f29003d6376fb123b7c3779dba49bb56fb0815 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 7 Sep 2017 08:05:51 -0400 Subject: i40e/i40evf: bump tail only in multiples of 8 Hardware only fetches descriptors on cachelines of 8, essentially ignoring the lower 3 bits of the tail register. Thus, it is pointless to bump tail by an unaligned access as the hardware will ignore some of the new descriptors we allocated. Thus, it's ideal if we can ensure tail writes are always aligned to 8. At first, it seems like we'd already do this, since we allocate descriptors in batches which are a multiple of 8. Since we'd always increment by a multiple of 8, it seems like the value should always be aligned. However, this ignores allocation failures. If we fail to allocate a buffer, our tail register will become unaligned. Once it has become unaligned it will essentially be stuck unaligned until a buffer allocation happens to fail at the exact amount necessary to re-align it. We can do better, by simply rounding down the number of buffers we're about to allocate (cleaned_count) such that "next_to_clean + cleaned_count" is rounded to the nearest multiple of 8. We do this by calculating how far off that value is and subtracting it from the cleaned_count. This essentially defers allocation of buffers if they're going to be ignored by hardware anyways, and re-aligns our next_to_use and tail values after a failure to allocate a descriptor. This calculation ensures that we always align the tail writes in a way the hardware expects and don't unnecessarily allocate buffers which won't be fetched immediately. 
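The rounding described above reduces to a single statement; a worked example (values illustrative only) shows the effect:

	/* With ntu == 13 and cleaned_count == 22: (13 + 22) & 0x7 == 3,
	 * so three buffers are deferred and the tail write lands on
	 * 13 + 19 == 32, a multiple of 8. If the sum is already aligned,
	 * nothing is dropped.
	 */
	cleaned_count -= (ntu + cleaned_count) & 0x7;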
Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 9 +++++++++ drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 9 +++++++++ 2 files changed, 18 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 616abf79253e..a23306f04e00 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1372,6 +1372,15 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) union i40e_rx_desc *rx_desc; struct i40e_rx_buffer *bi; + /* Hardware only fetches new descriptors in cache lines of 8, + * essentially ignoring the lower 3 bits of the tail register. We want + * to ensure our tail writes are aligned to avoid unnecessary work. We + * can't simply round down the cleaned count, since we might fail to + * allocate some buffers. What we really want is to ensure that + * next_to_used + cleaned_count produces an aligned value. + */ + cleaned_count -= (ntu + cleaned_count) & 0x7; + /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) return false; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index fe817e2b6fef..6806ada11490 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -711,6 +711,15 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) union i40e_rx_desc *rx_desc; struct i40e_rx_buffer *bi; + /* Hardware only fetches new descriptors in cache lines of 8, + * essentially ignoring the lower 3 bits of the tail register. We want + * to ensure our tail writes are aligned to avoid unnecessary work. We + * can't simply round down the cleaned count, since we might fail to + * allocate some buffers. What we really want is to ensure that + * next_to_used + cleaned_count produces an aligned value. + */ + cleaned_count -= (ntu + cleaned_count) & 0x7; + /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) return false; -- cgit v1.2.3 From 95bc2fb4c6c7d23db6dc54a3d49bdbadb13c392b Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 7 Sep 2017 08:05:52 -0400 Subject: i40e/i40evf: bundle more descriptors when allocating buffers Double the number of descriptors we'll bundle into one tail bump when receiving. Empirical testing has shown that we reduce CPU utilization and don't appear to reduce throughput or packet rate. 32 seems to be the sweet spot, as it's half the default polling budget, so we'd essentially reduce from 4 tail writes when polling down to 2. Increasing this up to 64 appears to have negative impacts as it may become possible that we don't bump the tail each time we get polled, which could cause a long delay between returning descriptors to the hardware. 
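Under the defaults the message cites, the arithmetic is: a full NAPI poll consumes a budget of 64 descriptors, so a write-back threshold of 32 refills the ring in two batches where a threshold of 16 took four. Simplified from the rx clean loop:

	if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
		failure = failure ||
			  i40e_alloc_rx_buffers(rx_ring, cleaned_count);
		cleaned_count = 0;
	}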
Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 2 +- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index c3156aa3f709..ff57ae451524 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -208,7 +208,7 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, } /* How many Rx Buffers do we bundle into one write to the hardware ? */ -#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define I40E_RX_BUFFER_WRITE 32 /* Must be power of 2 */ #define I40E_RX_INCREMENT(r, i) \ do { \ (i)++; \ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 8f9830d7649a..8d26c85d12e1 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -191,7 +191,7 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc, } /* How many Rx Buffers do we bundle into one write to the hardware ? */ -#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */ +#define I40E_RX_BUFFER_WRITE 32 /* Must be power of 2 */ #define I40E_RX_INCREMENT(r, i) \ do { \ (i)++; \ -- cgit v1.2.3 From 6f853d4f8e93eeace504b021e05dfdbeb4d3b40f Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Thu, 7 Sep 2017 08:05:53 -0400 Subject: i40e: allow XPS with QoS enabled Recently, the kernel gained support for enabling XPS and QoS at the same time. Thus, we no longer need to worry about the number of traffic classes when enabling XPS. Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 74875ddaeb33..b26f615bed5a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2879,23 +2879,18 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi) **/ static void i40e_config_xps_tx_ring(struct i40e_ring *ring) { - struct i40e_vsi *vsi = ring->vsi; int cpu; if (!ring->q_vector || !ring->netdev) return; - if ((vsi->tc_config.numtc <= 1) && - !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state)) { - cpu = cpumask_local_spread(ring->q_vector->v_idx, -1); - netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu), - ring->queue_index); - } + /* We only initialize XPS once, so as not to overwrite user settings */ + if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state)) + return; - /* schedule our worker thread which will take care of - * applying the new filter changes - */ - i40e_service_event_schedule(vsi->back); + cpu = cpumask_local_spread(ring->q_vector->v_idx, -1); + netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu), + ring->queue_index); } /** -- cgit v1.2.3 From b861fb762a26144593387b84cd078ef86d99cf6f Mon Sep 17 00:00:00 2001 From: Lihong Yang Date: Thu, 7 Sep 2017 08:05:54 -0400 Subject: i40e: add check for return from find_first_bit call The find_first_bit function will return the size passed to search if the first set bit is not found. 
This patch adds the check in case that happens as the return value would be used as the index in an array and that would have caused the out-of-bounds access. Detected by CoverityScan, CID 1295969 Out-of-bounds access Signed-off-by: Lihong Yang Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 83727906a386..125dcd1d2233 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -306,6 +306,10 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, next_q = find_first_bit(&linklistmap, (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)); + if (unlikely(next_q == (I40E_MAX_VSI_QP * + I40E_VIRTCHNL_SUPPORTED_QTYPES))) + goto irq_list_done; + vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id); -- cgit v1.2.3 From 4988410f8d3a6fa04381072e2406a1d3979ffb95 Mon Sep 17 00:00:00 2001 From: Jayaprakash Shanmugam Date: Thu, 7 Sep 2017 08:05:55 -0400 Subject: i40e: Retry AQC GetPhyAbilities to overcome I2CRead hangs - When the I2C is busy, the PHY reads are delayed. The firmware will return EGAIN in these cases with an expectation that the SW will trigger the reads again - This patch retries the operation for a maximum period of 500ms Signed-off-by: Jayaprakash Shanmugam Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_common.c | 42 ++++++++++++++++++--------- drivers/net/ethernet/intel/i40e/i40e_type.h | 3 ++ drivers/net/ethernet/intel/i40evf/i40e_type.h | 3 ++ 3 files changed, 35 insertions(+), 13 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 60542beda7ad..53aad378d49c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1567,30 +1567,46 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw, struct i40e_aq_desc desc; i40e_status status; u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp); + u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0; if (!abilities) return I40E_ERR_PARAM; - i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_get_phy_abilities); + do { + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_get_phy_abilities); - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); - if (abilities_size > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); + if (abilities_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); - if (qualified_modules) - desc.params.external.param0 |= + if (qualified_modules) + desc.params.external.param0 |= cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES); - if (report_init) - desc.params.external.param0 |= + if (report_init) + desc.params.external.param0 |= cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES); - status = i40e_asq_send_command(hw, &desc, abilities, abilities_size, - cmd_details); + status = i40e_asq_send_command(hw, &desc, abilities, + abilities_size, cmd_details); - if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) - status = I40E_ERR_UNKNOWN_PHY; + if (status) + 
break; + + if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) { + status = I40E_ERR_UNKNOWN_PHY; + break; + } else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) { + usleep_range(1000, 2000); + total_delay++; + status = I40E_ERR_TIMEOUT; + } + } while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) && + (total_delay < max_delay)); + + if (status) + return status; if (report_init) { if (hw->mac.type == I40E_MAC_XL710 && diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 4b32b1d38a66..0410fcbdbb94 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -46,6 +46,9 @@ /* Max default timeout in ms, */ #define I40E_MAX_NVM_TIMEOUT 18000 +/* Max timeout in ms for the phy to respond */ +#define I40E_MAX_PHY_TIMEOUT 500 + /* Switch from ms to the 1usec global time (this is the GTIME resolution) */ #define I40E_MS_TO_GTIME(time) ((time) * 1000) diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h index 9364b67fff9c..213b773dfad6 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h @@ -46,6 +46,9 @@ /* Max default timeout in ms, */ #define I40E_MAX_NVM_TIMEOUT 18000 +/* Max timeout in ms for the phy to respond */ +#define I40E_MAX_PHY_TIMEOUT 500 + /* Switch from ms to the 1usec global time (this is the GTIME resolution) */ #define I40E_MS_TO_GTIME(time) ((time) * 1000) -- cgit v1.2.3 From 9bcc07f0651b3078f1c3164c710f72a558665345 Mon Sep 17 00:00:00 2001 From: Lihong Yang Date: Thu, 7 Sep 2017 08:05:56 -0400 Subject: i40e: use a local variable instead of calculating multiple times The computed result of I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES is used more than three times in function i40e_config_irq_link_list. Simply declare a local variable to store it to improve readability. 
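Reduced to its essence, the cleanup below replaces three copies of the product with one bound:

	u16 size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;

	next_q = find_first_bit(&linklistmap, size);
	/* ... */
	next_q = find_next_bit(&linklistmap, size, next_q + 1);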
Signed-off-by: Lihong Yang Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 125dcd1d2233..0c4fa225c7be 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -273,7 +273,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, struct i40e_hw *hw = &pf->hw; u16 vsi_queue_id, pf_queue_id; enum i40e_queue_type qtype; - u16 next_q, vector_id; + u16 next_q, vector_id, size; u32 reg, reg_idx; u16 itr_idx = 0; @@ -303,11 +303,9 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, vsi_queue_id + 1)); } - next_q = find_first_bit(&linklistmap, - (I40E_MAX_VSI_QP * - I40E_VIRTCHNL_SUPPORTED_QTYPES)); - if (unlikely(next_q == (I40E_MAX_VSI_QP * - I40E_VIRTCHNL_SUPPORTED_QTYPES))) + size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES; + next_q = find_first_bit(&linklistmap, size); + if (unlikely(next_q == size)) goto irq_list_done; vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; @@ -317,7 +315,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, wr32(hw, reg_idx, reg); - while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) { + while (next_q < size) { switch (qtype) { case I40E_QUEUE_TYPE_RX: reg_idx = I40E_QINT_RQCTL(pf_queue_id); @@ -331,12 +329,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id, break; } - next_q = find_next_bit(&linklistmap, - (I40E_MAX_VSI_QP * - I40E_VIRTCHNL_SUPPORTED_QTYPES), - next_q + 1); - if (next_q < - (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) { + next_q = find_next_bit(&linklistmap, size, next_q + 1); + if (next_q < size) { vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES; qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES; pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, -- cgit v1.2.3 From 3d7d7a86ec6ea5abaea30194eeb175e2a3d0bdc7 Mon Sep 17 00:00:00 2001 From: Rami Rosen Date: Sat, 16 Sep 2017 05:49:48 +0300 Subject: i40e: fix a typo This patch fixes a typo in i40e_vsi_alloc_arrays() documentation. The first parameter name should be "vsi" instead of "type". Signed-off-by: Rami Rosen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index b26f615bed5a..4de52001a2b9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -7688,7 +7688,7 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) /** * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi - * @type: VSI pointer + * @vsi: VSI pointer * @alloc_qvectors: a bool to specify if q_vectors need to be allocated. 
* * On error: returns error code (negative) -- cgit v1.2.3 From 2c4d36b7087538704fc9e3464d185dcc4d04e863 Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Wed, 6 Sep 2017 10:11:39 +0200 Subject: i40e: Avoid some useless variables and initializers in NVM functions Fixes: 09f79fd49d94 ("i40e: avoid NVM acquire deadlock during NVM update") Signed-off-by: Stefano Brivio Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_nvm.c | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 57505b1df98d..151d9cfb6ea4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -311,13 +311,10 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data) { - i40e_status ret_code = 0; - if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) - ret_code = i40e_read_nvm_word_aq(hw, offset, data); - else - ret_code = i40e_read_nvm_word_srctl(hw, offset, data); - return ret_code; + return i40e_read_nvm_word_aq(hw, offset, data); + + return i40e_read_nvm_word_srctl(hw, offset, data); } /** @@ -331,7 +328,7 @@ static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw, i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data) { - i40e_status ret_code = 0; + i40e_status ret_code; ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (ret_code) @@ -446,13 +443,10 @@ static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, u16 *words, u16 *data) { - i40e_status ret_code = 0; - if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) - ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data); - else - ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data); - return ret_code; + return i40e_read_nvm_buffer_aq(hw, offset, words, data); + + return i40e_read_nvm_buffer_srctl(hw, offset, words, data); } /** -- cgit v1.2.3 From c4c40e51f9c32c6dd8adf606624c930a1c4d9bbb Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 21 Jul 2017 11:36:23 -0700 Subject: e1000e: Fix error path in link detection In case of error from e1e_rphy(), the loop will exit early and "success" will be set to true erroneously. Signed-off-by: Benjamin Poirier Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/phy.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c index d78d47b41a71..86ff0969efb6 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.c +++ b/drivers/net/ethernet/intel/e1000e/phy.c @@ -1744,6 +1744,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, s32 ret_val = 0; u16 i, phy_status; + *success = false; for (i = 0; i < iterations; i++) { /* Some PHYs require the MII_BMSR register to be read * twice due to the link bit being sticky. 
No harm doing @@ -1763,16 +1764,16 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); if (ret_val) break; - if (phy_status & BMSR_LSTATUS) + if (phy_status & BMSR_LSTATUS) { + *success = true; break; + } if (usec_interval >= 1000) msleep(usec_interval / 1000); else udelay(usec_interval); } - *success = (i < iterations); - return ret_val; } -- cgit v1.2.3 From 65a29da1f5fd20fdebef3b959bef9b3660807b20 Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 21 Jul 2017 11:36:24 -0700 Subject: e1000e: Fix wrong comment related to link detection Reading e1000e_check_for_copper_link() shows that get_link_status is set to false after link has been detected. Therefore, it stays TRUE until then. Signed-off-by: Benjamin Poirier Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/netdev.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 8436c5f2c3e8..ead4c112580e 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5074,7 +5074,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter) /* get_link_status is set on LSC (link status) interrupt or * Rx sequence error interrupt. get_link_status will stay - * false until the check_for_link establishes link + * true until the check_for_link establishes link * for copper adapters ONLY */ switch (hw->phy.media_type) { @@ -5092,7 +5092,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter) break; case e1000_media_type_internal_serdes: ret_val = hw->mac.ops.check_for_link(hw); - link_active = adapter->hw.mac.serdes_has_link; + link_active = hw->mac.serdes_has_link; break; default: case e1000_media_type_unknown: -- cgit v1.2.3 From d3509f8bc7b0560044c15f0e3ecfde1d9af757a6 Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 21 Jul 2017 11:36:25 -0700 Subject: e1000e: Fix return value test All the helpers return -E1000_ERR_PHY. 
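The bug class in miniature (using the driver's constants; E1000_ERR_PHY itself is a positive value):

	/* The helpers return the negated constant, so the old comparison
	 * could never be true:
	 *
	 *   if (ret_val == E1000_ERR_PHY)   // old test, never matches
	 *   if (ret_val == -E1000_ERR_PHY)  // fixed test
	 */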
Signed-off-by: Benjamin Poirier Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/netdev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index ead4c112580e..a740de6a30b0 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5099,7 +5099,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter) break; } - if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && + if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ e_info("Gigabit has been disabled, downgrading speed\n"); -- cgit v1.2.3 From 19110cfbb34d4af0cdfe14cd243f3b09dc95b013 Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 21 Jul 2017 11:36:26 -0700 Subject: e1000e: Separate signaling for link check/link up Lennart reported the following race condition: \ e1000_watchdog_task \ e1000e_has_link \ hw->mac.ops.check_for_link() === e1000e_check_for_copper_link /* link is up */ mac->get_link_status = false; /* interrupt */ \ e1000_msix_other hw->mac.get_link_status = true; link_active = !hw->mac.get_link_status /* link_active is false, wrongly */ This problem arises because the single flag get_link_status is used to signal two different states: link status needs checking and link status is down. Avoid the problem by using the return value of .check_for_link to signal the link status to e1000e_has_link(). Reported-by: Lennart Sorensen Signed-off-by: Benjamin Poirier Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/mac.c | 11 ++++++++--- drivers/net/ethernet/intel/e1000e/netdev.c | 2 +- 2 files changed, 9 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c index b322011ec282..f457c5703d0c 100644 --- a/drivers/net/ethernet/intel/e1000e/mac.c +++ b/drivers/net/ethernet/intel/e1000e/mac.c @@ -410,6 +410,9 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) * Checks to see of the link status of the hardware has changed. If a * change in link status has been detected, then we read the PHY registers * to get the current speed/duplex if link exists. + * + * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link + * up). **/ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) { @@ -423,7 +426,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) * Change or Rx Sequence Error interrupt. */ if (!mac->get_link_status) - return 0; + return 1; /* First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex @@ -461,10 +464,12 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw) * different link partner. 
*/ ret_val = e1000e_config_fc_after_link_up(hw); - if (ret_val) + if (ret_val) { e_dbg("Error configuring flow control\n"); + return ret_val; + } - return ret_val; + return 1; } /** diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index a740de6a30b0..0a5f95ab0d3c 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5081,7 +5081,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter) case e1000_media_type_copper: if (hw->mac.get_link_status) { ret_val = hw->mac.ops.check_for_link(hw); - link_active = !hw->mac.get_link_status; + link_active = ret_val > 0; } else { link_active = true; } -- cgit v1.2.3 From 4aea7a5c5e940c1723add439f4088844cd26196d Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Fri, 21 Jul 2017 11:36:27 -0700 Subject: e1000e: Avoid receiver overrun interrupt bursts When e1000e_poll() is not fast enough to keep up with incoming traffic, the adapter (when operating in msix mode) raises the Other interrupt to signal Receiver Overrun. This is a double problem because 1) at the moment e1000_msix_other() assumes that it is only called in case of Link Status Change and 2) if the condition persists, the interrupt is repeatedly raised again in quick succession. Ideally we would configure the Other interrupt to not be raised in case of receiver overrun but this doesn't seem possible on this adapter. Instead, we handle the first part of the problem by reverting to the practice of reading ICR in the other interrupt handler, like before commit 16ecba59bc33 ("e1000e: Do not read ICR in Other interrupt"). Thanks to commit 0a8047ac68e5 ("e1000e: Fix msi-x interrupt automask") which cleared IAME from CTRL_EXT, reading ICR doesn't interfere with RxQ0, TxQ0 interrupts anymore. We handle the second part of the problem by not re-enabling the Other interrupt right away when there is overrun. Instead, we wait until traffic subsides, napi polling mode is exited and interrupts are re-enabled. Reported-by: Lennart Sorensen Fixes: 16ecba59bc33 ("e1000e: Do not read ICR in Other interrupt") Signed-off-by: Benjamin Poirier Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/defines.h | 1 + drivers/net/ethernet/intel/e1000e/netdev.c | 33 ++++++++++++++++++++++------- 2 files changed, 26 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 0641c0098738..afb7ebe20b24 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -398,6 +398,7 @@ #define E1000_ICR_LSC 0x00000004 /* Link Status Change */ #define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. 
threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* Receiver Overrun */ #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ #define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ /* If this bit asserted, the driver should claim the interrupt */ diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 0a5f95ab0d3c..ee9de3500331 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1910,14 +1910,30 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) struct net_device *netdev = data; struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + u32 icr; + bool enable = true; + + icr = er32(ICR); + if (icr & E1000_ICR_RXO) { + ew32(ICR, E1000_ICR_RXO); + enable = false; + /* napi poll will re-enable Other, make sure it runs */ + if (napi_schedule_prep(&adapter->napi)) { + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } + } + if (icr & E1000_ICR_LSC) { + ew32(ICR, E1000_ICR_LSC); + hw->mac.get_link_status = true; + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } - hw->mac.get_link_status = true; - - /* guard against interrupt when we're going down */ - if (!test_bit(__E1000_DOWN, &adapter->state)) { - mod_timer(&adapter->watchdog_timer, jiffies + 1); + if (enable && !test_bit(__E1000_DOWN, &adapter->state)) ew32(IMS, E1000_IMS_OTHER); - } return IRQ_HANDLED; } @@ -2687,7 +2703,8 @@ static int e1000e_poll(struct napi_struct *napi, int weight) napi_complete_done(napi, work_done); if (!test_bit(__E1000_DOWN, &adapter->state)) { if (adapter->msix_entries) - ew32(IMS, adapter->rx_ring->ims_val); + ew32(IMS, adapter->rx_ring->ims_val | + E1000_IMS_OTHER); else e1000_irq_enable(adapter); } @@ -4204,7 +4221,7 @@ static void e1000e_trigger_lsc(struct e1000_adapter *adapter) struct e1000_hw *hw = &adapter->hw; if (adapter->msix_entries) - ew32(ICS, E1000_ICS_OTHER); + ew32(ICS, E1000_ICS_LSC | E1000_ICS_OTHER); else ew32(ICS, E1000_ICS_LSC); } -- cgit v1.2.3 From b10effb92e272051dd1ec0d7be56bf9ca85ab927 Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Sun, 6 Aug 2017 16:49:18 +0300 Subject: e1000e: fix buffer overrun while the I219 is processing DMA transactions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Intel® 100/200 Series Chipset platforms reduced the round-trip latency for the LAN Controller DMA accesses, causing, in some high-performance cases, a buffer overrun while the I219 LAN Connected Device is processing the DMA transactions. I219LM and I219V devices can fall into an unrecoverable Tx hang under very stressful UDP traffic and repeated reconnection of the Ethernet cable. This Tx hang of the LAN Controller is only recovered if the system is rebooted. Slightly slow down DMA access by reducing the number of outstanding requests. This workaround could have an impact on TCP traffic performance on the platform. Disabling TSO eliminates the performance loss for TCP traffic without a noticeable impact on CPU performance.
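In code terms, the workaround boils down to a two-bit change in TARC(0), as the diff below shows. A hedged sketch of that change as a standalone helper, for illustration only: the helper itself is hypothetical, er32()/ew32() and the TARC() macro are the e1000e driver's register accessors, and the commit does not document what bits 28 and 29 mean individually beyond "fewer outstanding requests".

	static void e1000e_limit_outstanding_reqs(struct e1000_hw *hw)
	{
		u32 tarc0 = er32(TARC(0));

		/* Trade a little DMA throughput for safety: reduce the
		 * number of outstanding DMA requests so the I219 is not
		 * overrun while processing DMA transactions.
		 */
		tarc0 &= ~BIT(28);
		tarc0 |= BIT(29);
		ew32(TARC(0), tarc0);
	}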
Please refer to the I218/I219 specification update: https://www.intel.com/content/www/us/en/embedded/products/networking/ethernet-connection-i218-family-documentation.html Signed-off-by: Sasha Neftin Reviewed-by: Dima Ruinskiy Reviewed-by: Raanan Avargil Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/netdev.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index ee9de3500331..14b096f3d1da 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3021,8 +3021,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) hw->mac.ops.config_collision_dist(hw); - /* SPT and CNP Si errata workaround to avoid data corruption */ - if (hw->mac.type >= e1000_pch_spt) { + /* SPT and KBL Si errata workaround to avoid data corruption */ + if (hw->mac.type == e1000_pch_spt) { u32 reg_val; reg_val = er32(IOSFPC); @@ -3030,7 +3030,9 @@ static void e1000_configure_tx(struct e1000_adapter *adapter) ew32(IOSFPC, reg_val); reg_val = er32(TARC(0)); - reg_val |= E1000_TARC0_CB_MULTIQ_3_REQ; + /* SPT and KBL Si errata workaround to avoid Tx hang */ + reg_val &= ~BIT(28); + reg_val |= BIT(29); ew32(TARC(0), reg_val); } } -- cgit v1.2.3 From 48072ae1ec7a1c778771cad8c1b8dd803c4992ab Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Fri, 25 Aug 2017 11:06:26 -0400 Subject: e1000e: apply burst mode settings only on default Devices that support FLAG2_DMA_BURST have different default values for RDTR and RADV. Apply burst mode default settings only when no explicit value was passed at module load. The RDTR default is zero. If the module is loaded for low latency operation with RxIntDelay=0, do not override this value with a burst default of 32. Move the decision to apply burst values earlier, where explicitly initialized module variables can be distinguished from defaults, as in the sketch below.
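A minimal, self-contained sketch of that ordering — hypothetical names throughout (only FLAG2_DMA_BURST, RxIntDelay and the RDTR defaults come from the patch): module_param_array() records how many values the user passed at load time, which is what lets an explicitly set parameter be told apart from a default, so a per-device burst default can be chosen before validation without clobbering an explicit RxIntDelay=0.

	#include <linux/bits.h>
	#include <linux/moduleparam.h>
	#include <linux/types.h>

	#define MAX_UNITS	8
	#define DEFAULT_RDTR	0	/* low-latency default */
	#define BURST_RDTR	0x20	/* burst-mode default */
	#define FLAG2_DMA_BURST	BIT(0)	/* stand-in for the driver's flag */

	struct my_adapter {		/* illustrative, not e1000e's struct */
		u32 flags2;
		u32 rx_int_delay;
	};

	static int RxIntDelay[MAX_UNITS];
	static unsigned int num_RxIntDelay;
	module_param_array(RxIntDelay, int, &num_RxIntDelay, 0);

	static void pick_rx_int_delay(struct my_adapter *adapter, int bd)
	{
		u32 def = DEFAULT_RDTR;

		/* Burst-capable hardware changes only the *default*... */
		if (adapter->flags2 & FLAG2_DMA_BURST)
			def = BURST_RDTR;

		/* ...an explicitly passed module parameter still wins. */
		adapter->rx_int_delay = (num_RxIntDelay > bd) ?
					RxIntDelay[bd] : def;
	}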
Signed-off-by: Willem de Bruijn Acked-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/e1000.h | 4 ---- drivers/net/ethernet/intel/e1000e/netdev.c | 8 -------- drivers/net/ethernet/intel/e1000e/param.c | 16 +++++++++++++++- 3 files changed, 15 insertions(+), 13 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index 98e68888abb1..2311b31bdcac 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -94,10 +94,6 @@ struct e1000_info; */ #define E1000_CHECK_RESET_COUNT 25 -#define DEFAULT_RDTR 0 -#define DEFAULT_RADV 8 -#define BURST_RDTR 0x20 -#define BURST_RADV 0x20 #define PCICFG_DESC_RING_STATUS 0xe4 #define FLUSH_DESC_REQUIRED 0x100 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 14b096f3d1da..00f48d4cabec 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3242,14 +3242,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) */ ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE); ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE); - - /* override the delay timers for enabling bursting, only if - * the value was not set by the user via module options - */ - if (adapter->rx_int_delay == DEFAULT_RDTR) - adapter->rx_int_delay = BURST_RDTR; - if (adapter->rx_abs_int_delay == DEFAULT_RADV) - adapter->rx_abs_int_delay = BURST_RADV; } /* set the Receive Delay Timer Register */ diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c index 6d8c39abee16..47da51864543 100644 --- a/drivers/net/ethernet/intel/e1000e/param.c +++ b/drivers/net/ethernet/intel/e1000e/param.c @@ -73,17 +73,25 @@ E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); /* Receive Interrupt Delay in units of 1.024 microseconds * hardware will likely hang if you set this to anything but zero. * + * Burst variant is used as default if device has FLAG2_DMA_BURST. + * * Valid Range: 0-65535 */ E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); +#define DEFAULT_RDTR 0 +#define BURST_RDTR 0x20 #define MAX_RXDELAY 0xFFFF #define MIN_RXDELAY 0 /* Receive Absolute Interrupt Delay in units of 1.024 microseconds + * + * Burst variant is used as default if device has FLAG2_DMA_BURST. 
* * Valid Range: 0-65535 */ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); +#define DEFAULT_RADV 8 +#define BURST_RADV 0x20 #define MAX_RXABSDELAY 0xFFFF #define MIN_RXABSDELAY 0 @@ -297,6 +305,9 @@ void e1000e_check_options(struct e1000_adapter *adapter) .max = MAX_RXDELAY } } }; + if (adapter->flags2 & FLAG2_DMA_BURST) + opt.def = BURST_RDTR; + if (num_RxIntDelay > bd) { adapter->rx_int_delay = RxIntDelay[bd]; e1000_validate_option(&adapter->rx_int_delay, &opt, @@ -307,7 +318,7 @@ void e1000e_check_options(struct e1000_adapter *adapter) } /* Receive Absolute Interrupt Delay */ { - static const struct e1000_option opt = { + static struct e1000_option opt = { .type = range_option, .name = "Receive Absolute Interrupt Delay", .err = "using default of " @@ -317,6 +328,9 @@ void e1000e_check_options(struct e1000_adapter *adapter) .max = MAX_RXABSDELAY } } }; + if (adapter->flags2 & FLAG2_DMA_BURST) + opt.def = BURST_RADV; + if (num_RxAbsIntDelay > bd) { adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; e1000_validate_option(&adapter->rx_abs_int_delay, &opt, -- cgit v1.2.3 From 377b62736c01f14309141c69caa6d84363c12e12 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 25 Aug 2017 18:14:24 -0700 Subject: e1000e: Be drop monitor friendly e1000e_put_txbuf() can be called from the normal reclamation path as well as after a DMA mapping failure, so we need to differentiate these two cases when freeing SKBs to be drop monitor friendly. e1000e_tx_hwtstamp_work() and e1000_remove() are processing TX timestamped SKBs and those should not be accounted as drops either. Signed-off-by: Florian Fainelli Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/netdev.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 00f48d4cabec..bf8f38f76953 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1071,7 +1071,8 @@ next_desc: } static void e1000_put_txbuf(struct e1000_ring *tx_ring, - struct e1000_buffer *buffer_info) + struct e1000_buffer *buffer_info, + bool drop) { struct e1000_adapter *adapter = tx_ring->adapter; @@ -1085,7 +1086,10 @@ static void e1000_put_txbuf(struct e1000_ring *tx_ring, buffer_info->dma = 0; } if (buffer_info->skb) { - dev_kfree_skb_any(buffer_info->skb); + if (drop) + dev_kfree_skb_any(buffer_info->skb); + else + dev_consume_skb_any(buffer_info->skb); buffer_info->skb = NULL; } buffer_info->time_stamp = 0; @@ -1199,7 +1203,7 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work) wmb(); /* force write prior to skb_tstamp_tx */ skb_tstamp_tx(skb, &shhwtstamps); - dev_kfree_skb_any(skb); + dev_consume_skb_any(skb); } else if (time_after(jiffies, adapter->tx_hwtstamp_start + adapter->tx_timeout_factor * HZ)) { dev_kfree_skb_any(adapter->tx_hwtstamp_skb); @@ -1254,7 +1258,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring) } } - e1000_put_txbuf(tx_ring, buffer_info); + e1000_put_txbuf(tx_ring, buffer_info, false); tx_desc->upper.data = 0; i++; @@ -2437,7 +2441,7 @@ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring) for (i = 0; i < tx_ring->count; i++) { buffer_info = &tx_ring->buffer_info[i]; - e1000_put_txbuf(tx_ring, buffer_info); + e1000_put_txbuf(tx_ring, buffer_info, false); } netdev_reset_queue(adapter->netdev); @@ -5625,7 +5629,7 @@ dma_error: i += tx_ring->count; i--; buffer_info =
&tx_ring->buffer_info[i]; - e1000_put_txbuf(tx_ring, buffer_info); + e1000_put_txbuf(tx_ring, buffer_info, true); } return 0; @@ -7419,7 +7423,7 @@ static void e1000_remove(struct pci_dev *pdev) if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { cancel_work_sync(&adapter->tx_hwtstamp_work); if (adapter->tx_hwtstamp_skb) { - dev_kfree_skb_any(adapter->tx_hwtstamp_skb); + dev_consume_skb_any(adapter->tx_hwtstamp_skb); adapter->tx_hwtstamp_skb = NULL; } } -- cgit v1.2.3 From 18eb86362a52f0af933cc0fd5e37027317eb2d1c Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 27 Aug 2017 08:39:51 +0200 Subject: igb: check memory allocation failure Check memory allocation failures and return -ENOMEM in such cases, as already done for other memory allocations in this function. This avoids a NULL pointer dereference. Signed-off-by: Christophe JAILLET Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index fd4a46b03cc8..837d9b46a390 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3162,6 +3162,8 @@ static int igb_sw_init(struct igb_adapter *adapter) /* Setup and initialize a copy of the hw vlan table array */ adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32), GFP_ATOMIC); + if (!adapter->shadow_vfta) + return -ENOMEM; /* This call may decrease the number of queues */ if (igb_init_interrupt_scheme(adapter, true)) { -- cgit v1.2.3 From 96ac18f14a5a721dc4233f1c6ebd07e103ae5a63 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Tue, 10 Oct 2017 12:44:13 +0530 Subject: cxgb4: Add support for new flash parts Add support for new flash parts identification, and also cleanup the flash part identifying and decoding code. Based on the original work of Casey Leedom Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 136 ++++++++++++++++++++++++----- 1 file changed, 116 insertions(+), 20 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index b65ce26ff72f..b3fd1f457639 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -8205,7 +8205,7 @@ struct flash_desc { u32 size_mb; }; -static int get_flash_params(struct adapter *adap) +static int t4_get_flash_params(struct adapter *adap) { /* Table for non-Numonix supported flash parts. Numonix parts are left * to the preexisting code. All flash parts have 64KB sectors. */ @@ -8214,40 +8214,136 @@ static int get_flash_params(struct adapter *adap) { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */ }; + unsigned int part, manufacturer; + unsigned int density, size; + u32 flashid = 0; int ret; - u32 info; + + /* Issue a Read ID Command to the Flash part. We decode supported + * Flash parts and their sizes from this. There's a newer Query + * Command which can retrieve detailed geometry information but many + * Flash parts don't support it.
+ */ ret = sf1_write(adap, 1, 1, 0, SF_RD_ID); if (!ret) - ret = sf1_read(adap, 3, 0, 1, &info); + ret = sf1_read(adap, 3, 0, 1, &flashid); t4_write_reg(adap, SF_OP_A, 0); /* unlock SF */ if (ret) return ret; - for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret) - if (supported_flash[ret].vendor_and_model_id == info) { - adap->params.sf_size = supported_flash[ret].size_mb; + /* Check to see if it's one of our non-standard supported Flash parts. + */ + for (part = 0; part < ARRAY_SIZE(supported_flash); part++) + if (supported_flash[part].vendor_and_model_id == flashid) { + adap->params.sf_size = supported_flash[part].size_mb; adap->params.sf_nsec = adap->params.sf_size / SF_SEC_SIZE; - return 0; + goto found; } - if ((info & 0xff) != 0x20) /* not a Numonix flash */ + /* Decode Flash part size. The code below looks repetitive with + * common encodings, but that's not guaranteed in the JEDEC + * specification for the Read JEDEC ID command. The only thing that + * we're guaranteed by the JEDEC specification is where the + * Manufacturer ID is in the returned result. After that each + * Manufacturer ~could~ encode things completely differently. + * Note, all Flash parts must have 64KB sectors. + */ + manufacturer = flashid & 0xff; + switch (manufacturer) { + case 0x20: { /* Micron/Numonix */ + /* This Density -> Size decoding table is taken from Micron + * Data Sheets. + */ + density = (flashid >> 16) & 0xff; + switch (density) { + case 0x14: /* 1MB */ + size = 1 << 20; + break; + case 0x15: /* 2MB */ + size = 1 << 21; + break; + case 0x16: /* 4MB */ + size = 1 << 22; + break; + case 0x17: /* 8MB */ + size = 1 << 23; + break; + case 0x18: /* 16MB */ + size = 1 << 24; + break; + case 0x19: /* 32MB */ + size = 1 << 25; + break; + case 0x20: /* 64MB */ + size = 1 << 26; + break; + case 0x21: /* 128MB */ + size = 1 << 27; + break; + case 0x22: /* 256MB */ + size = 1 << 28; + break; + + default: + dev_err(adap->pdev_dev, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n", + flashid, density); return -EINVAL; - info >>= 16; /* log2 of size */ - if (info >= 0x14 && info < 0x18) - adap->params.sf_nsec = 1 << (info - 16); - else if (info == 0x18) - adap->params.sf_nsec = 64; - else + } + break; + } + case 0xc2: { /* Macronix */ + /* This Density -> Size decoding table is taken from Macronix + * Data Sheets. + */ + density = (flashid >> 16) & 0xff; + switch (density) { + case 0x17: /* 8MB */ + size = 1 << 23; + break; + case 0x18: /* 16MB */ + size = 1 << 24; + break; + default: + dev_err(adap->pdev_dev, "Macronix Flash Part has bad size, ID = %#x, Density code = %#x\n", + flashid, density); + return -EINVAL; + } + break; + } + case 0xef: { /* Winbond */ + /* This Density -> Size decoding table is taken from Winbond + * Data Sheets. + */ + density = (flashid >> 16) & 0xff; + switch (density) { + case 0x17: /* 8MB */ + size = 1 << 23; + break; + case 0x18: /* 16MB */ + size = 1 << 24; + break; + default: + dev_err(adap->pdev_dev, "Winbond Flash Part has bad size, ID = %#x, Density code = %#x\n", + flashid, density); return -EINVAL; - adap->params.sf_size = 1 << info; - adap->params.sf_fw_start = - t4_read_reg(adap, CIM_BOOT_CFG_A) & BOOTADDR_M; + } + break; + } + default: + dev_err(adap->pdev_dev, "Unsupported Flash Part, ID = %#x\n", + flashid); + return -EINVAL; + } + + /* Store decoded Flash size and fall through into vetting code.
*/ + adap->params.sf_size = size; + adap->params.sf_nsec = size / SF_SEC_SIZE; +found: if (adap->params.sf_size < FLASH_MIN_SIZE) - dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n", - adap->params.sf_size, FLASH_MIN_SIZE); + dev_warn(adap->pdev_dev, "WARNING: Flash Part ID %#x, size %#x < %#x\n", + flashid, adap->params.sf_size, FLASH_MIN_SIZE); return 0; } @@ -8285,7 +8381,7 @@ int t4_prep_adapter(struct adapter *adapter) get_pci_mode(adapter, &adapter->params.pci); pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A)); - ret = get_flash_params(adapter); + ret = t4_get_flash_params(adapter); if (ret < 0) { dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret); return ret; -- cgit v1.2.3 From 652faa98ec383c25296fb8493f17060a2c7e3438 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Tue, 10 Oct 2017 12:45:02 +0530 Subject: cxgb4: add new T5 pci device id's Add 0x50aa and 0x50ab T5 device id's. Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index 633e9751a25e..8c22bb8c9fbf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -181,6 +181,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x50a7), /* Custom T580-CR */ CH_PCI_ID_TABLE_FENTRY(0x50a8), /* Custom T580-KR */ CH_PCI_ID_TABLE_FENTRY(0x50a9), /* Custom T580-KR */ + CH_PCI_ID_TABLE_FENTRY(0x50aa), /* Custom T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */ /* T6 adapters: */ -- cgit v1.2.3 From ee83f77645332ce86863e5cef8dd3372b8ee4b87 Mon Sep 17 00:00:00 2001 From: Lipeng Date: Tue, 10 Oct 2017 16:42:03 +0800 Subject: net: hns3: fixes the ring index in hns3_fini_ring This patch fixes the ring index in hns3_fini_ring. Signed-off-by: Lipeng Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 26bbc91add65..acb82cfc1b9a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -2661,7 +2661,7 @@ static int hns3_init_all_ring(struct hns3_nic_priv *priv) out_when_alloc_ring_memory: for (j = i - 1; j >= 0; j--) - hns3_fini_ring(priv->ring_data[i].ring); + hns3_fini_ring(priv->ring_data[j].ring); return -ENOMEM; } -- cgit v1.2.3 From 5668abda0931c61f823b21b1612e1c77b617a734 Mon Sep 17 00:00:00 2001 From: Lipeng Date: Tue, 10 Oct 2017 16:42:04 +0800 Subject: net: hns3: add support for set_ringparam This patch supports the ethtool's set_ringparam(). Signed-off-by: Lipeng Signed-off-by: David S. 
Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 4 +- .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h | 4 ++ .../ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c | 75 ++++++++++++++++++++++ 3 files changed, 81 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index acb82cfc1b9a..ba550c1b5b01 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -2637,7 +2637,7 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring) } } -static int hns3_init_all_ring(struct hns3_nic_priv *priv) +int hns3_init_all_ring(struct hns3_nic_priv *priv) { struct hnae3_handle *h = priv->ae_handle; int ring_num = h->kinfo.num_tqps * 2; @@ -2666,7 +2666,7 @@ out_when_alloc_ring_memory: return -ENOMEM; } -static int hns3_uninit_all_ring(struct hns3_nic_priv *priv) +int hns3_uninit_all_ring(struct hns3_nic_priv *priv) { struct hnae3_handle *h = priv->ae_handle; int i; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h index dd8d40ca1dcc..66599890b4d4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h @@ -76,6 +76,8 @@ enum hns3_nic_state { #define HNS3_RING_NAME_LEN 16 #define HNS3_BUFFER_SIZE_2048 2048 #define HNS3_RING_MAX_PENDING 32768 +#define HNS3_RING_MIN_PENDING 8 +#define HNS3_RING_BD_MULTIPLE 8 #define HNS3_MAX_MTU 9728 #define HNS3_BD_SIZE_512_TYPE 0 @@ -593,6 +595,8 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) void hns3_ethtool_set_ops(struct net_device *netdev); int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); +int hns3_init_all_ring(struct hns3_nic_priv *priv); +int hns3_uninit_all_ring(struct hns3_nic_priv *priv); #ifdef CONFIG_HNS3_DCB void hns3_dcbnl_setup(struct hnae3_handle *handle); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c index 060bacebf86a..1c5d003ecf29 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c @@ -459,10 +459,85 @@ static int hns3_get_rxnfc(struct net_device *netdev, return 0; } +int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, u32 new_desc_num) +{ + struct hnae3_handle *h = priv->ae_handle; + int i; + + h->kinfo.num_desc = new_desc_num; + + for (i = 0; i < h->kinfo.num_tqps * 2; i++) + priv->ring_data[i].ring->desc_num = new_desc_num; + + return hns3_init_all_ring(priv); +} + +int hns3_set_ringparam(struct net_device *ndev, struct ethtool_ringparam *param) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + bool if_running = netif_running(ndev); + u32 old_desc_num, new_desc_num; + int ret; + + if (param->rx_mini_pending || param->rx_jumbo_pending) + return -EINVAL; + + if (param->tx_pending != param->rx_pending) { + netdev_err(ndev, + "Descriptors of tx and rx must be equal"); + return -EINVAL; + } + + if (param->tx_pending > HNS3_RING_MAX_PENDING || + param->tx_pending < HNS3_RING_MIN_PENDING) { + netdev_err(ndev, + "Descriptors requested (Tx/Rx: %d) out of range [%d-%d]\n", + param->tx_pending, HNS3_RING_MIN_PENDING, + HNS3_RING_MAX_PENDING); + return -EINVAL; + } + + new_desc_num = param->tx_pending; + + /* Hardware requires 
that its descriptors must be multiple of eight */ + new_desc_num = ALIGN(new_desc_num, HNS3_RING_BD_MULTIPLE); + old_desc_num = h->kinfo.num_desc; + if (old_desc_num == new_desc_num) + return 0; + + netdev_info(ndev, + "Changing descriptor count from %d to %d.\n", + old_desc_num, new_desc_num); + + if (if_running) + dev_close(ndev); + + ret = hns3_uninit_all_ring(priv); + if (ret) + return ret; + + ret = hns3_change_all_ring_bd_num(priv, new_desc_num); + if (ret) { + ret = hns3_change_all_ring_bd_num(priv, old_desc_num); + if (ret) { + netdev_err(ndev, + "Revert to old bd num fail, ret=%d.\n", ret); + return ret; + } + } + + if (if_running) + ret = dev_open(ndev); + + return ret; +} + static const struct ethtool_ops hns3_ethtool_ops = { .get_drvinfo = hns3_get_drvinfo, .get_link = hns3_get_link, .get_ringparam = hns3_get_ringparam, + .set_ringparam = hns3_set_ringparam, .get_pauseparam = hns3_get_pauseparam, .get_strings = hns3_get_strings, .get_ethtool_stats = hns3_get_stats, -- cgit v1.2.3 From f7db940afc0a70f72ffcb6bb9c0ad15e6c5349c1 Mon Sep 17 00:00:00 2001 From: Lipeng Date: Tue, 10 Oct 2017 16:42:05 +0800 Subject: net: hns3: add support for set_rxnfc This patch supports the ethtool's set_rxnfc(). Signed-off-by: Lipeng Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2 + .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 9 ++ .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 1 + .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 95 +++++++++++++++++++++- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 8 ++ .../ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c | 16 ++++ 6 files changed, 129 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index c677530841cf..d952d6213024 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -339,6 +339,8 @@ struct hnae3_ae_ops { u8 *hfunc); int (*set_rss)(struct hnae3_handle *handle, const u32 *indir, const u8 *key, const u8 hfunc); + int (*set_rss_tuple)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); int (*get_tc_size)(struct hnae3_handle *handle); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 8ecd80744767..60960e588b5f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -85,6 +85,15 @@ static int hclge_init_cmd_queue(struct hclge_dev *hdev, int ring_type) return 0; } +void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read) +{ + desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN); + if (is_read) + desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR); + else + desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); +} + void hclge_cmd_setup_basic_desc(struct hclge_desc *desc, enum hclge_opcode_type opcode, bool is_read) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 8f3ba02aea3c..b4373345c2b4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -739,6 +739,7 @@ struct hclge_hw; int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num); void hclge_cmd_setup_basic_desc(struct hclge_desc *desc, enum hclge_opcode_type opcode, bool is_read); +void hclge_cmd_reuse_desc(struct hclge_desc 
*desc, bool is_read); int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, struct hclge_promisc_param *param); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index c91c779aeeed..5b5e52c7fde0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2595,8 +2595,6 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) { -#define HCLGE_RSS_INPUT_TUPLE_OTHER 0xf -#define HCLGE_RSS_INPUT_TUPLE_SCTP 0x1f struct hclge_rss_input_tuple_cmd *req; struct hclge_desc desc; int ret; @@ -2677,6 +2675,98 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, return ret; } +static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc) +{ + u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0; + + if (nfc->data & RXH_L4_B_2_3) + hash_sets |= HCLGE_D_PORT_BIT; + else + hash_sets &= ~HCLGE_D_PORT_BIT; + + if (nfc->data & RXH_IP_SRC) + hash_sets |= HCLGE_S_IP_BIT; + else + hash_sets &= ~HCLGE_S_IP_BIT; + + if (nfc->data & RXH_IP_DST) + hash_sets |= HCLGE_D_IP_BIT; + else + hash_sets &= ~HCLGE_D_IP_BIT; + + if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW) + hash_sets |= HCLGE_V_TAG_BIT; + + return hash_sets; +} + +static int hclge_set_rss_tuple(struct hnae3_handle *handle, + struct ethtool_rxnfc *nfc) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_rss_input_tuple_cmd *req; + struct hclge_desc desc; + u8 tuple_sets; + int ret; + + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EINVAL; + + req = (struct hclge_rss_input_tuple_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Read rss tuple fail, status = %d\n", ret); + return ret; + } + + hclge_cmd_reuse_desc(&desc, false); + + tuple_sets = hclge_get_rss_hash_bits(nfc); + switch (nfc->flow_type) { + case TCP_V4_FLOW: + req->ipv4_tcp_en = tuple_sets; + break; + case TCP_V6_FLOW: + req->ipv6_tcp_en = tuple_sets; + break; + case UDP_V4_FLOW: + req->ipv4_udp_en = tuple_sets; + break; + case UDP_V6_FLOW: + req->ipv6_udp_en = tuple_sets; + break; + case SCTP_V4_FLOW: + req->ipv4_sctp_en = tuple_sets; + break; + case SCTP_V6_FLOW: + if ((nfc->data & RXH_L4_B_0_1) || + (nfc->data & RXH_L4_B_2_3)) + return -EINVAL; + + req->ipv6_sctp_en = tuple_sets; + break; + case IPV4_FLOW: + req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + break; + case IPV6_FLOW: + req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + break; + default: + return -EINVAL; + } + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "Set rss tuple fail, status = %d\n", ret); + + return ret; +} + static int hclge_get_tc_size(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -4344,6 +4434,7 @@ static const struct hnae3_ae_ops hclge_ops = { .get_rss_indir_size = hclge_get_rss_indir_size, .get_rss = hclge_get_rss, .set_rss = hclge_set_rss, + .set_rss_tuple = hclge_set_rss_tuple, .get_tc_size = hclge_get_tc_size, .get_mac_addr = hclge_get_mac_addr, .set_mac_addr = hclge_set_mac_addr, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 
79c1a06cb941..a7c018c7b0ec 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -41,6 +41,14 @@ #define HCLGE_RSS_CFG_TBL_NUM \ (HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE) +#define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0) +#define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0) +#define HCLGE_D_PORT_BIT BIT(0) +#define HCLGE_S_PORT_BIT BIT(1) +#define HCLGE_D_IP_BIT BIT(2) +#define HCLGE_S_IP_BIT BIT(3) +#define HCLGE_V_TAG_BIT BIT(4) + #define HCLGE_RSS_TC_SIZE_0 1 #define HCLGE_RSS_TC_SIZE_1 2 #define HCLGE_RSS_TC_SIZE_2 4 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c index 1c5d003ecf29..f0e88e00a1f6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c @@ -533,6 +533,21 @@ int hns3_set_ringparam(struct net_device *ndev, struct ethtool_ringparam *param) return ret; } +static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss_tuple) + return -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + return h->ae_algo->ops->set_rss_tuple(h, cmd); + default: + return -EOPNOTSUPP; + } +} + static const struct ethtool_ops hns3_ethtool_ops = { .get_drvinfo = hns3_get_drvinfo, .get_link = hns3_get_link, @@ -543,6 +558,7 @@ static const struct ethtool_ops hns3_ethtool_ops = { .get_ethtool_stats = hns3_get_stats, .get_sset_count = hns3_get_sset_count, .get_rxnfc = hns3_get_rxnfc, + .set_rxnfc = hns3_set_rxnfc, .get_rxfh_key_size = hns3_get_rss_key_size, .get_rxfh_indir_size = hns3_get_rss_indir_size, .get_rxfh = hns3_get_rss, -- cgit v1.2.3 From 07d2995425eb8eb4874b94bf62fb1490a2014d76 Mon Sep 17 00:00:00 2001 From: Lipeng Date: Tue, 10 Oct 2017 16:42:06 +0800 Subject: net: hns3: add support for ETHTOOL_GRXFH This patch adds support for ethtool's ETHTOOL_GRXFH in hns3_get_rxnfc(). Signed-off-by: Lipeng Signed-off-by: David S.
Miller --- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2 + .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 64 ++++++++++++++++++++++ .../ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c | 2 + 3 files changed, 68 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index d952d6213024..575f50df340c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -341,6 +341,8 @@ struct hnae3_ae_ops { const u8 *key, const u8 hfunc); int (*set_rss_tuple)(struct hnae3_handle *handle, struct ethtool_rxnfc *cmd); + int (*get_rss_tuple)(struct hnae3_handle *handle, + struct ethtool_rxnfc *cmd); int (*get_tc_size)(struct hnae3_handle *handle); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 5b5e52c7fde0..c322b4534148 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2767,6 +2767,69 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle, return ret; } +static int hclge_get_rss_tuple(struct hnae3_handle *handle, + struct ethtool_rxnfc *nfc) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_rss_input_tuple_cmd *req; + struct hclge_desc desc; + u8 tuple_sets; + int ret; + + nfc->data = 0; + + req = (struct hclge_rss_input_tuple_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Read rss tuple fail, status = %d\n", ret); + return ret; + } + + switch (nfc->flow_type) { + case TCP_V4_FLOW: + tuple_sets = req->ipv4_tcp_en; + break; + case UDP_V4_FLOW: + tuple_sets = req->ipv4_udp_en; + break; + case TCP_V6_FLOW: + tuple_sets = req->ipv6_tcp_en; + break; + case UDP_V6_FLOW: + tuple_sets = req->ipv6_udp_en; + break; + case SCTP_V4_FLOW: + tuple_sets = req->ipv4_sctp_en; + break; + case SCTP_V6_FLOW: + tuple_sets = req->ipv6_sctp_en; + break; + case IPV4_FLOW: + case IPV6_FLOW: + tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT; + break; + default: + return -EINVAL; + } + + if (!tuple_sets) + return 0; + + if (tuple_sets & HCLGE_D_PORT_BIT) + nfc->data |= RXH_L4_B_2_3; + if (tuple_sets & HCLGE_S_PORT_BIT) + nfc->data |= RXH_L4_B_0_1; + if (tuple_sets & HCLGE_D_IP_BIT) + nfc->data |= RXH_IP_DST; + if (tuple_sets & HCLGE_S_IP_BIT) + nfc->data |= RXH_IP_SRC; + + return 0; +} + static int hclge_get_tc_size(struct hnae3_handle *handle) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -4435,6 +4498,7 @@ static const struct hnae3_ae_ops hclge_ops = { .get_rss = hclge_get_rss, .set_rss = hclge_set_rss, .set_rss_tuple = hclge_set_rss_tuple, + .get_rss_tuple = hclge_get_rss_tuple, .get_tc_size = hclge_get_tc_size, .get_mac_addr = hclge_get_mac_addr, .set_mac_addr = hclge_set_mac_addr, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c index f0e88e00a1f6..b64fbd3c369a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c @@ -452,6 +452,8 @@ static int hns3_get_rxnfc(struct net_device *netdev, case ETHTOOL_GRXRINGS: cmd->data = h->ae_algo->ops->get_tc_size(h); break; + case ETHTOOL_GRXFH: + return h->ae_algo->ops->get_rss_tuple(h, cmd); default: return 
-EOPNOTSUPP; } -- cgit v1.2.3 From abf11d04fd9dbec778219d76a9d38c36f65fbc24 Mon Sep 17 00:00:00 2001 From: Lipeng Date: Tue, 10 Oct 2017 16:42:07 +0800 Subject: net: hns3: fix the ring count for ETHTOOL_GRXRINGS This patch fixes the ring count for ETHTOOL_GRXRINGS. The ring count, not the TC size, should be returned for the command "ethtool -n ethx". Signed-off-by: Lipeng Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c index b64fbd3c369a..9b36ce081f62 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c @@ -450,7 +450,7 @@ static int hns3_get_rxnfc(struct net_device *netdev, switch (cmd->cmd) { case ETHTOOL_GRXRINGS: - cmd->data = h->ae_algo->ops->get_tc_size(h); + cmd->data = h->kinfo.num_tc * h->kinfo.rss_size; break; case ETHTOOL_GRXFH: return h->ae_algo->ops->get_rss_tuple(h, cmd); -- cgit v1.2.3 From 7e1dc5e926d57a5bc4ac97d6e061e2fe29c266c0 Mon Sep 17 00:00:00 2001 From: Inbar Karmy Date: Tue, 10 Oct 2017 12:28:33 +0300 Subject: net/mlx4_en: Limit the number of TX rings Limit the number of TX rings per UP by the number of cores in the system. Signed-off-by: Inbar Karmy Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 4 ++-- drivers/net/ethernet/mellanox/mlx4/en_main.c | 6 +++--- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 2 +- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 3d4e4a5d00d1..e9432bc1c1bc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1748,7 +1748,7 @@ static void mlx4_en_get_channels(struct net_device *dev, struct mlx4_en_priv *priv = netdev_priv(dev); channel->max_rx = MAX_RX_RINGS; - channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP; + channel->max_tx = priv->mdev->profile.max_num_tx_rings_p_up; channel->rx_count = priv->rx_ring_num; channel->tx_count = priv->tx_ring_num[TX] / @@ -1777,7 +1777,7 @@ static int mlx4_en_set_channels(struct net_device *dev, mutex_lock(&mdev->state_lock); xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0; if (channel->tx_count * priv->prof->num_up + xdp_count > - MAX_TX_RINGS) { + priv->mdev->profile.max_num_tx_rings_p_up * priv->prof->num_up) { err = -EINVAL; en_err(priv, "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n", diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 686e18de9a97..2c2965497ed3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -153,7 +153,7 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev) int i; params->udp_rss = udp_rss; - params->num_tx_rings_p_up = mlx4_low_memory_profile() ? + params->max_num_tx_rings_p_up = mlx4_low_memory_profile() ?
MLX4_EN_MIN_TX_RING_P_UP : min_t(int, num_online_cpus(), MLX4_EN_MAX_TX_RING_P_UP); @@ -170,8 +170,8 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev) params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE; params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; params->prof[i].num_up = MLX4_EN_NUM_UP_LOW; - params->prof[i].num_tx_rings_p_up = params->num_tx_rings_p_up; - params->prof[i].tx_ring_num[TX] = params->num_tx_rings_p_up * + params->prof[i].num_tx_rings_p_up = params->max_num_tx_rings_p_up; + params->prof[i].tx_ring_num[TX] = params->max_num_tx_rings_p_up * params->prof[i].num_up; params->prof[i].rss_rings = 0; params->prof[i].inline_thold = inline_thold; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 9c218f1cfc6c..e4c7a80ef5a8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -3305,7 +3305,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME; priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE | MLX4_WQE_CTRL_SOLICITED); - priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; + priv->num_tx_rings_p_up = mdev->profile.max_num_tx_rings_p_up; priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK; netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key)); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index fdb3ad0cbe54..245e9ea09ab2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -399,7 +399,7 @@ struct mlx4_en_profile { u32 active_ports; u32 small_pkt_int; u8 no_reset; - u8 num_tx_rings_p_up; + u8 max_num_tx_rings_p_up; struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1]; }; -- cgit v1.2.3 From b8d394367a631c2d749b3114e04dfb4d09624ddf Mon Sep 17 00:00:00 2001 From: Inbar Karmy Date: Tue, 10 Oct 2017 12:28:34 +0300 Subject: net/mlx4_en: Limit the number of RX rings Limit the number of RX rings by the number of cores in the system. Signed-off-by: Inbar Karmy Signed-off-by: Tariq Toukan Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index e9432bc1c1bc..bf1f04164885 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -1742,12 +1742,17 @@ static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return err; } +static int mlx4_en_get_max_num_rx_rings(struct net_device *dev) +{ + return min_t(int, num_online_cpus(), MAX_RX_RINGS); +} + static void mlx4_en_get_channels(struct net_device *dev, struct ethtool_channels *channel) { struct mlx4_en_priv *priv = netdev_priv(dev); - channel->max_rx = MAX_RX_RINGS; + channel->max_rx = mlx4_en_get_max_num_rx_rings(dev); channel->max_tx = priv->mdev->profile.max_num_tx_rings_p_up; channel->rx_count = priv->rx_ring_num; -- cgit v1.2.3 From 80a8dc75ee674a111fea2ae1d02ff96535a309c2 Mon Sep 17 00:00:00 2001 From: Inbar Karmy Date: Tue, 10 Oct 2017 12:28:35 +0300 Subject: net/mlx4_en: Increase number of default RX rings Remove limitation of netif_get_num_default_rss_queues() from logic of RX rings default number. Signed-off-by: Inbar Karmy Signed-off-by: Tariq Toukan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 8f9cb8abc497..a7866954d106 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -254,8 +254,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev) DEF_RX_RINGS)); num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS : - min_t(int, num_of_eqs, - netif_get_num_default_rss_queues()); + min_t(int, num_of_eqs, num_online_cpus()); mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(num_rx_rings); } -- cgit v1.2.3 From 7822b0836d2121d7de3d0f9ec636338d7496e5dc Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Wed, 11 Oct 2017 02:35:23 +0000 Subject: net: hns3: make local functions static Fixes the following sparse warnings: drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c:464:5: warning: symbol 'hns3_change_all_ring_bd_num' was not declared. Should it be static? drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c:477:5: warning: symbol 'hns3_set_ringparam' was not declared. Should it be static? Signed-off-by: Wei Yongjun Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c index 9b36ce081f62..ddbd7f30c6a4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c @@ -461,7 +461,8 @@ static int hns3_get_rxnfc(struct net_device *netdev, return 0; } -int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, u32 new_desc_num) +static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, + u32 new_desc_num) { struct hnae3_handle *h = priv->ae_handle; int i; @@ -474,7 +475,8 @@ static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, u32 new_desc_num) return hns3_init_all_ring(priv); } -int hns3_set_ringparam(struct net_device *ndev, struct ethtool_ringparam *param) +static int hns3_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *param) { struct hns3_nic_priv *priv = netdev_priv(ndev); struct hnae3_handle *h = priv->ae_handle; -- cgit v1.2.3 From d7e6b347560d1824d7bccfa307ad34bd3f133706 Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Wed, 11 Oct 2017 14:52:23 -0500 Subject: net: qcom/emac: specify the correct DMA mask The 64/32-bit DMA mask hackery in the EMAC driver is not actually necessary, and is technically not accurate. The EMAC hardware is limited to a 45-bit DMA address. Although no EMAC-enabled system can have that much DDR, an IOMMU could possibly provide a larger address. Rather than play games with the DMA mappings, the driver should provide a correct value and trust the DMA/IOMMU layers to do the right thing. Signed-off-by: Timur Tabi Signed-off-by: David S.
Miller --- drivers/net/ethernet/qualcomm/emac/emac.c | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index f477ba29c569..ee6f2d27502c 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -615,20 +615,11 @@ static int emac_probe(struct platform_device *pdev) u32 reg; int ret; - /* The EMAC itself is capable of 64-bit DMA, so try that first. */ - ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + /* The TPD buffer address is limited to 45 bits. */ + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(45)); if (ret) { - /* Some platforms may restrict the EMAC's address bus to less - * then the size of DDR. In this case, we need to try a - * smaller mask. We could try every possible smaller mask, - * but that's overkill. Instead, just fall to 32-bit, which - * should always work. - */ - ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); - if (ret) { - dev_err(&pdev->dev, "could not set DMA mask\n"); - return ret; - } + dev_err(&pdev->dev, "could not set DMA mask\n"); + return ret; } netdev = alloc_etherdev(sizeof(struct emac_adapter)); -- cgit v1.2.3 From 3958ffcd85060967a9c70bb92b21741073578d66 Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Wed, 11 Oct 2017 14:52:24 -0500 Subject: net: qcom/emac: remove unused address arrays The EMAC is capable of multiple TX and RX rings, but the driver only supports one ring for each. One function had some left-over unused code that supports multiple rings, but all it did was make the code harder to read. Signed-off-by: Timur Tabi Signed-off-by: David S. Miller --- drivers/net/ethernet/qualcomm/emac/emac-mac.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 3ed9033e56db..9cbb27263742 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -309,22 +309,12 @@ void emac_mac_mode_config(struct emac_adapter *adpt) /* Config descriptor rings */ static void emac_mac_dma_rings_config(struct emac_adapter *adpt) { - static const unsigned short tpd_q_offset[] = { - EMAC_DESC_CTRL_8, EMAC_H1TPD_BASE_ADDR_LO, - EMAC_H2TPD_BASE_ADDR_LO, EMAC_H3TPD_BASE_ADDR_LO}; - static const unsigned short rfd_q_offset[] = { - EMAC_DESC_CTRL_2, EMAC_DESC_CTRL_10, - EMAC_DESC_CTRL_12, EMAC_DESC_CTRL_13}; - static const unsigned short rrd_q_offset[] = { - EMAC_DESC_CTRL_5, EMAC_DESC_CTRL_14, - EMAC_DESC_CTRL_15, EMAC_DESC_CTRL_16}; - /* TPD (Transmit Packet Descriptor) */ writel(upper_32_bits(adpt->tx_q.tpd.dma_addr), adpt->base + EMAC_DESC_CTRL_1); writel(lower_32_bits(adpt->tx_q.tpd.dma_addr), - adpt->base + tpd_q_offset[0]); + adpt->base + EMAC_DESC_CTRL_8); writel(adpt->tx_q.tpd.count & TPD_RING_SIZE_BMSK, adpt->base + EMAC_DESC_CTRL_9); @@ -334,9 +324,9 @@ static void emac_mac_dma_rings_config(struct emac_adapter *adpt) adpt->base + EMAC_DESC_CTRL_0); writel(lower_32_bits(adpt->rx_q.rfd.dma_addr), - adpt->base + rfd_q_offset[0]); + adpt->base + EMAC_DESC_CTRL_2); writel(lower_32_bits(adpt->rx_q.rrd.dma_addr), - adpt->base + rrd_q_offset[0]); + adpt->base + EMAC_DESC_CTRL_5); writel(adpt->rx_q.rfd.count & RFD_RING_SIZE_BMSK, adpt->base + EMAC_DESC_CTRL_3); -- cgit v1.2.3 From df1ec1b9d0df57e96011f175418dc95b1af46821 
Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Wed, 11 Oct 2017 14:52:25 -0500 Subject: net: qcom/emac: enforce DMA address restrictions The EMAC has a restriction that the upper 32 bits of the base addresses for the RFD and RRD rings must be the same. To ensure that restriction, we allocate twice the space for the RRD and locate it at an appropriate address. We also re-arrange the allocations so that invalid addresses are even less likely. Signed-off-by: Timur Tabi Signed-off-by: David S. Miller --- drivers/net/ethernet/qualcomm/emac/emac-mac.c | 39 ++++++++++++++++----------- 1 file changed, 24 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 9cbb27263742..0f5ece5d9507 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -734,6 +734,11 @@ static int emac_rx_descs_alloc(struct emac_adapter *adpt) rx_q->rrd.size = rx_q->rrd.count * (adpt->rrd_size * 4); rx_q->rfd.size = rx_q->rfd.count * (adpt->rfd_size * 4); + /* Check if the RRD and RFD are aligned properly, and if not, adjust. */ + if (upper_32_bits(ring_header->dma_addr) != + upper_32_bits(ring_header->dma_addr + ALIGN(rx_q->rrd.size, 8))) + ring_header->used = ALIGN(rx_q->rrd.size, 8); + rx_q->rrd.dma_addr = ring_header->dma_addr + ring_header->used; rx_q->rrd.v_addr = ring_header->v_addr + ring_header->used; ring_header->used += ALIGN(rx_q->rrd.size, 8); @@ -767,11 +772,18 @@ int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt) /* Ring DMA buffer. Each ring may need up to 8 bytes for alignment, * hence the additional padding bytes are allocated. + * + * Also double the memory allocated for the RRD so that we can + * re-align it if necessary. The EMAC has a restriction that the + * upper 32 bits of the base addresses for the RFD and RRD rings + * must be the same. It is extremely unlikely that this is not the + * case, since the rings are only a few KB in size. However, we + * need to check for this anyway, and if the two rings are not + * compliant, then we re-align.
*/ - ring_header->size = num_tx_descs * (adpt->tpd_size * 4) + - num_rx_descs * (adpt->rfd_size * 4) + - num_rx_descs * (adpt->rrd_size * 4) + - 8 + 2 * 8; /* 8 byte per one Tx and two Rx rings */ + ring_header->size = ALIGN(num_tx_descs * (adpt->tpd_size * 4), 8) + + ALIGN(num_rx_descs * (adpt->rfd_size * 4), 8) + + ALIGN(num_rx_descs * (adpt->rrd_size * 4), 8) * 2; ring_header->used = 0; ring_header->v_addr = dma_zalloc_coherent(dev, ring_header->size, @@ -780,26 +792,23 @@ int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt) if (!ring_header->v_addr) return -ENOMEM; - ring_header->used = ALIGN(ring_header->dma_addr, 8) - - ring_header->dma_addr; - - ret = emac_tx_q_desc_alloc(adpt, &adpt->tx_q); - if (ret) { - netdev_err(adpt->netdev, "error: Tx Queue alloc failed\n"); - goto err_alloc_tx; - } - ret = emac_rx_descs_alloc(adpt); if (ret) { netdev_err(adpt->netdev, "error: Rx Queue alloc failed\n"); goto err_alloc_rx; } + ret = emac_tx_q_desc_alloc(adpt, &adpt->tx_q); + if (ret) { + netdev_err(adpt->netdev, "transmit queue allocation failed\n"); + goto err_alloc_tx; + } + return 0; -err_alloc_rx: - emac_tx_q_bufs_free(adpt); err_alloc_tx: + emac_rx_q_bufs_free(adpt); +err_alloc_rx: dma_free_coherent(dev, ring_header->size, ring_header->v_addr, ring_header->dma_addr); -- cgit v1.2.3 From 740d6f188fb71ae13e3e9f7208b6b3094517509d Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Wed, 11 Oct 2017 14:52:26 -0500 Subject: net: qcom/emac: clean up some TX/RX error messages Some of the error messages that are printed by the interrupt handlers are poorly written. For example, many don't include a device prefix, so there's no indication that they are EMAC errors. Also use rate limiting for all messages that could be printed from interrupt context. Signed-off-by: Timur Tabi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qualcomm/emac/emac-sgmii.c | 15 ++++++--------- drivers/net/ethernet/qualcomm/emac/emac.c | 8 ++++---- 2 files changed, 10 insertions(+), 13 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c index 29ba37a08372..e8ab512ee7e3 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c @@ -68,10 +68,10 @@ static void emac_sgmii_link_init(struct emac_adapter *adpt) writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2); } -static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits) +static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u8 irq_bits) { struct emac_sgmii *phy = &adpt->phy; - u32 status; + u8 status; writel_relaxed(irq_bits, phy->base + EMAC_SGMII_PHY_INTERRUPT_CLEAR); writel_relaxed(IRQ_GLOBAL_CLEAR, phy->base + EMAC_SGMII_PHY_IRQ_CMD); @@ -86,9 +86,8 @@ static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits) EMAC_SGMII_PHY_INTERRUPT_STATUS, status, !(status & irq_bits), 1, SGMII_PHY_IRQ_CLR_WAIT_TIME)) { - netdev_err(adpt->netdev, - "error: failed clear SGMII irq: status:0x%x bits:0x%x\n", - status, irq_bits); + net_err_ratelimited("%s: failed to clear SGMII irq: status:0x%x bits:0x%x\n", + adpt->netdev->name, status, irq_bits); return -EIO; } @@ -109,7 +108,7 @@ static irqreturn_t emac_sgmii_interrupt(int irq, void *data) { struct emac_adapter *adpt = data; struct emac_sgmii *phy = &adpt->phy; - u32 status; + u8 status; status = readl(phy->base + EMAC_SGMII_PHY_INTERRUPT_STATUS); status &= SGMII_ISR_MASK; @@ -139,10 +138,8 @@ static irqreturn_t emac_sgmii_interrupt(int irq, void *data) atomic_set(&phy->decode_error_count, 0); } - if (emac_sgmii_irq_clear(adpt, status)) { - netdev_warn(adpt->netdev, "failed to clear SGMII interrupt\n"); + if (emac_sgmii_irq_clear(adpt, status)) schedule_work(&adpt->work_thread); - } return IRQ_HANDLED; } diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index ee6f2d27502c..70c92b649b29 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -148,9 +148,8 @@ static irqreturn_t emac_isr(int _irq, void *data) goto exit; if (status & ISR_ERROR) { - netif_warn(adpt, intr, adpt->netdev, - "warning: error irq status 0x%lx\n", - status & ISR_ERROR); + net_err_ratelimited("%s: error interrupt 0x%lx\n", + adpt->netdev->name, status & ISR_ERROR); /* reset MAC */ schedule_work(&adpt->work_thread); } @@ -169,7 +168,8 @@ static irqreturn_t emac_isr(int _irq, void *data) emac_mac_tx_process(adpt, &adpt->tx_q); if (status & ISR_OVER) - net_warn_ratelimited("warning: TX/RX overflow\n"); + net_warn_ratelimited("%s: TX/RX overflow interrupt\n", + adpt->netdev->name); exit: /* enable the interrupt */ -- cgit v1.2.3 From d8bbb07adbfab5a8e03c361c7dd67fe0003d3757 Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Wed, 11 Oct 2017 18:43:52 -0600 Subject: net: qualcomm: rmnet: Remove existing logic for bridge mode This will be rewritten in the following patches. Signed-off-by: Subash Abhinov Kasiviswanathan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h | 1 - .../net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 77 +++------------------- 2 files changed, 9 insertions(+), 69 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h index dde4e9f14f4a..0b0c5a79c1dc 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -34,7 +34,6 @@ struct rmnet_endpoint { */ struct rmnet_port { struct net_device *dev; - struct rmnet_endpoint local_ep; struct rmnet_endpoint muxed_ep[RMNET_MAX_LOGICAL_EP]; u32 ingress_data_format; u32 egress_data_format; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index 540c7622dcb1..b50f40181661 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -44,56 +44,18 @@ static void rmnet_set_skb_proto(struct sk_buff *skb) /* Generic handler */ static rx_handler_result_t -rmnet_bridge_handler(struct sk_buff *skb, struct rmnet_endpoint *ep) +rmnet_deliver_skb(struct sk_buff *skb) { - if (!ep->egress_dev) - kfree_skb(skb); - else - rmnet_egress_handler(skb, ep); + skb_reset_transport_header(skb); + skb_reset_network_header(skb); + rmnet_vnd_rx_fixup(skb, skb->dev); + skb->pkt_type = PACKET_HOST; + skb_set_mac_header(skb, 0); + netif_receive_skb(skb); return RX_HANDLER_CONSUMED; } -static rx_handler_result_t -rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_endpoint *ep) -{ - switch (ep->rmnet_mode) { - case RMNET_EPMODE_NONE: - return RX_HANDLER_PASS; - - case RMNET_EPMODE_BRIDGE: - return rmnet_bridge_handler(skb, ep); - - case RMNET_EPMODE_VND: - skb_reset_transport_header(skb); - skb_reset_network_header(skb); - rmnet_vnd_rx_fixup(skb, skb->dev); - - skb->pkt_type = PACKET_HOST; - skb_set_mac_header(skb, 0); - netif_receive_skb(skb); - return RX_HANDLER_CONSUMED; - - default: - kfree_skb(skb); - return RX_HANDLER_CONSUMED; - } -} - -static rx_handler_result_t -rmnet_ingress_deliver_packet(struct sk_buff *skb, - struct rmnet_port *port) -{ - if (!port) { - kfree_skb(skb); - return RX_HANDLER_CONSUMED; - } - - skb->dev = port->local_ep.egress_dev; - - return rmnet_deliver_skb(skb, &port->local_ep); -} - /* MAP handler */ static rx_handler_result_t @@ -130,7 +92,7 @@ __rmnet_map_ingress_handler(struct sk_buff *skb, skb_pull(skb, sizeof(struct rmnet_map_header)); skb_trim(skb, len); rmnet_set_skb_proto(skb); - return rmnet_deliver_skb(skb, ep); + return rmnet_deliver_skb(skb); } static rx_handler_result_t @@ -204,29 +166,8 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) dev = skb->dev; port = rmnet_get_port(dev); - if (port->ingress_data_format & RMNET_INGRESS_FORMAT_MAP) { + if (port->ingress_data_format & RMNET_INGRESS_FORMAT_MAP) rc = rmnet_map_ingress_handler(skb, port); - } else { - switch (ntohs(skb->protocol)) { - case ETH_P_MAP: - if (port->local_ep.rmnet_mode == - RMNET_EPMODE_BRIDGE) { - rc = rmnet_ingress_deliver_packet(skb, port); - } else { - kfree_skb(skb); - rc = RX_HANDLER_CONSUMED; - } - break; - - case ETH_P_IP: - case ETH_P_IPV6: - rc = rmnet_ingress_deliver_packet(skb, port); - break; - - default: - rc = RX_HANDLER_PASS; - } - } return rc; } -- cgit v1.2.3 From 1281726ec341702b16c3e67ea37ba485cf72ee66 Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Wed, 11 Oct 2017 18:43:53 -0600 Subject: net: 
qualcomm: rmnet: Remove some unused defines Most of these constants were used in the initial patchset where custom netlink configuration was used and hence are no longer relevant. Signed-off-by: Subash Abhinov Kasiviswanathan Signed-off-by: David S. Miller --- drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h | 8 -------- 1 file changed, 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h index 7967198fdd90..49102f922b31 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h @@ -19,23 +19,15 @@ #define RMNET_TX_QUEUE_LEN 1000 /* Constants */ -#define RMNET_EGRESS_FORMAT__RESERVED__ BIT(0) #define RMNET_EGRESS_FORMAT_MAP BIT(1) #define RMNET_EGRESS_FORMAT_AGGREGATION BIT(2) #define RMNET_EGRESS_FORMAT_MUXING BIT(3) -#define RMNET_EGRESS_FORMAT_MAP_CKSUMV3 BIT(4) -#define RMNET_EGRESS_FORMAT_MAP_CKSUMV4 BIT(5) -#define RMNET_INGRESS_FIX_ETHERNET BIT(0) #define RMNET_INGRESS_FORMAT_MAP BIT(1) #define RMNET_INGRESS_FORMAT_DEAGGREGATION BIT(2) #define RMNET_INGRESS_FORMAT_DEMUXING BIT(3) #define RMNET_INGRESS_FORMAT_MAP_COMMANDS BIT(4) -#define RMNET_INGRESS_FORMAT_MAP_CKSUMV3 BIT(5) -#define RMNET_INGRESS_FORMAT_MAP_CKSUMV4 BIT(6) -/* Pass the frame up the stack with no modifications to skb->dev */ -#define RMNET_EPMODE_NONE (0) /* Replace skb->dev to a virtual rmnet device and pass up the stack */ #define RMNET_EPMODE_VND (1) /* Pass the frame directly to another device with dev_queue_xmit() */ -- cgit v1.2.3 From 9148963201a4627a632d2c769805c9278f6d22d7 Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Wed, 11 Oct 2017 18:43:54 -0600 Subject: net: qualcomm: rmnet: Move rmnet_mode to rmnet_port Mode information on the real device makes it easier to route packets to rmnet device or bridged device based on the configuration. Signed-off-by: Subash Abhinov Kasiviswanathan Signed-off-by: David S. Miller --- drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 12 +++++------- drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h | 2 +- drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 3 +-- 3 files changed, 7 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 8403eea08d0e..85fce9c4d234 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -124,20 +124,17 @@ static int rmnet_register_real_device(struct net_device *real_dev) } static void rmnet_set_endpoint_config(struct net_device *dev, - u8 mux_id, u8 rmnet_mode, - struct net_device *egress_dev) + u8 mux_id, struct net_device *egress_dev) { struct rmnet_endpoint *ep; - netdev_dbg(dev, "id %d mode %d dev %s\n", - mux_id, rmnet_mode, egress_dev->name); + netdev_dbg(dev, "id %d dev %s\n", mux_id, egress_dev->name); ep = rmnet_get_endpoint(dev, mux_id); /* This config is cleared on every set, so its ok to not * clear it on a device delete. 
*/ memset(ep, 0, sizeof(struct rmnet_endpoint)); - ep->rmnet_mode = rmnet_mode; ep->egress_dev = egress_dev; ep->mux_id = mux_id; } @@ -183,9 +180,10 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, ingress_format, egress_format); port->egress_data_format = egress_format; port->ingress_data_format = ingress_format; + port->rmnet_mode = mode; - rmnet_set_endpoint_config(real_dev, mux_id, mode, dev); - rmnet_set_endpoint_config(dev, mux_id, mode, real_dev); + rmnet_set_endpoint_config(real_dev, mux_id, dev); + rmnet_set_endpoint_config(dev, mux_id, real_dev); return 0; err2: diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h index 0b0c5a79c1dc..03d473f39476 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -24,7 +24,6 @@ * Exact usage of this parameter depends on the rmnet_mode. */ struct rmnet_endpoint { - u8 rmnet_mode; u8 mux_id; struct net_device *egress_dev; }; @@ -39,6 +38,7 @@ struct rmnet_port { u32 egress_data_format; struct net_device *rmnet_devices[RMNET_MAX_LOGICAL_EP]; u8 nr_rmnet_devs; + u8 rmnet_mode; }; extern struct rtnl_link_ops rmnet_link_ops; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index b50f40181661..86e37cc3b52c 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -205,8 +205,7 @@ void rmnet_egress_handler(struct sk_buff *skb, } } - if (ep->rmnet_mode == RMNET_EPMODE_VND) - rmnet_vnd_tx_fixup(skb, orig_dev); + rmnet_vnd_tx_fixup(skb, orig_dev); dev_queue_xmit(skb); } -- cgit v1.2.3 From 56470c927f1ba1e101b5e5a93e02d23a14fd99b7 Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Wed, 11 Oct 2017 18:43:55 -0600 Subject: net: qualcomm: rmnet: Remove duplicate setting of rmnet private info The end point is set twice in the local_ep as well as the mux_id and the real_dev in the rmnet private structure. Remove the local_ep. While these elements are equivalent, rmnet_endpoint will be used only as part of the rmnet_port for muxed scenarios in VND mode. Signed-off-by: Subash Abhinov Kasiviswanathan Signed-off-by: David S. 
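The effect of this change is that the egress path needs no endpoint argument at all: the mux id and the underlying device both live in the VND's private area, reachable from skb->dev. Reduced to a sketch (rmnet-like priv layout; the stamping helper is hypothetical):

    struct demo_priv {
            u8 mux_id;
            struct net_device *real_dev;
    };

    static void demo_egress(struct sk_buff *skb)
    {
            struct demo_priv *priv = netdev_priv(skb->dev);

            skb->dev = priv->real_dev;         /* retarget to physical dev */
            demo_stamp_mux(skb, priv->mux_id); /* hypothetical MAP stamping */
            dev_queue_xmit(skb);
    }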
Miller --- drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 10 ++-------- drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h | 4 ---- drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 18 ++++++++++-------- drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h | 3 +-- drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 19 ++----------------- drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h | 1 - 6 files changed, 15 insertions(+), 40 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 85fce9c4d234..96058bbccf71 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -67,13 +67,8 @@ rmnet_get_endpoint(struct net_device *dev, int config_id) struct rmnet_endpoint *ep; struct rmnet_port *port; - if (!rmnet_is_real_dev_registered(dev)) { - ep = rmnet_vnd_get_endpoint(dev); - } else { - port = rmnet_get_port_rtnl(dev); - - ep = &port->muxed_ep[config_id]; - } + port = rmnet_get_port_rtnl(dev); + ep = &port->muxed_ep[config_id]; return ep; } @@ -183,7 +178,6 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, port->rmnet_mode = mode; rmnet_set_endpoint_config(real_dev, mux_id, dev); - rmnet_set_endpoint_config(dev, mux_id, real_dev); return 0; err2: diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h index 03d473f39476..c5f5c6d957c0 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -20,9 +20,6 @@ #define RMNET_MAX_LOGICAL_EP 255 -/* Information about the next device to deliver the packet to. - * Exact usage of this parameter depends on the rmnet_mode. - */ struct rmnet_endpoint { u8 mux_id; struct net_device *egress_dev; @@ -44,7 +41,6 @@ struct rmnet_port { extern struct rtnl_link_ops rmnet_link_ops; struct rmnet_priv { - struct rmnet_endpoint local_ep; u8 mux_id; struct net_device *real_dev; }; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index 86e37cc3b52c..e0802d32d6a5 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -116,8 +116,7 @@ rmnet_map_ingress_handler(struct sk_buff *skb, } static int rmnet_map_egress_handler(struct sk_buff *skb, - struct rmnet_port *port, - struct rmnet_endpoint *ep, + struct rmnet_port *port, u8 mux_id, struct net_device *orig_dev) { int required_headroom, additional_header_len; @@ -136,10 +135,10 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, return RMNET_MAP_CONSUMED; if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) { - if (ep->mux_id == 0xff) + if (mux_id == 0xff) map_header->mux_id = 0; else - map_header->mux_id = ep->mux_id; + map_header->mux_id = mux_id; } skb->protocol = htons(ETH_P_MAP); @@ -176,14 +175,17 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) * for egress device configured in logical endpoint. Packet is then transmitted * on the egress device. 
*/ -void rmnet_egress_handler(struct sk_buff *skb, - struct rmnet_endpoint *ep) +void rmnet_egress_handler(struct sk_buff *skb) { struct net_device *orig_dev; struct rmnet_port *port; + struct rmnet_priv *priv; + u8 mux_id; orig_dev = skb->dev; - skb->dev = ep->egress_dev; + priv = netdev_priv(orig_dev); + skb->dev = priv->real_dev; + mux_id = priv->mux_id; port = rmnet_get_port(skb->dev); if (!port) { @@ -192,7 +194,7 @@ void rmnet_egress_handler(struct sk_buff *skb, } if (port->egress_data_format & RMNET_EGRESS_FORMAT_MAP) { - switch (rmnet_map_egress_handler(skb, port, ep, orig_dev)) { + switch (rmnet_map_egress_handler(skb, port, mux_id, orig_dev)) { case RMNET_MAP_CONSUMED: return; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h index f2638cf5693c..3537e4ceedb3 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h @@ -18,8 +18,7 @@ #include "rmnet_config.h" -void rmnet_egress_handler(struct sk_buff *skb, - struct rmnet_endpoint *ep); +void rmnet_egress_handler(struct sk_buff *skb); rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 7f90d5587653..4ca59a4389b8 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -45,8 +45,8 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb, struct rmnet_priv *priv; priv = netdev_priv(dev); - if (priv->local_ep.egress_dev) { - rmnet_egress_handler(skb, &priv->local_ep); + if (priv->real_dev) { + rmnet_egress_handler(skb); } else { dev->stats.tx_dropped++; kfree_skb(skb); @@ -143,21 +143,6 @@ u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev) return priv->mux_id; } -/* Gets the logical endpoint configuration for a RmNet virtual network device - * node. Caller should confirm that devices is a RmNet VND before calling. - */ -struct rmnet_endpoint *rmnet_vnd_get_endpoint(struct net_device *rmnet_dev) -{ - struct rmnet_priv *priv; - - if (!rmnet_dev) - return NULL; - - priv = netdev_priv(rmnet_dev); - - return &priv->local_ep; -} - int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable) { netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h index 8a4042f0f6bf..cae134d35774 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h @@ -17,7 +17,6 @@ #define _RMNET_VND_H_ int rmnet_vnd_do_flow_control(struct net_device *dev, int enable); -struct rmnet_endpoint *rmnet_vnd_get_endpoint(struct net_device *dev); int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, struct rmnet_port *port, struct net_device *real_dev); -- cgit v1.2.3 From 5451237ff7a77ded1d81538e3daa76dc3ee60538 Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Wed, 11 Oct 2017 18:43:56 -0600 Subject: net: qualcomm: rmnet: Remove duplicate setting of rmnet_devices The rmnet_devices information is already stored in muxed_ep, so storing this in rmnet_devices[] again is redundant. Signed-off-by: Subash Abhinov Kasiviswanathan Signed-off-by: David S. 
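For readers following the mux id plumbing through these patches: MAP framing prepends a small header to every packet, and the RMNET_MAP_GET_MUX_ID()/RMNET_MAP_GET_LENGTH() accessors read it straight out of skb->data. An abridged, illustrative layout (the real struct packs the pad length and command/data bits into bitfields in the first byte):

    struct demo_map_header {
            u8     flags;    /* pad length + command/data bit, simplified */
            u8     mux_id;   /* selects the logical endpoint */
            __be16 pkt_len;  /* payload length including padding */
    } __packed;

    static u8 demo_get_mux_id(const struct sk_buff *skb)
    {
            return ((const struct demo_map_header *)skb->data)->mux_id;
    }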
Miller --- drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h | 1 - drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 8 ++++---- 2 files changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h index c5f5c6d957c0..123ccf41fc95 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -33,7 +33,6 @@ struct rmnet_port { struct rmnet_endpoint muxed_ep[RMNET_MAX_LOGICAL_EP]; u32 ingress_data_format; u32 egress_data_format; - struct net_device *rmnet_devices[RMNET_MAX_LOGICAL_EP]; u8 nr_rmnet_devs; u8 rmnet_mode; }; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 4ca59a4389b8..8b8497b17f52 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -105,12 +105,12 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, struct rmnet_priv *priv; int rc; - if (port->rmnet_devices[id]) + if (port->muxed_ep[id].egress_dev) return -EINVAL; rc = register_netdevice(rmnet_dev); if (!rc) { - port->rmnet_devices[id] = rmnet_dev; + port->muxed_ep[id].egress_dev = rmnet_dev; port->nr_rmnet_devs++; rmnet_dev->rtnl_link_ops = &rmnet_link_ops; @@ -127,10 +127,10 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, int rmnet_vnd_dellink(u8 id, struct rmnet_port *port) { - if (id >= RMNET_MAX_LOGICAL_EP || !port->rmnet_devices[id]) + if (id >= RMNET_MAX_LOGICAL_EP || !port->muxed_ep[id].egress_dev) return -EINVAL; - port->rmnet_devices[id] = NULL; + port->muxed_ep[id].egress_dev = NULL; port->nr_rmnet_devs--; return 0; } -- cgit v1.2.3 From 3352e6c45760fd6675468a35ef699ab94617cab4 Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Wed, 11 Oct 2017 18:43:57 -0600 Subject: net: qualcomm: rmnet: Convert the muxed endpoint to hlist Rather than using a static array, use a hlist to store the muxed endpoints and use the mux id to query the rmnet_device. This is useful as usually very few mux ids are used. Signed-off-by: Subash Abhinov Kasiviswanathan Cc: Dan Williams Signed-off-by: David S. 
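Since each mux id resolves to at most one endpoint, the per-mux hlists introduced below hold zero or one entry each; the hlist is effectively a sparse, RCU-friendly slot table rather than a real hash chain. The core idiom:

    /* writer side, under RTNL */
    hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
    ...
    hlist_del_init_rcu(&ep->hlnode);
    kfree(ep);  /* kfree_rcu() would be the conservative choice if
                 * datapath readers might still hold a reference */

    /* reader side, under rcu_read_lock() */
    hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode)
            if (ep->mux_id == mux_id)
                    return ep;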
Miller --- drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 75 ++++++++++++---------- drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h | 4 +- .../net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 17 +++-- .../ethernet/qualcomm/rmnet/rmnet_map_command.c | 4 +- drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 15 +++-- drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h | 6 +- 6 files changed, 68 insertions(+), 53 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 96058bbccf71..b5fe3f4d22a6 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -61,18 +61,6 @@ rmnet_get_port_rtnl(const struct net_device *real_dev) return rtnl_dereference(real_dev->rx_handler_data); } -static struct rmnet_endpoint* -rmnet_get_endpoint(struct net_device *dev, int config_id) -{ - struct rmnet_endpoint *ep; - struct rmnet_port *port; - - port = rmnet_get_port_rtnl(dev); - ep = &port->muxed_ep[config_id]; - - return ep; -} - static int rmnet_unregister_real_device(struct net_device *real_dev, struct rmnet_port *port) { @@ -93,7 +81,7 @@ static int rmnet_unregister_real_device(struct net_device *real_dev, static int rmnet_register_real_device(struct net_device *real_dev) { struct rmnet_port *port; - int rc; + int rc, entry; ASSERT_RTNL(); @@ -114,26 +102,13 @@ static int rmnet_register_real_device(struct net_device *real_dev) /* hold on to real dev for MAP data */ dev_hold(real_dev); + for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++) + INIT_HLIST_HEAD(&port->muxed_ep[entry]); + netdev_dbg(real_dev, "registered with rmnet\n"); return 0; } -static void rmnet_set_endpoint_config(struct net_device *dev, - u8 mux_id, struct net_device *egress_dev) -{ - struct rmnet_endpoint *ep; - - netdev_dbg(dev, "id %d dev %s\n", mux_id, egress_dev->name); - - ep = rmnet_get_endpoint(dev, mux_id); - /* This config is cleared on every set, so its ok to not - * clear it on a device delete. 
- */ - memset(ep, 0, sizeof(struct rmnet_endpoint)); - ep->egress_dev = egress_dev; - ep->mux_id = mux_id; -} - static int rmnet_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) @@ -145,6 +120,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, RMNET_EGRESS_FORMAT_MAP; struct net_device *real_dev; int mode = RMNET_EPMODE_VND; + struct rmnet_endpoint *ep; struct rmnet_port *port; int err = 0; u16 mux_id; @@ -156,6 +132,10 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, if (!data[IFLA_VLAN_ID]) return -EINVAL; + ep = kzalloc(sizeof(*ep), GFP_ATOMIC); + if (!ep) + return -ENOMEM; + mux_id = nla_get_u16(data[IFLA_VLAN_ID]); err = rmnet_register_real_device(real_dev); @@ -163,7 +143,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, goto err0; port = rmnet_get_port_rtnl(real_dev); - err = rmnet_vnd_newlink(mux_id, dev, port, real_dev); + err = rmnet_vnd_newlink(mux_id, dev, port, real_dev, ep); if (err) goto err1; @@ -177,11 +157,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev, port->ingress_data_format = ingress_format; port->rmnet_mode = mode; - rmnet_set_endpoint_config(real_dev, mux_id, dev); + hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]); return 0; err2: - rmnet_vnd_dellink(mux_id, port); + rmnet_vnd_dellink(mux_id, port, ep); err1: rmnet_unregister_real_device(real_dev, port); err0: @@ -191,6 +171,7 @@ err0: static void rmnet_dellink(struct net_device *dev, struct list_head *head) { struct net_device *real_dev; + struct rmnet_endpoint *ep; struct rmnet_port *port; u8 mux_id; @@ -204,8 +185,15 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head) port = rmnet_get_port_rtnl(real_dev); mux_id = rmnet_vnd_get_mux(dev); - rmnet_vnd_dellink(mux_id, port); netdev_upper_dev_unlink(dev, real_dev); + + ep = rmnet_get_endpoint(port, mux_id); + if (ep) { + hlist_del_init_rcu(&ep->hlnode); + rmnet_vnd_dellink(mux_id, port, ep); + kfree(ep); + } + rmnet_unregister_real_device(real_dev, port); unregister_netdevice_queue(dev, head); @@ -214,11 +202,16 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head) static int rmnet_dev_walk_unreg(struct net_device *rmnet_dev, void *data) { struct rmnet_walk_data *d = data; + struct rmnet_endpoint *ep; u8 mux_id; mux_id = rmnet_vnd_get_mux(rmnet_dev); - - rmnet_vnd_dellink(mux_id, d->port); + ep = rmnet_get_endpoint(d->port, mux_id); + if (ep) { + hlist_del_init_rcu(&ep->hlnode); + rmnet_vnd_dellink(mux_id, d->port, ep); + kfree(ep); + } netdev_upper_dev_unlink(rmnet_dev, d->real_dev); unregister_netdevice_queue(rmnet_dev, d->head); @@ -316,6 +309,18 @@ struct rmnet_port *rmnet_get_port(struct net_device *real_dev) return NULL; } +struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id) +{ + struct rmnet_endpoint *ep; + + hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode) { + if (ep->mux_id == mux_id) + return ep; + } + + return NULL; +} + /* Startup/Shutdown */ static int __init rmnet_init(void) diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h index 123ccf41fc95..8849986980f8 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -23,6 +23,7 @@ struct rmnet_endpoint { u8 mux_id; struct net_device *egress_dev; + struct hlist_node hlnode; }; /* One instance of this 
structure is instantiated for each real_dev associated @@ -30,11 +31,11 @@ struct rmnet_endpoint { */ struct rmnet_port { struct net_device *dev; - struct rmnet_endpoint muxed_ep[RMNET_MAX_LOGICAL_EP]; u32 ingress_data_format; u32 egress_data_format; u8 nr_rmnet_devs; u8 rmnet_mode; + struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP]; }; extern struct rtnl_link_ops rmnet_link_ops; @@ -45,5 +46,6 @@ struct rmnet_priv { }; struct rmnet_port *rmnet_get_port(struct net_device *real_dev); +struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id); #endif /* _RMNET_CONFIG_H_ */ diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index e0802d32d6a5..fa24ffb69713 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -71,19 +71,18 @@ __rmnet_map_ingress_handler(struct sk_buff *skb, & RMNET_INGRESS_FORMAT_MAP_COMMANDS) return rmnet_map_command(skb, port); - kfree_skb(skb); - return RX_HANDLER_CONSUMED; + goto free_skb; } mux_id = RMNET_MAP_GET_MUX_ID(skb); len = RMNET_MAP_GET_LENGTH(skb) - RMNET_MAP_GET_PAD(skb); - if (mux_id >= RMNET_MAX_LOGICAL_EP) { - kfree_skb(skb); - return RX_HANDLER_CONSUMED; - } + if (mux_id >= RMNET_MAX_LOGICAL_EP) + goto free_skb; - ep = &port->muxed_ep[mux_id]; + ep = rmnet_get_endpoint(port, mux_id); + if (!ep) + goto free_skb; if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEMUXING) skb->dev = ep->egress_dev; @@ -93,6 +92,10 @@ __rmnet_map_ingress_handler(struct sk_buff *skb, skb_trim(skb, len); rmnet_set_skb_proto(skb); return rmnet_deliver_skb(skb); + +free_skb: + kfree_skb(skb); + return RX_HANDLER_CONSUMED; } static rx_handler_result_t diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c index d1ea5e21b982..74d362f71cce 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c @@ -17,7 +17,7 @@ #include "rmnet_vnd.h" static u8 rmnet_map_do_flow_control(struct sk_buff *skb, - struct rmnet_port *rdinfo, + struct rmnet_port *port, int enable) { struct rmnet_map_control_command *cmd; @@ -37,7 +37,7 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb, return RX_HANDLER_CONSUMED; } - ep = &rdinfo->muxed_ep[mux_id]; + ep = rmnet_get_endpoint(port, mux_id); vnd = ep->egress_dev; ip_family = cmd->flow_control.ip_family; diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 8b8497b17f52..1b6747d7154f 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -100,17 +100,19 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev) int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, struct rmnet_port *port, - struct net_device *real_dev) + struct net_device *real_dev, + struct rmnet_endpoint *ep) { struct rmnet_priv *priv; int rc; - if (port->muxed_ep[id].egress_dev) + if (ep->egress_dev) return -EINVAL; rc = register_netdevice(rmnet_dev); if (!rc) { - port->muxed_ep[id].egress_dev = rmnet_dev; + ep->egress_dev = rmnet_dev; + ep->mux_id = id; port->nr_rmnet_devs++; rmnet_dev->rtnl_link_ops = &rmnet_link_ops; @@ -125,12 +127,13 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, return rc; } -int rmnet_vnd_dellink(u8 id, struct rmnet_port *port) +int rmnet_vnd_dellink(u8 id, struct rmnet_port *port, + struct rmnet_endpoint *ep) { 
- if (id >= RMNET_MAX_LOGICAL_EP || !port->muxed_ep[id].egress_dev) + if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev) return -EINVAL; - port->muxed_ep[id].egress_dev = NULL; + ep->egress_dev = NULL; port->nr_rmnet_devs--; return 0; } diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h index cae134d35774..71e4c3286951 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h @@ -19,8 +19,10 @@ int rmnet_vnd_do_flow_control(struct net_device *dev, int enable); int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev, struct rmnet_port *port, - struct net_device *real_dev); -int rmnet_vnd_dellink(u8 id, struct rmnet_port *port); + struct net_device *real_dev, + struct rmnet_endpoint *ep); +int rmnet_vnd_dellink(u8 id, struct rmnet_port *port, + struct rmnet_endpoint *ep); void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev); void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev); u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev); -- cgit v1.2.3 From 60d58f971c1077a0f2467b2d5bc38058df43a819 Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Wed, 11 Oct 2017 18:43:58 -0600 Subject: net: qualcomm: rmnet: Implement bridge mode Add support to bridge two devices which can send multiplexing and aggregation (MAP) data. This is done only when the data itself is not going to be consumed in the stack but is being passed on to a different endpoint. This is mainly used for testing. Signed-off-by: Subash Abhinov Kasiviswanathan Signed-off-by: David S. Miller --- drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 93 +++++++++++++++++++++- drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h | 7 +- .../net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 26 +++++- drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 2 + 4 files changed, 122 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index b5fe3f4d22a6..71bee1af71ef 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -109,6 +109,36 @@ static int rmnet_register_real_device(struct net_device *real_dev) return 0; } +static void rmnet_unregister_bridge(struct net_device *dev, + struct rmnet_port *port) +{ + struct net_device *rmnet_dev, *bridge_dev; + struct rmnet_port *bridge_port; + + if (port->rmnet_mode != RMNET_EPMODE_BRIDGE) + return; + + /* bridge slave handling */ + if (!port->nr_rmnet_devs) { + rmnet_dev = netdev_master_upper_dev_get_rcu(dev); + netdev_upper_dev_unlink(dev, rmnet_dev); + + bridge_dev = port->bridge_ep; + + bridge_port = rmnet_get_port_rtnl(bridge_dev); + bridge_port->bridge_ep = NULL; + bridge_port->rmnet_mode = RMNET_EPMODE_VND; + } else { + bridge_dev = port->bridge_ep; + + bridge_port = rmnet_get_port_rtnl(bridge_dev); + rmnet_dev = netdev_master_upper_dev_get_rcu(bridge_dev); + netdev_upper_dev_unlink(bridge_dev, rmnet_dev); + + rmnet_unregister_real_device(bridge_dev, bridge_port); + } +} + static int rmnet_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) @@ -190,10 +220,10 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head) ep = rmnet_get_endpoint(port, mux_id); if (ep) { hlist_del_init_rcu(&ep->hlnode); + rmnet_unregister_bridge(dev, port); 
rmnet_vnd_dellink(mux_id, port, ep); kfree(ep); } - rmnet_unregister_real_device(real_dev, port); unregister_netdevice_queue(dev, head); @@ -237,6 +267,8 @@ static void rmnet_force_unassociate_device(struct net_device *dev) d.port = port; rcu_read_lock(); + rmnet_unregister_bridge(dev, port); + netdev_walk_all_lower_dev_rcu(real_dev, rmnet_dev_walk_unreg, &d); rcu_read_unlock(); unregister_netdevice_many(&list); @@ -321,6 +353,65 @@ struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id) return NULL; } +int rmnet_add_bridge(struct net_device *rmnet_dev, + struct net_device *slave_dev, + struct netlink_ext_ack *extack) +{ + struct rmnet_priv *priv = netdev_priv(rmnet_dev); + struct net_device *real_dev = priv->real_dev; + struct rmnet_port *port, *slave_port; + int err; + + port = rmnet_get_port(real_dev); + + /* If there is more than one rmnet dev attached, its probably being + * used for muxing. Skip the briding in that case + */ + if (port->nr_rmnet_devs > 1) + return -EINVAL; + + if (rmnet_is_real_dev_registered(slave_dev)) + return -EBUSY; + + err = rmnet_register_real_device(slave_dev); + if (err) + return -EBUSY; + + err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL, + extack); + if (err) + return -EINVAL; + + slave_port = rmnet_get_port(slave_dev); + slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE; + slave_port->bridge_ep = real_dev; + + port->rmnet_mode = RMNET_EPMODE_BRIDGE; + port->bridge_ep = slave_dev; + + netdev_dbg(slave_dev, "registered with rmnet as slave\n"); + return 0; +} + +int rmnet_del_bridge(struct net_device *rmnet_dev, + struct net_device *slave_dev) +{ + struct rmnet_priv *priv = netdev_priv(rmnet_dev); + struct net_device *real_dev = priv->real_dev; + struct rmnet_port *port, *slave_port; + + port = rmnet_get_port(real_dev); + port->rmnet_mode = RMNET_EPMODE_VND; + port->bridge_ep = NULL; + + netdev_upper_dev_unlink(slave_dev, rmnet_dev); + slave_port = rmnet_get_port(slave_dev); + rmnet_unregister_real_device(slave_dev, slave_port); + + netdev_dbg(slave_dev, "removed from rmnet as slave\n"); + return 0; +} + /* Startup/Shutdown */ static int __init rmnet_init(void) diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h index 8849986980f8..60115e69e415 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -36,6 +36,7 @@ struct rmnet_port { u8 nr_rmnet_devs; u8 rmnet_mode; struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP]; + struct net_device *bridge_ep; }; extern struct rtnl_link_ops rmnet_link_ops; @@ -47,5 +48,9 @@ struct rmnet_priv { struct rmnet_port *rmnet_get_port(struct net_device *real_dev); struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id); - +int rmnet_add_bridge(struct net_device *rmnet_dev, + struct net_device *slave_dev, + struct netlink_ext_ack *extack); +int rmnet_del_bridge(struct net_device *rmnet_dev, + struct net_device *slave_dev); #endif /* _RMNET_CONFIG_H_ */ diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index fa24ffb69713..df3d2d16ce55 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -149,6 +149,17 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, return RMNET_MAP_SUCCESS; } +static rx_handler_result_t +rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev) +{ + if 
(bridge_dev) { + skb->dev = bridge_dev; + dev_queue_xmit(skb); + } + + return RX_HANDLER_CONSUMED; +} + /* Ingress / Egress Entry Points */ /* Processes packet as per ingress data format for receiving device. Logical @@ -157,10 +168,10 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, */ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) { - struct rmnet_port *port; + int rc = RX_HANDLER_CONSUMED; struct sk_buff *skb = *pskb; + struct rmnet_port *port; struct net_device *dev; - int rc; if (!skb) return RX_HANDLER_CONSUMED; @@ -168,8 +179,15 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) dev = skb->dev; port = rmnet_get_port(dev); - if (port->ingress_data_format & RMNET_INGRESS_FORMAT_MAP) - rc = rmnet_map_ingress_handler(skb, port); + switch (port->rmnet_mode) { + case RMNET_EPMODE_VND: + if (port->ingress_data_format & RMNET_INGRESS_FORMAT_MAP) + rc = rmnet_map_ingress_handler(skb, port); + break; + case RMNET_EPMODE_BRIDGE: + rc = rmnet_bridge_handler(skb, port->bridge_ep); + break; + } return rc; } diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 1b6747d7154f..12bd0bbd5235 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -74,6 +74,8 @@ static const struct net_device_ops rmnet_vnd_ops = { .ndo_start_xmit = rmnet_vnd_start_xmit, .ndo_change_mtu = rmnet_vnd_change_mtu, .ndo_get_iflink = rmnet_vnd_get_iflink, + .ndo_add_slave = rmnet_add_bridge, + .ndo_del_slave = rmnet_del_bridge, }; /* Called by kernel whenever a new rmnet device is created. Sets MTU, -- cgit v1.2.3 From 717503b9cf57c0bb7ea4d3a9f5699c9a04adf988 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 11 Oct 2017 09:41:09 +0200 Subject: net: sched: convert cls_flower->egress_dev users to tc_setup_cb_egdev infra The only user of cls_flower->egress_dev is mlx5. So do the conversion there alongside with the code originating the call in cls_flower function fl_hw_replace_filter to the newly introduced egress device callback infrastucture. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
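The shape of the new egdev infrastructure, as the mlx5 representor code below uses it: a driver that cannot offload a classifier on the ingress device registers a callback keyed on the egress device named in the filter's actions, and cls_flower reaches it through tc_setup_cb_call(). The registration pattern:

    static int demo_setup_tc_cb(enum tc_setup_type type, void *type_data,
                                void *cb_priv)
    {
            struct net_device *uplink = cb_priv;

            /* funnel the offload request to the device that can do it */
            return demo_setup_tc(uplink, type, type_data);
    }

    err = tc_setup_cb_egdev_register(netdev, demo_setup_tc_cb, uplink);
    ...
    tc_setup_cb_egdev_unregister(netdev, demo_setup_tc_cb, uplink);

Here demo_setup_tc() stands in for the driver's ndo_setup_tc handler.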
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 ++ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 +- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 31 +++++++---- include/net/pkt_cls.h | 5 +- net/sched/cls_api.c | 13 +++-- net/sched/cls_flower.c | 63 ++++++++++++----------- 6 files changed, 73 insertions(+), 46 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index cc13d3dbd366..5ec6d3e8dc89 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -1081,6 +1081,9 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv, struct ethtool_flash *flash); +int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data); + /* mlx5e generic netdev management API */ struct net_device* mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index cc11bbbd0309..2a32102e7648 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3108,8 +3108,8 @@ static int mlx5e_setup_tc_cls_flower(struct net_device *dev, } #endif -static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, - void *type_data) +int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) { switch (type) { #ifdef CONFIG_MLX5_ESWITCH diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 45e03c427faf..765fc74fbb1b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include @@ -667,14 +668,6 @@ mlx5e_rep_setup_tc_cls_flower(struct net_device *dev, cls_flower->common.chain_index) return -EOPNOTSUPP; - if (cls_flower->egress_dev) { - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - - dev = mlx5_eswitch_get_uplink_netdev(esw); - return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, - cls_flower); - } - switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: return mlx5e_configure_flower(priv, cls_flower); @@ -698,6 +691,14 @@ static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, } } +static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct net_device *dev = cb_priv; + + return mlx5e_setup_tc(dev, type, type_data); +} + bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; @@ -1017,15 +1018,24 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) goto err_detach_netdev; } + err = tc_setup_cb_egdev_register(netdev, mlx5e_rep_setup_tc_cb, + mlx5_eswitch_get_uplink_netdev(esw)); + if (err) + goto err_neigh_cleanup; + err = register_netdev(netdev); if (err) { pr_warn("Failed to register representor netdev for vport %d\n", rep->vport); - goto err_neigh_cleanup; + goto err_egdev_cleanup; } return 0; +err_egdev_cleanup: + tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb, + mlx5_eswitch_get_uplink_netdev(esw)); + err_neigh_cleanup: mlx5e_rep_neigh_cleanup(rpriv); @@ -1047,7 +1057,8 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) void *ppriv = 
priv->ppriv; unregister_netdev(rep->netdev); - + tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb, + mlx5_eswitch_get_uplink_netdev(esw)); mlx5e_rep_neigh_cleanup(rpriv); mlx5e_detach_netdev(priv); mlx5e_destroy_netdev(priv); diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 6f8149c82571..c0bdf5cad727 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -206,8 +206,6 @@ int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts); int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts); int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts, struct net_device **hw_dev); -int tcf_exts_egdev_cb_call(struct tcf_exts *exts, enum tc_setup_type type, - void *type_data, bool err_stop); /** * struct tcf_pkt_info - packet information @@ -407,6 +405,9 @@ tcf_match_indev(struct sk_buff *skb, int ifindex) } #endif /* CONFIG_NET_CLS_IND */ +int tc_setup_cb_call(struct tcf_exts *exts, enum tc_setup_type type, + void *type_data, bool err_stop); + struct tc_cls_common_offload { u32 chain_index; __be16 protocol; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 99f9432f63cf..51994a202585 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1026,8 +1026,9 @@ int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts, } EXPORT_SYMBOL(tcf_exts_get_dev); -int tcf_exts_egdev_cb_call(struct tcf_exts *exts, enum tc_setup_type type, - void *type_data, bool err_stop) +static int tc_exts_setup_cb_egdev_call(struct tcf_exts *exts, + enum tc_setup_type type, + void *type_data, bool err_stop) { int ok_count = 0; #ifdef CONFIG_NET_CLS_ACT @@ -1054,7 +1055,13 @@ int tcf_exts_egdev_cb_call(struct tcf_exts *exts, enum tc_setup_type type, #endif return ok_count; } -EXPORT_SYMBOL(tcf_exts_egdev_cb_call); + +int tc_setup_cb_call(struct tcf_exts *exts, enum tc_setup_type type, + void *type_data, bool err_stop) +{ + return tc_exts_setup_cb_egdev_call(exts, type, type_data, err_stop); +} +EXPORT_SYMBOL(tc_setup_cb_call); static int __init tc_filter_init(void) { diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index db831ac708f6..5b7bb968d1d4 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -88,7 +88,6 @@ struct cls_fl_filter { u32 handle; u32 flags; struct rcu_head rcu; - struct net_device *hw_dev; }; static unsigned short int fl_mask_range(const struct fl_flow_mask *mask) @@ -201,16 +200,17 @@ static void fl_destroy_filter(struct rcu_head *head) static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f) { struct tc_cls_flower_offload cls_flower = {}; - struct net_device *dev = f->hw_dev; - - if (!tc_can_offload(dev)) - return; + struct net_device *dev = tp->q->dev_queue->dev; tc_cls_common_offload_init(&cls_flower.common, tp); cls_flower.command = TC_CLSFLOWER_DESTROY; cls_flower.cookie = (unsigned long) f; - dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, &cls_flower); + if (tc_can_offload(dev)) + dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, + &cls_flower); + tc_setup_cb_call(&f->exts, TC_SETUP_CLSFLOWER, + &cls_flower, false); } static int fl_hw_replace_filter(struct tcf_proto *tp, @@ -220,20 +220,9 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, { struct net_device *dev = tp->q->dev_queue->dev; struct tc_cls_flower_offload cls_flower = {}; + bool skip_sw = tc_skip_sw(f->flags); int err; - if (!tc_can_offload(dev)) { - if (tcf_exts_get_dev(dev, &f->exts, &f->hw_dev) || - (f->hw_dev && !tc_can_offload(f->hw_dev))) { - f->hw_dev = dev; - return 
tc_skip_sw(f->flags) ? -EINVAL : 0; - } - dev = f->hw_dev; - cls_flower.egress_dev = true; - } else { - f->hw_dev = dev; - } - tc_cls_common_offload_init(&cls_flower.common, tp); cls_flower.command = TC_CLSFLOWER_REPLACE; cls_flower.cookie = (unsigned long) f; @@ -242,31 +231,47 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, cls_flower.key = &f->mkey; cls_flower.exts = &f->exts; - err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, - &cls_flower); - if (!err) - f->flags |= TCA_CLS_FLAGS_IN_HW; + if (tc_can_offload(dev)) { + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, + &cls_flower); + if (err) { + if (skip_sw) + return err; + } else { + f->flags |= TCA_CLS_FLAGS_IN_HW; + } + } - if (tc_skip_sw(f->flags)) + err = tc_setup_cb_call(&f->exts, TC_SETUP_CLSFLOWER, + &cls_flower, skip_sw); + if (err < 0) { + fl_hw_destroy_filter(tp, f); return err; + } else if (err > 0) { + f->flags |= TCA_CLS_FLAGS_IN_HW; + } + + if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW)) + return -EINVAL; + return 0; } static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f) { struct tc_cls_flower_offload cls_flower = {}; - struct net_device *dev = f->hw_dev; - - if (!tc_can_offload(dev)) - return; + struct net_device *dev = tp->q->dev_queue->dev; tc_cls_common_offload_init(&cls_flower.common, tp); cls_flower.command = TC_CLSFLOWER_STATS; cls_flower.cookie = (unsigned long) f; cls_flower.exts = &f->exts; - dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, - &cls_flower); + if (tc_can_offload(dev)) + dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, + &cls_flower); + tc_setup_cb_call(&f->exts, TC_SETUP_CLSFLOWER, + &cls_flower, false); } static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f) -- cgit v1.2.3 From 5dad61b83840d7eceaba5bf316419be11bbb993a Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 11 Oct 2017 13:17:25 +0300 Subject: net/mlx4_en: Replace netdev parameter with priv in XDP xmit function The struct net_device parameter was passed only to extract struct mlx4_en_priv out of it. Here we pass the priv parameter directly. Signed-off-by: Tariq Toukan Signed-off-by: David S. 
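One reason this refactor is free of side effects: netdev_priv() is not a table lookup but constant-offset pointer arithmetic, as defined in include/linux/netdevice.h:

    static inline void *netdev_priv(const struct net_device *dev)
    {
            return (char *)dev + ALIGN(sizeof(struct net_device),
                                       NETDEV_ALIGN);
    }

So the gain is mainly clarity: the XDP path passes the value the caller already holds instead of threading an otherwise unused net_device pointer through the hot loop.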
Miller --- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 2 +- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 3 +-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index a7866954d106..92aec17f4b4d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -778,7 +778,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud case XDP_PASS: break; case XDP_TX: - if (likely(!mlx4_en_xmit_frame(ring, frags, dev, + if (likely(!mlx4_en_xmit_frame(ring, frags, priv, length, cq_ring, &doorbell_pending))) { frags[0].page = NULL; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 2cc82dc07397..f16774c9c347 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -1087,10 +1087,9 @@ tx_drop: netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring, struct mlx4_en_rx_alloc *frame, - struct net_device *dev, unsigned int length, + struct mlx4_en_priv *priv, unsigned int length, int tx_ind, bool *doorbell_pending) { - struct mlx4_en_priv *priv = netdev_priv(dev); union mlx4_wqe_qpn_vlan qpn_vlan = {}; struct mlx4_en_tx_desc *tx_desc; struct mlx4_en_tx_info *tx_info; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 245e9ea09ab2..8cad9b4f1936 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -693,7 +693,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring, struct mlx4_en_rx_alloc *frame, - struct net_device *dev, unsigned int length, + struct mlx4_en_priv *priv, unsigned int length, int tx_ind, bool *doorbell_pending); void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring); bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring, -- cgit v1.2.3 From f6f0aa97413a420606aabe8142f8a5c0f15b9246 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 11 Oct 2017 13:17:26 +0300 Subject: net/mlx4_en: Obsolete call to generic write_desc in XDP xmit flow Function mlx4_en_tx_write_desc() is not optimized to use of XDP xmit. Use the relevant parts inline instead. Signed-off-by: Tariq Toukan Signed-off-by: David S. 
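The inlining done below has to preserve one ordering rule: every body field of the descriptor must be visible to the device before the ownership/opcode byte flips. Stripped to its skeleton, the sequence the patch keeps is:

    tx_desc->ctrl.qpn_vlan = qpn_vlan;       /* fill descriptor body */
    tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;

    dma_wmb();                /* body reaches memory before ownership */

    tx_desc->ctrl.owner_opcode = op_own;     /* hand the slot to HW */

dma_wmb() is the lighter-weight barrier that orders writes to DMA-coherent memory, which is all this path needs.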
Miller --- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index f16774c9c347..ac7254e3f909 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -1090,7 +1090,9 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring, struct mlx4_en_priv *priv, unsigned int length, int tx_ind, bool *doorbell_pending) { - union mlx4_wqe_qpn_vlan qpn_vlan = {}; + union mlx4_wqe_qpn_vlan qpn_vlan = { + .fence_size = MLX4_EN_XDP_TX_REAL_SZ, + }; struct mlx4_en_tx_desc *tx_desc; struct mlx4_en_tx_info *tx_info; struct mlx4_wqe_data_seg *data; @@ -1140,7 +1142,6 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring, data->byte_count = cpu_to_be32(length); /* tx completion can avoid cache line miss for common cases */ - tx_desc->ctrl.srcrb_flags = priv->ctrl_flags; op_own = cpu_to_be32(MLX4_OPCODE_SEND) | ((ring->prod & ring->size) ? @@ -1151,10 +1152,16 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring, ring->prod += MLX4_EN_XDP_TX_NRTXBB; - qpn_vlan.fence_size = MLX4_EN_XDP_TX_REAL_SZ; + tx_desc->ctrl.qpn_vlan = qpn_vlan; + tx_desc->ctrl.srcrb_flags = priv->ctrl_flags; + + /* Ensure new descriptor hits memory + * before setting ownership of this descriptor to HW + */ + dma_wmb(); + tx_desc->ctrl.owner_opcode = op_own; + ring->xmit_more++; - mlx4_en_tx_write_desc(ring, tx_desc, qpn_vlan, TXBB_SIZE, 0, - op_own, false, false); *doorbell_pending = true; return NETDEV_TX_OK; -- cgit v1.2.3 From f025fd6061e120713d6c11c92983804c6805d6cb Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 11 Oct 2017 13:17:27 +0300 Subject: net/mlx4_en: XDP_TX, assign constant values of TX descs on ring creaion In XDP_TX, some fields in tx_info and tx_desc are constants across all entries of the different XDP_TX rings. Assign values to these fields on ring creation time, rather than in data-path. Patchset performance tests: Tested on ConnectX3Pro, Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz Single queue no-RSS optimization ON. XDP_TX packet rate: ------------------------------ Before | After | Gain | 13.7 Mpps | 14.0 Mpps | %2.2 | ------------------------------ Signed-off-by: Tariq Toukan Signed-off-by: David S. 
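The optimization is the classic hoisting of loop-invariant work: fields identical for every XDP_TX slot are written once at ring setup, leaving only the per-packet fields on the hot path. As a sketch (descriptor and helper names hypothetical):

    /* ring setup: constants written once per slot */
    for (i = 0; i < ring->size; i++) {
            struct demo_tx_desc *d = slot(ring, i);

            d->lkey  = ring->mr_key;      /* same memory key everywhere */
            d->flags = ring->ctrl_flags;  /* same completion flags */
    }

    /* hot path: only what varies per packet */
    d->addr       = cpu_to_be64(dma + offset);
    d->byte_count = cpu_to_be32(length);

The ~2% packet-rate gain quoted above is attributed to removing exactly these invariant stores from the per-packet loop.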
Miller --- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 1 + drivers/net/ethernet/mellanox/mlx4/en_tx.c | 38 ++++++++++++++++---------- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2 ++ 3 files changed, 27 insertions(+), 14 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index e4c7a80ef5a8..d611df2f274d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1752,6 +1752,7 @@ int mlx4_en_start_port(struct net_device *dev) mlx4_en_arm_cq(priv, cq); } else { + mlx4_en_init_tx_xdp_ring_descs(priv, tx_ring); mlx4_en_init_recycle_ring(priv, i); /* XDP TX CQ should never be armed */ } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index ac7254e3f909..596445a4a241 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -1085,14 +1085,35 @@ tx_drop: #define MLX4_EN_XDP_TX_REAL_SZ (((CTRL_SIZE + MLX4_EN_XDP_TX_NRTXBB * DS_SIZE) \ / 16) & 0x3f) +void mlx4_en_init_tx_xdp_ring_descs(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring) +{ + int i; + + for (i = 0; i < ring->size; i++) { + struct mlx4_en_tx_info *tx_info = &ring->tx_info[i]; + struct mlx4_en_tx_desc *tx_desc = ring->buf + + (i << LOG_TXBB_SIZE); + + tx_info->map0_byte_count = PAGE_SIZE; + tx_info->nr_txbb = MLX4_EN_XDP_TX_NRTXBB; + tx_info->data_offset = offsetof(struct mlx4_en_tx_desc, data); + tx_info->ts_requested = 0; + tx_info->nr_maps = 1; + tx_info->linear = 1; + tx_info->inl = 0; + + tx_desc->data.lkey = ring->mr_key; + tx_desc->ctrl.qpn_vlan.fence_size = MLX4_EN_XDP_TX_REAL_SZ; + tx_desc->ctrl.srcrb_flags = priv->ctrl_flags; + } +} + netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring, struct mlx4_en_rx_alloc *frame, struct mlx4_en_priv *priv, unsigned int length, int tx_ind, bool *doorbell_pending) { - union mlx4_wqe_qpn_vlan qpn_vlan = { - .fence_size = MLX4_EN_XDP_TX_REAL_SZ, - }; struct mlx4_en_tx_desc *tx_desc; struct mlx4_en_tx_info *tx_info; struct mlx4_wqe_data_seg *data; @@ -1124,20 +1145,12 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring, tx_info->page = frame->page; frame->page = NULL; tx_info->map0_dma = dma; - tx_info->map0_byte_count = PAGE_SIZE; - tx_info->nr_txbb = MLX4_EN_XDP_TX_NRTXBB; tx_info->nr_bytes = max_t(unsigned int, length, ETH_ZLEN); - tx_info->data_offset = offsetof(struct mlx4_en_tx_desc, data); - tx_info->ts_requested = 0; - tx_info->nr_maps = 1; - tx_info->linear = 1; - tx_info->inl = 0; dma_sync_single_range_for_device(priv->ddev, dma, frame->page_offset, length, PCI_DMA_TODEVICE); data->addr = cpu_to_be64(dma + frame->page_offset); - data->lkey = ring->mr_key; dma_wmb(); data->byte_count = cpu_to_be32(length); @@ -1152,9 +1165,6 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring, ring->prod += MLX4_EN_XDP_TX_NRTXBB; - tx_desc->ctrl.qpn_vlan = qpn_vlan; - tx_desc->ctrl.srcrb_flags = priv->ctrl_flags; - /* Ensure new descriptor hits memory * before setting ownership of this descriptor to HW */ diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 8cad9b4f1936..1856e279a7e0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -705,6 +705,8 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, int node, int queue_index); void mlx4_en_destroy_tx_ring(struct 
mlx4_en_priv *priv, struct mlx4_en_tx_ring **pring); +void mlx4_en_init_tx_xdp_ring_descs(struct mlx4_en_priv *priv, + struct mlx4_en_tx_ring *ring); int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, int cq, int user_prio); -- cgit v1.2.3 From 3f7832c26cc0cad2245981f777f3ee684399ce93 Mon Sep 17 00:00:00 2001 From: Timur Tabi Date: Thu, 12 Oct 2017 12:42:04 -0500 Subject: Revert "net: qcom/emac: enforce DMA address restrictions" This reverts commit df1ec1b9d0df57e96011f175418dc95b1af46821. It turns out that memory allocated via dma_alloc_coherent is always aligned to the size of the buffer, so there's no way the RRD and RFD can ever be in separate 32-bit regions. Signed-off-by: Timur Tabi Signed-off-by: David S. Miller --- drivers/net/ethernet/qualcomm/emac/emac-mac.c | 39 +++++++++++---------------- 1 file changed, 15 insertions(+), 24 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 0f5ece5d9507..9cbb27263742 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c @@ -734,11 +734,6 @@ static int emac_rx_descs_alloc(struct emac_adapter *adpt) rx_q->rrd.size = rx_q->rrd.count * (adpt->rrd_size * 4); rx_q->rfd.size = rx_q->rfd.count * (adpt->rfd_size * 4); - /* Check if the RRD and RFD are aligned properly, and if not, adjust. */ - if (upper_32_bits(ring_header->dma_addr) != - upper_32_bits(ring_header->dma_addr + ALIGN(rx_q->rrd.size, 8))) - ring_header->used = ALIGN(rx_q->rrd.size, 8); - rx_q->rrd.dma_addr = ring_header->dma_addr + ring_header->used; rx_q->rrd.v_addr = ring_header->v_addr + ring_header->used; ring_header->used += ALIGN(rx_q->rrd.size, 8); @@ -772,18 +767,11 @@ int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt) /* Ring DMA buffer. Each ring may need up to 8 bytes for alignment, * hence the additional padding bytes are allocated. - * - * Also double the memory allocated for the RRD so that we can - * re-align it if necessary. The EMAC has a restriction that the - * upper 32 bits of the base addresses for the RFD and RRD rings - * must be the same. It is extremely unlikely that this is not the - * case, since the rings are only a few KB in size. However, we - * need to check for this anyway, and if the two rings are not - * compliant, then we re-align. 
*/ - ring_header->size = ALIGN(num_tx_descs * (adpt->tpd_size * 4), 8) + - ALIGN(num_rx_descs * (adpt->rfd_size * 4), 8) + - ALIGN(num_rx_descs * (adpt->rrd_size * 4), 8) * 2; + ring_header->size = num_tx_descs * (adpt->tpd_size * 4) + + num_rx_descs * (adpt->rfd_size * 4) + + num_rx_descs * (adpt->rrd_size * 4) + + 8 + 2 * 8; /* 8 byte per one Tx and two Rx rings */ ring_header->used = 0; ring_header->v_addr = dma_zalloc_coherent(dev, ring_header->size, @@ -792,23 +780,26 @@ int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt) if (!ring_header->v_addr) return -ENOMEM; - ret = emac_rx_descs_alloc(adpt); - if (ret) { - netdev_err(adpt->netdev, "error: Rx Queue alloc failed\n"); - goto err_alloc_rx; - } + ring_header->used = ALIGN(ring_header->dma_addr, 8) - + ring_header->dma_addr; ret = emac_tx_q_desc_alloc(adpt, &adpt->tx_q); if (ret) { - netdev_err(adpt->netdev, "transmit queue allocation failed\n"); + netdev_err(adpt->netdev, "error: Tx Queue alloc failed\n"); goto err_alloc_tx; } + ret = emac_rx_descs_alloc(adpt); + if (ret) { + netdev_err(adpt->netdev, "error: Rx Queue alloc failed\n"); + goto err_alloc_rx; + } + return 0; -err_alloc_tx: - emac_rx_q_bufs_free(adpt); err_alloc_rx: + emac_tx_q_bufs_free(adpt); +err_alloc_tx: dma_free_coherent(dev, ring_header->size, ring_header->v_addr, ring_header->dma_addr); -- cgit v1.2.3 From d156576362c07e954dc36e07b0d7b0733a010f7d Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 11 Oct 2017 10:57:50 -0700 Subject: net: systemport: Establish lower/upper queue mapping Establish a queue mapping between the DSA slave network device queues created that correspond to switch port queues, and the transmit queue that SYSTEMPORT manages. We need to configure the SYSTEMPORT transmit queue with the switch port number and switch port queue number in order for the switch and SYSTEMPORT hardware to utilize the out of band congestion notification. This hardware mechanism works by looking at the switch port egress queue and determines whether there is enough buffers for this queue, with that class of service for a successful transmission and if not, backpressures the SYSTEMPORT queue that is being used. For this to work, we implement a notifier which looks at the DSA_PORT_REGISTER event. When DSA network devices are registered, the framework calls the DSA notifiers when that happens, extracts the number of queues for these devices and their associated port number, remembers that in the driver private structure and linearly maps those queues to TX rings/queues that we manage. This scheme works because DSA slave network deviecs always transmit through SYSTEMPORT so when DSA slave network devices are destroyed/brought down, the corresponding SYSTEMPORT queues are no longer used. Also, by design of the DSA framework, the master network device (SYSTEMPORT) is registered first. For faster lookups we use an array of up to DSA_MAX_PORTS * number of queues per port, and then map pointers to bcm_sysport_tx_ring such that our ndo_select_queue() implementation can just index into that array to locate the corresponding ring index. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
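The mechanism rests on two pieces visible in the diff below: a DSA notifier that fires as slave devices register, letting the master record each switch port's queue count, and an ndo_select_queue() that decodes the (port, queue) pair the tagger encoded into the skb's queue mapping. The notifier half is the standard notifier_block pattern:

    static int demo_dsa_notifier(struct notifier_block *nb,
                                 unsigned long event, void *ptr)
    {
            struct dsa_notifier_register_info *info = ptr;

            if (event != DSA_PORT_REGISTER)
                    return NOTIFY_DONE;

            /* learn info->port_number and the slave's queue count */
            return notifier_from_errno(demo_map_queues(info->master, info));
    }

    priv->nb.notifier_call = demo_dsa_notifier;
    err = register_dsa_notifier(&priv->nb);

Here demo_map_queues() stands in for the driver's queue-mapping routine.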
Miller --- drivers/net/ethernet/broadcom/bcmsysport.c | 115 ++++++++++++++++++++++++++++- drivers/net/ethernet/broadcom/bcmsysport.h | 11 ++- 2 files changed, 121 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 83eec9a8c275..78bed9a84e81 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1416,7 +1416,14 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index)); tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index)); tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index)); - tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index)); + + /* Configure QID and port mapping */ + reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index)); + reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT); + reg |= ring->switch_queue & RING_QID_MASK; + reg |= ring->switch_port << RING_PORT_ID_SHIFT; + reg |= RING_IGNORE_STATUS; + tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index)); tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index)); /* Do not use tdma_control_bit() here because TSB_SWAP1 collides @@ -1447,8 +1454,9 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, napi_enable(&ring->napi); netif_dbg(priv, hw, priv->netdev, - "TDMA cfg, size=%d, desc_cpu=%p\n", - ring->size, ring->desc_cpu); + "TDMA cfg, size=%d, desc_cpu=%p switch q=%d,port=%d\n", + ring->size, ring->desc_cpu, ring->switch_queue, + ring->switch_port); return 0; } @@ -2011,6 +2019,92 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = { .set_link_ksettings = phy_ethtool_set_link_ksettings, }; +static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, + select_queue_fallback_t fallback) +{ + struct bcm_sysport_priv *priv = netdev_priv(dev); + u16 queue = skb_get_queue_mapping(skb); + struct bcm_sysport_tx_ring *tx_ring; + unsigned int q, port; + + if (!netdev_uses_dsa(dev)) + return fallback(dev, skb); + + /* DSA tagging layer will have configured the correct queue */ + q = BRCM_TAG_GET_QUEUE(queue); + port = BRCM_TAG_GET_PORT(queue); + tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues]; + + return tx_ring->index; +} + +static int bcm_sysport_map_queues(struct net_device *dev, + struct dsa_notifier_register_info *info) +{ + struct bcm_sysport_priv *priv = netdev_priv(dev); + struct bcm_sysport_tx_ring *ring; + struct net_device *slave_dev; + unsigned int num_tx_queues; + unsigned int q, start, port; + + /* We can't be setting up queue inspection for non directly attached + * switches + */ + if (info->switch_number) + return 0; + + port = info->port_number; + slave_dev = info->info.dev; + + /* On SYSTEMPORT Lite we have twice as less queues, so we cannot do a + * 1:1 mapping, we can only do a 2:1 mapping. By reducing the number of + * per-port (slave_dev) network devices queue, we achieve just that. + * This need to happen now before any slave network device is used such + * it accurately reflects the number of real TX queues. 
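As a hedged illustration of the lookup scheme this commit describes (the names below are stand-ins, not the driver's API): once each switch port's queues are laid out linearly in the map, resolving the (port, queue) pair carried in the Broadcom tag to a SYSTEMPORT ring is a single array index.

struct example_tx_ring;        /* stand-in for struct bcm_sysport_tx_ring */

static struct example_tx_ring *
example_ring_lookup(struct example_tx_ring **ring_map,
                    unsigned int port, unsigned int q,
                    unsigned int per_port_num_tx_queues)
{
        /* Queues of port N occupy slots [N * per_port, (N + 1) * per_port) */
        return ring_map[q + port * per_port_num_tx_queues];
}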
+ */ + if (priv->is_lite) + netif_set_real_num_tx_queues(slave_dev, + slave_dev->num_tx_queues / 2); + num_tx_queues = slave_dev->real_num_tx_queues; + + if (priv->per_port_num_tx_queues && + priv->per_port_num_tx_queues != num_tx_queues) + netdev_warn(slave_dev, "asymetric number of per-port queues\n"); + + priv->per_port_num_tx_queues = num_tx_queues; + + start = find_first_zero_bit(&priv->queue_bitmap, dev->num_tx_queues); + for (q = 0; q < num_tx_queues; q++) { + ring = &priv->tx_rings[q + start]; + + /* Just remember the mapping actual programming done + * during bcm_sysport_init_tx_ring + */ + ring->switch_queue = q; + ring->switch_port = port; + priv->ring_map[q + port * num_tx_queues] = ring; + + /* Set all queues as being used now */ + set_bit(q + start, &priv->queue_bitmap); + } + + return 0; +} + +static int bcm_sysport_dsa_notifier(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct dsa_notifier_register_info *info; + + if (event != DSA_PORT_REGISTER) + return NOTIFY_DONE; + + info = ptr; + + return notifier_from_errno(bcm_sysport_map_queues(info->master, info)); +} + static const struct net_device_ops bcm_sysport_netdev_ops = { .ndo_start_xmit = bcm_sysport_xmit, .ndo_tx_timeout = bcm_sysport_tx_timeout, @@ -2023,6 +2117,7 @@ static const struct net_device_ops bcm_sysport_netdev_ops = { .ndo_poll_controller = bcm_sysport_poll_controller, #endif .ndo_get_stats64 = bcm_sysport_get_stats64, + .ndo_select_queue = bcm_sysport_select_queue, }; #define REV_FMT "v%2x.%02x" @@ -2172,10 +2267,18 @@ static int bcm_sysport_probe(struct platform_device *pdev) u64_stats_init(&priv->syncp); + priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier; + + ret = register_dsa_notifier(&priv->dsa_notifier); + if (ret) { + dev_err(&pdev->dev, "failed to register DSA notifier\n"); + goto err_deregister_fixed_link; + } + ret = register_netdev(dev); if (ret) { dev_err(&pdev->dev, "failed to register net_device\n"); - goto err_deregister_fixed_link; + goto err_deregister_notifier; } priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK; @@ -2188,6 +2291,8 @@ static int bcm_sysport_probe(struct platform_device *pdev) return 0; +err_deregister_notifier: + unregister_dsa_notifier(&priv->dsa_notifier); err_deregister_fixed_link: if (of_phy_is_fixed_link(dn)) of_phy_deregister_fixed_link(dn); @@ -2199,11 +2304,13 @@ err_free_netdev: static int bcm_sysport_remove(struct platform_device *pdev) { struct net_device *dev = dev_get_drvdata(&pdev->dev); + struct bcm_sysport_priv *priv = netdev_priv(dev); struct device_node *dn = pdev->dev.of_node; /* Not much to do, ndo_close has been called * and we use managed allocations */ + unregister_dsa_notifier(&priv->dsa_notifier); unregister_netdev(dev); if (of_phy_is_fixed_link(dn)) of_phy_deregister_fixed_link(dn); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 82e401df199e..82f70a6783cb 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -404,7 +404,7 @@ struct bcm_rsb { #define RING_CONS_INDEX_MASK 0xffff #define RING_MAPPING 0x14 -#define RING_QID_MASK 0x3 +#define RING_QID_MASK 0x7 #define RING_PORT_ID_SHIFT 3 #define RING_PORT_ID_MASK 0x7 #define RING_IGNORE_STATUS (1 << 6) @@ -712,6 +712,8 @@ struct bcm_sysport_tx_ring { struct bcm_sysport_priv *priv; /* private context backpointer */ unsigned long packets; /* packets statistics */ unsigned long bytes; /* bytes statistics */ + unsigned int switch_queue; /* switch port 
queue number */ + unsigned int switch_port; /* switch port number */ }; /* Driver private structure */ @@ -765,5 +767,12 @@ struct bcm_sysport_priv { /* For atomic update generic 64bit value on 32bit Machine */ struct u64_stats_sync syncp; + + /* map information between switch port queues and local queues */ + struct notifier_block dsa_notifier; + unsigned int per_port_num_tx_queues; + unsigned long queue_bitmap; + struct bcm_sysport_tx_ring *ring_map[DSA_MAX_PORTS * 8]; + }; #endif /* __BCM_SYSPORT_H */ -- cgit v1.2.3 From 723934fb792f2dbc76ee3ac334fcde95136bf409 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 11 Oct 2017 10:57:52 -0700 Subject: net: systemport: Turn on ACB at the SYSTEMPORT level Now that we have established the queue mapping between the switch port egress queues and the SYSTEMPORT egress queues, we can turn on Advanced Congestion Buffering (ACB) at the SYSTEMPORT level. This enables the Ethernet MAC controller to get out-of-band flow control information directly from the switch port and queue that it monitors, such that its internal TDMA can be appropriately backpressured. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bcmsysport.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 78bed9a84e81..dafc26690555 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1422,10 +1422,14 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT); reg |= ring->switch_queue & RING_QID_MASK; reg |= ring->switch_port << RING_PORT_ID_SHIFT; - reg |= RING_IGNORE_STATUS; tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index)); tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index)); + /* Enable ACB algorithm 2 */ + reg = tdma_readl(priv, TDMA_CONTROL); + reg |= tdma_control_bit(priv, ACB_ALGO); + tdma_writel(priv, reg, TDMA_CONTROL); + /* Do not use tdma_control_bit() here because TSB_SWAP1 collides * with the original definition of ACB_ALGO */ -- cgit v1.2.3 From 47f25464122bd7aebba35bfb0a26ee24d8026885 Mon Sep 17 00:00:00 2001 From: Christos Gkekas Date: Wed, 11 Oct 2017 20:26:58 +0100 Subject: vxge: Clean up unused variables in vxge-traffic Delete unused channel variables in vxge-traffic. Signed-off-by: Christos Gkekas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/neterion/vxge/vxge-traffic.c | 19 ------------------- 1 file changed, 19 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c index 5f630a24e491..0c3b5dea2858 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c @@ -1209,9 +1209,6 @@ void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh) void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh) { struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh; - struct __vxge_hw_channel *channel; - - channel = &ring->channel; rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER; @@ -1359,11 +1356,8 @@ exit: enum vxge_hw_status vxge_hw_ring_handle_tcode( struct __vxge_hw_ring *ring, void *rxdh, u8 t_code) { - struct __vxge_hw_channel *channel; enum vxge_hw_status status = VXGE_HW_OK; - channel = &ring->channel; - /* If the t_code is not supported and if the * t_code is other than 0x5 (unparseable packet * such as unknown UPV6 header), Drop it !!! @@ -1399,10 +1393,6 @@ exit: static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo, u64 txdl_ptr, u32 num_txds, u32 no_snoop) { - struct __vxge_hw_channel *channel; - - channel = &fifo->channel; - writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) | VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) | VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop), @@ -1506,9 +1496,6 @@ void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo, { struct __vxge_hw_fifo_txdl_priv *txdl_priv; struct vxge_hw_fifo_txd *txdp, *txdp_last; - struct __vxge_hw_channel *channel; - - channel = &fifo->channel; txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh); txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags; @@ -1554,9 +1541,6 @@ void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh) struct __vxge_hw_fifo_txdl_priv *txdl_priv; struct vxge_hw_fifo_txd *txdp_last; struct vxge_hw_fifo_txd *txdp_first; - struct __vxge_hw_channel *channel; - - channel = &fifo->channel; txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh); txdp_first = txdlh; @@ -1672,10 +1656,7 @@ enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo, void *txdlh, enum vxge_hw_fifo_tcode t_code) { - struct __vxge_hw_channel *channel; - enum vxge_hw_status status = VXGE_HW_OK; - channel = &fifo->channel; if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) { status = VXGE_HW_ERR_INVALID_TCODE; -- cgit v1.2.3 From 12acd136913ccdf394eeb2bc8686ff5505368119 Mon Sep 17 00:00:00 2001 From: RafaÅ‚ MiÅ‚ecki Date: Thu, 12 Oct 2017 10:21:26 +0200 Subject: net: bgmac: enable master mode for BCM54210E and B50212E PHYs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are 4 very similar PHYs: 0x600d84a1: BCM54210E (rev B0) 0x600d84a2: BCM54210E (rev B1) 0x600d84a5: B50212E (rev B0) 0x600d84a6: B50212E (rev B1) that need setting master mode manually. It's because they run in slave mode by default with Automatic Slave/Master configuration disabled which can lead to unreliable connection with massive ping loss. So far it was reported for a board with BCM47189 SoC and B50212E B1 PHY connected to the bgmac supported ethernet device. Telling PHY driver to setup PHY properly solves this issue. Signed-off-by: RafaÅ‚ MiÅ‚ecki Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bgmac-bcma.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index 6322594ab260..6fe074c1588b 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -184,13 +184,19 @@ static int bgmac_probe(struct bcma_device *core) if (!bgmac_is_bcm4707_family(core) && !(ci->id == BCMA_CHIP_ID_BCM53573 && core->core_unit == 1)) { + struct phy_device *phydev; + mii_bus = bcma_mdio_mii_register(bgmac); if (IS_ERR(mii_bus)) { err = PTR_ERR(mii_bus); goto err; } - bgmac->mii_bus = mii_bus; + + phydev = mdiobus_get_phy(bgmac->mii_bus, bgmac->phyaddr); + if (ci->id == BCMA_CHIP_ID_BCM53573 && phydev && + (phydev->drv->phy_id & phydev->drv->phy_id_mask) == PHY_ID_BCM54210E) + phydev->dev_flags |= PHY_BRCM_EN_MASTER_MODE; } if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) { -- cgit v1.2.3 From ab104615e01c2c4cbe9ea4073a430d51f6547dd2 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 12 Oct 2017 10:24:53 +0200 Subject: ravb: Consolidate clock handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The module clock is used for two purposes: - Wake-on-LAN (WoL), which is optional, - gPTP Timer Increment (GTI) configuration, which is mandatory. As the clock is needed for GTI configuration anyway, WoL is always available. Hence remove duplication and repeated obtaining of the clock by making GTI use the stored clock for WoL use. Signed-off-by: Geert Uytterhoeven Reviewed-by: Niklas Söderlund Reviewed-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/ravb_main.c | 35 +++++++++----------------------- 1 file changed, 10 insertions(+), 25 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index a8822a756e08..2b962d349f5f 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1354,20 +1354,15 @@ static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) { struct ravb_private *priv = netdev_priv(ndev); - wol->supported = 0; - wol->wolopts = 0; - - if (priv->clk) { - wol->supported = WAKE_MAGIC; - wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0; - } + wol->supported = WAKE_MAGIC; + wol->wolopts = priv->wol_enabled ? 
WAKE_MAGIC : 0; } static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) { struct ravb_private *priv = netdev_priv(ndev); - if (!priv->clk || wol->wolopts & ~WAKE_MAGIC) + if (wol->wolopts & ~WAKE_MAGIC) return -EOPNOTSUPP; priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); @@ -1962,22 +1957,12 @@ MODULE_DEVICE_TABLE(of, ravb_match_table); static int ravb_set_gti(struct net_device *ndev) { - + struct ravb_private *priv = netdev_priv(ndev); struct device *dev = ndev->dev.parent; - struct device_node *np = dev->of_node; unsigned long rate; - struct clk *clk; uint64_t inc; - clk = of_clk_get(np, 0); - if (IS_ERR(clk)) { - dev_err(dev, "could not get clock\n"); - return PTR_ERR(clk); - } - - rate = clk_get_rate(clk); - clk_put(clk); - + rate = clk_get_rate(priv->clk); if (!rate) return -EINVAL; @@ -2126,10 +2111,11 @@ static int ravb_probe(struct platform_device *pdev) priv->chip_id = chip_id; - /* Get clock, if not found that's OK but Wake-On-Lan is unavailable */ priv->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(priv->clk)) - priv->clk = NULL; + if (IS_ERR(priv->clk)) { + error = PTR_ERR(priv->clk); + goto out_release; + } /* Set function */ ndev->netdev_ops = &ravb_netdev_ops; @@ -2197,8 +2183,7 @@ static int ravb_probe(struct platform_device *pdev) if (error) goto out_napi_del; - if (priv->clk) - device_set_wakeup_capable(&pdev->dev, 1); + device_set_wakeup_capable(&pdev->dev, 1); /* Print device information */ netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n", -- cgit v1.2.3 From ff4241881232265dbc237591901116fe272967d6 Mon Sep 17 00:00:00 2001 From: Amritha Nambiar Date: Thu, 7 Sep 2017 04:00:11 -0700 Subject: i40e: Add macro for PF reset bit Introduce a macro for the bit setting the PF reset flag and update its usages. This makes it easier to use this flag in functions to be introduced in future without encountering checkpatch issues related to alignment and line over 80 characters. 
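A minimal sketch of the pattern this commit introduces, with illustrative names rather than the driver's own: BIT_ULL(nr) expands to (1ULL << (nr)), so wrapping it in a named flag macro keeps each call site short enough to satisfy checkpatch line-length rules.

#include <linux/bitops.h>

enum { EXAMPLE_RESET_REQUESTED = 3 };    /* illustrative bit position */

#define EXAMPLE_RESET_FLAG      BIT_ULL(EXAMPLE_RESET_REQUESTED)

/* before: do_reset(pf, BIT_ULL(EXAMPLE_RESET_REQUESTED), true);
 * after:  do_reset(pf, EXAMPLE_RESET_FLAG, true);
 */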
Signed-off-by: Amritha Nambiar Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 2 ++ drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 3 +-- drivers/net/ethernet/intel/i40e/i40e_main.c | 9 ++++----- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 5 ++--- 4 files changed, 9 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 8139b4ee1dc3..e7c7a853cf7f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -157,6 +157,8 @@ enum i40e_state_t { __I40E_STATE_SIZE__, }; +#define I40E_PF_RESET_FLAG BIT_ULL(__I40E_PF_RESET_REQUESTED) + /* VSI state flags */ enum i40e_vsi_state_t { __I40E_VSI_DOWN, diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 6f2725fc50a1..2b8bbc84e34f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -798,8 +798,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, */ if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; - i40e_do_reset_safe(pf, - BIT_ULL(__I40E_PF_RESET_REQUESTED)); + i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); } vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 4de52001a2b9..6190257eecfe 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5747,7 +5747,7 @@ err_setup_rx: err_setup_tx: i40e_vsi_free_tx_resources(vsi); if (vsi == pf->vsi[pf->lan_vsi]) - i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true); + i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); return err; } @@ -5875,7 +5875,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired) wr32(&pf->hw, I40E_GLGEN_RTRIG, val); i40e_flush(&pf->hw); - } else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) { + } else if (reset_flags & I40E_PF_RESET_FLAG) { /* Request a PF Reset * @@ -9223,7 +9223,7 @@ static int i40e_set_features(struct net_device *netdev, need_reset = i40e_set_ntuple(pf, features); if (need_reset) - i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true); + i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); return 0; } @@ -9475,8 +9475,7 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; else pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; - i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), - true); + i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); break; } } diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 0c4fa225c7be..e7f98e306554 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1425,8 +1425,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) if (num_vfs) { if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; - i40e_do_reset_safe(pf, - BIT_ULL(__I40E_PF_RESET_REQUESTED)); + i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); } return i40e_pci_sriov_enable(pdev, num_vfs); } @@ -1434,7 +1433,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) if (!pci_vfs_assigned(pf->pdev)) { i40e_free_vfs(pf); pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; - 
i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); + i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); } else { dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); return -EINVAL; -- cgit v1.2.3 From 8f88b3034db3be2eb600b9f57012bc63f1ea197f Mon Sep 17 00:00:00 2001 From: Amritha Nambiar Date: Thu, 7 Sep 2017 04:00:17 -0700 Subject: i40e: Add infrastructure for queue channel support This patch sets up the infrastructure for offloading TCs and queue configurations to the hardware by creating HW channels(VSI). A new channel is created for each of the traffic class configuration offloaded via mqprio framework except for the first TC (TC0). TC0 for the main VSI is also reconfigured as per user provided queue parameters. Queue counts that are not power-of-2 are handled by reconfiguring RSS by reprogramming LUTs using the queue count value. This patch also handles configuring the TX rings for the channels, setting up the RX queue map for channel. Also, the channels so created are removed and all the queue configuration is set to default when the qdisc is detached from the root of the device. Signed-off-by: Amritha Nambiar Signed-off-by: Kiran Patil Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 32 ++ drivers/net/ethernet/intel/i40e/i40e_main.c | 718 +++++++++++++++++++++++++++- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 2 + 3 files changed, 743 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index e7c7a853cf7f..bde982541772 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -87,6 +87,7 @@ #define I40E_AQ_LEN 256 #define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */ #define I40E_MAX_USER_PRIORITY 8 +#define I40E_MAX_QUEUES_PER_CH 64 #define I40E_DEFAULT_TRAFFIC_CLASS BIT(0) #define I40E_DEFAULT_MSG_ENABLE 4 #define I40E_QUEUE_WAIT_RETRY_LIMIT 10 @@ -340,6 +341,23 @@ struct i40e_flex_pit { u8 pit_index; }; +struct i40e_channel { + struct list_head list; + bool initialized; + u8 type; + u16 vsi_number; /* Assigned VSI number from AQ 'Add VSI' response */ + u16 stat_counter_idx; + u16 base_queue; + u16 num_queue_pairs; /* Requested by user */ + u16 seid; + + u8 enabled_tc; + struct i40e_aqc_vsi_properties_data info; + + /* track this channel belongs to which VSI */ + struct i40e_vsi *parent_vsi; +}; + /* struct that defines the Ethernet device */ struct i40e_pf { struct pci_dev *pdev; @@ -456,6 +474,7 @@ struct i40e_pf { #define I40E_FLAG_CLIENT_RESET BIT(26) #define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT(27) #define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT(28) +#define I40E_FLAG_TC_MQPRIO BIT(29) struct i40e_client_instance *cinst; bool stat_offsets_loaded; @@ -536,6 +555,8 @@ struct i40e_pf { u32 ioremap_len; u32 fd_inv; u16 phy_led_val; + + u16 override_q_count; }; /** @@ -700,6 +721,15 @@ struct i40e_vsi { bool current_isup; /* Sync 'link up' logging */ enum i40e_aq_link_speed current_speed; /* Sync link speed logging */ + /* channel specific fields */ + u16 cnt_q_avail; /* num of queues available for channel usage */ + u16 orig_rss_size; + u16 current_rss_size; + + u16 next_base_queue; /* next queue to be used for channel setup */ + + struct list_head ch_list; + void *priv; /* client driver data reference. 
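The per-VSI channel list introduced here is ordinary kernel list_head bookkeeping; a minimal self-contained sketch (the struct is trimmed to the one field that matters) of the teardown pattern used below:

#include <linux/list.h>
#include <linux/slab.h>

struct example_channel {
        struct list_head list;
};

/* Entries are deleted while walking, hence the _safe iterator. */
static void example_free_channels(struct list_head *ch_list)
{
        struct example_channel *ch, *ch_tmp;

        list_for_each_entry_safe(ch, ch_tmp, ch_list, list) {
                list_del(&ch->list);
                kfree(ch);
        }
}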
*/ /* VSI specific handlers */ @@ -1004,4 +1034,6 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi) { return !!vsi->xdp_prog; } + +int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch); #endif /* _I40E_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 6190257eecfe..e23105bee6d1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2881,7 +2881,7 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring) { int cpu; - if (!ring->q_vector || !ring->netdev) + if (!ring->q_vector || !ring->netdev || ring->ch) return; /* We only initialize XPS once, so as not to overwrite user settings */ @@ -2944,7 +2944,14 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) * initialization. This has to be done regardless of * DCB as by default everything is mapped to TC0. */ - tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]); + + if (ring->ch) + tx_ctx.rdylist = + le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]); + + else + tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]); + tx_ctx.rdylist_act = 0; /* clear the context in the HMC */ @@ -2966,12 +2973,23 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring) } /* Now associate this queue with this PCI function */ - if (vsi->type == I40E_VSI_VMDQ2) { - qtx_ctl = I40E_QTX_CTL_VM_QUEUE; - qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) & - I40E_QTX_CTL_VFVM_INDX_MASK; + if (ring->ch) { + if (ring->ch->type == I40E_VSI_VMDQ2) + qtx_ctl = I40E_QTX_CTL_VM_QUEUE; + else + return -EINVAL; + + qtx_ctl |= (ring->ch->vsi_number << + I40E_QTX_CTL_VFVM_INDX_SHIFT) & + I40E_QTX_CTL_VFVM_INDX_MASK; } else { - qtx_ctl = I40E_QTX_CTL_PF_QUEUE; + if (vsi->type == I40E_VSI_VMDQ2) { + qtx_ctl = I40E_QTX_CTL_VM_QUEUE; + qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) & + I40E_QTX_CTL_VFVM_INDX_MASK; + } else { + qtx_ctl = I40E_QTX_CTL_PF_QUEUE; + } } qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & @@ -5161,6 +5179,643 @@ out: return ret; } +/** + * i40e_remove_queue_channels - Remove queue channels for the TCs + * @vsi: VSI to be configured + * + * Remove queue channels for the TCs + **/ +static void i40e_remove_queue_channels(struct i40e_vsi *vsi) +{ + struct i40e_channel *ch, *ch_tmp; + int ret, i; + + /* Reset rss size that was stored when reconfiguring rss for + * channel VSIs with non-power-of-2 queue count. 
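A hedged sketch, with illustrative types only, of the ownership test threaded through the ring setup above: a ring that has been handed to a channel takes its queue-set handle (and VSI identity) from that channel, while every other ring uses the parent VSI's.

#include <linux/types.h>

struct ex_channel { u16 qs_handle; };
struct ex_ring { struct ex_channel *ch; };

static u16 ex_ring_qs_handle(const struct ex_ring *ring, u16 parent_qs_handle)
{
        /* Channel rings are programmed with the channel's handle */
        return ring->ch ? ring->ch->qs_handle : parent_qs_handle;
}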
+ */ + vsi->current_rss_size = 0; + + /* perform cleanup for channels if they exist */ + if (list_empty(&vsi->ch_list)) + return; + + list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { + struct i40e_vsi *p_vsi; + + list_del(&ch->list); + p_vsi = ch->parent_vsi; + if (!p_vsi || !ch->initialized) { + kfree(ch); + continue; + } + /* Reset queue contexts */ + for (i = 0; i < ch->num_queue_pairs; i++) { + struct i40e_ring *tx_ring, *rx_ring; + u16 pf_q; + + pf_q = ch->base_queue + i; + tx_ring = vsi->tx_rings[pf_q]; + tx_ring->ch = NULL; + + rx_ring = vsi->rx_rings[pf_q]; + rx_ring->ch = NULL; + } + + /* delete VSI from FW */ + ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid, + NULL); + if (ret) + dev_err(&vsi->back->pdev->dev, + "unable to remove channel (%d) for parent VSI(%d)\n", + ch->seid, p_vsi->seid); + kfree(ch); + } + INIT_LIST_HEAD(&vsi->ch_list); +} + +/** + * i40e_is_any_channel - channel exist or not + * @vsi: ptr to VSI to which channels are associated with + * + * Returns true or false if channel(s) exist for associated VSI or not + **/ +static bool i40e_is_any_channel(struct i40e_vsi *vsi) +{ + struct i40e_channel *ch, *ch_tmp; + + list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { + if (ch->initialized) + return true; + } + + return false; +} + +/** + * i40e_get_max_queues_for_channel + * @vsi: ptr to VSI to which channels are associated with + * + * Helper function which returns max value among the queue counts set on the + * channels/TCs created. + **/ +static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi) +{ + struct i40e_channel *ch, *ch_tmp; + int max = 0; + + list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { + if (!ch->initialized) + continue; + if (ch->num_queue_pairs > max) + max = ch->num_queue_pairs; + } + + return max; +} + +/** + * i40e_validate_num_queues - validate num_queues w.r.t channel + * @pf: ptr to PF device + * @num_queues: number of queues + * @vsi: the parent VSI + * @reconfig_rss: indicates should the RSS be reconfigured or not + * + * This function validates number of queues in the context of new channel + * which is being established and determines if RSS should be reconfigured + * or not for parent VSI. + **/ +static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues, + struct i40e_vsi *vsi, bool *reconfig_rss) +{ + int max_ch_queues; + + if (!reconfig_rss) + return -EINVAL; + + *reconfig_rss = false; + + if (num_queues > I40E_MAX_QUEUES_PER_CH) { + dev_err(&pf->pdev->dev, + "Failed to create VMDq VSI. User requested num_queues (%d) > I40E_MAX_QUEUES_PER_VSI (%u)\n", + num_queues, I40E_MAX_QUEUES_PER_CH); + return -EINVAL; + } + + if (vsi->current_rss_size) { + if (num_queues > vsi->current_rss_size) { + dev_dbg(&pf->pdev->dev, + "Error: num_queues (%d) > vsi's current_size(%d)\n", + num_queues, vsi->current_rss_size); + return -EINVAL; + } else if ((num_queues < vsi->current_rss_size) && + (!is_power_of_2(num_queues))) { + dev_dbg(&pf->pdev->dev, + "Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n", + num_queues, vsi->current_rss_size); + return -EINVAL; + } + } + + if (!is_power_of_2(num_queues)) { + /* Find the max num_queues configured for channel if channel + * exist. + * if channel exist, then enforce 'num_queues' to be more than + * max ever queues configured for channel. 
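Non-power-of-2 queue counts are absorbed by rewriting the RSS lookup table; a minimal sketch of the round-robin fill that i40e_fill_rss_lut() performs, where the modulo spreads LUT buckets evenly over rss_size queues:

#include <linux/types.h>

static void example_fill_rss_lut(u8 *lut, u16 lut_size, u16 rss_size)
{
        u16 i;

        for (i = 0; i < lut_size; i++)
                lut[i] = i % rss_size;  /* bucket i -> queue (i mod rss_size) */
}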
+ */ + max_ch_queues = i40e_get_max_queues_for_channel(vsi); + if (num_queues < max_ch_queues) { + dev_dbg(&pf->pdev->dev, + "Error: num_queues (%d) < max queues configured for channel(%d)\n", + num_queues, max_ch_queues); + return -EINVAL; + } + *reconfig_rss = true; + } + + return 0; +} + +/** + * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size + * @vsi: the VSI being setup + * @rss_size: size of RSS, accordingly LUT gets reprogrammed + * + * This function reconfigures RSS by reprogramming LUTs using 'rss_size' + **/ +static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size) +{ + struct i40e_pf *pf = vsi->back; + u8 seed[I40E_HKEY_ARRAY_SIZE]; + struct i40e_hw *hw = &pf->hw; + int local_rss_size; + u8 *lut; + int ret; + + if (!vsi->rss_size) + return -EINVAL; + + if (rss_size > vsi->rss_size) + return -EINVAL; + + local_rss_size = min_t(int, vsi->rss_size, rss_size); + lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); + if (!lut) + return -ENOMEM; + + /* Ignoring user configured lut if there is one */ + i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size); + + /* Use user configured hash key if there is one, otherwise + * use default. + */ + if (vsi->rss_hkey_user) + memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); + else + netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); + + ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot set RSS lut, err %s aq_err %s\n", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); + kfree(lut); + return ret; + } + kfree(lut); + + /* Do the update w.r.t. storing rss_size */ + if (!vsi->orig_rss_size) + vsi->orig_rss_size = vsi->rss_size; + vsi->current_rss_size = local_rss_size; + + return ret; +} + +/** + * i40e_channel_setup_queue_map - Setup a channel queue map + * @pf: ptr to PF device + * @vsi: the VSI being setup + * @ctxt: VSI context structure + * @ch: ptr to channel structure + * + * Setup queue map for a specific channel + **/ +static void i40e_channel_setup_queue_map(struct i40e_pf *pf, + struct i40e_vsi_context *ctxt, + struct i40e_channel *ch) +{ + u16 qcount, qmap, sections = 0; + u8 offset = 0; + int pow; + + sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; + sections |= I40E_AQ_VSI_PROP_SCHED_VALID; + + qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix); + ch->num_queue_pairs = qcount; + + /* find the next higher power-of-2 of num queue pairs */ + pow = ilog2(qcount); + if (!is_power_of_2(qcount)) + pow++; + + qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | + (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT); + + /* Setup queue TC[0].qmap for given VSI context */ + ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); + + ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */ + ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); + ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue); + ctxt->info.valid_sections |= cpu_to_le16(sections); +} + +/** + * i40e_add_channel - add a channel by adding VSI + * @pf: ptr to PF device + * @uplink_seid: underlying HW switching element (VEB) ID + * @ch: ptr to channel structure + * + * Add a channel (VSI) using add_vsi and queue_map + **/ +static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid, + struct i40e_channel *ch) +{ + struct i40e_hw *hw = &pf->hw; + struct i40e_vsi_context ctxt; + u8 enabled_tc = 0x1; /* TC0 enabled */ + int ret; + + if (ch->type != I40E_VSI_VMDQ2) { + dev_info(&pf->pdev->dev, + "add new vsi failed, ch->type %d\n", ch->type); + return 
-EINVAL; + } + + memset(&ctxt, 0, sizeof(ctxt)); + ctxt.pf_num = hw->pf_id; + ctxt.vf_num = 0; + ctxt.uplink_seid = uplink_seid; + ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; + if (ch->type == I40E_VSI_VMDQ2) + ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2; + + if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) { + ctxt.info.valid_sections |= + cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID); + ctxt.info.switch_id = + cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB); + } + + /* Set queue map for a given VSI context */ + i40e_channel_setup_queue_map(pf, &ctxt, ch); + + /* Now time to create VSI */ + ret = i40e_aq_add_vsi(hw, &ctxt, NULL); + if (ret) { + dev_info(&pf->pdev->dev, + "add new vsi failed, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); + return -ENOENT; + } + + /* Success, update channel */ + ch->enabled_tc = enabled_tc; + ch->seid = ctxt.seid; + ch->vsi_number = ctxt.vsi_number; + ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx); + + /* copy just the sections touched not the entire info + * since not all sections are valid as returned by + * update vsi params + */ + ch->info.mapping_flags = ctxt.info.mapping_flags; + memcpy(&ch->info.queue_mapping, + &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping)); + memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping, + sizeof(ctxt.info.tc_mapping)); + + return 0; +} + +static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch, + u8 *bw_share) +{ + struct i40e_aqc_configure_vsi_tc_bw_data bw_data; + i40e_status ret; + int i; + + bw_data.tc_valid_bits = ch->enabled_tc; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + bw_data.tc_bw_credits[i] = bw_share[i]; + + ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid, + &bw_data, NULL); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n", + vsi->back->hw.aq.asq_last_status, ch->seid); + return -EINVAL; + } + + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) + ch->info.qs_handle[i] = bw_data.qs_handles[i]; + + return 0; +} + +/** + * i40e_channel_config_tx_ring - config TX ring associated with new channel + * @pf: ptr to PF device + * @vsi: the VSI being setup + * @ch: ptr to channel structure + * + * Configure TX rings associated with channel (VSI) since queues are being + * from parent VSI. 
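The queue maps built in this patch encode the next power of two at or above the requested queue count; a standalone sketch of that rounding, using the same linux/log2.h helpers the driver uses:

#include <linux/log2.h>
#include <linux/types.h>

static int example_qcount_to_pow(u16 qcount)
{
        int pow = ilog2(qcount);        /* floor(log2(qcount)) */

        if (!is_power_of_2(qcount))
                pow++;                  /* round up for non-powers of two */
        return pow;
}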
+ **/ +static int i40e_channel_config_tx_ring(struct i40e_pf *pf, + struct i40e_vsi *vsi, + struct i40e_channel *ch) +{ + i40e_status ret; + int i; + u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0}; + + /* Enable ETS TCs with equal BW Share for now across all VSIs */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (ch->enabled_tc & BIT(i)) + bw_share[i] = 1; + } + + /* configure BW for new VSI */ + ret = i40e_channel_config_bw(vsi, ch, bw_share); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "Failed configuring TC map %d for channel (seid %u)\n", + ch->enabled_tc, ch->seid); + return ret; + } + + for (i = 0; i < ch->num_queue_pairs; i++) { + struct i40e_ring *tx_ring, *rx_ring; + u16 pf_q; + + pf_q = ch->base_queue + i; + + /* Get to TX ring ptr of main VSI, for re-setup TX queue + * context + */ + tx_ring = vsi->tx_rings[pf_q]; + tx_ring->ch = ch; + + /* Get the RX ring ptr */ + rx_ring = vsi->rx_rings[pf_q]; + rx_ring->ch = ch; + } + + return 0; +} + +/** + * i40e_setup_hw_channel - setup new channel + * @pf: ptr to PF device + * @vsi: the VSI being setup + * @ch: ptr to channel structure + * @uplink_seid: underlying HW switching element (VEB) ID + * @type: type of channel to be created (VMDq2/VF) + * + * Setup new channel (VSI) based on specified type (VMDq2/VF) + * and configures TX rings accordingly + **/ +static inline int i40e_setup_hw_channel(struct i40e_pf *pf, + struct i40e_vsi *vsi, + struct i40e_channel *ch, + u16 uplink_seid, u8 type) +{ + int ret; + + ch->initialized = false; + ch->base_queue = vsi->next_base_queue; + ch->type = type; + + /* Proceed with creation of channel (VMDq2) VSI */ + ret = i40e_add_channel(pf, uplink_seid, ch); + if (ret) { + dev_info(&pf->pdev->dev, + "failed to add_channel using uplink_seid %u\n", + uplink_seid); + return ret; + } + + /* Mark the successful creation of channel */ + ch->initialized = true; + + /* Reconfigure TX queues using QTX_CTL register */ + ret = i40e_channel_config_tx_ring(pf, vsi, ch); + if (ret) { + dev_info(&pf->pdev->dev, + "failed to configure TX rings for channel %u\n", + ch->seid); + return ret; + } + + /* update 'next_base_queue' */ + vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs; + dev_dbg(&pf->pdev->dev, + "Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n", + ch->seid, ch->vsi_number, ch->stat_counter_idx, + ch->num_queue_pairs, + vsi->next_base_queue); + return ret; +} + +/** + * i40e_setup_channel - setup new channel using uplink element + * @pf: ptr to PF device + * @type: type of channel to be created (VMDq2/VF) + * @uplink_seid: underlying HW switching element (VEB) ID + * @ch: ptr to channel structure + * + * Setup new channel (VSI) based on specified type (VMDq2/VF) + * and uplink switching element (uplink_seid) + **/ +static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi, + struct i40e_channel *ch) +{ + u8 vsi_type; + u16 seid; + int ret; + + if (vsi->type == I40E_VSI_MAIN) { + vsi_type = I40E_VSI_VMDQ2; + } else { + dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n", + vsi->type); + return false; + } + + /* underlying switching element */ + seid = pf->vsi[pf->lan_vsi]->uplink_seid; + + /* create channel (VSI), configure TX rings */ + ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type); + if (ret) { + dev_err(&pf->pdev->dev, "failed to setup hw_channel\n"); + return false; + } + + return ch->initialized ? 
true : false; +} + +/** + * i40e_create_queue_channel - function to create channel + * @vsi: VSI to be configured + * @ch: ptr to channel (it contains channel specific params) + * + * This function creates channel (VSI) using num_queues specified by user, + * reconfigs RSS if needed. + **/ +int i40e_create_queue_channel(struct i40e_vsi *vsi, + struct i40e_channel *ch) +{ + struct i40e_pf *pf = vsi->back; + bool reconfig_rss; + int err; + + if (!ch) + return -EINVAL; + + if (!ch->num_queue_pairs) { + dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n", + ch->num_queue_pairs); + return -EINVAL; + } + + /* validate user requested num_queues for channel */ + err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi, + &reconfig_rss); + if (err) { + dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n", + ch->num_queue_pairs); + return -EINVAL; + } + + /* By default we are in VEPA mode, if this is the first VF/VMDq + * VSI to be added switch to VEB mode. + */ + if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) || + (!i40e_is_any_channel(vsi))) { + if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) { + dev_dbg(&pf->pdev->dev, + "Failed to create channel. Override queues (%u) not power of 2\n", + vsi->tc_config.tc_info[0].qcount); + return -EINVAL; + } + + if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { + pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; + + if (vsi->type == I40E_VSI_MAIN) { + if (pf->flags & I40E_FLAG_TC_MQPRIO) + i40e_do_reset(pf, I40E_PF_RESET_FLAG, + true); + else + i40e_do_reset_safe(pf, + I40E_PF_RESET_FLAG); + } + } + /* now onwards for main VSI, number of queues will be value + * of TC0's queue count + */ + } + + /* By this time, vsi->cnt_q_avail shall be set to non-zero and + * it should be more than num_queues + */ + if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) { + dev_dbg(&pf->pdev->dev, + "Error: cnt_q_avail (%u) less than num_queues %d\n", + vsi->cnt_q_avail, ch->num_queue_pairs); + return -EINVAL; + } + + /* reconfig_rss only if vsi type is MAIN_VSI */ + if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) { + err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs); + if (err) { + dev_info(&pf->pdev->dev, + "Error: unable to reconfig rss for num_queues (%u)\n", + ch->num_queue_pairs); + return -EINVAL; + } + } + + if (!i40e_setup_channel(pf, vsi, ch)) { + dev_info(&pf->pdev->dev, "Failed to setup channel\n"); + return -EINVAL; + } + + dev_info(&pf->pdev->dev, + "Setup channel (id:%u) utilizing num_queues %d\n", + ch->seid, ch->num_queue_pairs); + + /* in case of VF, this will be main SRIOV VSI */ + ch->parent_vsi = vsi; + + /* and update main_vsi's count for queue_available to use */ + vsi->cnt_q_avail -= ch->num_queue_pairs; + + return 0; +} + +/** + * i40e_configure_queue_channels - Add queue channel for the given TCs + * @vsi: VSI to be configured + * + * Configures queue channel mapping to the given TCs + **/ +static int i40e_configure_queue_channels(struct i40e_vsi *vsi) +{ + struct i40e_channel *ch; + int ret = 0, i; + + /* Create app vsi with the TCs. 
Main VSI with TC0 is already set up */ + for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (vsi->tc_config.enabled_tc & BIT(i)) { + ch = kzalloc(sizeof(*ch), GFP_KERNEL); + if (!ch) { + ret = -ENOMEM; + goto err_free; + } + + INIT_LIST_HEAD(&ch->list); + ch->num_queue_pairs = + vsi->tc_config.tc_info[i].qcount; + ch->base_queue = + vsi->tc_config.tc_info[i].qoffset; + + list_add_tail(&ch->list, &vsi->ch_list); + + ret = i40e_create_queue_channel(vsi, ch); + if (ret) { + dev_err(&vsi->back->pdev->dev, + "Failed creating queue channel with TC%d: queues %d\n", + i, ch->num_queue_pairs); + goto err_free; + } + } + } + return ret; + +err_free: + i40e_remove_queue_channels(vsi); + return ret; +} + /** * i40e_veb_config_tc - Configure TCs for given VEB * @veb: given VEB @@ -5612,10 +6267,18 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc) goto exit; } - /* Unquiesce VSI */ - i40e_unquiesce_vsi(vsi); + if (pf->flags & I40E_FLAG_TC_MQPRIO) { + ret = i40e_configure_queue_channels(vsi); + if (ret) { + netdev_info(netdev, + "Failed configuring queue channels\n"); + goto exit; + } + } exit: + /* Unquiesce VSI */ + i40e_unquiesce_vsi(vsi); return ret; } @@ -7030,6 +7693,35 @@ static void i40e_fdir_teardown(struct i40e_pf *pf) i40e_vsi_release(vsi); } +/** + * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset + * @vsi: PF main vsi + * + * Rebuilds channel VSIs if they existed before reset + **/ +static int i40e_rebuild_channels(struct i40e_vsi *vsi) +{ + struct i40e_channel *ch, *ch_tmp; + i40e_status ret; + + if (list_empty(&vsi->ch_list)) + return 0; + + list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { + if (!ch->initialized) + break; + /* Proceed with creation of channel (VMDq2) VSI */ + ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "failed to rebuild channels using uplink_seid %u\n", + vsi->uplink_seid); + return ret; + } + } + return 0; +} + /** * i40e_prep_for_reset - prep for the core to reset * @pf: board private structure @@ -7295,6 +7987,13 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) } } + /* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs + * for this main VSI if they exist + */ + ret = i40e_rebuild_channels(pf->vsi[pf->lan_vsi]); + if (ret) + goto end_unlock; + /* Reconfigure hardware for allowing smaller MSS in the case * of TSO, so that we avoid the MDD being fired and causing * a reset in the case of small MSS+TSO. @@ -11608,6 +12307,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); goto err_vsis; } + INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list); /* Make sure flow control is set according to current settings */ err = i40e_set_fc(hw, &set_fc_aq_fail, true); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index ff57ae451524..fbae1182e2ea 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -426,6 +426,8 @@ struct i40e_ring { * i40e_clean_rx_ring_irq() is called * for this ring. 
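A worked illustration of the channel-creation loop above, using a hypothetical mqprio mapping of queues 2@0 2@2 1@4 1@5: TC0's two queues at offset 0 stay on the main VSI, while TC1..TC3 each become a channel whose base_queue and num_queue_pairs are lifted straight from the mqprio offset/count arrays (types and names below are illustrative only).

#include <linux/printk.h>
#include <linux/types.h>

struct ex_tc_info { u16 qcount, qoffset; };

static void ex_plan_channels(const struct ex_tc_info *tc, int numtc)
{
        int i;

        for (i = 1; i < numtc; i++)     /* TC0 stays on the main VSI */
                pr_info("TC%d -> channel: base %u, %u queue pairs\n",
                        i, tc[i].qoffset, tc[i].qcount);
}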
*/ + + struct i40e_channel *ch; } ____cacheline_internodealigned_in_smp; static inline bool ring_uses_build_skb(struct i40e_ring *ring) -- cgit v1.2.3 From a9ce82f744dc401ec27c787e2eacf3bbb33565ec Mon Sep 17 00:00:00 2001 From: Amritha Nambiar Date: Thu, 7 Sep 2017 04:00:22 -0700 Subject: i40e: Enable 'channel' mode in mqprio for TC configs The i40e driver is modified to enable the new mqprio hardware offload mode and factor the TCs and queue configuration by creating channel VSIs. In this mode, the priority to traffic class mapping and the user specified queue ranges are used to configure the traffic classes by setting the mode option to 'channel'. Example: map 0 0 0 0 1 2 2 3 queues 2@0 2@2 1@4 1@5\ hw 1 mode channel qdisc mqprio 8038: root tc 4 map 0 0 0 0 1 2 2 3 0 0 0 0 0 0 0 0 queues:(0:1) (2:3) (4:4) (5:5) mode:channel shaper:dcb The HW channels created are removed and all the queue configuration is set to default when the qdisc is detached from the root of the device. This patch also disables setting up channels via ethtool (ethtool -L) when the TCs are configured using the mqprio scheduler. The patch also limits setting ethtool Rx flow hash indirection (ethtool -X eth0 equal N) to the max queues configured via mqprio. The Rx flow hash indirection input through ethtool should be validated so that it is within the queue range configured via tc/mqprio. The bound checking is achieved by reporting the current RSS size to the kernel when queues are configured via mqprio. Example: map 0 0 0 1 0 2 3 0 queues 2@0 4@2 8@6 11@14\ hw 1 mode channel Cannot set RX flow hash configuration: Invalid argument Signed-off-by: Amritha Nambiar Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 3 + drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 8 +- drivers/net/ethernet/intel/i40e/i40e_main.c | 457 +++++++++++++++++++------ 3 files changed, 362 insertions(+), 106 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index bde982541772..024c88474951 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -54,6 +54,7 @@ #include #include #include +#include #include "i40e_type.h" #include "i40e_prototype.h" #include "i40e_client.h" @@ -700,6 +701,7 @@ struct i40e_vsi { enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */ s16 vf_id; /* Virtual function ID for SRIOV VSIs */ + struct tc_mqprio_qopt_offload mqprio_qopt; /* queue parameters */ struct i40e_tc_configuration tc_config; struct i40e_aqc_vsi_properties_data info; @@ -725,6 +727,7 @@ struct i40e_vsi { u16 cnt_q_avail; /* num of queues available for channel usage */ u16 orig_rss_size; u16 current_rss_size; + bool reconfig_rss; u16 next_base_queue; /* next queue to be used for channel setup */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index afd3ca8d9851..72d5f2cdf419 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2652,7 +2652,7 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, switch (cmd->cmd) { case ETHTOOL_GRXRINGS: - cmd->data = vsi->num_queue_pairs; + cmd->data = vsi->rss_size; ret = 0; break; case ETHTOOL_GRXFH: @@ -3897,6 +3897,12 @@ static int i40e_set_channels(struct net_device *dev, if (vsi->type != I40E_VSI_MAIN) return -EINVAL; + /* We do not support setting channels via ethtool when TCs are 
* configured through mqprio + */ + if (pf->flags & I40E_FLAG_TC_MQPRIO) + return -EINVAL; + /* verify they are not requesting separate vectors */ if (!count || ch->rx_count || ch->tx_count) return -EINVAL; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index e23105bee6d1..e803aa1552c6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1588,6 +1588,170 @@ static int i40e_set_mac(struct net_device *netdev, void *p) return 0; } +/** + * i40e_config_rss_aq - Prepare for RSS using AQ commands + * @vsi: vsi structure + * @seed: RSS hash seed + **/ +static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, + u8 *lut, u16 lut_size) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + int ret = 0; + + if (seed) { + struct i40e_aqc_get_set_rss_key_data *seed_dw = + (struct i40e_aqc_get_set_rss_key_data *)seed; + ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw); + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot set RSS key, err %s aq_err %s\n", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } + } + if (lut) { + bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false; + + ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot set RSS lut, err %s aq_err %s\n", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); + return ret; + } + } + return ret; +} + +/** + * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used + * @vsi: VSI structure + **/ +static int i40e_vsi_config_rss(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + u8 seed[I40E_HKEY_ARRAY_SIZE]; + u8 *lut; + int ret; + + if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)) + return 0; + if (!vsi->rss_size) + vsi->rss_size = min_t(int, pf->alloc_rss_size, + vsi->num_queue_pairs); + if (!vsi->rss_size) + return -EINVAL; + lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); + if (!lut) + return -ENOMEM; + + /* Use the user configured hash keys and lookup table if there is one, + * otherwise use default + */ + if (vsi->rss_lut_user) + memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); + else + i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); + if (vsi->rss_hkey_user) + memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); + else + netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); + ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size); + kfree(lut); + return ret; +} + +/** + * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config + * @vsi: the VSI being configured, + * @ctxt: VSI context structure + * @enabled_tc: number of traffic classes to enable + * + * Prepares VSI tc_config to have queue configurations based on MQPRIO options. + **/ +static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi, + struct i40e_vsi_context *ctxt, + u8 enabled_tc) +{ + u16 qcount = 0, max_qcount, qmap, sections = 0; + int i, override_q, pow, num_qps, ret; + u8 netdev_tc = 0, offset = 0; + + if (vsi->type != I40E_VSI_MAIN) + return -EINVAL; + sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; + sections |= I40E_AQ_VSI_PROP_SCHED_VALID; + vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc; + vsi->tc_config.enabled_tc = enabled_tc ? 
enabled_tc : 1; + num_qps = vsi->mqprio_qopt.qopt.count[0]; + + /* find the next higher power-of-2 of num queue pairs */ + pow = ilog2(num_qps); + if (!is_power_of_2(num_qps)) + pow++; + qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) | + (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT); + + /* Setup queue offset/count for all TCs for given VSI */ + max_qcount = vsi->mqprio_qopt.qopt.count[0]; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + /* See if the given TC is enabled for the given VSI */ + if (vsi->tc_config.enabled_tc & BIT(i)) { + offset = vsi->mqprio_qopt.qopt.offset[i]; + qcount = vsi->mqprio_qopt.qopt.count[i]; + if (qcount > max_qcount) + max_qcount = qcount; + vsi->tc_config.tc_info[i].qoffset = offset; + vsi->tc_config.tc_info[i].qcount = qcount; + vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++; + } else { + /* TC is not enabled so set the offset to + * default queue and allocate one queue + * for the given TC. + */ + vsi->tc_config.tc_info[i].qoffset = 0; + vsi->tc_config.tc_info[i].qcount = 1; + vsi->tc_config.tc_info[i].netdev_tc = 0; + } + } + + /* Set actual Tx/Rx queue pairs */ + vsi->num_queue_pairs = offset + qcount; + + /* Setup queue TC[0].qmap for given VSI context */ + ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); + ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); + ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); + ctxt->info.valid_sections |= cpu_to_le16(sections); + + /* Reconfigure RSS for main VSI with max queue count */ + vsi->rss_size = max_qcount; + ret = i40e_vsi_config_rss(vsi); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "Failed to reconfig rss for num_queues (%u)\n", + max_qcount); + return ret; + } + vsi->reconfig_rss = true; + dev_dbg(&vsi->back->pdev->dev, + "Reconfigured rss with num_queues (%u)\n", max_qcount); + + /* Find queue count available for channel VSIs and starting offset + * for channel VSIs + */ + override_q = vsi->mqprio_qopt.qopt.count[0]; + if (override_q && override_q < vsi->num_queue_pairs) { + vsi->cnt_q_avail = vsi->num_queue_pairs - override_q; + vsi->next_base_queue = override_q; + } + return 0; +} + /** * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc * @vsi: the VSI being setup @@ -1626,7 +1790,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, numtc = 1; } } else { - /* At least TC0 is enabled in case of non-DCB case */ + /* At least TC0 is enabled in non-DCB, non-MQPRIO case */ numtc = 1; } @@ -3158,6 +3322,7 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) rx_ring->dcb_tc = 0; tx_ring->dcb_tc = 0; } + return; } for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { @@ -4873,6 +5038,24 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg) return enabled_tc; } +/** + * i40e_mqprio_get_enabled_tc - Get enabled traffic classes + * @pf: PF being queried + * + * Query the current MQPRIO configuration and return the number of + * traffic classes enabled. 
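Since mqprio hands the driver a TC count rather than a bitmap, the helper documented above converts it to a contiguous mask; a minimal sketch of that conversion (num_tc = 4 yields 0b1111):

#include <linux/bitops.h>
#include <linux/types.h>

static u8 example_num_tc_to_bitmap(u8 num_tc)
{
        u8 enabled_tc = 1, i;   /* TC0 is always enabled */

        for (i = 1; i < num_tc; i++)
                enabled_tc |= BIT(i);
        return enabled_tc;
}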
+ **/ +static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf) +{ + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + u8 num_tc = vsi->mqprio_qopt.qopt.num_tc; + u8 enabled_tc = 1, i; + + for (i = 1; i < num_tc; i++) + enabled_tc |= BIT(i); + return enabled_tc; +} + /** * i40e_pf_get_num_tc - Get enabled traffic classes for PF * @pf: PF being queried @@ -4886,7 +5069,10 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) u8 num_tc = 0; struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; - /* If DCB is not enabled then always in single TC */ + if (pf->flags & I40E_FLAG_TC_MQPRIO) + return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc; + + /* If neither MQPRIO nor DCB is enabled, then always use single TC */ if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) return 1; @@ -4915,7 +5101,12 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) **/ static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) { - /* If DCB is not enabled for this PF then just return default TC */ + if (pf->flags & I40E_FLAG_TC_MQPRIO) + return i40e_mqprio_get_enabled_tc(pf); + + /* If neither MQPRIO nor DCB is enabled for this PF then just return + * default TC + */ if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) return I40E_DEFAULT_TRAFFIC_CLASS; @@ -5005,6 +5196,9 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, i40e_status ret; int i; + if ((vsi->back->flags & I40E_FLAG_TC_MQPRIO) || + !vsi->mqprio_qopt.qopt.hw) + return 0; bw_data.tc_valid_bits = enabled_tc; for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) bw_data.tc_bw_credits[i] = bw_share[i]; @@ -5067,6 +5261,9 @@ static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc) vsi->tc_config.tc_info[i].qoffset); } + if (pf->flags & I40E_FLAG_TC_MQPRIO) + return; + /* Assign UP2TC map for the VSI */ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { /* Get the actual TC# for the UP */ @@ -5117,7 +5314,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) int i; /* Check if enabled_tc is same as existing or new TCs */ - if (vsi->tc_config.enabled_tc == enabled_tc) + if (vsi->tc_config.enabled_tc == enabled_tc && + vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL) return ret; /* Enable ETS TCs with equal BW Share for now across all VSIs */ @@ -5140,15 +5338,37 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc) ctxt.vf_num = 0; ctxt.uplink_seid = vsi->uplink_seid; ctxt.info = vsi->info; - i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); + if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) { + ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc); + if (ret) + goto out; + } else { + i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false); + } + /* On destroying the qdisc, reset vsi->rss_size, as number of enabled + * queues changed. 
+ */ + if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) { + vsi->rss_size = min_t(int, vsi->back->alloc_rss_size, + vsi->num_queue_pairs); + ret = i40e_vsi_config_rss(vsi); + if (ret) { + dev_info(&vsi->back->pdev->dev, + "Failed to reconfig rss for num_queues\n"); + return ret; + } + vsi->reconfig_rss = false; + } if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) { ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID); ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA; } - /* Update the VSI after updating the VSI queue-mapping information */ + /* Update the VSI after updating the VSI queue-mapping + * information + */ ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); if (ret) { dev_info(&vsi->back->pdev->dev, @@ -6216,54 +6436,157 @@ void i40e_down(struct i40e_vsi *vsi) } +/** + * i40e_validate_mqprio_qopt- validate queue mapping info + * @vsi: the VSI being configured + * @mqprio_qopt: queue parametrs + **/ +static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi, + struct tc_mqprio_qopt_offload *mqprio_qopt) +{ + int i; + + if (mqprio_qopt->qopt.offset[0] != 0 || + mqprio_qopt->qopt.num_tc < 1 || + mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS) + return -EINVAL; + for (i = 0; ; i++) { + if (!mqprio_qopt->qopt.count[i]) + return -EINVAL; + if (mqprio_qopt->min_rate[i] || mqprio_qopt->max_rate[i]) + return -EINVAL; + if (i >= mqprio_qopt->qopt.num_tc - 1) + break; + if (mqprio_qopt->qopt.offset[i + 1] != + (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) + return -EINVAL; + } + if (vsi->num_queue_pairs < + (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) { + return -EINVAL; + } + return 0; +} + +/** + * i40e_vsi_set_default_tc_config - set default values for tc configuration + * @vsi: the VSI being configured + **/ +static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi) +{ + u16 qcount; + int i; + + /* Only TC0 is enabled */ + vsi->tc_config.numtc = 1; + vsi->tc_config.enabled_tc = 1; + qcount = min_t(int, vsi->alloc_queue_pairs, + i40e_pf_get_max_q_per_tc(vsi->back)); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + /* For the TC that is not enabled set the offset to to default + * queue and allocate one queue for the given TC. 
+ */ + vsi->tc_config.tc_info[i].qoffset = 0; + if (i == 0) + vsi->tc_config.tc_info[i].qcount = qcount; + else + vsi->tc_config.tc_info[i].qcount = 1; + vsi->tc_config.tc_info[i].netdev_tc = 0; + } +} + /** * i40e_setup_tc - configure multiple traffic classes * @netdev: net device to configure - * @tc: number of traffic classes to enable + * @type_data: tc offload data **/ -static int i40e_setup_tc(struct net_device *netdev, u8 tc) +static int i40e_setup_tc(struct net_device *netdev, void *type_data) { + struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - u8 enabled_tc = 0; + u8 enabled_tc = 0, num_tc, hw; + bool need_reset = false; int ret = -EINVAL; + u16 mode; int i; - /* Check if DCB enabled to continue */ - if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { - netdev_info(netdev, "DCB is not enabled for adapter\n"); - goto exit; + num_tc = mqprio_qopt->qopt.num_tc; + hw = mqprio_qopt->qopt.hw; + mode = mqprio_qopt->mode; + if (!hw) { + pf->flags &= ~I40E_FLAG_TC_MQPRIO; + memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); + goto config_tc; } /* Check if MFP enabled */ if (pf->flags & I40E_FLAG_MFP_ENABLED) { - netdev_info(netdev, "Configuring TC not supported in MFP mode\n"); - goto exit; + netdev_info(netdev, + "Configuring TC not supported in MFP mode\n"); + return ret; } + switch (mode) { + case TC_MQPRIO_MODE_DCB: + pf->flags &= ~I40E_FLAG_TC_MQPRIO; - /* Check whether tc count is within enabled limit */ - if (tc > i40e_pf_get_num_tc(pf)) { - netdev_info(netdev, "TC count greater than enabled on link for adapter\n"); - goto exit; + /* Check if DCB enabled to continue */ + if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { + netdev_info(netdev, + "DCB is not enabled for adapter\n"); + return ret; + } + + /* Check whether tc count is within enabled limit */ + if (num_tc > i40e_pf_get_num_tc(pf)) { + netdev_info(netdev, + "TC count greater than enabled on link for adapter\n"); + return ret; + } + break; + case TC_MQPRIO_MODE_CHANNEL: + if (pf->flags & I40E_FLAG_DCB_ENABLED) { + netdev_info(netdev, + "Full offload of TC Mqprio options is not supported when DCB is enabled\n"); + return ret; + } + if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) + return ret; + ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt); + if (ret) + return ret; + memcpy(&vsi->mqprio_qopt, mqprio_qopt, + sizeof(*mqprio_qopt)); + pf->flags |= I40E_FLAG_TC_MQPRIO; + pf->flags &= ~I40E_FLAG_DCB_ENABLED; + break; + default: + return -EINVAL; } +config_tc: /* Generate TC map for number of tc requested */ - for (i = 0; i < tc; i++) + for (i = 0; i < num_tc; i++) enabled_tc |= BIT(i); /* Requesting same TC configuration as already enabled */ - if (enabled_tc == vsi->tc_config.enabled_tc) + if (enabled_tc == vsi->tc_config.enabled_tc && + mode != TC_MQPRIO_MODE_CHANNEL) return 0; /* Quiesce VSI queues */ i40e_quiesce_vsi(vsi); + if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO)) + i40e_remove_queue_channels(vsi); + /* Configure VSI for enabled TCs */ ret = i40e_vsi_config_tc(vsi, enabled_tc); if (ret) { netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n", vsi->seid); + need_reset = true; goto exit; } @@ -6272,11 +6595,18 @@ static int i40e_setup_tc(struct net_device *netdev, u8 tc) if (ret) { netdev_info(netdev, "Failed configuring queue channels\n"); + need_reset = true; goto exit; } } exit: + /* Reset the configuration data to defaults, only TC0 is enabled */ + if (need_reset) { + 
i40e_vsi_set_default_tc_config(vsi); + need_reset = false; + } + /* Unquiesce VSI */ i40e_unquiesce_vsi(vsi); return ret; @@ -6285,14 +6615,10 @@ exit: static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data) { - struct tc_mqprio_qopt *mqprio = type_data; - if (type != TC_SETUP_MQPRIO) return -EOPNOTSUPP; - mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - - return i40e_setup_tc(netdev, mqprio->num_tc); + return i40e_setup_tc(netdev, type_data); } /** @@ -9153,45 +9479,6 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) return err; } -/** - * i40e_config_rss_aq - Prepare for RSS using AQ commands - * @vsi: vsi structure - * @seed: RSS hash seed - **/ -static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed, - u8 *lut, u16 lut_size) -{ - struct i40e_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - int ret = 0; - - if (seed) { - struct i40e_aqc_get_set_rss_key_data *seed_dw = - (struct i40e_aqc_get_set_rss_key_data *)seed; - ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw); - if (ret) { - dev_info(&pf->pdev->dev, - "Cannot set RSS key, err %s aq_err %s\n", - i40e_stat_str(hw, ret), - i40e_aq_str(hw, hw->aq.asq_last_status)); - return ret; - } - } - if (lut) { - bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false; - - ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); - if (ret) { - dev_info(&pf->pdev->dev, - "Cannot set RSS lut, err %s aq_err %s\n", - i40e_stat_str(hw, ret), - i40e_aq_str(hw, hw->aq.asq_last_status)); - return ret; - } - } - return ret; -} - /** * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands * @vsi: Pointer to vsi structure @@ -9238,46 +9525,6 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, return ret; } -/** - * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used - * @vsi: VSI structure - **/ -static int i40e_vsi_config_rss(struct i40e_vsi *vsi) -{ - u8 seed[I40E_HKEY_ARRAY_SIZE]; - struct i40e_pf *pf = vsi->back; - u8 *lut; - int ret; - - if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)) - return 0; - - if (!vsi->rss_size) - vsi->rss_size = min_t(int, pf->alloc_rss_size, - vsi->num_queue_pairs); - if (!vsi->rss_size) - return -EINVAL; - - lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); - if (!lut) - return -ENOMEM; - /* Use the user configured hash keys and lookup table if there is one, - * otherwise use default - */ - if (vsi->rss_lut_user) - memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); - else - i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); - if (vsi->rss_hkey_user) - memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); - else - netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE); - ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size); - kfree(lut); - - return ret; -} - /** * i40e_config_rss_reg - Configure RSS keys and lut by writing registers * @vsi: Pointer to vsi structure -- cgit v1.2.3 From 5ecae4120a6b50fb8a31d2f335eab390bcf5ad66 Mon Sep 17 00:00:00 2001 From: Amritha Nambiar Date: Thu, 7 Sep 2017 04:00:27 -0700 Subject: i40e: Refactor VF BW rate limiting This patch refactors the BW rate limiting for Tx traffic on the VF to be reused in the next patch for rate limiting Tx traffic for the VSIs on the PF as well. 
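As an editorial illustration (not part of the patch): the refactored helper maps a rate in Mbps onto the firmware's 50 Mbps BW credits, clamping small non-zero rates up to the minimum usable value, as the i40e_set_bw_limit() hunk below shows. A minimal standalone C sketch of that conversion; the names rate_to_bw_credits and BW_CREDIT_DIVISOR are assumptions for illustration, not the driver's own.

#include <stdio.h>

#define BW_CREDIT_DIVISOR 50ULL /* 50 Mbps per BW credit, as in the patch */

static unsigned long long rate_to_bw_credits(unsigned long long mbps)
{
        /* 0 disables the limiter; small non-zero rates clamp to 50 Mbps */
        if (mbps && mbps < BW_CREDIT_DIVISOR)
                mbps = BW_CREDIT_DIVISOR;
        return mbps / BW_CREDIT_DIVISOR;
}

int main(void)
{
        printf("40 Mbps -> %llu credit(s)\n", rate_to_bw_credits(40));
        printf("4000 Mbps -> %llu credits\n", rate_to_bw_credits(4000));
        return 0;
}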
Signed-off-by: Amritha Nambiar Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 5 ++ drivers/net/ethernet/intel/i40e/i40e_main.c | 64 ++++++++++++++++++++++ drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 45 +-------------- 3 files changed, 71 insertions(+), 43 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 024c88474951..524aa06a9e0e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -128,6 +128,10 @@ /* default to trying for four seconds */ #define I40E_TRY_LINK_TIMEOUT (4 * HZ) +/* BW rate limiting */ +#define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */ +#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* accumulate 4 credits max */ + /* driver state flags */ enum i40e_state_t { __I40E_TESTING, @@ -1039,4 +1043,5 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi) } int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch); +int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate); #endif /* _I40E_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index e803aa1552c6..fc6eaf44d87c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5399,6 +5399,70 @@ out: return ret; } +/** + * i40e_get_link_speed - Returns link speed for the interface + * @vsi: VSI to be configured + * + **/ +int i40e_get_link_speed(struct i40e_vsi *vsi) +{ + struct i40e_pf *pf = vsi->back; + + switch (pf->hw.phy.link_info.link_speed) { + case I40E_LINK_SPEED_40GB: + return 40000; + case I40E_LINK_SPEED_25GB: + return 25000; + case I40E_LINK_SPEED_20GB: + return 20000; + case I40E_LINK_SPEED_10GB: + return 10000; + case I40E_LINK_SPEED_1GB: + return 1000; + default: + return -EINVAL; + } +} + +/** + * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate + * @vsi: VSI to be configured + * @seid: seid of the channel/VSI + * @max_tx_rate: max TX rate to be configured as BW limit + * + * Helper function to set BW limit for a given VSI + **/ +int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate) +{ + struct i40e_pf *pf = vsi->back; + int speed = 0; + int ret = 0; + + speed = i40e_get_link_speed(vsi); + if (max_tx_rate > speed) { + dev_err(&pf->pdev->dev, + "Invalid max tx rate %llu specified for VSI seid %d.", + max_tx_rate, seid); + return -EINVAL; + } + if (max_tx_rate && max_tx_rate < 50) { + dev_warn(&pf->pdev->dev, + "Setting max tx rate to minimum usable value of 50Mbps.\n"); + max_tx_rate = 50; + } + + /* Tx rate credits are in values of 50Mbps, 0 is disabled */ + ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, + max_tx_rate / I40E_BW_CREDIT_DIVISOR, + I40E_MAX_BW_INACTIVE_ACCUM, NULL); + if (ret) + dev_err(&pf->pdev->dev, + "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n", + max_tx_rate, seid, i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + return ret; +} + /** * i40e_remove_queue_channels - Remove queue channels for the TCs * @vsi: VSI to be configured diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index e7f98e306554..ce0981e2f605 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -3117,8 +3117,6 @@ error_pvid: return ret; } 
-#define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */ -#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* device can accumulate 4 credits max */ /** * i40e_ndo_set_vf_bw * @netdev: network interface device structure @@ -3134,7 +3132,6 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, struct i40e_pf *pf = np->vsi->back; struct i40e_vsi *vsi; struct i40e_vf *vf; - int speed = 0; int ret = 0; /* validate the request */ @@ -3159,48 +3156,10 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, goto error; } - switch (pf->hw.phy.link_info.link_speed) { - case I40E_LINK_SPEED_40GB: - speed = 40000; - break; - case I40E_LINK_SPEED_25GB: - speed = 25000; - break; - case I40E_LINK_SPEED_20GB: - speed = 20000; - break; - case I40E_LINK_SPEED_10GB: - speed = 10000; - break; - case I40E_LINK_SPEED_1GB: - speed = 1000; - break; - default: - break; - } - - if (max_tx_rate > speed) { - dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.\n", - max_tx_rate, vf->vf_id); - ret = -EINVAL; + ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); + if (ret) goto error; - } - if ((max_tx_rate < 50) && (max_tx_rate > 0)) { - dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n"); - max_tx_rate = 50; - } - - /* Tx rate credits are in values of 50Mbps, 0 is disabled*/ - ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, - max_tx_rate / I40E_BW_CREDIT_DIVISOR, - I40E_MAX_BW_INACTIVE_ACCUM, NULL); - if (ret) { - dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n", - ret); - ret = -EIO; - goto error; - } vf->tx_rate = max_tx_rate; error: return ret; -- cgit v1.2.3 From 2027d4deacb129579f022746830ea05b72fe114a Mon Sep 17 00:00:00 2001 From: Amritha Nambiar Date: Thu, 7 Sep 2017 04:00:32 -0700 Subject: i40e: Add support setting TC max bandwidth rates This patch enables setting up maximum Tx rates for the traffic classes in i40e. The maximum rate is offloaded to the hardware through the mqprio framework by specifying the mode option as 'channel' and shaper option as 'bw_rlimit' and is configured for the VSI. Configuring minimum Tx rate limit is not supported in the device. The minimum usable value for Tx rate is 50Mbps. 
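As a hedged aside (not part of the patch): mqprio hands max_rate[] to the driver in bytes per second, and the hunks below convert it to Mbit/s with max_rate / (1000000 / 8) before programming the limit. A minimal standalone sketch of that conversion, using the 4Gbit value from the tc example that follows; the helper name bytes_to_mbps is illustrative only.

#include <stdio.h>

/* bytes/s -> Mbit/s, mirroring the patch: rate / (1000000 / 8) */
static unsigned long long bytes_to_mbps(unsigned long long bytes_per_sec)
{
        return bytes_per_sec / (1000000ULL / 8); /* 125000 bytes/s == 1 Mbit/s */
}

int main(void)
{
        /* tc's "max_rate 4Gbit" reaches the driver as 500000000 bytes/s */
        printf("%llu Mbit/s\n", bytes_to_mbps(500000000ULL));
        return 0;
}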
Example: # tc qdisc add dev eth0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1\ queues 4@0 4@4 hw 1 mode channel shaper bw_rlimit\ max_rate 4Gbit 5Gbit To dump the bandwidth rates: # tc qdisc show dev eth0 qdisc mqprio 804a: root tc 2 map 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 queues:(0:3) (4:7) mode:channel shaper:bw_rlimit max_rate:4Gbit 5Gbit Signed-off-by: Amritha Nambiar Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 2 + drivers/net/ethernet/intel/i40e/i40e_main.c | 100 +++++++++++++++++++++++++--- 2 files changed, 93 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 524aa06a9e0e..266e1dc5e786 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -359,6 +359,8 @@ struct i40e_channel { u8 enabled_tc; struct i40e_aqc_vsi_properties_data info; + u64 max_tx_rate; + /* track this channel belongs to which VSI */ struct i40e_vsi *parent_vsi; }; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index fc6eaf44d87c..bb31d53c4923 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5196,9 +5196,16 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, i40e_status ret; int i; - if ((vsi->back->flags & I40E_FLAG_TC_MQPRIO) || - !vsi->mqprio_qopt.qopt.hw) + if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) return 0; + if (!vsi->mqprio_qopt.qopt.hw) { + ret = i40e_set_bw_limit(vsi, vsi->seid, 0); + if (ret) + dev_info(&vsi->back->pdev->dev, + "Failed to reset tx rate for vsi->seid %u\n", + vsi->seid); + return ret; + } bw_data.tc_valid_bits = enabled_tc; for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) bw_data.tc_bw_credits[i] = bw_share[i]; @@ -5505,6 +5512,13 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi) rx_ring->ch = NULL; } + /* Reset BW configured for this VSI via mqprio */ + ret = i40e_set_bw_limit(vsi, ch->seid, 0); + if (ret) + dev_info(&vsi->back->pdev->dev, + "Failed to reset tx rate for ch->seid %u\n", + ch->seid); + /* delete VSI from FW */ ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid, NULL); @@ -6047,6 +6061,17 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi, "Setup channel (id:%u) utilizing num_queues %d\n", ch->seid, ch->num_queue_pairs); + /* configure VSI for BW limit */ + if (ch->max_tx_rate) { + if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate)) + return -EINVAL; + + dev_dbg(&pf->pdev->dev, + "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", + ch->max_tx_rate, + ch->max_tx_rate / I40E_BW_CREDIT_DIVISOR, ch->seid); + } + /* in case of VF, this will be main SRIOV VSI */ ch->parent_vsi = vsi; @@ -6082,6 +6107,12 @@ static int i40e_configure_queue_channels(struct i40e_vsi *vsi) ch->base_queue = vsi->tc_config.tc_info[i].qoffset; + /* Bandwidth limit through tc interface is in bytes/s, + * change to Mbit/s + */ + ch->max_tx_rate = + vsi->mqprio_qopt.max_rate[i] / (1000000 / 8); + list_add_tail(&ch->list, &vsi->ch_list); ret = i40e_create_queue_channel(vsi, ch); @@ -6508,6 +6539,7 @@ void i40e_down(struct i40e_vsi *vsi) static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi, struct tc_mqprio_qopt_offload *mqprio_qopt) { + u64 sum_max_rate = 0; int i; if (mqprio_qopt->qopt.offset[0] != 0 || @@ -6517,8 +6549,13 @@ static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi, for (i = 0; ; i++) { if 
(!mqprio_qopt->qopt.count[i]) return -EINVAL; - if (mqprio_qopt->min_rate[i] || mqprio_qopt->max_rate[i]) + if (mqprio_qopt->min_rate[i]) { + dev_err(&vsi->back->pdev->dev, + "Invalid min tx rate (greater than 0) specified\n"); return -EINVAL; + } + sum_max_rate += (mqprio_qopt->max_rate[i] / (1000000 / 8)); + if (i >= mqprio_qopt->qopt.num_tc - 1) break; if (mqprio_qopt->qopt.offset[i + 1] != @@ -6529,6 +6566,11 @@ static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi, (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) { return -EINVAL; } + if (sum_max_rate > i40e_get_link_speed(vsi)) { + dev_err(&vsi->back->pdev->dev, + "Invalid max tx rate specified\n"); + return -EINVAL; + } return 0; } @@ -6655,6 +6697,21 @@ config_tc: } if (pf->flags & I40E_FLAG_TC_MQPRIO) { + if (vsi->mqprio_qopt.max_rate[0]) { + u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0] / + (1000000 / 8); + ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); + if (!ret) { + dev_dbg(&vsi->back->pdev->dev, + "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", + max_tx_rate, + max_tx_rate / I40E_BW_CREDIT_DIVISOR, + vsi->seid); + } else { + need_reset = true; + goto exit; + } + } ret = i40e_configure_queue_channels(vsi); if (ret) { netdev_info(netdev, @@ -8108,6 +8165,17 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi) vsi->uplink_seid); return ret; } + if (ch->max_tx_rate) { + if (i40e_set_bw_limit(vsi, ch->seid, + ch->max_tx_rate)) + return -EINVAL; + + dev_dbg(&vsi->back->pdev->dev, + "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", + ch->max_tx_rate, + ch->max_tx_rate / I40E_BW_CREDIT_DIVISOR, + ch->seid); + } } return 0; } @@ -8248,6 +8316,7 @@ static int i40e_reset(struct i40e_pf *pf) **/ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) { + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_hw *hw = &pf->hw; u8 set_fc_aq_fail = 0; i40e_status ret; @@ -8330,7 +8399,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) * If there were VEBs but the reconstitution failed, we'll try * try to recover minimal use by getting the basic PF VSI working. 
*/ - if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) { + if (vsi->uplink_seid != pf->mac_seid) { dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n"); /* find the one VEB connected to the MAC, and find orphans */ for (v = 0; v < I40E_MAX_VEB; v++) { @@ -8354,8 +8423,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) dev_info(&pf->pdev->dev, "rebuild of switch failed: %d, will try to set up simple PF connection\n", ret); - pf->vsi[pf->lan_vsi]->uplink_seid - = pf->mac_seid; + vsi->uplink_seid = pf->mac_seid; break; } else if (pf->veb[v]->uplink_seid == 0) { dev_info(&pf->pdev->dev, @@ -8366,10 +8434,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) } } - if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) { + if (vsi->uplink_seid == pf->mac_seid) { dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); /* no VEB, so rebuild only the Main VSI */ - ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]); + ret = i40e_add_vsi(vsi); if (ret) { dev_info(&pf->pdev->dev, "rebuild of Main VSI failed: %d\n", ret); @@ -8377,10 +8445,24 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) } } + if (vsi->mqprio_qopt.max_rate[0]) { + u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0] / (1000000 / 8); + + ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); + if (!ret) + dev_dbg(&vsi->back->pdev->dev, + "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", + max_tx_rate, + max_tx_rate / I40E_BW_CREDIT_DIVISOR, + vsi->seid); + else + goto end_unlock; + } + /* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs * for this main VSI if they exist */ - ret = i40e_rebuild_channels(pf->vsi[pf->lan_vsi]); + ret = i40e_rebuild_channels(vsi); if (ret) goto end_unlock; -- cgit v1.2.3 From b06da8f939ff8dcf28eb9b86aae933850658742e Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 22 Sep 2017 15:11:38 +0100 Subject: i40e: make const array patterns static, reduces object code size Don't populate const array patterns on the stack, instead make it static. Makes the object code smaller by over 60 bytes: Before: text data bss dec hex filename 1953 496 0 2449 991 i40e_diag.o After: text data bss dec hex filename 1798 584 0 2382 94e i40e_diag.o (gcc 6.3.0, x86-64) Signed-off-by: Colin Ian King Acked-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_diag.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c index f141e78d409e..76ed56641864 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.c +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c @@ -36,7 +36,9 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw, u32 reg, u32 mask) { - const u32 patterns[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + static const u32 patterns[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; u32 pat, val, orig_val; int i; -- cgit v1.2.3 From 8fdb69dd383f1f937f7e2f1f24efe97c5268a84c Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Wed, 11 Oct 2017 14:49:42 -0700 Subject: i40e: fix link reporting When querying the NVM for supported phy_types, on some firmware versions, we were failing to actually fill out the phy_types which means ethtool wouldn't report any link types. Testing-hints: Check 'ethtool ' if you have the right (wrong?) firmware. 
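(Editor's sketch, not from the patch: the fallback added in the hunk below builds the 64-bit phy_types bitmap by ORing the 32-bit phy_type field with the extended byte shifted into the upper half; the helper name compose_phy_types below is an assumption for illustration.)

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: phy_type | ((u64)phy_type_ext << 32), as in the hunk */
static uint64_t compose_phy_types(uint32_t phy_type, uint8_t phy_type_ext)
{
        return (uint64_t)phy_type | ((uint64_t)phy_type_ext << 32);
}

int main(void)
{
        printf("0x%llx\n",
               (unsigned long long)compose_phy_types(0x00001234, 0x05));
        return 0;
}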
Without this patch, no link modes will be reported. Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_common.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 53aad378d49c..aeb497258f20 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1611,8 +1611,13 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw, if (report_init) { if (hw->mac.type == I40E_MAC_XL710 && hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && - hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) + hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) { status = i40e_aq_get_link_info(hw, true, NULL, NULL); + } else { + hw->phy.phy_types = le32_to_cpu(abilities->phy_type); + hw->phy.phy_types |= + ((u64)abilities->phy_type_ext << 32); + } } return status; -- cgit v1.2.3 From 17a9422de78c3a59b490b400f555635c477f1476 Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Wed, 11 Oct 2017 14:49:43 -0700 Subject: i40e/i40evf: don't trust VF to reset itself When using 'ethtool -L' on a VF to change number of requested queues from PF, we shouldn't trust the VF to reset itself after making the request. Doing it that way opens the door for a potentially malicious VF to do nasty things to the PF which should never be the case. This makes it such that after VF makes a successful request, PF will then reset the VF to institute required changes. Only if the request fails will PF send a message back to VF letting it know the request was unsuccessful. Testing-hints: There should be no real functional changes. This is simply hardening against a potentially malicious VF. Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 9 +++++++-- drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 7 +++---- include/linux/avf/virtchnl.h | 4 ++-- 3 files changed, 12 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index ce0981e2f605..f8a794b72462 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2045,8 +2045,9 @@ error_param: * @msglen: msg length * * VFs get a default number of queues but can use this message to request a - * different number. Will respond with either the number requested or the - * maximum we can support. + * different number. If the request is successful, PF will reset the VF and + * return 0. If unsuccessful, PF will send message informing VF of number of + * available queues and return result of sending VF a message. 
**/ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen) { @@ -2077,7 +2078,11 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen) pf->queues_left); vfres->num_queue_pairs = pf->queues_left + cur_pairs; } else { + /* successful request */ vf->num_req_queues = req_pairs; + i40e_vc_notify_vf_reset(vf); + i40e_reset_vf(vf, false); + return 0; } return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0, diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index 2bb81c39d85f..46c8b8a3907c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -407,6 +407,7 @@ int i40evf_request_queues(struct i40evf_adapter *adapter, int num) vfres.num_queue_pairs = num; adapter->current_op = VIRTCHNL_OP_REQUEST_QUEUES; + adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; return i40evf_send_pf_msg(adapter, VIRTCHNL_OP_REQUEST_QUEUES, (u8 *)&vfres, sizeof(vfres)); } @@ -1098,15 +1099,13 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, case VIRTCHNL_OP_REQUEST_QUEUES: { struct virtchnl_vf_res_request *vfres = (struct virtchnl_vf_res_request *)msg; - if (vfres->num_queue_pairs == adapter->num_req_queues) { - adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED; - i40evf_schedule_reset(adapter); - } else { + if (vfres->num_queue_pairs != adapter->num_req_queues) { dev_info(&adapter->pdev->dev, "Requested %d queues, PF can support %d\n", adapter->num_req_queues, vfres->num_queue_pairs); adapter->num_req_queues = 0; + adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED; } } break; diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index 60e5d90cb18a..3ce61342fa31 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -333,8 +333,8 @@ struct virtchnl_vsi_queue_config_info { * additional queues must be negotiated. This is a best effort request as it * is possible the PF does not have enough queues left to support the request. * If the PF cannot support the number requested it will respond with the - * maximum number it is able to support; otherwise it will respond with the - * number requested. + * maximum number it is able to support. If the request is successful, PF will + * then reset the VF to institute required changes. */ /* VF resource request */ -- cgit v1.2.3 From 52a76235d0c4dd259cd0df503afed4757c04ba1d Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Fri, 13 Oct 2017 10:58:36 +0100 Subject: net: stmmac: Use correct values in TQS/RQS fields Currently we are using all the available fifo size in RQS and TQS fields. This will not work correctly in multi-queues IP's because total fifo size must be splitted to the enabled queues. Correct this by computing the available fifo size per queue and setting the right value in TQS and RQS fields. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/common.h | 3 ++- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 15 +++++++++------ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 22 ++++++++++++++++++++-- 3 files changed, 31 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index e82b4b70b7be..c26c8a7f957f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -443,7 +443,8 @@ struct stmmac_dma_ops { int rxfifosz); void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel, int fifosz); - void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel); + void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel, + int fifosz); /* To track extra statistic (if supported) */ void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, void __iomem *ioaddr); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index e84831e1b63b..898849bbc7d4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -271,9 +271,10 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode, } static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode, - u32 channel) + u32 channel, int fifosz) { u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel)); + unsigned int tqs = fifosz / 256 - 1; if (mode == SF_DMA_MODE) { pr_debug("GMAC: enable TX store and forward mode\n"); @@ -306,12 +307,14 @@ static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode, * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W * with reset values: TXQEN off, TQS 256 bytes. * - * Write the bits in both cases, since it will have no effect when RO. - * For DWC_EQOS_NUM_TXQ > 1, the top bits in MTL_OP_MODE_TQS_MASK might - * be RO, however, writing the whole TQS field will result in a value - * equal to DWC_EQOS_TXFIFO_SIZE, just like for DWC_EQOS_NUM_TXQ == 1. + * TXQEN must be written for multi-channel operation and TQS must + * reflect the available fifo size per queue (total fifo size / number + * of enabled queues). 
*/ - mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK; + mtl_tx_op |= MTL_OP_MODE_TXQEN; + mtl_tx_op &= ~MTL_OP_MODE_TQS_MASK; + mtl_tx_op |= tqs << MTL_OP_MODE_TQS_SHIFT; + writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel)); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f41661a04f23..edf245b8bce3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1750,12 +1750,19 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) u32 rx_channels_count = priv->plat->rx_queues_to_use; u32 tx_channels_count = priv->plat->tx_queues_to_use; int rxfifosz = priv->plat->rx_fifo_size; + int txfifosz = priv->plat->tx_fifo_size; u32 txmode = 0; u32 rxmode = 0; u32 chan = 0; if (rxfifosz == 0) rxfifosz = priv->dma_cap.rx_fifo_size; + if (txfifosz == 0) + txfifosz = priv->dma_cap.tx_fifo_size; + + /* Adjust for real per queue fifo size */ + rxfifosz /= rx_channels_count; + txfifosz /= tx_channels_count; if (priv->plat->force_thresh_dma_mode) { txmode = tc; @@ -1783,7 +1790,8 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) rxfifosz); for (chan = 0; chan < tx_channels_count; chan++) - priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan); + priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan, + txfifosz); } else { priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode, rxfifosz); @@ -1946,15 +1954,25 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, u32 rxmode, u32 chan) { + u32 rx_channels_count = priv->plat->rx_queues_to_use; + u32 tx_channels_count = priv->plat->tx_queues_to_use; int rxfifosz = priv->plat->rx_fifo_size; + int txfifosz = priv->plat->tx_fifo_size; if (rxfifosz == 0) rxfifosz = priv->dma_cap.rx_fifo_size; + if (txfifosz == 0) + txfifosz = priv->dma_cap.tx_fifo_size; + + /* Adjust for real per queue fifo size */ + rxfifosz /= rx_channels_count; + txfifosz /= tx_channels_count; if (priv->synopsys_id >= DWMAC_CORE_4_00) { priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan, rxfifosz); - priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan); + priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan, + txfifosz); } else { priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode, rxfifosz); -- cgit v1.2.3 From a0daae13776994cf90e9a7bc81cd8e4ad3959093 Mon Sep 17 00:00:00 2001 From: Jose Abreu Date: Fri, 13 Oct 2017 10:58:37 +0100 Subject: net: stmmac: Disable flow ctrl for RX AVB queues and really enable TX AVB queues Flow control must be disabled for AVB enabled queues and TX AVB queues must be enabled by setting BIT(2) of TXQEN. Correct this by passing the queue mode to DMA callbacks and by checking in these functions whether we are in AVB, performing the necessary adjustments. Signed-off-by: Jose Abreu Cc: David S. Miller Cc: Joao Pinto Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Signed-off-by: David S.
Miller --- drivers/net/ethernet/stmicro/stmmac/common.h | 4 ++-- drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 2 ++ drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 16 +++++++++++----- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 21 +++++++++++++++------ 4 files changed, 30 insertions(+), 13 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index c26c8a7f957f..e1e5ac053760 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -442,9 +442,9 @@ struct stmmac_dma_ops { void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode, int rxfifosz); void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel, - int fifosz); + int fifosz, u8 qmode); void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel, - int fifosz); + int fifosz, u8 qmode); /* To track extra statistic (if supported) */ void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, void __iomem *ioaddr); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index d74cedf2a397..aeda3ab2d761 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -225,6 +225,8 @@ enum power_event { #define MTL_CHAN_RX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x38) #define MTL_OP_MODE_RSF BIT(5) +#define MTL_OP_MODE_TXQEN_MASK GENMASK(3, 2) +#define MTL_OP_MODE_TXQEN_AV BIT(2) #define MTL_OP_MODE_TXQEN BIT(3) #define MTL_OP_MODE_TSF BIT(1) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c index 898849bbc7d4..c110f6850ffa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c @@ -191,7 +191,7 @@ static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 number_chan) } static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode, - u32 channel, int fifosz) + u32 channel, int fifosz, u8 qmode) { unsigned int rqs = fifosz / 256 - 1; u32 mtl_rx_op, mtl_rx_int; @@ -218,8 +218,10 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode, mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK; mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT; - /* enable flow control only if each channel gets 4 KiB or more FIFO */ - if (fifosz >= 4096) { + /* Enable flow control only if each channel gets 4 KiB or more FIFO and + * only if channel is not an AVB channel. + */ + if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) { unsigned int rfd, rfa; mtl_rx_op |= MTL_OP_MODE_EHFC; @@ -271,7 +273,7 @@ static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode, } static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode, - u32 channel, int fifosz) + u32 channel, int fifosz, u8 qmode) { u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel)); unsigned int tqs = fifosz / 256 - 1; @@ -311,7 +313,11 @@ static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode, * reflect the available fifo size per queue (total fifo size / number * of enabled queues). 
*/ - mtl_tx_op |= MTL_OP_MODE_TXQEN; + mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK; + if (qmode != MTL_QUEUE_AVB) + mtl_tx_op |= MTL_OP_MODE_TXQEN; + else + mtl_tx_op |= MTL_OP_MODE_TXQEN_AV; mtl_tx_op &= ~MTL_OP_MODE_TQS_MASK; mtl_tx_op |= tqs << MTL_OP_MODE_TQS_SHIFT; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index edf245b8bce3..0e1b0a3d7b76 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1754,6 +1754,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) u32 txmode = 0; u32 rxmode = 0; u32 chan = 0; + u8 qmode = 0; if (rxfifosz == 0) rxfifosz = priv->dma_cap.rx_fifo_size; @@ -1785,13 +1786,19 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) /* configure all channels */ if (priv->synopsys_id >= DWMAC_CORE_4_00) { - for (chan = 0; chan < rx_channels_count; chan++) + for (chan = 0; chan < rx_channels_count; chan++) { + qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; + priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan, - rxfifosz); + rxfifosz, qmode); + } + + for (chan = 0; chan < tx_channels_count; chan++) { + qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; - for (chan = 0; chan < tx_channels_count; chan++) priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan, - txfifosz); + txfifosz, qmode); + } } else { priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode, rxfifosz); @@ -1954,6 +1961,8 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, u32 rxmode, u32 chan) { + u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; + u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; u32 rx_channels_count = priv->plat->rx_queues_to_use; u32 tx_channels_count = priv->plat->tx_queues_to_use; int rxfifosz = priv->plat->rx_fifo_size; @@ -1970,9 +1979,9 @@ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, if (priv->synopsys_id >= DWMAC_CORE_4_00) { priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan, - rxfifosz); + rxfifosz, rxqmode); priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan, - txfifosz); + txfifosz, txqmode); } else { priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode, rxfifosz); -- cgit v1.2.3 From bc8c80a8c978d24b2746dc7d9a8cef65ae82be3c Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 12 Oct 2017 10:34:08 -0700 Subject: nfp: bpf: reorder arguments to emit_ld_field_any() ld_field instruction has the following format in NFP assembler: ld_field[dst, 1000, src, <<24] Reorder parameters to emit_ld_field_any() to make it closer to the familiar assembler order. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S.
Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 13148f30fc4c..cf8a6eb3ec99 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -402,8 +402,8 @@ __emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc, } static void -emit_ld_field_any(struct nfp_prog *nfp_prog, enum shf_sc sc, u8 shift, - swreg dst, u8 bmask, swreg src, bool zero) +emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src, + enum shf_sc sc, u8 shift, bool zero) { struct nfp_insn_re_regs reg; int err; @@ -424,7 +424,7 @@ static void emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src, enum shf_sc sc, u8 shift) { - emit_ld_field_any(nfp_prog, sc, shift, dst, bmask, src, false); + emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false); } static void emit_nop(struct nfp_prog *nfp_prog) -- cgit v1.2.3 From 8283737065b2dab480cd10e00e6f8abbcb62b5b0 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 12 Oct 2017 10:34:09 -0700 Subject: nfp: bpf: add missing return in jne_imm optimization We optimize comparisons to immediate 0 as if (reg.lo | reg.hi). The early return statement was missing, however, which means we would generate two comparisons - the optimized one followed by a normal 2x 32 bit compare. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index cf8a6eb3ec99..5ac834e91aed 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1191,6 +1191,7 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2), ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1)); emit_br(nfp_prog, BR_BNE, insn->off, 0); + return 0; } tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); -- cgit v1.2.3 From 26fa818dc07c649fcb37674580ebd5a3c7cae66c Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 12 Oct 2017 10:34:10 -0700 Subject: nfp: bpf: fix compare instructions Now that we have BPF assembler support in LLVM 6 we can easily test all compare instructions (LLVM 4 didn't generate most of them from C). Fix the compare to immediates and refactor the order of compare to regs to make sure they both follow the same pattern. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S.
Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 5ac834e91aed..e970f284c8a4 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -720,7 +720,10 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, enum br_mask br_mask, bool swap) { const struct bpf_insn *insn = &meta->insn; - u8 areg = insn->src_reg * 2, breg = insn->dst_reg * 2; + u8 areg, breg; + + areg = insn->dst_reg * 2; + breg = insn->src_reg * 2; if (insn->off < 0) /* TODO */ return -EOPNOTSUPP; @@ -1129,22 +1132,22 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int jgt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false); + return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true); } static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true); + return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false); } static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false); + return wrp_cmp_imm(nfp_prog, meta, BR_BLO, false); } static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true); + return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true); } static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) @@ -1227,22 +1230,22 @@ static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int jgt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false); + return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true); } static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true); + return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false); } static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false); + return wrp_cmp_reg(nfp_prog, meta, BR_BLO, false); } static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true); + return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true); } static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) -- cgit v1.2.3 From c000dfb5e29a2abaf303cf90502cb68227f29fae Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 12 Oct 2017 10:34:11 -0700 Subject: nfp: bpf: add mov helper Register move operation is encoded as alu no op. This means that one has to specify number of unused/none parameters to the emit_alu(). Add a helper. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 31 ++++++++++++++-------------- 1 file changed, 15 insertions(+), 16 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index e970f284c8a4..4f7cfa6adfc1 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -504,9 +504,14 @@ wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask, FIELD_PREP(OP_BR_SPECIAL, special); } +static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src) +{ + emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src); +} + static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src) { - emit_alu(nfp_prog, reg_both(dst), reg_none(), ALU_OP_NONE, reg_b(src)); + wrp_mov(nfp_prog, reg_both(dst), reg_b(src)); } static int @@ -556,8 +561,7 @@ construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, reg_xfer(0), SHF_SC_R_SHF, shift * 8); else for (; i * 4 < size; i++) - emit_alu(nfp_prog, reg_both(i), - reg_none(), ALU_OP_NONE, reg_xfer(i)); + wrp_mov(nfp_prog, reg_both(i), reg_xfer(i)); if (i < 2) wrp_immed(nfp_prog, reg_both(1), 0); @@ -1032,8 +1036,8 @@ static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int mem_ldx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { if (meta->insn.off == offsetof(struct sk_buff, len)) - emit_alu(nfp_prog, reg_both(meta->insn.dst_reg * 2), - reg_none(), ALU_OP_NONE, plen_reg(nfp_prog)); + wrp_mov(nfp_prog, + reg_both(meta->insn.dst_reg * 2), plen_reg(nfp_prog)); else return -EOPNOTSUPP; @@ -1048,7 +1052,7 @@ static int mem_ldx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) meta->insn.off != offsetof(struct xdp_md, data_end)) return -EOPNOTSUPP; - emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, pptr_reg(nfp_prog)); + wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); if (meta->insn.off == offsetof(struct xdp_md, data)) return 0; @@ -1438,8 +1442,7 @@ static void nfp_outro_tc_legacy(struct nfp_prog *nfp_prog) * ife + tx 0x24 -> redir, count as stat1 */ emit_br_byte_neq(nfp_prog, reg_b(0), 0xff, 0, nfp_prog->tgt_done, 2); - emit_alu(nfp_prog, reg_a(0), - reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS); + wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16); emit_br(nfp_prog, BR_UNC, nfp_prog->tgt_done, 1); @@ -1466,8 +1469,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog) emit_br_def(nfp_prog, nfp_prog->tgt_done, 2); - emit_alu(nfp_prog, reg_a(0), - reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS); + wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16); /* Target for normal exits */ @@ -1476,8 +1478,7 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog) /* if R0 > 7 jump to abort */ emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0)); emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0); - emit_alu(nfp_prog, reg_a(0), - reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS); + wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); wrp_immed(nfp_prog, reg_b(2), 0x41221211); wrp_immed(nfp_prog, reg_b(3), 0x41001211); @@ -1514,8 +1515,7 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog) emit_br_def(nfp_prog, nfp_prog->tgt_done, 2); - emit_alu(nfp_prog, reg_a(0), - reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS); + wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 
16); /* Target for normal exits */ @@ -1536,8 +1536,7 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog) emit_br_def(nfp_prog, nfp_prog->tgt_done, 2); - emit_alu(nfp_prog, reg_a(0), - reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS); + wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); } -- cgit v1.2.3 From 3119d1fd46464c61c80731c3a9f40eee4434fc1d Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 12 Oct 2017 10:34:12 -0700 Subject: nfp: bpf: implement byte swap instruction Implement byte swaps with rotations, shifts and byte loads. Remember to clear upper parts of the 64 bit registers. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 38 ++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 4f7cfa6adfc1..5e8a6b766790 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -746,6 +746,14 @@ wrp_cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, return 0; } +static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out) +{ + emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in, + SHF_SC_R_ROT, 8); + emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out), + SHF_SC_R_ROT, 16); +} + /* --- Callbacks --- */ static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { @@ -982,6 +990,35 @@ static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return 0; } +static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + const struct bpf_insn *insn = &meta->insn; + u8 gpr = insn->dst_reg * 2; + + switch (insn->imm) { + case 16: + emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr), + SHF_SC_R_ROT, 8); + emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr), + SHF_SC_R_SHF, 16); + + wrp_immed(nfp_prog, reg_both(gpr + 1), 0); + break; + case 32: + wrp_end32(nfp_prog, reg_a(gpr), gpr); + wrp_immed(nfp_prog, reg_both(gpr + 1), 0); + break; + case 64: + wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1)); + + wrp_end32(nfp_prog, reg_a(gpr), gpr + 1); + wrp_end32(nfp_prog, imm_a(nfp_prog), gpr); + break; + } + + return 0; +} + static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { wrp_immed(nfp_prog, reg_both(nfp_meta_prev(meta)->insn.dst_reg * 2 + 1), @@ -1297,6 +1334,7 @@ static const instr_cb_t instr_cb[256] = { [BPF_ALU | BPF_SUB | BPF_X] = sub_reg, [BPF_ALU | BPF_SUB | BPF_K] = sub_imm, [BPF_ALU | BPF_LSH | BPF_K] = shl_imm, + [BPF_ALU | BPF_END | BPF_X] = end_reg32, [BPF_LD | BPF_IMM | BPF_DW] = imm_ld8, [BPF_LD | BPF_ABS | BPF_B] = data_ld1, [BPF_LD | BPF_ABS | BPF_H] = data_ld2, -- cgit v1.2.3 From 0f6cf4ddf63fa4d645c36d96ed1092fe7a0a8d0f Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 12 Oct 2017 10:34:13 -0700 Subject: nfp: bpf: support BPF offload only on little endian eBPF is host-endian specific. Translating both BE and LE eBPF to the NFP is feasible, but would require quite a bit of indirection. The fact that I don't have access to any BE hosts that would fit a 25G/40G/100G NIC is also limiting my ability to test big endian. For now restrict the offload to little endian hosts only. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/bpf/main.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 074726980994..6e74f8db1cc1 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -42,9 +42,11 @@ static bool nfp_net_ebpf_capable(struct nfp_net *nn) { +#ifdef __LITTLE_ENDIAN if (nn->cap & NFP_NET_CFG_CTRL_BPF && nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI) return true; +#endif return false; } -- cgit v1.2.3 From 943c57b97cde2ce0806e59b553c650c9889d8b69 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 12 Oct 2017 10:34:14 -0700 Subject: nfp: bpf: fix context accesses Sizes of fields in struct xdp_md/xdp_buff and some in sk_buff depend on target architecture. Take that into account and use struct xdp_buff, not struct xdp_md. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 49 ++++++++++++++++------------ 1 file changed, 29 insertions(+), 20 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 5e8a6b766790..4b62f5497728 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1070,47 +1070,56 @@ static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) meta->insn.src_reg * 2, true, 4); } -static int mem_ldx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + u8 size) { - if (meta->insn.off == offsetof(struct sk_buff, len)) + switch (meta->insn.off) { + case offsetof(struct sk_buff, len): + if (size != FIELD_SIZEOF(struct sk_buff, len)) + return -EOPNOTSUPP; wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2), plen_reg(nfp_prog)); - else + break; + default: return -EOPNOTSUPP; + } + + wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); return 0; } -static int mem_ldx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + u8 size) { swreg dst = reg_both(meta->insn.dst_reg * 2); - if (meta->insn.off != offsetof(struct xdp_md, data) && - meta->insn.off != offsetof(struct xdp_md, data_end)) - return -EOPNOTSUPP; - - wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); + if (size != sizeof(void *)) + return -EINVAL; - if (meta->insn.off == offsetof(struct xdp_md, data)) - return 0; + switch (meta->insn.off) { + case offsetof(struct xdp_buff, data): + wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); + break; + case offsetof(struct xdp_buff, data_end): + emit_alu(nfp_prog, dst, + plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); + break; + default: + return -EOPNOTSUPP; + } - emit_alu(nfp_prog, dst, dst, ALU_OP_ADD, plen_reg(nfp_prog)); + wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); return 0; } static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - int ret; - if (nfp_prog->act == NN_ACT_XDP) - ret = mem_ldx4_xdp(nfp_prog, meta); + return mem_ldx_xdp(nfp_prog, meta, 4); else - ret = mem_ldx4_skb(nfp_prog, meta); - - wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); - - return ret; + return mem_ldx_skb(nfp_prog, meta, 4); } static int mem_stx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta 
*meta) -- cgit v1.2.3 From 0a7939775f8546268206c1e8efe78218f3c18aae Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 12 Oct 2017 10:34:15 -0700 Subject: nfp: bpf: separate I/O from checks for legacy data load Move data load into a separate function and separate it from packet length checks of legacy I/O. This makes the code more readable and easier to reuse. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 77 +++++++++++++++------------- 1 file changed, 40 insertions(+), 37 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 4b62f5497728..3e173da16428 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -515,63 +515,66 @@ static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src) } static int -construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, - u16 src, bool src_valid, u8 size) +data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size) { unsigned int i; u16 shift, sz; - swreg tmp_reg; /* We load the value from the address indicated in @offset and then * shift out the data we don't need. Note: this is big endian! */ - sz = size < 4 ? 4 : size; + sz = max(size, 4); shift = size < 4 ? 4 - size : 0; - if (src_valid) { - /* Calculate the true offset (src_reg + imm) */ - tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog)); - emit_alu(nfp_prog, imm_both(nfp_prog), - reg_a(src), ALU_OP_ADD, tmp_reg); - /* Check packet length (size guaranteed to fit b/c it's u8) */ - emit_alu(nfp_prog, imm_a(nfp_prog), - imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size)); - emit_alu(nfp_prog, reg_none(), - plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog)); - wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT); - /* Load data */ - emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0, - pptr_reg(nfp_prog), imm_b(nfp_prog), sz - 1, true); - } else { - /* Check packet length */ - tmp_reg = ur_load_imm_any(nfp_prog, offset + size, - imm_a(nfp_prog)); - emit_alu(nfp_prog, reg_none(), - plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg); - wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT); - /* Load data */ - tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog)); - emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0, - pptr_reg(nfp_prog), tmp_reg, sz - 1, true); - } + emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0, + pptr_reg(nfp_prog), offset, sz - 1, true); i = 0; if (shift) - emit_shf(nfp_prog, reg_both(0), reg_none(), SHF_OP_NONE, + emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE, reg_xfer(0), SHF_SC_R_SHF, shift * 8); else for (; i * 4 < size; i++) - wrp_mov(nfp_prog, reg_both(i), reg_xfer(i)); + wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i)); if (i < 2) - wrp_immed(nfp_prog, reg_both(1), 0); + wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0); return 0; } +static int +construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size) +{ + swreg tmp_reg; + + /* Calculate the true offset (src_reg + imm) */ + tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog)); + emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg); + + /* Check packet length (size guaranteed to fit b/c it's u8) */ + emit_alu(nfp_prog, imm_a(nfp_prog), + imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size)); + emit_alu(nfp_prog, reg_none(), + plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog)); + wrp_br_special(nfp_prog, 
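/* BR_BLO: taken when plen - (src + offset + size) borrows, i.e. the
 * load would run past the end of the packet, so the program aborts
 */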
BR_BLO, OP_BR_GO_ABORT); + + /* Load data */ + return data_ld(nfp_prog, imm_b(nfp_prog), 0, size); +} + static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size) { - return construct_data_ind_ld(nfp_prog, offset, 0, false, size); + swreg tmp_reg; + + /* Check packet length */ + tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog)); + emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg); + wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT); + + /* Load data */ + tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog)); + return data_ld(nfp_prog, tmp_reg, 0, size); } static void @@ -1055,19 +1058,19 @@ static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { return construct_data_ind_ld(nfp_prog, meta->insn.imm, - meta->insn.src_reg * 2, true, 1); + meta->insn.src_reg * 2, 1); } static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { return construct_data_ind_ld(nfp_prog, meta->insn.imm, - meta->insn.src_reg * 2, true, 2); + meta->insn.src_reg * 2, 2); } static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { return construct_data_ind_ld(nfp_prog, meta->insn.imm, - meta->insn.src_reg * 2, true, 4); + meta->insn.src_reg * 2, 4); } static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, -- cgit v1.2.3 From 2ca71441f524b0a0cc01d8e51c875b00fbe31275 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 12 Oct 2017 10:34:16 -0700 Subject: nfp: bpf: add support for direct packet access - read In direct packet access bound checks are already done, we can simply dereference the packet pointer. Verifier/parser logic needs to record pointer type. Note that although verifier does protect us from CTX vs other pointer changes we will also want to differentiate between PACKET vs MAP_VALUE or STACK, so we can add the check already. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 85 +++++++++++++++++++++-- drivers/net/ethernet/netronome/nfp/bpf/main.h | 3 + drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 21 ++++-- drivers/net/ethernet/netronome/nfp/nfp_asm.c | 3 + drivers/net/ethernet/netronome/nfp/nfp_asm.h | 4 ++ 5 files changed, 105 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 3e173da16428..975d63fbc1d5 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -543,6 +543,36 @@ data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size) return 0; } +static int +data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset, + u8 dst_gpr, int size) +{ + unsigned int i; + u8 mask, sz; + + /* We load the value from the address indicated in @offset and then + * mask out the data we don't need. Note: this is little endian! + */ + sz = max(size, 4); + mask = size < 4 ? 
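/* byte-enable mask for the ld_field below; 0 selects plain word copies */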
GENMASK(size - 1, 0) : 0; + + emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, + reg_a(src_gpr), offset, sz / 4 - 1, true); + + i = 0; + if (mask) + emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask, + reg_xfer(0), SHF_SC_NONE, 0, true); + else + for (; i * 4 < size; i++) + wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i)); + + if (i < 2) + wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0); + + return 0; +} + static int construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size) { @@ -1117,12 +1147,53 @@ static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, return 0; } +static int +mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + unsigned int size) +{ + swreg tmp_reg; + + tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); + + return data_ld_host_order(nfp_prog, meta->insn.src_reg * 2, tmp_reg, + meta->insn.dst_reg * 2, size); +} + +static int +mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + unsigned int size) +{ + if (meta->ptr.type == PTR_TO_CTX) { + if (nfp_prog->act == NN_ACT_XDP) + return mem_ldx_xdp(nfp_prog, meta, size); + else + return mem_ldx_skb(nfp_prog, meta, size); + } + + if (meta->ptr.type == PTR_TO_PACKET) + return mem_ldx_data(nfp_prog, meta, size); + + return -EOPNOTSUPP; +} + +static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return mem_ldx(nfp_prog, meta, 1); +} + +static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return mem_ldx(nfp_prog, meta, 2); +} + static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - if (nfp_prog->act == NN_ACT_XDP) - return mem_ldx_xdp(nfp_prog, meta, 4); - else - return mem_ldx_skb(nfp_prog, meta, 4); + return mem_ldx(nfp_prog, meta, 4); +} + +static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return mem_ldx(nfp_prog, meta, 8); } static int mem_stx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) @@ -1137,6 +1208,9 @@ static int mem_stx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { + if (meta->ptr.type == PTR_TO_PACKET) + return -EOPNOTSUPP; + if (nfp_prog->act == NN_ACT_XDP) return mem_stx4_xdp(nfp_prog, meta); return mem_stx4_skb(nfp_prog, meta); @@ -1354,7 +1428,10 @@ static const instr_cb_t instr_cb[256] = { [BPF_LD | BPF_IND | BPF_B] = data_ind_ld1, [BPF_LD | BPF_IND | BPF_H] = data_ind_ld2, [BPF_LD | BPF_IND | BPF_W] = data_ind_ld4, + [BPF_LDX | BPF_MEM | BPF_B] = mem_ldx1, + [BPF_LDX | BPF_MEM | BPF_H] = mem_ldx2, [BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4, + [BPF_LDX | BPF_MEM | BPF_DW] = mem_ldx8, [BPF_STX | BPF_MEM | BPF_W] = mem_stx4, [BPF_JMP | BPF_JA | BPF_K] = jump, [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index b7a112acbdb7..d77e88a45409 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -36,6 +36,7 @@ #include #include +#include #include #include @@ -96,6 +97,7 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *); /** * struct nfp_insn_meta - BPF instruction wrapper * @insn: BPF instruction + * @ptr: pointer type for memory operations * @off: index of first generated machine instruction (in nfp_prog.prog) * @n: eBPF instruction number * @skip: skip this instruction (optimized out) @@ -104,6 +106,7 @@ typedef int 
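/* instr_cb_t: one JIT translation callback per eBPF opcode, dispatched
 * through the instr_cb[] table
 */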
(*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *); */ struct nfp_insn_meta { struct bpf_insn insn; + struct bpf_reg_state ptr; unsigned int off; unsigned short n; bool skip; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 5b783a91b115..e361c0e3b788 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -112,12 +112,19 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog, } static int -nfp_bpf_check_ctx_ptr(struct nfp_prog *nfp_prog, - const struct bpf_verifier_env *env, u8 reg) +nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + const struct bpf_verifier_env *env, u8 reg) { - if (env->cur_state.regs[reg].type != PTR_TO_CTX) + if (env->cur_state.regs[reg].type != PTR_TO_CTX && + env->cur_state.regs[reg].type != PTR_TO_PACKET) return -EINVAL; + if (meta->ptr.type != NOT_INIT && + meta->ptr.type != env->cur_state.regs[reg].type) + return -EINVAL; + + meta->ptr = env->cur_state.regs[reg]; + return 0; } @@ -145,11 +152,11 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) return nfp_bpf_check_exit(priv->prog, env); if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM)) - return nfp_bpf_check_ctx_ptr(priv->prog, env, - meta->insn.src_reg); + return nfp_bpf_check_ptr(priv->prog, meta, env, + meta->insn.src_reg); if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM)) - return nfp_bpf_check_ctx_ptr(priv->prog, env, - meta->insn.dst_reg); + return nfp_bpf_check_ptr(priv->prog, meta, env, + meta->insn.dst_reg); return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c index de76e7444fc2..7cae99b3e00a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c @@ -42,6 +42,9 @@ const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = { [CMD_TGT_WRITE8] = { 0x00, 0x42 }, [CMD_TGT_READ8] = { 0x01, 0x43 }, + [CMD_TGT_READ32] = { 0x00, 0x5c }, + [CMD_TGT_READ32_LE] = { 0x01, 0x5c }, + [CMD_TGT_READ32_SWAP] = { 0x02, 0x5c }, [CMD_TGT_READ_LE] = { 0x01, 0x40 }, [CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 }, }; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index c4c18dd5630a..e3df7a26724f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -153,6 +153,7 @@ enum shf_op { enum shf_sc { SHF_SC_R_ROT = 0, + SHF_SC_NONE = SHF_SC_R_ROT, SHF_SC_R_SHF = 1, SHF_SC_L_SHF = 2, SHF_SC_R_DSHF = 3, @@ -217,6 +218,9 @@ struct cmd_tgt_act { enum cmd_tgt_map { CMD_TGT_READ8, CMD_TGT_WRITE8, + CMD_TGT_READ32, + CMD_TGT_READ32_LE, + CMD_TGT_READ32_SWAP, CMD_TGT_READ_LE, CMD_TGT_READ_SWAP_LE, __CMD_TGT_MAP_SIZE, -- cgit v1.2.3 From e663fe3863ad20c5e6a84a1a1d47aff8e71f583f Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 12 Oct 2017 10:34:17 -0700 Subject: nfp: bpf: direct packet access - write This patch adds ability to write packet contents using pre-validated packet pointers (direct packet access). Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 114 +++++++++++++++++++++++++-- drivers/net/ethernet/netronome/nfp/nfp_asm.c | 2 +- drivers/net/ethernet/netronome/nfp/nfp_asm.h | 2 +- 3 files changed, 109 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 975d63fbc1d5..139a4ebdc774 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -607,6 +607,35 @@ static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size) return data_ld(nfp_prog, tmp_reg, 0, size); } +static int +data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset, + u8 src_gpr, u8 size) +{ + unsigned int i; + + for (i = 0; i * 4 < size; i++) + wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i)); + + emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0, + reg_a(dst_gpr), offset, size - 1, true); + + return 0; +} + +static int +data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset, + u64 imm, u8 size) +{ + wrp_immed(nfp_prog, reg_xfer(0), imm); + if (size == 8) + wrp_immed(nfp_prog, reg_xfer(1), imm >> 32); + + emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0, + reg_a(dst_gpr), offset, size - 1, true); + + return 0; +} + static void wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm) { @@ -1196,24 +1225,88 @@ static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return mem_ldx(nfp_prog, meta, 8); } -static int mem_stx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +static int +mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + unsigned int size) +{ + u64 imm = meta->insn.imm; /* sign extend */ + swreg off_reg; + + off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); + + return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg, + imm, size); +} + +static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + unsigned int size) { + if (meta->ptr.type == PTR_TO_PACKET) + return mem_st_data(nfp_prog, meta, size); + return -EOPNOTSUPP; } -static int mem_stx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return mem_st(nfp_prog, meta, 1); +} + +static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return mem_st(nfp_prog, meta, 2); +} + +static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return mem_st(nfp_prog, meta, 4); +} + +static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { + return mem_st(nfp_prog, meta, 8); +} + +static int +mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + unsigned int size) +{ + swreg off_reg; + + off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog)); + + return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg, + meta->insn.src_reg * 2, size); +} + +static int +mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + unsigned int size) +{ + if (meta->ptr.type == PTR_TO_PACKET) + return mem_stx_data(nfp_prog, meta, size); + return -EOPNOTSUPP; } +static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return mem_stx(nfp_prog, meta, 1); +} + +static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return mem_stx(nfp_prog, meta, 2); +} + static int mem_stx4(struct nfp_prog *nfp_prog, 
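/* every BPF_ST/BPF_STX size now funnels through mem_st()/mem_stx(),
 * which accept only PTR_TO_PACKET destinations
 */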
struct nfp_insn_meta *meta) { - if (meta->ptr.type == PTR_TO_PACKET) - return -EOPNOTSUPP; + return mem_stx(nfp_prog, meta, 4); +} - if (nfp_prog->act == NN_ACT_XDP) - return mem_stx4_xdp(nfp_prog, meta); - return mem_stx4_skb(nfp_prog, meta); +static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return mem_stx(nfp_prog, meta, 8); } static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) @@ -1432,7 +1525,14 @@ static const instr_cb_t instr_cb[256] = { [BPF_LDX | BPF_MEM | BPF_H] = mem_ldx2, [BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4, [BPF_LDX | BPF_MEM | BPF_DW] = mem_ldx8, + [BPF_STX | BPF_MEM | BPF_B] = mem_stx1, + [BPF_STX | BPF_MEM | BPF_H] = mem_stx2, [BPF_STX | BPF_MEM | BPF_W] = mem_stx4, + [BPF_STX | BPF_MEM | BPF_DW] = mem_stx8, + [BPF_ST | BPF_MEM | BPF_B] = mem_st1, + [BPF_ST | BPF_MEM | BPF_H] = mem_st2, + [BPF_ST | BPF_MEM | BPF_W] = mem_st4, + [BPF_ST | BPF_MEM | BPF_DW] = mem_st8, [BPF_JMP | BPF_JA | BPF_K] = jump, [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm, [BPF_JMP | BPF_JGT | BPF_K] = jgt_imm, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c index 7cae99b3e00a..830f6de25f47 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c @@ -40,7 +40,7 @@ #include "nfp_asm.h" const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = { - [CMD_TGT_WRITE8] = { 0x00, 0x42 }, + [CMD_TGT_WRITE8_SWAP] = { 0x02, 0x42 }, [CMD_TGT_READ8] = { 0x01, 0x43 }, [CMD_TGT_READ32] = { 0x00, 0x5c }, [CMD_TGT_READ32_LE] = { 0x01, 0x5c }, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index e3df7a26724f..c26aa7e4a839 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -217,7 +217,7 @@ struct cmd_tgt_act { enum cmd_tgt_map { CMD_TGT_READ8, - CMD_TGT_WRITE8, + CMD_TGT_WRITE8_SWAP, CMD_TGT_READ32, CMD_TGT_READ32_LE, CMD_TGT_READ32_SWAP, -- cgit v1.2.3 From bfddbc8adcd471806f2369d347a958d11e80f53b Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 12 Oct 2017 10:34:18 -0700 Subject: nfp: bpf: support direct packet access in TC Add support for direct packet access in TC, note that because writing the packet will cause the verifier to generate a csum fixup prologue we won't be able to offload packet writes from TC, just yet, only the reads will work. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 139a4ebdc774..23fb11a41cc4 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1135,12 +1135,25 @@ static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 size) { + swreg dst = reg_both(meta->insn.dst_reg * 2); + switch (meta->insn.off) { case offsetof(struct sk_buff, len): if (size != FIELD_SIZEOF(struct sk_buff, len)) return -EOPNOTSUPP; - wrp_mov(nfp_prog, - reg_both(meta->insn.dst_reg * 2), plen_reg(nfp_prog)); + wrp_mov(nfp_prog, dst, plen_reg(nfp_prog)); + break; + case offsetof(struct sk_buff, data): + if (size != sizeof(void *)) + return -EOPNOTSUPP; + wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); + break; + case offsetof(struct sk_buff, cb) + + offsetof(struct bpf_skb_data_end, data_end): + if (size != sizeof(void *)) + return -EOPNOTSUPP; + emit_alu(nfp_prog, dst, + plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); break; default: return -EOPNOTSUPP; -- cgit v1.2.3 From ae904beaea48d369205c81dbffecc23afcec46de Mon Sep 17 00:00:00 2001 From: Feras Daoud Date: Mon, 14 Aug 2017 11:23:27 +0300 Subject: net/mlx5: File renaming towards ptp core implementation en_clock.c renamed clock.c and moved to lib/ as first step towards relocating code to core part of the driver to allow sharing between Ethernet and Infiniband. Signed-off-by: Feras Daoud Signed-off-by: Eitan Rabin Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 2 +- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 4 +- drivers/net/ethernet/mellanox/mlx5/core/en_clock.c | 619 --------------------- .../net/ethernet/mellanox/mlx5/core/lib/clock.c | 619 +++++++++++++++++++++ 4 files changed, 622 insertions(+), 622 deletions(-) delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_clock.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index fdaef00465d7..25deaa5a534c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -6,6 +6,7 @@ config MLX5_CORE tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver" depends on MAY_USE_DEVLINK depends on PCI + imply PTP_1588_CLOCK default n ---help--- Core driver for low level functionality of the ConnectX-4 and @@ -29,7 +30,6 @@ config MLX5_CORE_EN bool "Mellanox Technologies ConnectX-4 Ethernet support" depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE depends on IPV6=y || IPV6=n || MLX5_CORE=m - imply PTP_1588_CLOCK default n ---help--- Ethernet support in Mellanox Technologies ConnectX-4 NIC. 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 87a3099808f3..d9621b2152d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -4,7 +4,7 @@ subdir-ccflags-y += -I$(src) mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \ - fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o \ + fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o lib/clock.o \ diag/fs_tracepoint.o mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o @@ -13,7 +13,7 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ fpga/ipsec.o mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ - en_tx.o en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \ + en_tx.o en_rx.o en_rx_am.o en_txrx.o vxlan.o \ en_arfs.o en_fs_ethtool.o en_selftest.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c deleted file mode 100644 index 84dd63e74041..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c +++ /dev/null @@ -1,619 +0,0 @@ -/* - * Copyright (c) 2015, Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include -#include "en.h" - -enum { - MLX5E_CYCLES_SHIFT = 23 -}; - -enum { - MLX5E_PIN_MODE_IN = 0x0, - MLX5E_PIN_MODE_OUT = 0x1, -}; - -enum { - MLX5E_OUT_PATTERN_PULSE = 0x0, - MLX5E_OUT_PATTERN_PERIODIC = 0x1, -}; - -enum { - MLX5E_EVENT_MODE_DISABLE = 0x0, - MLX5E_EVENT_MODE_REPETETIVE = 0x1, - MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2, -}; - -enum { - MLX5E_MTPPS_FS_ENABLE = BIT(0x0), - MLX5E_MTPPS_FS_PATTERN = BIT(0x2), - MLX5E_MTPPS_FS_PIN_MODE = BIT(0x3), - MLX5E_MTPPS_FS_TIME_STAMP = BIT(0x4), - MLX5E_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5), - MLX5E_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7), -}; - -void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp, - struct skb_shared_hwtstamps *hwts) -{ - u64 nsec; - - read_lock(&tstamp->lock); - nsec = timecounter_cyc2time(&tstamp->clock, timestamp); - read_unlock(&tstamp->lock); - - hwts->hwtstamp = ns_to_ktime(nsec); -} - -static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc) -{ - struct mlx5e_tstamp *tstamp = container_of(cc, struct mlx5e_tstamp, - cycles); - - return mlx5_read_internal_timer(tstamp->mdev) & cc->mask; -} - -static void mlx5e_pps_out(struct work_struct *work) -{ - struct mlx5e_pps *pps_info = container_of(work, struct mlx5e_pps, - out_work); - struct mlx5e_tstamp *tstamp = container_of(pps_info, struct mlx5e_tstamp, - pps_info); - u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; - unsigned long flags; - int i; - - for (i = 0; i < tstamp->ptp_info.n_pins; i++) { - u64 tstart; - - write_lock_irqsave(&tstamp->lock, flags); - tstart = tstamp->pps_info.start[i]; - tstamp->pps_info.start[i] = 0; - write_unlock_irqrestore(&tstamp->lock, flags); - if (!tstart) - continue; - - MLX5_SET(mtpps_reg, in, pin, i); - MLX5_SET64(mtpps_reg, in, time_stamp, tstart); - MLX5_SET(mtpps_reg, in, field_select, MLX5E_MTPPS_FS_TIME_STAMP); - mlx5_set_mtpps(tstamp->mdev, in, sizeof(in)); - } -} - -static void mlx5e_timestamp_overflow(struct work_struct *work) -{ - struct delayed_work *dwork = to_delayed_work(work); - struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp, - overflow_work); - struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp); - unsigned long flags; - - write_lock_irqsave(&tstamp->lock, flags); - timecounter_read(&tstamp->clock); - write_unlock_irqrestore(&tstamp->lock, flags); - queue_delayed_work(priv->wq, &tstamp->overflow_work, - msecs_to_jiffies(tstamp->overflow_period * 1000)); -} - -int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) -{ - struct hwtstamp_config config; - int err; - - if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) - return -EOPNOTSUPP; - - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - /* TX HW timestamp */ - switch (config.tx_type) { - case HWTSTAMP_TX_OFF: - case HWTSTAMP_TX_ON: - break; - default: - return -ERANGE; - } - - mutex_lock(&priv->state_lock); - /* RX HW timestamp */ - switch (config.rx_filter) { - case HWTSTAMP_FILTER_NONE: - /* Reset CQE compression to Admin default */ - mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def); - break; - case HWTSTAMP_FILTER_ALL: - case HWTSTAMP_FILTER_SOME: - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: - 
case HWTSTAMP_FILTER_PTP_V2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - case HWTSTAMP_FILTER_NTP_ALL: - /* Disable CQE compression */ - netdev_warn(priv->netdev, "Disabling cqe compression"); - err = mlx5e_modify_rx_cqe_compression_locked(priv, false); - if (err) { - netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err); - mutex_unlock(&priv->state_lock); - return err; - } - config.rx_filter = HWTSTAMP_FILTER_ALL; - break; - default: - mutex_unlock(&priv->state_lock); - return -ERANGE; - } - - memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config)); - mutex_unlock(&priv->state_lock); - - return copy_to_user(ifr->ifr_data, &config, - sizeof(config)) ? -EFAULT : 0; -} - -int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr) -{ - struct hwtstamp_config *cfg = &priv->tstamp.hwtstamp_config; - - if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) - return -EOPNOTSUPP; - - return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0; -} - -static int mlx5e_ptp_settime(struct ptp_clock_info *ptp, - const struct timespec64 *ts) -{ - struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, - ptp_info); - u64 ns = timespec64_to_ns(ts); - unsigned long flags; - - write_lock_irqsave(&tstamp->lock, flags); - timecounter_init(&tstamp->clock, &tstamp->cycles, ns); - write_unlock_irqrestore(&tstamp->lock, flags); - - return 0; -} - -static int mlx5e_ptp_gettime(struct ptp_clock_info *ptp, - struct timespec64 *ts) -{ - struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, - ptp_info); - u64 ns; - unsigned long flags; - - write_lock_irqsave(&tstamp->lock, flags); - ns = timecounter_read(&tstamp->clock); - write_unlock_irqrestore(&tstamp->lock, flags); - - *ts = ns_to_timespec64(ns); - - return 0; -} - -static int mlx5e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) -{ - struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, - ptp_info); - unsigned long flags; - - write_lock_irqsave(&tstamp->lock, flags); - timecounter_adjtime(&tstamp->clock, delta); - write_unlock_irqrestore(&tstamp->lock, flags); - - return 0; -} - -static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) -{ - u64 adj; - u32 diff; - unsigned long flags; - int neg_adj = 0; - struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, - ptp_info); - - if (delta < 0) { - neg_adj = 1; - delta = -delta; - } - - adj = tstamp->nominal_c_mult; - adj *= delta; - diff = div_u64(adj, 1000000000ULL); - - write_lock_irqsave(&tstamp->lock, flags); - timecounter_read(&tstamp->clock); - tstamp->cycles.mult = neg_adj ? 
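/* diff = nominal_c_mult * |delta| / 10^9: a parts-per-billion trim
 * of the cycle-to-ns multiplier
 */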
tstamp->nominal_c_mult - diff : - tstamp->nominal_c_mult + diff; - write_unlock_irqrestore(&tstamp->lock, flags); - - return 0; -} - -static int mlx5e_extts_configure(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, - int on) -{ - struct mlx5e_tstamp *tstamp = - container_of(ptp, struct mlx5e_tstamp, ptp_info); - struct mlx5e_priv *priv = - container_of(tstamp, struct mlx5e_priv, tstamp); - u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; - u32 field_select = 0; - u8 pin_mode = 0; - u8 pattern = 0; - int pin = -1; - int err = 0; - - if (!MLX5_PPS_CAP(priv->mdev)) - return -EOPNOTSUPP; - - if (rq->extts.index >= tstamp->ptp_info.n_pins) - return -EINVAL; - - if (on) { - pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index); - if (pin < 0) - return -EBUSY; - pin_mode = MLX5E_PIN_MODE_IN; - pattern = !!(rq->extts.flags & PTP_FALLING_EDGE); - field_select = MLX5E_MTPPS_FS_PIN_MODE | - MLX5E_MTPPS_FS_PATTERN | - MLX5E_MTPPS_FS_ENABLE; - } else { - pin = rq->extts.index; - field_select = MLX5E_MTPPS_FS_ENABLE; - } - - MLX5_SET(mtpps_reg, in, pin, pin); - MLX5_SET(mtpps_reg, in, pin_mode, pin_mode); - MLX5_SET(mtpps_reg, in, pattern, pattern); - MLX5_SET(mtpps_reg, in, enable, on); - MLX5_SET(mtpps_reg, in, field_select, field_select); - - err = mlx5_set_mtpps(priv->mdev, in, sizeof(in)); - if (err) - return err; - - return mlx5_set_mtppse(priv->mdev, pin, 0, - MLX5E_EVENT_MODE_REPETETIVE & on); -} - -static int mlx5e_perout_configure(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, - int on) -{ - struct mlx5e_tstamp *tstamp = - container_of(ptp, struct mlx5e_tstamp, ptp_info); - struct mlx5e_priv *priv = - container_of(tstamp, struct mlx5e_priv, tstamp); - u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; - u64 nsec_now, nsec_delta, time_stamp = 0; - u64 cycles_now, cycles_delta; - struct timespec64 ts; - unsigned long flags; - u32 field_select = 0; - u8 pin_mode = 0; - u8 pattern = 0; - int pin = -1; - int err = 0; - s64 ns; - - if (!MLX5_PPS_CAP(priv->mdev)) - return -EOPNOTSUPP; - - if (rq->perout.index >= tstamp->ptp_info.n_pins) - return -EINVAL; - - if (on) { - pin = ptp_find_pin(tstamp->ptp, PTP_PF_PEROUT, - rq->perout.index); - if (pin < 0) - return -EBUSY; - - pin_mode = MLX5E_PIN_MODE_OUT; - pattern = MLX5E_OUT_PATTERN_PERIODIC; - ts.tv_sec = rq->perout.period.sec; - ts.tv_nsec = rq->perout.period.nsec; - ns = timespec64_to_ns(&ts); - - if ((ns >> 1) != 500000000LL) - return -EINVAL; - - ts.tv_sec = rq->perout.start.sec; - ts.tv_nsec = rq->perout.start.nsec; - ns = timespec64_to_ns(&ts); - cycles_now = mlx5_read_internal_timer(tstamp->mdev); - write_lock_irqsave(&tstamp->lock, flags); - nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now); - nsec_delta = ns - nsec_now; - cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift, - tstamp->cycles.mult); - write_unlock_irqrestore(&tstamp->lock, flags); - time_stamp = cycles_now + cycles_delta; - field_select = MLX5E_MTPPS_FS_PIN_MODE | - MLX5E_MTPPS_FS_PATTERN | - MLX5E_MTPPS_FS_ENABLE | - MLX5E_MTPPS_FS_TIME_STAMP; - } else { - pin = rq->perout.index; - field_select = MLX5E_MTPPS_FS_ENABLE; - } - - MLX5_SET(mtpps_reg, in, pin, pin); - MLX5_SET(mtpps_reg, in, pin_mode, pin_mode); - MLX5_SET(mtpps_reg, in, pattern, pattern); - MLX5_SET(mtpps_reg, in, enable, on); - MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp); - MLX5_SET(mtpps_reg, in, field_select, field_select); - - err = mlx5_set_mtpps(priv->mdev, in, sizeof(in)); - if (err) - return err; - - return mlx5_set_mtppse(priv->mdev, pin, 0, - 
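/* MLX5E_EVENT_MODE_REPETETIVE & on yields repetitive events when
 * enabling, and MLX5E_EVENT_MODE_DISABLE (0) when turning off
 */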
MLX5E_EVENT_MODE_REPETETIVE & on); -} - -static int mlx5e_pps_configure(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, - int on) -{ - struct mlx5e_tstamp *tstamp = - container_of(ptp, struct mlx5e_tstamp, ptp_info); - - tstamp->pps_info.enabled = !!on; - return 0; -} - -static int mlx5e_ptp_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, - int on) -{ - switch (rq->type) { - case PTP_CLK_REQ_EXTTS: - return mlx5e_extts_configure(ptp, rq, on); - case PTP_CLK_REQ_PEROUT: - return mlx5e_perout_configure(ptp, rq, on); - case PTP_CLK_REQ_PPS: - return mlx5e_pps_configure(ptp, rq, on); - default: - return -EOPNOTSUPP; - } - return 0; -} - -static int mlx5e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, - enum ptp_pin_function func, unsigned int chan) -{ - return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0; -} - -static const struct ptp_clock_info mlx5e_ptp_clock_info = { - .owner = THIS_MODULE, - .max_adj = 100000000, - .n_alarm = 0, - .n_ext_ts = 0, - .n_per_out = 0, - .n_pins = 0, - .pps = 0, - .adjfreq = mlx5e_ptp_adjfreq, - .adjtime = mlx5e_ptp_adjtime, - .gettime64 = mlx5e_ptp_gettime, - .settime64 = mlx5e_ptp_settime, - .enable = NULL, - .verify = NULL, -}; - -static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp) -{ - tstamp->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; - tstamp->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; -} - -static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp) -{ - int i; - - tstamp->ptp_info.pin_config = - kzalloc(sizeof(*tstamp->ptp_info.pin_config) * - tstamp->ptp_info.n_pins, GFP_KERNEL); - if (!tstamp->ptp_info.pin_config) - return -ENOMEM; - tstamp->ptp_info.enable = mlx5e_ptp_enable; - tstamp->ptp_info.verify = mlx5e_ptp_verify; - tstamp->ptp_info.pps = 1; - - for (i = 0; i < tstamp->ptp_info.n_pins; i++) { - snprintf(tstamp->ptp_info.pin_config[i].name, - sizeof(tstamp->ptp_info.pin_config[i].name), - "mlx5_pps%d", i); - tstamp->ptp_info.pin_config[i].index = i; - tstamp->ptp_info.pin_config[i].func = PTP_PF_NONE; - tstamp->ptp_info.pin_config[i].chan = i; - } - - return 0; -} - -static void mlx5e_get_pps_caps(struct mlx5e_priv *priv, - struct mlx5e_tstamp *tstamp) -{ - u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; - - mlx5_query_mtpps(priv->mdev, out, sizeof(out)); - - tstamp->ptp_info.n_pins = MLX5_GET(mtpps_reg, out, - cap_number_of_pps_pins); - tstamp->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out, - cap_max_num_of_pps_in_pins); - tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out, - cap_max_num_of_pps_out_pins); - - tstamp->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode); - tstamp->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode); - tstamp->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode); - tstamp->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode); - tstamp->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode); - tstamp->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode); - tstamp->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode); - tstamp->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode); -} - -void mlx5e_pps_event_handler(struct mlx5e_priv *priv, - struct ptp_clock_event *event) -{ - struct net_device *netdev = priv->netdev; - struct mlx5e_tstamp *tstamp = &priv->tstamp; - struct timespec64 ts; - u64 nsec_now, nsec_delta; - u64 cycles_now, cycles_delta; - int pin = event->index; - s64 ns; - unsigned long flags; - - switch (tstamp->ptp_info.pin_config[pin].func) { - case 
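/* EXTTS pins double as the PPS source: reported as PTP_CLOCK_PPSUSR
 * while 1PPS is enabled, as plain PTP_CLOCK_EXTTS otherwise
 */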
PTP_PF_EXTTS: - if (tstamp->pps_info.enabled) { - event->type = PTP_CLOCK_PPSUSR; - event->pps_times.ts_real = ns_to_timespec64(event->timestamp); - } else { - event->type = PTP_CLOCK_EXTTS; - } - ptp_clock_event(tstamp->ptp, event); - break; - case PTP_PF_PEROUT: - mlx5e_ptp_gettime(&tstamp->ptp_info, &ts); - cycles_now = mlx5_read_internal_timer(tstamp->mdev); - ts.tv_sec += 1; - ts.tv_nsec = 0; - ns = timespec64_to_ns(&ts); - write_lock_irqsave(&tstamp->lock, flags); - nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now); - nsec_delta = ns - nsec_now; - cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift, - tstamp->cycles.mult); - tstamp->pps_info.start[pin] = cycles_now + cycles_delta; - queue_work(priv->wq, &tstamp->pps_info.out_work); - write_unlock_irqrestore(&tstamp->lock, flags); - break; - default: - netdev_err(netdev, "%s: Unhandled event\n", __func__); - } -} - -void mlx5e_timestamp_init(struct mlx5e_priv *priv) -{ - struct mlx5e_tstamp *tstamp = &priv->tstamp; - u64 ns; - u64 frac = 0; - u32 dev_freq; - - mlx5e_timestamp_init_config(tstamp); - dev_freq = MLX5_CAP_GEN(priv->mdev, device_frequency_khz); - if (!dev_freq) { - mlx5_core_warn(priv->mdev, "invalid device_frequency_khz, aborting HW clock init\n"); - return; - } - rwlock_init(&tstamp->lock); - tstamp->cycles.read = mlx5e_read_internal_timer; - tstamp->cycles.shift = MLX5E_CYCLES_SHIFT; - tstamp->cycles.mult = clocksource_khz2mult(dev_freq, - tstamp->cycles.shift); - tstamp->nominal_c_mult = tstamp->cycles.mult; - tstamp->cycles.mask = CLOCKSOURCE_MASK(41); - tstamp->mdev = priv->mdev; - - timecounter_init(&tstamp->clock, &tstamp->cycles, - ktime_to_ns(ktime_get_real())); - - /* Calculate period in seconds to call the overflow watchdog - to make - * sure counter is checked at least once every wrap around. - */ - ns = cyclecounter_cyc2ns(&tstamp->cycles, tstamp->cycles.mask, - frac, &frac); - do_div(ns, NSEC_PER_SEC / 2 / HZ); - tstamp->overflow_period = ns; - - INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out); - INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow); - if (tstamp->overflow_period) - queue_delayed_work(priv->wq, &tstamp->overflow_work, 0); - else - mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n"); - - /* Configure the PHC */ - tstamp->ptp_info = mlx5e_ptp_clock_info; - snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp"); - - /* Initialize 1PPS data structures */ - if (MLX5_PPS_CAP(priv->mdev)) - mlx5e_get_pps_caps(priv, tstamp); - if (tstamp->ptp_info.n_pins) - mlx5e_init_pin_config(tstamp); - - tstamp->ptp = ptp_clock_register(&tstamp->ptp_info, - &priv->mdev->pdev->dev); - if (IS_ERR(tstamp->ptp)) { - mlx5_core_warn(priv->mdev, "ptp_clock_register failed %ld\n", - PTR_ERR(tstamp->ptp)); - tstamp->ptp = NULL; - } -} - -void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv) -{ - struct mlx5e_tstamp *tstamp = &priv->tstamp; - - if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) - return; - - if (priv->tstamp.ptp) { - ptp_clock_unregister(priv->tstamp.ptp); - priv->tstamp.ptp = NULL; - } - - cancel_work_sync(&tstamp->pps_info.out_work); - cancel_delayed_work_sync(&tstamp->overflow_work); - kfree(tstamp->ptp_info.pin_config); -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c new file mode 100644 index 000000000000..84dd63e74041 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -0,0 +1,619 @@ +/* + * Copyright (c) 2015, Mellanox Technologies. 
All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include "en.h" + +enum { + MLX5E_CYCLES_SHIFT = 23 +}; + +enum { + MLX5E_PIN_MODE_IN = 0x0, + MLX5E_PIN_MODE_OUT = 0x1, +}; + +enum { + MLX5E_OUT_PATTERN_PULSE = 0x0, + MLX5E_OUT_PATTERN_PERIODIC = 0x1, +}; + +enum { + MLX5E_EVENT_MODE_DISABLE = 0x0, + MLX5E_EVENT_MODE_REPETETIVE = 0x1, + MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2, +}; + +enum { + MLX5E_MTPPS_FS_ENABLE = BIT(0x0), + MLX5E_MTPPS_FS_PATTERN = BIT(0x2), + MLX5E_MTPPS_FS_PIN_MODE = BIT(0x3), + MLX5E_MTPPS_FS_TIME_STAMP = BIT(0x4), + MLX5E_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5), + MLX5E_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7), +}; + +void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp, + struct skb_shared_hwtstamps *hwts) +{ + u64 nsec; + + read_lock(&tstamp->lock); + nsec = timecounter_cyc2time(&tstamp->clock, timestamp); + read_unlock(&tstamp->lock); + + hwts->hwtstamp = ns_to_ktime(nsec); +} + +static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc) +{ + struct mlx5e_tstamp *tstamp = container_of(cc, struct mlx5e_tstamp, + cycles); + + return mlx5_read_internal_timer(tstamp->mdev) & cc->mask; +} + +static void mlx5e_pps_out(struct work_struct *work) +{ + struct mlx5e_pps *pps_info = container_of(work, struct mlx5e_pps, + out_work); + struct mlx5e_tstamp *tstamp = container_of(pps_info, struct mlx5e_tstamp, + pps_info); + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + unsigned long flags; + int i; + + for (i = 0; i < tstamp->ptp_info.n_pins; i++) { + u64 tstart; + + write_lock_irqsave(&tstamp->lock, flags); + tstart = tstamp->pps_info.start[i]; + tstamp->pps_info.start[i] = 0; + write_unlock_irqrestore(&tstamp->lock, flags); + if (!tstart) + continue; + + MLX5_SET(mtpps_reg, in, pin, i); + MLX5_SET64(mtpps_reg, in, time_stamp, tstart); + MLX5_SET(mtpps_reg, in, field_select, MLX5E_MTPPS_FS_TIME_STAMP); + mlx5_set_mtpps(tstamp->mdev, in, sizeof(in)); + } +} + +static void mlx5e_timestamp_overflow(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp, + overflow_work); + struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp); + unsigned long flags; + + 
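/* a periodic timecounter_read() keeps the 41-bit cycle counter from
 * wrapping unnoticed between reads
 */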
write_lock_irqsave(&tstamp->lock, flags); + timecounter_read(&tstamp->clock); + write_unlock_irqrestore(&tstamp->lock, flags); + queue_delayed_work(priv->wq, &tstamp->overflow_work, + msecs_to_jiffies(tstamp->overflow_period * 1000)); +} + +int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int err; + + if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) + return -EOPNOTSUPP; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + /* TX HW timestamp */ + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + mutex_lock(&priv->state_lock); + /* RX HW timestamp */ + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + /* Reset CQE compression to Admin default */ + mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def); + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_NTP_ALL: + /* Disable CQE compression */ + netdev_warn(priv->netdev, "Disabling cqe compression"); + err = mlx5e_modify_rx_cqe_compression_locked(priv, false); + if (err) { + netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err); + mutex_unlock(&priv->state_lock); + return err; + } + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + mutex_unlock(&priv->state_lock); + return -ERANGE; + } + + memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config)); + mutex_unlock(&priv->state_lock); + + return copy_to_user(ifr->ifr_data, &config, + sizeof(config)) ? -EFAULT : 0; +} + +int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr) +{ + struct hwtstamp_config *cfg = &priv->tstamp.hwtstamp_config; + + if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) + return -EOPNOTSUPP; + + return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? 
-EFAULT : 0; +} + +static int mlx5e_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, + ptp_info); + u64 ns = timespec64_to_ns(ts); + unsigned long flags; + + write_lock_irqsave(&tstamp->lock, flags); + timecounter_init(&tstamp->clock, &tstamp->cycles, ns); + write_unlock_irqrestore(&tstamp->lock, flags); + + return 0; +} + +static int mlx5e_ptp_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, + ptp_info); + u64 ns; + unsigned long flags; + + write_lock_irqsave(&tstamp->lock, flags); + ns = timecounter_read(&tstamp->clock); + write_unlock_irqrestore(&tstamp->lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int mlx5e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, + ptp_info); + unsigned long flags; + + write_lock_irqsave(&tstamp->lock, flags); + timecounter_adjtime(&tstamp->clock, delta); + write_unlock_irqrestore(&tstamp->lock, flags); + + return 0; +} + +static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) +{ + u64 adj; + u32 diff; + unsigned long flags; + int neg_adj = 0; + struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, + ptp_info); + + if (delta < 0) { + neg_adj = 1; + delta = -delta; + } + + adj = tstamp->nominal_c_mult; + adj *= delta; + diff = div_u64(adj, 1000000000ULL); + + write_lock_irqsave(&tstamp->lock, flags); + timecounter_read(&tstamp->clock); + tstamp->cycles.mult = neg_adj ? tstamp->nominal_c_mult - diff : + tstamp->nominal_c_mult + diff; + write_unlock_irqrestore(&tstamp->lock, flags); + + return 0; +} + +static int mlx5e_extts_configure(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int on) +{ + struct mlx5e_tstamp *tstamp = + container_of(ptp, struct mlx5e_tstamp, ptp_info); + struct mlx5e_priv *priv = + container_of(tstamp, struct mlx5e_priv, tstamp); + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + u32 field_select = 0; + u8 pin_mode = 0; + u8 pattern = 0; + int pin = -1; + int err = 0; + + if (!MLX5_PPS_CAP(priv->mdev)) + return -EOPNOTSUPP; + + if (rq->extts.index >= tstamp->ptp_info.n_pins) + return -EINVAL; + + if (on) { + pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index); + if (pin < 0) + return -EBUSY; + pin_mode = MLX5E_PIN_MODE_IN; + pattern = !!(rq->extts.flags & PTP_FALLING_EDGE); + field_select = MLX5E_MTPPS_FS_PIN_MODE | + MLX5E_MTPPS_FS_PATTERN | + MLX5E_MTPPS_FS_ENABLE; + } else { + pin = rq->extts.index; + field_select = MLX5E_MTPPS_FS_ENABLE; + } + + MLX5_SET(mtpps_reg, in, pin, pin); + MLX5_SET(mtpps_reg, in, pin_mode, pin_mode); + MLX5_SET(mtpps_reg, in, pattern, pattern); + MLX5_SET(mtpps_reg, in, enable, on); + MLX5_SET(mtpps_reg, in, field_select, field_select); + + err = mlx5_set_mtpps(priv->mdev, in, sizeof(in)); + if (err) + return err; + + return mlx5_set_mtppse(priv->mdev, pin, 0, + MLX5E_EVENT_MODE_REPETETIVE & on); +} + +static int mlx5e_perout_configure(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int on) +{ + struct mlx5e_tstamp *tstamp = + container_of(ptp, struct mlx5e_tstamp, ptp_info); + struct mlx5e_priv *priv = + container_of(tstamp, struct mlx5e_priv, tstamp); + u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + u64 nsec_now, nsec_delta, time_stamp = 0; + u64 cycles_now, cycles_delta; + struct timespec64 ts; + unsigned long flags; + u32 field_select = 0; + u8 pin_mode = 0; + u8 pattern = 
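/* MLX5E_OUT_PATTERN_PULSE (0) by default; switched to PERIODIC when
 * periodic output is being enabled below
 */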
0; + int pin = -1; + int err = 0; + s64 ns; + + if (!MLX5_PPS_CAP(priv->mdev)) + return -EOPNOTSUPP; + + if (rq->perout.index >= tstamp->ptp_info.n_pins) + return -EINVAL; + + if (on) { + pin = ptp_find_pin(tstamp->ptp, PTP_PF_PEROUT, + rq->perout.index); + if (pin < 0) + return -EBUSY; + + pin_mode = MLX5E_PIN_MODE_OUT; + pattern = MLX5E_OUT_PATTERN_PERIODIC; + ts.tv_sec = rq->perout.period.sec; + ts.tv_nsec = rq->perout.period.nsec; + ns = timespec64_to_ns(&ts); + + if ((ns >> 1) != 500000000LL) + return -EINVAL; + + ts.tv_sec = rq->perout.start.sec; + ts.tv_nsec = rq->perout.start.nsec; + ns = timespec64_to_ns(&ts); + cycles_now = mlx5_read_internal_timer(tstamp->mdev); + write_lock_irqsave(&tstamp->lock, flags); + nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now); + nsec_delta = ns - nsec_now; + cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift, + tstamp->cycles.mult); + write_unlock_irqrestore(&tstamp->lock, flags); + time_stamp = cycles_now + cycles_delta; + field_select = MLX5E_MTPPS_FS_PIN_MODE | + MLX5E_MTPPS_FS_PATTERN | + MLX5E_MTPPS_FS_ENABLE | + MLX5E_MTPPS_FS_TIME_STAMP; + } else { + pin = rq->perout.index; + field_select = MLX5E_MTPPS_FS_ENABLE; + } + + MLX5_SET(mtpps_reg, in, pin, pin); + MLX5_SET(mtpps_reg, in, pin_mode, pin_mode); + MLX5_SET(mtpps_reg, in, pattern, pattern); + MLX5_SET(mtpps_reg, in, enable, on); + MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp); + MLX5_SET(mtpps_reg, in, field_select, field_select); + + err = mlx5_set_mtpps(priv->mdev, in, sizeof(in)); + if (err) + return err; + + return mlx5_set_mtppse(priv->mdev, pin, 0, + MLX5E_EVENT_MODE_REPETETIVE & on); +} + +static int mlx5e_pps_configure(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int on) +{ + struct mlx5e_tstamp *tstamp = + container_of(ptp, struct mlx5e_tstamp, ptp_info); + + tstamp->pps_info.enabled = !!on; + return 0; +} + +static int mlx5e_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int on) +{ + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + return mlx5e_extts_configure(ptp, rq, on); + case PTP_CLK_REQ_PEROUT: + return mlx5e_perout_configure(ptp, rq, on); + case PTP_CLK_REQ_PPS: + return mlx5e_pps_configure(ptp, rq, on); + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int mlx5e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + return (func == PTP_PF_PHYSYNC) ? 
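/* every pin function except PHYSYNC is acceptable */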
-EOPNOTSUPP : 0; +} + +static const struct ptp_clock_info mlx5e_ptp_clock_info = { + .owner = THIS_MODULE, + .max_adj = 100000000, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + .n_pins = 0, + .pps = 0, + .adjfreq = mlx5e_ptp_adjfreq, + .adjtime = mlx5e_ptp_adjtime, + .gettime64 = mlx5e_ptp_gettime, + .settime64 = mlx5e_ptp_settime, + .enable = NULL, + .verify = NULL, +}; + +static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp) +{ + tstamp->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; + tstamp->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; +} + +static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp) +{ + int i; + + tstamp->ptp_info.pin_config = + kzalloc(sizeof(*tstamp->ptp_info.pin_config) * + tstamp->ptp_info.n_pins, GFP_KERNEL); + if (!tstamp->ptp_info.pin_config) + return -ENOMEM; + tstamp->ptp_info.enable = mlx5e_ptp_enable; + tstamp->ptp_info.verify = mlx5e_ptp_verify; + tstamp->ptp_info.pps = 1; + + for (i = 0; i < tstamp->ptp_info.n_pins; i++) { + snprintf(tstamp->ptp_info.pin_config[i].name, + sizeof(tstamp->ptp_info.pin_config[i].name), + "mlx5_pps%d", i); + tstamp->ptp_info.pin_config[i].index = i; + tstamp->ptp_info.pin_config[i].func = PTP_PF_NONE; + tstamp->ptp_info.pin_config[i].chan = i; + } + + return 0; +} + +static void mlx5e_get_pps_caps(struct mlx5e_priv *priv, + struct mlx5e_tstamp *tstamp) +{ + u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; + + mlx5_query_mtpps(priv->mdev, out, sizeof(out)); + + tstamp->ptp_info.n_pins = MLX5_GET(mtpps_reg, out, + cap_number_of_pps_pins); + tstamp->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out, + cap_max_num_of_pps_in_pins); + tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out, + cap_max_num_of_pps_out_pins); + + tstamp->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode); + tstamp->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode); + tstamp->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode); + tstamp->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode); + tstamp->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode); + tstamp->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode); + tstamp->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode); + tstamp->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode); +} + +void mlx5e_pps_event_handler(struct mlx5e_priv *priv, + struct ptp_clock_event *event) +{ + struct net_device *netdev = priv->netdev; + struct mlx5e_tstamp *tstamp = &priv->tstamp; + struct timespec64 ts; + u64 nsec_now, nsec_delta; + u64 cycles_now, cycles_delta; + int pin = event->index; + s64 ns; + unsigned long flags; + + switch (tstamp->ptp_info.pin_config[pin].func) { + case PTP_PF_EXTTS: + if (tstamp->pps_info.enabled) { + event->type = PTP_CLOCK_PPSUSR; + event->pps_times.ts_real = ns_to_timespec64(event->timestamp); + } else { + event->type = PTP_CLOCK_EXTTS; + } + ptp_clock_event(tstamp->ptp, event); + break; + case PTP_PF_PEROUT: + mlx5e_ptp_gettime(&tstamp->ptp_info, &ts); + cycles_now = mlx5_read_internal_timer(tstamp->mdev); + ts.tv_sec += 1; + ts.tv_nsec = 0; + ns = timespec64_to_ns(&ts); + write_lock_irqsave(&tstamp->lock, flags); + nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now); + nsec_delta = ns - nsec_now; + cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift, + tstamp->cycles.mult); + tstamp->pps_info.start[pin] = cycles_now + cycles_delta; + queue_work(priv->wq, &tstamp->pps_info.out_work); + write_unlock_irqrestore(&tstamp->lock, flags); + break; + default: + 
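/* an event fired on a pin with no EXTTS/PEROUT function configured */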
netdev_err(netdev, "%s: Unhandled event\n", __func__); + } +} + +void mlx5e_timestamp_init(struct mlx5e_priv *priv) +{ + struct mlx5e_tstamp *tstamp = &priv->tstamp; + u64 ns; + u64 frac = 0; + u32 dev_freq; + + mlx5e_timestamp_init_config(tstamp); + dev_freq = MLX5_CAP_GEN(priv->mdev, device_frequency_khz); + if (!dev_freq) { + mlx5_core_warn(priv->mdev, "invalid device_frequency_khz, aborting HW clock init\n"); + return; + } + rwlock_init(&tstamp->lock); + tstamp->cycles.read = mlx5e_read_internal_timer; + tstamp->cycles.shift = MLX5E_CYCLES_SHIFT; + tstamp->cycles.mult = clocksource_khz2mult(dev_freq, + tstamp->cycles.shift); + tstamp->nominal_c_mult = tstamp->cycles.mult; + tstamp->cycles.mask = CLOCKSOURCE_MASK(41); + tstamp->mdev = priv->mdev; + + timecounter_init(&tstamp->clock, &tstamp->cycles, + ktime_to_ns(ktime_get_real())); + + /* Calculate period in seconds to call the overflow watchdog - to make + * sure counter is checked at least once every wrap around. + */ + ns = cyclecounter_cyc2ns(&tstamp->cycles, tstamp->cycles.mask, + frac, &frac); + do_div(ns, NSEC_PER_SEC / 2 / HZ); + tstamp->overflow_period = ns; + + INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out); + INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow); + if (tstamp->overflow_period) + queue_delayed_work(priv->wq, &tstamp->overflow_work, 0); + else + mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n"); + + /* Configure the PHC */ + tstamp->ptp_info = mlx5e_ptp_clock_info; + snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp"); + + /* Initialize 1PPS data structures */ + if (MLX5_PPS_CAP(priv->mdev)) + mlx5e_get_pps_caps(priv, tstamp); + if (tstamp->ptp_info.n_pins) + mlx5e_init_pin_config(tstamp); + + tstamp->ptp = ptp_clock_register(&tstamp->ptp_info, + &priv->mdev->pdev->dev); + if (IS_ERR(tstamp->ptp)) { + mlx5_core_warn(priv->mdev, "ptp_clock_register failed %ld\n", + PTR_ERR(tstamp->ptp)); + tstamp->ptp = NULL; + } +} + +void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv) +{ + struct mlx5e_tstamp *tstamp = &priv->tstamp; + + if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) + return; + + if (priv->tstamp.ptp) { + ptp_clock_unregister(priv->tstamp.ptp); + priv->tstamp.ptp = NULL; + } + + cancel_work_sync(&tstamp->pps_info.out_work); + cancel_delayed_work_sync(&tstamp->overflow_work); + kfree(tstamp->ptp_info.pin_config); +} -- cgit v1.2.3 From 7c39afb394c79e72c3795b4a42d55155b34ee073 Mon Sep 17 00:00:00 2001 From: Feras Daoud Date: Tue, 15 Aug 2017 13:46:04 +0300 Subject: net/mlx5: PTP code migration to driver core section PTP code is moved to core section of mlx5 driver in order to share it between ethernet and infiniband. 
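For example, once the clock lives in the core, any consumer can stamp packets through the shared mlx5_timecounter_cyc2time() helper introduced below in lib/clock.h. A minimal sketch, mirroring the en_rx.c hunk of this patch (just the call pattern, not a new API):

	if (rq->tstamp->rx_filter == HWTSTAMP_FILTER_ALL)
		skb_hwtstamps(skb)->hwtstamp =
			mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));

The IPoIB RX path makes the same call, which is the point of the migration.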
This movement involves the following changes:
- Change mlx5e_ prefix to be mlx5_
- Add clock structs to Core
- Add clock object to mlx5_core_dev
- Call Init/Uninit clock from core init/cleanup
- Rename mlx5e_tstamp to be mlx5_clock
Signed-off-by: Feras Daoud Signed-off-by: Eitan Rabin Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 39 +- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 7 +- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 95 +++- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 17 +- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 6 +- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 3 +- .../net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 3 +- .../net/ethernet/mellanox/mlx5/core/lib/clock.c | 548 +++++++++------------ .../net/ethernet/mellanox/mlx5/core/lib/clock.h | 51 ++ drivers/net/ethernet/mellanox/mlx5/core/main.c | 4 + .../net/ethernet/mellanox/mlx5/core/mlx5_core.h | 1 + include/linux/mlx5/driver.h | 24 + 12 files changed, 416 insertions(+), 382 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index cc13d3dbd366..2059122eb089 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -267,28 +267,6 @@ struct mlx5e_dcbx { }; #endif -#define MAX_PIN_NUM 8 -struct mlx5e_pps { - u8 pin_caps[MAX_PIN_NUM]; - struct work_struct out_work; - u64 start[MAX_PIN_NUM]; - u8 enabled; -}; - -struct mlx5e_tstamp { - rwlock_t lock; - struct cyclecounter cycles; - struct timecounter clock; - struct hwtstamp_config hwtstamp_config; - u32 nominal_c_mult; - unsigned long overflow_period; - struct delayed_work overflow_work; - struct mlx5_core_dev *mdev; - struct ptp_clock *ptp; - struct ptp_clock_info ptp_info; - struct mlx5e_pps pps_info; -}; - enum { MLX5E_RQ_STATE_ENABLED, MLX5E_RQ_STATE_AM, @@ -375,9 +353,10 @@ struct mlx5e_txqsq { u8 min_inline_mode; u16 edge; struct device *pdev; - struct mlx5e_tstamp *tstamp; __be32 mkey_be; unsigned long state; + struct hwtstamp_config *tstamp; + struct mlx5_clock *clock; /* control path */ struct mlx5_wq_ctrl wq_ctrl; @@ -543,10 +522,11 @@ struct mlx5e_rq { struct mlx5e_channel *channel; struct device *pdev; struct net_device *netdev; - struct mlx5e_tstamp *tstamp; struct mlx5e_rq_stats stats; struct mlx5e_cq cq; struct mlx5e_page_cache page_cache; + struct hwtstamp_config *tstamp; + struct mlx5_clock *clock; mlx5e_fp_handle_rx_cqe handle_rx_cqe; mlx5e_fp_post_rx_wqes post_wqes; @@ -588,7 +568,7 @@ struct mlx5e_channel { /* control */ struct mlx5e_priv *priv; struct mlx5_core_dev *mdev; - struct mlx5e_tstamp *tstamp; + struct hwtstamp_config *tstamp; int ix; }; @@ -789,7 +769,7 @@ struct mlx5e_priv { struct mlx5_core_dev *mdev; struct net_device *netdev; struct mlx5e_stats stats; - struct mlx5e_tstamp tstamp; + struct hwtstamp_config tstamp; u16 q_counter; #ifdef CONFIG_MLX5_CORE_EN_DCB struct mlx5e_dcbx dcbx; @@ -873,12 +853,6 @@ void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv); void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv); void mlx5e_set_rx_mode_work(struct work_struct *work); -void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp, - struct skb_shared_hwtstamps *hwts); -void mlx5e_timestamp_init(struct mlx5e_priv *priv); -void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv); -void mlx5e_pps_event_handler(struct mlx5e_priv *priv, - struct
ptp_clock_event *event); int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr); int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr); int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val); @@ -889,6 +863,7 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv); void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv); +void mlx5e_timestamp_set(struct mlx5e_priv *priv); struct mlx5e_redirect_rqt_param { bool is_rss; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index d12e9fc0d76b..81a112e40fe3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1417,14 +1417,15 @@ static int mlx5e_set_pauseparam(struct net_device *netdev, int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, struct ethtool_ts_info *info) { + struct mlx5_core_dev *mdev = priv->mdev; int ret; ret = ethtool_op_get_ts_info(priv->netdev, info); if (ret) return ret; - info->phc_index = priv->tstamp.ptp ? - ptp_clock_index(priv->tstamp.ptp) : -1; + info->phc_index = mdev->clock.ptp ? + ptp_clock_index(mdev->clock.ptp) : -1; if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) return 0; @@ -1754,7 +1755,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev, if (!MLX5_CAP_GEN(mdev, cqe_compression)) return -EOPNOTSUPP; - if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) { + if (enable && priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) { netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index cc11bbbd0309..6df00dd9745a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -373,8 +373,6 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, enum mlx5_dev_event event, unsigned long param) { struct mlx5e_priv *priv = vpriv; - struct ptp_clock_event ptp_event; - struct mlx5_eqe *eqe = NULL; if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state)) return; @@ -384,14 +382,6 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, case MLX5_DEV_EVENT_PORT_DOWN: queue_work(priv->wq, &priv->update_carrier_work); break; - case MLX5_DEV_EVENT_PPS: - eqe = (struct mlx5_eqe *)param; - ptp_event.index = eqe->data.pps.pin; - ptp_event.timestamp = - timecounter_cyc2time(&priv->tstamp.clock, - be64_to_cpu(eqe->data.pps.time_stamp)); - mlx5e_pps_event_handler(vpriv, &ptp_event); - break; default: break; } @@ -585,6 +575,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, rq->pdev = c->pdev; rq->netdev = c->netdev; rq->tstamp = c->tstamp; + rq->clock = &mdev->clock; rq->channel = c; rq->ix = c->ix; rq->mdev = mdev; @@ -1123,6 +1114,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, sq->pdev = c->pdev; sq->tstamp = c->tstamp; + sq->clock = &mdev->clock; sq->mkey_be = c->mkey_be; sq->channel = c; sq->txq_ix = txq_ix; @@ -2678,6 +2670,12 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv, netif_carrier_on(netdev); } +void mlx5e_timestamp_set(struct mlx5e_priv *priv) +{ + priv->tstamp.tx_type = HWTSTAMP_TX_OFF; + priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE; +} + int mlx5e_open_locked(struct net_device *netdev) { struct mlx5e_priv 
*priv = netdev_priv(netdev); @@ -2693,7 +2691,7 @@ int mlx5e_open_locked(struct net_device *netdev) mlx5e_activate_priv_channels(priv); if (priv->profile->update_carrier) priv->profile->update_carrier(priv); - mlx5e_timestamp_init(priv); + mlx5e_timestamp_set(priv); if (priv->profile->update_stats) queue_delayed_work(priv->wq, &priv->update_stats_work, 0); @@ -2731,7 +2729,6 @@ int mlx5e_close_locked(struct net_device *netdev) clear_bit(MLX5E_STATE_OPENED, &priv->state); - mlx5e_timestamp_cleanup(priv); netif_carrier_off(priv->netdev); mlx5e_deactivate_priv_channels(priv); mlx5e_close_channels(&priv->channels); @@ -3403,6 +3400,80 @@ out: return err; } +int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) +{ + struct hwtstamp_config config; + int err; + + if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) + return -EOPNOTSUPP; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + /* TX HW timestamp */ + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + mutex_lock(&priv->state_lock); + /* RX HW timestamp */ + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + /* Reset CQE compression to Admin default */ + mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def); + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_NTP_ALL: + /* Disable CQE compression */ + netdev_warn(priv->netdev, "Disabling cqe compression"); + err = mlx5e_modify_rx_cqe_compression_locked(priv, false); + if (err) { + netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err); + mutex_unlock(&priv->state_lock); + return err; + } + config.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + mutex_unlock(&priv->state_lock); + return -ERANGE; + } + + memcpy(&priv->tstamp, &config, sizeof(config)); + mutex_unlock(&priv->state_lock); + + return copy_to_user(ifr->ifr_data, &config, + sizeof(config)) ? -EFAULT : 0; +} + +int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr) +{ + struct hwtstamp_config *cfg = &priv->tstamp; + + if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) + return -EOPNOTSUPP; + + return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? 
-EFAULT : 0; +} + static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct mlx5e_priv *priv = netdev_priv(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 15a1687483cc..7e3bfe62ef6e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -42,10 +42,11 @@ #include "en_rep.h" #include "ipoib/ipoib.h" #include "en_accel/ipsec_rxtx.h" +#include "lib/clock.h" -static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp) +static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) { - return tstamp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL; + return config->rx_filter == HWTSTAMP_FILTER_ALL; } static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc, @@ -661,7 +662,6 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb) { struct net_device *netdev = rq->netdev; - struct mlx5e_tstamp *tstamp = rq->tstamp; int lro_num_seg; lro_num_seg = be32_to_cpu(cqe->srqn) >> 24; @@ -676,8 +676,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, rq->stats.lro_bytes += cqe_bcnt; } - if (unlikely(mlx5e_rx_hw_stamp(tstamp))) - mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb)); + if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp))) + skb_hwtstamps(skb)->hwtstamp = + mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe)); skb_record_rx_queue(skb, rq->ix); @@ -1163,7 +1164,6 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, struct sk_buff *skb) { struct net_device *netdev = rq->netdev; - struct mlx5e_tstamp *tstamp = rq->tstamp; char *pseudo_header; u8 *dgid; u8 g; @@ -1188,8 +1188,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = csum_unfold((__force __sum16)cqe->check_sum); - if (unlikely(mlx5e_rx_hw_stamp(tstamp))) - mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb)); + if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp))) + skb_hwtstamps(skb)->hwtstamp = + mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe)); skb_record_rx_queue(skb, rq->ix); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 1d6925d4369a..a7c208a1ad83 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -35,6 +35,7 @@ #include "en.h" #include "ipoib/ipoib.h" #include "en_accel/ipsec_rxtx.h" +#include "lib/clock.h" #define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS #define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\ @@ -452,8 +453,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) SKBTX_HW_TSTAMP)) { struct skb_shared_hwtstamps hwts = {}; - mlx5e_fill_hwstamp(sq->tstamp, - get_cqe_ts(cqe), &hwts); + hwts.hwtstamp = + mlx5_timecounter_cyc2time(sq->clock, + get_cqe_ts(cqe)); skb_tstamp_tx(skb, &hwts); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index fc606bfd1d6e..60771865c99c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -491,8 +491,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) break; case MLX5_EVENT_TYPE_PPS_EVENT: - if (dev->event) - dev->event(dev, MLX5_DEV_EVENT_PPS, (unsigned long)eqe); + mlx5_pps_event(dev, eqe); break; case MLX5_EVENT_TYPE_FPGA_ERROR: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c 
b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 145e392ab849..14dfb577691b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -404,7 +404,7 @@ static int mlx5i_open(struct net_device *netdev) mlx5e_refresh_tirs(priv, false); mlx5e_activate_priv_channels(priv); - mlx5e_timestamp_init(priv); + mlx5e_timestamp_set(priv); mutex_unlock(&priv->state_lock); return 0; @@ -429,7 +429,6 @@ static int mlx5i_close(struct net_device *netdev) clear_bit(MLX5E_STATE_OPENED, &priv->state); - mlx5e_timestamp_cleanup(priv); netif_carrier_off(priv->netdev); mlx5e_deactivate_priv_channels(priv); mlx5e_close_channels(&priv->channels); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 84dd63e74041..fa8aed62b231 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -34,250 +34,164 @@ #include "en.h" enum { - MLX5E_CYCLES_SHIFT = 23 + MLX5_CYCLES_SHIFT = 23 }; enum { - MLX5E_PIN_MODE_IN = 0x0, - MLX5E_PIN_MODE_OUT = 0x1, + MLX5_PIN_MODE_IN = 0x0, + MLX5_PIN_MODE_OUT = 0x1, }; enum { - MLX5E_OUT_PATTERN_PULSE = 0x0, - MLX5E_OUT_PATTERN_PERIODIC = 0x1, + MLX5_OUT_PATTERN_PULSE = 0x0, + MLX5_OUT_PATTERN_PERIODIC = 0x1, }; enum { - MLX5E_EVENT_MODE_DISABLE = 0x0, - MLX5E_EVENT_MODE_REPETETIVE = 0x1, - MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2, + MLX5_EVENT_MODE_DISABLE = 0x0, + MLX5_EVENT_MODE_REPETETIVE = 0x1, + MLX5_EVENT_MODE_ONCE_TILL_ARM = 0x2, }; enum { - MLX5E_MTPPS_FS_ENABLE = BIT(0x0), - MLX5E_MTPPS_FS_PATTERN = BIT(0x2), - MLX5E_MTPPS_FS_PIN_MODE = BIT(0x3), - MLX5E_MTPPS_FS_TIME_STAMP = BIT(0x4), - MLX5E_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5), - MLX5E_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7), + MLX5_MTPPS_FS_ENABLE = BIT(0x0), + MLX5_MTPPS_FS_PATTERN = BIT(0x2), + MLX5_MTPPS_FS_PIN_MODE = BIT(0x3), + MLX5_MTPPS_FS_TIME_STAMP = BIT(0x4), + MLX5_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5), + MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7), }; -void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp, - struct skb_shared_hwtstamps *hwts) +static u64 read_internal_timer(const struct cyclecounter *cc) { - u64 nsec; + struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles); + struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, + clock); - read_lock(&tstamp->lock); - nsec = timecounter_cyc2time(&tstamp->clock, timestamp); - read_unlock(&tstamp->lock); - - hwts->hwtstamp = ns_to_ktime(nsec); -} - -static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc) -{ - struct mlx5e_tstamp *tstamp = container_of(cc, struct mlx5e_tstamp, - cycles); - - return mlx5_read_internal_timer(tstamp->mdev) & cc->mask; + return mlx5_read_internal_timer(mdev) & cc->mask; } -static void mlx5e_pps_out(struct work_struct *work) +static void mlx5_pps_out(struct work_struct *work) { - struct mlx5e_pps *pps_info = container_of(work, struct mlx5e_pps, - out_work); - struct mlx5e_tstamp *tstamp = container_of(pps_info, struct mlx5e_tstamp, - pps_info); + struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps, + out_work); + struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock, + pps_info); + struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, + clock); u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; unsigned long flags; int i; - for (i = 0; i < tstamp->ptp_info.n_pins; i++) { + for (i = 0; i < clock->ptp_info.n_pins; i++) { u64 tstart; - 
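/* consume the pulse time armed by the PPS event handler and, if set,
 * program it into the MTPPS register for this pin
 */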
write_lock_irqsave(&tstamp->lock, flags); - tstart = tstamp->pps_info.start[i]; - tstamp->pps_info.start[i] = 0; - write_unlock_irqrestore(&tstamp->lock, flags); + write_lock_irqsave(&clock->lock, flags); + tstart = clock->pps_info.start[i]; + clock->pps_info.start[i] = 0; + write_unlock_irqrestore(&clock->lock, flags); if (!tstart) continue; MLX5_SET(mtpps_reg, in, pin, i); MLX5_SET64(mtpps_reg, in, time_stamp, tstart); - MLX5_SET(mtpps_reg, in, field_select, MLX5E_MTPPS_FS_TIME_STAMP); - mlx5_set_mtpps(tstamp->mdev, in, sizeof(in)); + MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP); + mlx5_set_mtpps(mdev, in, sizeof(in)); } } -static void mlx5e_timestamp_overflow(struct work_struct *work) +static void mlx5_timestamp_overflow(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); - struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp, - overflow_work); - struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp); + struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock, + overflow_work); unsigned long flags; - write_lock_irqsave(&tstamp->lock, flags); - timecounter_read(&tstamp->clock); - write_unlock_irqrestore(&tstamp->lock, flags); - queue_delayed_work(priv->wq, &tstamp->overflow_work, - msecs_to_jiffies(tstamp->overflow_period * 1000)); -} - -int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) -{ - struct hwtstamp_config config; - int err; - - if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) - return -EOPNOTSUPP; - - if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) - return -EFAULT; - - /* TX HW timestamp */ - switch (config.tx_type) { - case HWTSTAMP_TX_OFF: - case HWTSTAMP_TX_ON: - break; - default: - return -ERANGE; - } - - mutex_lock(&priv->state_lock); - /* RX HW timestamp */ - switch (config.rx_filter) { - case HWTSTAMP_FILTER_NONE: - /* Reset CQE compression to Admin default */ - mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def); - break; - case HWTSTAMP_FILTER_ALL: - case HWTSTAMP_FILTER_SOME: - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - case HWTSTAMP_FILTER_NTP_ALL: - /* Disable CQE compression */ - netdev_warn(priv->netdev, "Disabling cqe compression"); - err = mlx5e_modify_rx_cqe_compression_locked(priv, false); - if (err) { - netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err); - mutex_unlock(&priv->state_lock); - return err; - } - config.rx_filter = HWTSTAMP_FILTER_ALL; - break; - default: - mutex_unlock(&priv->state_lock); - return -ERANGE; - } - - memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config)); - mutex_unlock(&priv->state_lock); - - return copy_to_user(ifr->ifr_data, &config, - sizeof(config)) ? -EFAULT : 0; -} - -int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr) -{ - struct hwtstamp_config *cfg = &priv->tstamp.hwtstamp_config; - - if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) - return -EOPNOTSUPP; - - return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? 
-EFAULT : 0; + write_lock_irqsave(&clock->lock, flags); + timecounter_read(&clock->tc); + write_unlock_irqrestore(&clock->lock, flags); + schedule_delayed_work(&clock->overflow_work, clock->overflow_period); } -static int mlx5e_ptp_settime(struct ptp_clock_info *ptp, - const struct timespec64 *ts) +static int mlx5_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) { - struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, - ptp_info); + struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, + ptp_info); u64 ns = timespec64_to_ns(ts); unsigned long flags; - write_lock_irqsave(&tstamp->lock, flags); - timecounter_init(&tstamp->clock, &tstamp->cycles, ns); - write_unlock_irqrestore(&tstamp->lock, flags); + write_lock_irqsave(&clock->lock, flags); + timecounter_init(&clock->tc, &clock->cycles, ns); + write_unlock_irqrestore(&clock->lock, flags); return 0; } -static int mlx5e_ptp_gettime(struct ptp_clock_info *ptp, - struct timespec64 *ts) +static int mlx5_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { - struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, - ptp_info); + struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, + ptp_info); u64 ns; unsigned long flags; - write_lock_irqsave(&tstamp->lock, flags); - ns = timecounter_read(&tstamp->clock); - write_unlock_irqrestore(&tstamp->lock, flags); + write_lock_irqsave(&clock->lock, flags); + ns = timecounter_read(&clock->tc); + write_unlock_irqrestore(&clock->lock, flags); *ts = ns_to_timespec64(ns); return 0; } -static int mlx5e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { - struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, - ptp_info); + struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, + ptp_info); unsigned long flags; - write_lock_irqsave(&tstamp->lock, flags); - timecounter_adjtime(&tstamp->clock, delta); - write_unlock_irqrestore(&tstamp->lock, flags); + write_lock_irqsave(&clock->lock, flags); + timecounter_adjtime(&clock->tc, delta); + write_unlock_irqrestore(&clock->lock, flags); return 0; } -static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) +static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) { u64 adj; u32 diff; unsigned long flags; int neg_adj = 0; - struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, - ptp_info); + struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, + ptp_info); if (delta < 0) { neg_adj = 1; delta = -delta; } - adj = tstamp->nominal_c_mult; + adj = clock->nominal_c_mult; adj *= delta; diff = div_u64(adj, 1000000000ULL); - write_lock_irqsave(&tstamp->lock, flags); - timecounter_read(&tstamp->clock); - tstamp->cycles.mult = neg_adj ? tstamp->nominal_c_mult - diff : - tstamp->nominal_c_mult + diff; - write_unlock_irqrestore(&tstamp->lock, flags); + write_lock_irqsave(&clock->lock, flags); + timecounter_read(&clock->tc); + clock->cycles.mult = neg_adj ? 
clock->nominal_c_mult - diff : + clock->nominal_c_mult + diff; + write_unlock_irqrestore(&clock->lock, flags); return 0; } -static int mlx5e_extts_configure(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, - int on) +static int mlx5_extts_configure(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int on) { - struct mlx5e_tstamp *tstamp = - container_of(ptp, struct mlx5e_tstamp, ptp_info); - struct mlx5e_priv *priv = - container_of(tstamp, struct mlx5e_priv, tstamp); + struct mlx5_clock *clock = + container_of(ptp, struct mlx5_clock, ptp_info); + struct mlx5_core_dev *mdev = + container_of(clock, struct mlx5_core_dev, clock); u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; u32 field_select = 0; u8 pin_mode = 0; @@ -285,24 +199,24 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp, int pin = -1; int err = 0; - if (!MLX5_PPS_CAP(priv->mdev)) + if (!MLX5_PPS_CAP(mdev)) return -EOPNOTSUPP; - if (rq->extts.index >= tstamp->ptp_info.n_pins) + if (rq->extts.index >= clock->ptp_info.n_pins) return -EINVAL; if (on) { - pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index); + pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index); if (pin < 0) return -EBUSY; - pin_mode = MLX5E_PIN_MODE_IN; + pin_mode = MLX5_PIN_MODE_IN; pattern = !!(rq->extts.flags & PTP_FALLING_EDGE); - field_select = MLX5E_MTPPS_FS_PIN_MODE | - MLX5E_MTPPS_FS_PATTERN | - MLX5E_MTPPS_FS_ENABLE; + field_select = MLX5_MTPPS_FS_PIN_MODE | + MLX5_MTPPS_FS_PATTERN | + MLX5_MTPPS_FS_ENABLE; } else { pin = rq->extts.index; - field_select = MLX5E_MTPPS_FS_ENABLE; + field_select = MLX5_MTPPS_FS_ENABLE; } MLX5_SET(mtpps_reg, in, pin, pin); @@ -311,22 +225,22 @@ static int mlx5e_extts_configure(struct ptp_clock_info *ptp, MLX5_SET(mtpps_reg, in, enable, on); MLX5_SET(mtpps_reg, in, field_select, field_select); - err = mlx5_set_mtpps(priv->mdev, in, sizeof(in)); + err = mlx5_set_mtpps(mdev, in, sizeof(in)); if (err) return err; - return mlx5_set_mtppse(priv->mdev, pin, 0, - MLX5E_EVENT_MODE_REPETETIVE & on); + return mlx5_set_mtppse(mdev, pin, 0, + MLX5_EVENT_MODE_REPETETIVE & on); } -static int mlx5e_perout_configure(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, - int on) +static int mlx5_perout_configure(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int on) { - struct mlx5e_tstamp *tstamp = - container_of(ptp, struct mlx5e_tstamp, ptp_info); - struct mlx5e_priv *priv = - container_of(tstamp, struct mlx5e_priv, tstamp); + struct mlx5_clock *clock = + container_of(ptp, struct mlx5_clock, ptp_info); + struct mlx5_core_dev *mdev = + container_of(clock, struct mlx5_core_dev, clock); u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; u64 nsec_now, nsec_delta, time_stamp = 0; u64 cycles_now, cycles_delta; @@ -339,20 +253,20 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp, int err = 0; s64 ns; - if (!MLX5_PPS_CAP(priv->mdev)) + if (!MLX5_PPS_CAP(mdev)) return -EOPNOTSUPP; - if (rq->perout.index >= tstamp->ptp_info.n_pins) + if (rq->perout.index >= clock->ptp_info.n_pins) return -EINVAL; if (on) { - pin = ptp_find_pin(tstamp->ptp, PTP_PF_PEROUT, + pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index); if (pin < 0) return -EBUSY; - pin_mode = MLX5E_PIN_MODE_OUT; - pattern = MLX5E_OUT_PATTERN_PERIODIC; + pin_mode = MLX5_PIN_MODE_OUT; + pattern = MLX5_OUT_PATTERN_PERIODIC; ts.tv_sec = rq->perout.period.sec; ts.tv_nsec = rq->perout.period.nsec; ns = timespec64_to_ns(&ts); @@ -363,21 +277,21 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp, ts.tv_sec = 
rq->perout.start.sec; ts.tv_nsec = rq->perout.start.nsec; ns = timespec64_to_ns(&ts); - cycles_now = mlx5_read_internal_timer(tstamp->mdev); - write_lock_irqsave(&tstamp->lock, flags); - nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now); + cycles_now = mlx5_read_internal_timer(mdev); + write_lock_irqsave(&clock->lock, flags); + nsec_now = timecounter_cyc2time(&clock->tc, cycles_now); nsec_delta = ns - nsec_now; - cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift, - tstamp->cycles.mult); - write_unlock_irqrestore(&tstamp->lock, flags); + cycles_delta = div64_u64(nsec_delta << clock->cycles.shift, + clock->cycles.mult); + write_unlock_irqrestore(&clock->lock, flags); time_stamp = cycles_now + cycles_delta; - field_select = MLX5E_MTPPS_FS_PIN_MODE | - MLX5E_MTPPS_FS_PATTERN | - MLX5E_MTPPS_FS_ENABLE | - MLX5E_MTPPS_FS_TIME_STAMP; + field_select = MLX5_MTPPS_FS_PIN_MODE | + MLX5_MTPPS_FS_PATTERN | + MLX5_MTPPS_FS_ENABLE | + MLX5_MTPPS_FS_TIME_STAMP; } else { pin = rq->perout.index; - field_select = MLX5E_MTPPS_FS_ENABLE; + field_select = MLX5_MTPPS_FS_ENABLE; } MLX5_SET(mtpps_reg, in, pin, pin); @@ -387,233 +301,225 @@ static int mlx5e_perout_configure(struct ptp_clock_info *ptp, MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp); MLX5_SET(mtpps_reg, in, field_select, field_select); - err = mlx5_set_mtpps(priv->mdev, in, sizeof(in)); + err = mlx5_set_mtpps(mdev, in, sizeof(in)); if (err) return err; - return mlx5_set_mtppse(priv->mdev, pin, 0, - MLX5E_EVENT_MODE_REPETETIVE & on); + return mlx5_set_mtppse(mdev, pin, 0, + MLX5_EVENT_MODE_REPETETIVE & on); } -static int mlx5e_pps_configure(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, - int on) +static int mlx5_pps_configure(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int on) { - struct mlx5e_tstamp *tstamp = - container_of(ptp, struct mlx5e_tstamp, ptp_info); + struct mlx5_clock *clock = + container_of(ptp, struct mlx5_clock, ptp_info); - tstamp->pps_info.enabled = !!on; + clock->pps_info.enabled = !!on; return 0; } -static int mlx5e_ptp_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, - int on) +static int mlx5_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int on) { switch (rq->type) { case PTP_CLK_REQ_EXTTS: - return mlx5e_extts_configure(ptp, rq, on); + return mlx5_extts_configure(ptp, rq, on); case PTP_CLK_REQ_PEROUT: - return mlx5e_perout_configure(ptp, rq, on); + return mlx5_perout_configure(ptp, rq, on); case PTP_CLK_REQ_PPS: - return mlx5e_pps_configure(ptp, rq, on); + return mlx5_pps_configure(ptp, rq, on); default: return -EOPNOTSUPP; } return 0; } -static int mlx5e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, - enum ptp_pin_function func, unsigned int chan) +static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) { return (func == PTP_PF_PHYSYNC) ? 
-EOPNOTSUPP : 0; } -static const struct ptp_clock_info mlx5e_ptp_clock_info = { +static const struct ptp_clock_info mlx5_ptp_clock_info = { .owner = THIS_MODULE, + .name = "mlx5_p2p", .max_adj = 100000000, .n_alarm = 0, .n_ext_ts = 0, .n_per_out = 0, .n_pins = 0, .pps = 0, - .adjfreq = mlx5e_ptp_adjfreq, - .adjtime = mlx5e_ptp_adjtime, - .gettime64 = mlx5e_ptp_gettime, - .settime64 = mlx5e_ptp_settime, + .adjfreq = mlx5_ptp_adjfreq, + .adjtime = mlx5_ptp_adjtime, + .gettime64 = mlx5_ptp_gettime, + .settime64 = mlx5_ptp_settime, .enable = NULL, .verify = NULL, }; -static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp) -{ - tstamp->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; - tstamp->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; -} - -static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp) +static int mlx5_init_pin_config(struct mlx5_clock *clock) { int i; - tstamp->ptp_info.pin_config = - kzalloc(sizeof(*tstamp->ptp_info.pin_config) * - tstamp->ptp_info.n_pins, GFP_KERNEL); - if (!tstamp->ptp_info.pin_config) + clock->ptp_info.pin_config = + kzalloc(sizeof(*clock->ptp_info.pin_config) * + clock->ptp_info.n_pins, GFP_KERNEL); + if (!clock->ptp_info.pin_config) return -ENOMEM; - tstamp->ptp_info.enable = mlx5e_ptp_enable; - tstamp->ptp_info.verify = mlx5e_ptp_verify; - tstamp->ptp_info.pps = 1; + clock->ptp_info.enable = mlx5_ptp_enable; + clock->ptp_info.verify = mlx5_ptp_verify; + clock->ptp_info.pps = 1; - for (i = 0; i < tstamp->ptp_info.n_pins; i++) { - snprintf(tstamp->ptp_info.pin_config[i].name, - sizeof(tstamp->ptp_info.pin_config[i].name), + for (i = 0; i < clock->ptp_info.n_pins; i++) { + snprintf(clock->ptp_info.pin_config[i].name, + sizeof(clock->ptp_info.pin_config[i].name), "mlx5_pps%d", i); - tstamp->ptp_info.pin_config[i].index = i; - tstamp->ptp_info.pin_config[i].func = PTP_PF_NONE; - tstamp->ptp_info.pin_config[i].chan = i; + clock->ptp_info.pin_config[i].index = i; + clock->ptp_info.pin_config[i].func = PTP_PF_NONE; + clock->ptp_info.pin_config[i].chan = i; } return 0; } -static void mlx5e_get_pps_caps(struct mlx5e_priv *priv, - struct mlx5e_tstamp *tstamp) +static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev) { + struct mlx5_clock *clock = &mdev->clock; u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; - mlx5_query_mtpps(priv->mdev, out, sizeof(out)); - - tstamp->ptp_info.n_pins = MLX5_GET(mtpps_reg, out, - cap_number_of_pps_pins); - tstamp->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out, - cap_max_num_of_pps_in_pins); - tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out, - cap_max_num_of_pps_out_pins); - - tstamp->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode); - tstamp->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode); - tstamp->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode); - tstamp->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode); - tstamp->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode); - tstamp->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode); - tstamp->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode); - tstamp->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode); + mlx5_query_mtpps(mdev, out, sizeof(out)); + + clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out, + cap_number_of_pps_pins); + clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out, + cap_max_num_of_pps_in_pins); + clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out, + cap_max_num_of_pps_out_pins); + + clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, 
cap_pin_0_mode); + clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode); + clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode); + clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode); + clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode); + clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode); + clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode); + clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode); } -void mlx5e_pps_event_handler(struct mlx5e_priv *priv, - struct ptp_clock_event *event) +void mlx5_pps_event(struct mlx5_core_dev *mdev, + struct mlx5_eqe *eqe) { - struct net_device *netdev = priv->netdev; - struct mlx5e_tstamp *tstamp = &priv->tstamp; + struct mlx5_clock *clock = &mdev->clock; + struct ptp_clock_event ptp_event; struct timespec64 ts; u64 nsec_now, nsec_delta; u64 cycles_now, cycles_delta; - int pin = event->index; + int pin = eqe->data.pps.pin; s64 ns; unsigned long flags; - switch (tstamp->ptp_info.pin_config[pin].func) { + switch (clock->ptp_info.pin_config[pin].func) { case PTP_PF_EXTTS: - if (tstamp->pps_info.enabled) { - event->type = PTP_CLOCK_PPSUSR; - event->pps_times.ts_real = ns_to_timespec64(event->timestamp); + if (clock->pps_info.enabled) { + ptp_event.type = PTP_CLOCK_PPSUSR; + ptp_event.pps_times.ts_real = ns_to_timespec64(eqe->data.pps.time_stamp); } else { - event->type = PTP_CLOCK_EXTTS; + ptp_event.type = PTP_CLOCK_EXTTS; } - ptp_clock_event(tstamp->ptp, event); + ptp_clock_event(clock->ptp, &ptp_event); break; case PTP_PF_PEROUT: - mlx5e_ptp_gettime(&tstamp->ptp_info, &ts); - cycles_now = mlx5_read_internal_timer(tstamp->mdev); + mlx5_ptp_gettime(&clock->ptp_info, &ts); + cycles_now = mlx5_read_internal_timer(mdev); ts.tv_sec += 1; ts.tv_nsec = 0; ns = timespec64_to_ns(&ts); - write_lock_irqsave(&tstamp->lock, flags); - nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now); + write_lock_irqsave(&clock->lock, flags); + nsec_now = timecounter_cyc2time(&clock->tc, cycles_now); nsec_delta = ns - nsec_now; - cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift, - tstamp->cycles.mult); - tstamp->pps_info.start[pin] = cycles_now + cycles_delta; - queue_work(priv->wq, &tstamp->pps_info.out_work); - write_unlock_irqrestore(&tstamp->lock, flags); + cycles_delta = div64_u64(nsec_delta << clock->cycles.shift, + clock->cycles.mult); + clock->pps_info.start[pin] = cycles_now + cycles_delta; + schedule_work(&clock->pps_info.out_work); + write_unlock_irqrestore(&clock->lock, flags); break; default: - netdev_err(netdev, "%s: Unhandled event\n", __func__); + mlx5_core_err(mdev, " Unhandled event\n"); } } -void mlx5e_timestamp_init(struct mlx5e_priv *priv) +void mlx5_init_clock(struct mlx5_core_dev *mdev) { - struct mlx5e_tstamp *tstamp = &priv->tstamp; + struct mlx5_clock *clock = &mdev->clock; u64 ns; u64 frac = 0; u32 dev_freq; - mlx5e_timestamp_init_config(tstamp); - dev_freq = MLX5_CAP_GEN(priv->mdev, device_frequency_khz); + dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz); if (!dev_freq) { - mlx5_core_warn(priv->mdev, "invalid device_frequency_khz, aborting HW clock init\n"); + mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n"); return; } - rwlock_init(&tstamp->lock); - tstamp->cycles.read = mlx5e_read_internal_timer; - tstamp->cycles.shift = MLX5E_CYCLES_SHIFT; - tstamp->cycles.mult = clocksource_khz2mult(dev_freq, - tstamp->cycles.shift); - tstamp->nominal_c_mult = tstamp->cycles.mult; - 
tstamp->cycles.mask = CLOCKSOURCE_MASK(41); - tstamp->mdev = priv->mdev; - - timecounter_init(&tstamp->clock, &tstamp->cycles, + rwlock_init(&clock->lock); + clock->cycles.read = read_internal_timer; + clock->cycles.shift = MLX5_CYCLES_SHIFT; + clock->cycles.mult = clocksource_khz2mult(dev_freq, + clock->cycles.shift); + clock->nominal_c_mult = clock->cycles.mult; + clock->cycles.mask = CLOCKSOURCE_MASK(41); + + timecounter_init(&clock->tc, &clock->cycles, ktime_to_ns(ktime_get_real())); /* Calculate period in seconds to call the overflow watchdog - to make * sure counter is checked at least once every wrap around. */ - ns = cyclecounter_cyc2ns(&tstamp->cycles, tstamp->cycles.mask, + ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask, frac, &frac); do_div(ns, NSEC_PER_SEC / 2 / HZ); - tstamp->overflow_period = ns; + clock->overflow_period = ns; - INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out); - INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow); - if (tstamp->overflow_period) - queue_delayed_work(priv->wq, &tstamp->overflow_work, 0); + INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out); + INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow); + if (clock->overflow_period) + schedule_delayed_work(&clock->overflow_work, 0); else - mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n"); + mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n"); /* Configure the PHC */ - tstamp->ptp_info = mlx5e_ptp_clock_info; - snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp"); + clock->ptp_info = mlx5_ptp_clock_info; /* Initialize 1PPS data structures */ - if (MLX5_PPS_CAP(priv->mdev)) - mlx5e_get_pps_caps(priv, tstamp); - if (tstamp->ptp_info.n_pins) - mlx5e_init_pin_config(tstamp); - - tstamp->ptp = ptp_clock_register(&tstamp->ptp_info, - &priv->mdev->pdev->dev); - if (IS_ERR(tstamp->ptp)) { - mlx5_core_warn(priv->mdev, "ptp_clock_register failed %ld\n", - PTR_ERR(tstamp->ptp)); - tstamp->ptp = NULL; + if (MLX5_PPS_CAP(mdev)) + mlx5_get_pps_caps(mdev); + if (clock->ptp_info.n_pins) + mlx5_init_pin_config(clock); + + clock->ptp = ptp_clock_register(&clock->ptp_info, + &mdev->pdev->dev); + if (IS_ERR(clock->ptp)) { + mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n", + PTR_ERR(clock->ptp)); + clock->ptp = NULL; } } -void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv) +void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) { - struct mlx5e_tstamp *tstamp = &priv->tstamp; + struct mlx5_clock *clock = &mdev->clock; - if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) + if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) return; - if (priv->tstamp.ptp) { - ptp_clock_unregister(priv->tstamp.ptp); - priv->tstamp.ptp = NULL; + if (clock->ptp) { + ptp_clock_unregister(clock->ptp); + clock->ptp = NULL; } - cancel_work_sync(&tstamp->pps_info.out_work); - cancel_delayed_work_sync(&tstamp->overflow_work); - kfree(tstamp->ptp_info.pin_config); + cancel_work_sync(&clock->pps_info.out_work); + cancel_delayed_work_sync(&clock->overflow_work); + kfree(clock->ptp_info.pin_config); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h new file mode 100644 index 000000000000..a8eecedd46c2 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __LIB_CLOCK_H__ +#define __LIB_CLOCK_H__ + +void mlx5_init_clock(struct mlx5_core_dev *mdev); +void mlx5_cleanup_clock(struct mlx5_core_dev *mdev); + +static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock, + u64 timestamp) +{ + u64 nsec; + + read_lock(&clock->lock); + nsec = timecounter_cyc2time(&clock->tc, timestamp); + read_unlock(&clock->lock); + + return ns_to_ktime(nsec); +} + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 0d2c8dcd6eae..ecbe9fad22d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -59,6 +59,7 @@ #include "lib/mlx5.h" #include "fpga/core.h" #include "accel/ipsec.h" +#include "lib/clock.h" MODULE_AUTHOR("Eli Cohen "); MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver"); @@ -889,6 +890,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) mlx5_init_reserved_gids(dev); + mlx5_init_clock(dev); + err = mlx5_init_rl_table(dev); if (err) { dev_err(&pdev->dev, "Failed to init rate limiting\n"); @@ -949,6 +952,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev) mlx5_eswitch_cleanup(dev->priv.eswitch); mlx5_mpfs_cleanup(dev); mlx5_cleanup_rl_table(dev); + mlx5_cleanup_clock(dev); mlx5_cleanup_reserved_gids(dev); mlx5_cleanup_mkey_table(dev); mlx5_cleanup_srq_table(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index b7c2900b75f9..8f00de2fe283 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -93,6 +93,7 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, unsigned long param); void mlx5_core_page_fault(struct mlx5_core_dev *dev, struct mlx5_pagefault *pfault); +void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force); void mlx5_disable_device(struct mlx5_core_dev *dev); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 401c8972cc3a..08c77b7e59cb 100644 --- 
a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -49,6 +49,8 @@ #include #include #include +#include +#include enum { MLX5_BOARD_ID_LEN = 64, @@ -760,6 +762,27 @@ struct mlx5_rsvd_gids { struct ida ida; }; +#define MAX_PIN_NUM 8 +struct mlx5_pps { + u8 pin_caps[MAX_PIN_NUM]; + struct work_struct out_work; + u64 start[MAX_PIN_NUM]; + u8 enabled; +}; + +struct mlx5_clock { + rwlock_t lock; + struct cyclecounter cycles; + struct timecounter tc; + struct hwtstamp_config hwtstamp_config; + u32 nominal_c_mult; + unsigned long overflow_period; + struct delayed_work overflow_work; + struct ptp_clock *ptp; + struct ptp_clock_info ptp_info; + struct mlx5_pps pps_info; +}; + struct mlx5_core_dev { struct pci_dev *pdev; /* sync pci state */ @@ -800,6 +823,7 @@ struct mlx5_core_dev { #ifdef CONFIG_RFS_ACCEL struct cpu_rmap *rmap; #endif + struct mlx5_clock clock; }; struct mlx5_db { -- cgit v1.2.3 From c8249eda7fac00b55eca17ab05207be291d91a3f Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Tue, 12 Sep 2017 14:11:29 +0300 Subject: net/mlx5e: IPoIB, Move underlay QP init/uninit to separate functions During the creation of the underlay QP the PKEY index is unknown; it becomes known only when ndo_open is called. The PKEY index is attached to the QP during state modification. Splitting the functions also makes the code symmetric and more readable. This split is also required for the upcoming PKEY support, where these functions will be called with the PKEY index during ndo_open. Signed-off-by: Alex Vesker Reviewed-by: Erez Shitrit Signed-off-by: Leon Romanovsky --- .../net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 108 +++++++++++++-------- 1 file changed, 70 insertions(+), 38 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 14dfb577691b..feb94db6b921 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -108,11 +108,68 @@ static void mlx5i_cleanup(struct mlx5e_priv *priv) /* Do nothing ..
*/ } +static int mlx5i_init_underlay_qp(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5i_priv *ipriv = priv->ppriv; + struct mlx5_core_qp *qp = &ipriv->qp; + struct mlx5_qp_context *context; + int ret; + + /* QP states */ + context = kzalloc(sizeof(*context), GFP_KERNEL); + if (!context) + return -ENOMEM; + + context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11); + context->pri_path.port = 1; + context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY); + + ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp); + if (ret) { + mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret); + goto err_qp_modify_to_err; + } + memset(context, 0, sizeof(*context)); + + ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp); + if (ret) { + mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret); + goto err_qp_modify_to_err; + } + + ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp); + if (ret) { + mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret); + goto err_qp_modify_to_err; + } + + kfree(context); + return 0; + +err_qp_modify_to_err: + mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, &context, qp); + kfree(context); + return ret; +} + +static void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv) +{ + struct mlx5i_priv *ipriv = priv->ppriv; + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5_qp_context context; + int err; + + err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context, + &ipriv->qp); + if (err) + mlx5_core_err(mdev, "Failed to modify qp 2RST, err: %d\n", err); +} + #define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2 static int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) { - struct mlx5_qp_context *context = NULL; u32 *in = NULL; void *addr_path; int ret = 0; @@ -140,38 +197,7 @@ static int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core goto out; } - /* QP states */ - context = kzalloc(sizeof(*context), GFP_KERNEL); - if (!context) { - ret = -ENOMEM; - goto out; - } - - context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11); - context->pri_path.port = 1; - context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY); - - ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp); - if (ret) { - mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret); - goto out; - } - memset(context, 0, sizeof(*context)); - - ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp); - if (ret) { - mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret); - goto out; - } - - ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp); - if (ret) { - mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret); - goto out; - } - out: - kfree(context); kvfree(in); return ret; } @@ -192,13 +218,23 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv) return err; } + err = mlx5i_init_underlay_qp(priv); + if (err) { + mlx5_core_warn(priv->mdev, "intilize underlay QP failed, %d\n", err); + goto err_destroy_underlay_qp; + } + err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]); if (err) { mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err); - return err; + goto err_destroy_underlay_qp; } return 0; + +err_destroy_underlay_qp: + mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp); + return err; } static void mlx5i_cleanup_tx(struct mlx5e_priv *priv) @@ -381,12 +417,8 @@ static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int 
cmd) static void mlx5i_dev_cleanup(struct net_device *dev) { struct mlx5e_priv *priv = mlx5i_epriv(dev); - struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5i_priv *ipriv = priv->ppriv; - struct mlx5_qp_context context; - /* detach qp from flow-steering by reset it */ - mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context, &ipriv->qp); + mlx5i_uninit_underlay_qp(priv); } static int mlx5i_open(struct net_device *netdev) -- cgit v1.2.3 From dae37456c8ac3afe8d5f306717f2b75ed5ca38d9 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Wed, 13 Sep 2017 11:37:02 +0300 Subject: net/mlx5: Support for attaching multiple underlay QPs to root flow table Previous support allowed connecting only a single QPN to the FT. Now, using a linked list, multiple QPNs can be attached to the same FT. Attaching multiple underlay QPs is required for PKEY support, in which child and parent share the same FT. The actual attach/detach FW commands are issued symmetrically inside these functions. This requires a change in the IPoIB open and close functions: attaching to/detaching from the FT is now done on each open/close. Signed-off-by: Alex Vesker Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 13 ++- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 4 +- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 123 ++++++++++++++++++--- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 7 +- .../net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 78 +++++++------ 5 files changed, 171 insertions(+), 54 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 36ecc2b2e187..881e2e55840c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -40,7 +40,8 @@ #include "eswitch.h" int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, u32 underlay_qpn) + struct mlx5_flow_table *ft, u32 underlay_qpn, + bool disconnect) { u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0}; u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0}; @@ -52,7 +53,15 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, MLX5_SET(set_flow_table_root_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); MLX5_SET(set_flow_table_root_in, in, table_type, ft->type); - MLX5_SET(set_flow_table_root_in, in, table_id, ft->id); + + if (disconnect) { + MLX5_SET(set_flow_table_root_in, in, op_mod, 1); + MLX5_SET(set_flow_table_root_in, in, table_id, 0); + } else { + MLX5_SET(set_flow_table_root_in, in, op_mod, 0); + MLX5_SET(set_flow_table_root_in, in, table_id, ft->id); + } + MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn); if (ft->vport) { MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index c6d7bdf255b6..71e2d0f37ad9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -71,8 +71,8 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, unsigned int index); int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, - struct mlx5_flow_table *ft, - u32 underlay_qpn); + struct mlx5_flow_table *ft, u32 underlay_qpn, + bool disconnect); int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id); int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32
id); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 5a7bea688ec8..8a1a7ba9fe53 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -693,8 +693,10 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio *prio) { struct mlx5_flow_root_namespace *root = find_root(&prio->node); + struct mlx5_ft_underlay_qp *uqp; int min_level = INT_MAX; int err; + u32 qpn; if (root->root_ft) min_level = root->root_ft->level; @@ -702,10 +704,24 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio if (ft->level >= min_level) return 0; - err = mlx5_cmd_update_root_ft(root->dev, ft, root->underlay_qpn); + if (list_empty(&root->underlay_qpns)) { + /* Don't set any QPN (zero) in case QPN list is empty */ + qpn = 0; + err = mlx5_cmd_update_root_ft(root->dev, ft, qpn, false); + } else { + list_for_each_entry(uqp, &root->underlay_qpns, list) { + qpn = uqp->qpn; + err = mlx5_cmd_update_root_ft(root->dev, ft, qpn, + false); + if (err) + break; + } + } + if (err) - mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", - ft->id); + mlx5_core_warn(root->dev, + "Update root flow table of id(%u) qpn(%d) failed\n", + ft->id, qpn); else root->root_ft = ft; @@ -1661,23 +1677,43 @@ static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft) static int update_root_ft_destroy(struct mlx5_flow_table *ft) { struct mlx5_flow_root_namespace *root = find_root(&ft->node); + struct mlx5_ft_underlay_qp *uqp; struct mlx5_flow_table *new_root_ft = NULL; + int err = 0; + u32 qpn; if (root->root_ft != ft) return 0; new_root_ft = find_next_ft(ft); - if (new_root_ft) { - int err = mlx5_cmd_update_root_ft(root->dev, new_root_ft, - root->underlay_qpn); - if (err) { - mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n", - ft->id); - return err; + if (!new_root_ft) { + root->root_ft = NULL; + return 0; + } + + if (list_empty(&root->underlay_qpns)) { + /* Don't set any QPN (zero) in case QPN list is empty */ + qpn = 0; + err = mlx5_cmd_update_root_ft(root->dev, new_root_ft, qpn, + false); + } else { + list_for_each_entry(uqp, &root->underlay_qpns, list) { + qpn = uqp->qpn; + err = mlx5_cmd_update_root_ft(root->dev, new_root_ft, + qpn, false); + if (err) + break; } } - root->root_ft = new_root_ft; + + if (err) + mlx5_core_warn(root->dev, + "Update root flow table of id(%u) qpn(%d) failed\n", + ft->id, qpn); + else + root->root_ft = new_root_ft; + return 0; } @@ -1965,6 +2001,8 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering root_ns->dev = steering->dev; root_ns->table_type = table_type; + INIT_LIST_HEAD(&root_ns->underlay_qpns); + ns = &root_ns->ns; fs_init_namespace(ns); mutex_init(&root_ns->chain_lock); @@ -2245,17 +2283,76 @@ err: int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn) { struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns; + struct mlx5_ft_underlay_qp *new_uqp; + int err = 0; + + new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL); + if (!new_uqp) + return -ENOMEM; + + mutex_lock(&root->chain_lock); + + if (!root->root_ft) { + err = -EINVAL; + goto update_ft_fail; + } + + err = mlx5_cmd_update_root_ft(dev, root->root_ft, underlay_qpn, false); + if (err) { + mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n", + underlay_qpn, err); + goto update_ft_fail; + } + + new_uqp->qpn = underlay_qpn; + 
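+ /* Keep the QPN on the root namespace list so that any later
+ * root flow table switch re-applies it (see update_root_ft_create
+ * and update_root_ft_destroy, which walk underlay_qpns).
+ */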
list_add_tail(&new_uqp->list, &root->underlay_qpns); + + mutex_unlock(&root->chain_lock); - root->underlay_qpn = underlay_qpn; return 0; + +update_ft_fail: + mutex_unlock(&root->chain_lock); + kfree(new_uqp); + return err; } EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn); int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn) { struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns; + struct mlx5_ft_underlay_qp *uqp; + bool found = false; + int err = 0; + + mutex_lock(&root->chain_lock); + list_for_each_entry(uqp, &root->underlay_qpns, list) { + if (uqp->qpn == underlay_qpn) { + found = true; + break; + } + } + + if (!found) { + mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n", + underlay_qpn); + err = -EINVAL; + goto out; + } + + err = mlx5_cmd_update_root_ft(dev, root->root_ft, underlay_qpn, true); + if (err) + mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n", + underlay_qpn, err); + + list_del(&uqp->list); + mutex_unlock(&root->chain_lock); + kfree(uqp); - root->underlay_qpn = 0; return 0; + +out: + mutex_unlock(&root->chain_lock); + return err; } EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 48dd78975062..9bc048a89bc0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -147,6 +147,11 @@ struct mlx5_fc { struct mlx5_fc_cache cache ____cacheline_aligned_in_smp; }; +struct mlx5_ft_underlay_qp { + struct list_head list; + u32 qpn; +}; + #define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_600 /* Calculate the fte_match_param length and without the reserved length. * Make sure the reserved field is the last. 
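For reference, the update pattern that both update_root_ft_create() and update_root_ft_destroy() adopt in this patch reduces to the following loop over the new underlay_qpns list (a condensed sketch of the hunks above, with the warning and root_ft bookkeeping trimmed):

	struct mlx5_ft_underlay_qp *uqp;
	int err = 0;

	if (list_empty(&root->underlay_qpns)) {
		/* Empty list: program the root FT with QPN 0 */
		err = mlx5_cmd_update_root_ft(root->dev, ft, 0, false);
	} else {
		/* Re-issue the set-root command once per attached underlay QPN */
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			err = mlx5_cmd_update_root_ft(root->dev, ft, uqp->qpn, false);
			if (err)
				break;
		}
	}
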
@@ -212,7 +217,7 @@ struct mlx5_flow_root_namespace { struct mlx5_flow_table *root_ft; /* Should be held when chaining flow tables */ struct mutex chain_lock; - u32 underlay_qpn; + struct list_head underlay_qpns; }; int mlx5_init_fc_stats(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index feb94db6b921..00f0e6a038bb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -218,12 +218,6 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv) return err; } - err = mlx5i_init_underlay_qp(priv); - if (err) { - mlx5_core_warn(priv->mdev, "intilize underlay QP failed, %d\n", err); - goto err_destroy_underlay_qp; - } - err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]); if (err) { mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err); @@ -285,7 +279,6 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv) static int mlx5i_init_rx(struct mlx5e_priv *priv) { - struct mlx5i_priv *ipriv = priv->ppriv; int err; err = mlx5e_create_indirect_rqt(priv); @@ -304,18 +297,12 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) if (err) goto err_destroy_indirect_tirs; - err = mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn); - if (err) - goto err_destroy_direct_tirs; - err = mlx5i_create_flow_steering(priv); if (err) - goto err_remove_rx_underlay_qpn; + goto err_destroy_direct_tirs; return 0; -err_remove_rx_underlay_qpn: - mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn); err_destroy_direct_tirs: mlx5e_destroy_direct_tirs(priv); err_destroy_indirect_tirs: @@ -329,9 +316,6 @@ err_destroy_indirect_rqts: static void mlx5i_cleanup_rx(struct mlx5e_priv *priv) { - struct mlx5i_priv *ipriv = priv->ppriv; - - mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn); mlx5i_destroy_flow_steering(priv); mlx5e_destroy_direct_tirs(priv); mlx5e_destroy_indirect_tirs(priv); @@ -423,49 +407,71 @@ static void mlx5i_dev_cleanup(struct net_device *dev) static int mlx5i_open(struct net_device *netdev) { - struct mlx5e_priv *priv = mlx5i_epriv(netdev); + struct mlx5e_priv *epriv = mlx5i_epriv(netdev); + struct mlx5i_priv *ipriv = epriv->ppriv; + struct mlx5_core_dev *mdev = epriv->mdev; int err; - mutex_lock(&priv->state_lock); + mutex_lock(&epriv->state_lock); - set_bit(MLX5E_STATE_OPENED, &priv->state); + set_bit(MLX5E_STATE_OPENED, &epriv->state); - err = mlx5e_open_channels(priv, &priv->channels); - if (err) + err = mlx5i_init_underlay_qp(epriv); + if (err) { + mlx5_core_warn(mdev, "prepare underlay qp state failed, %d\n", err); goto err_clear_state_opened_flag; + } - mlx5e_refresh_tirs(priv, false); - mlx5e_activate_priv_channels(priv); - mlx5e_timestamp_set(priv); + err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn); + if (err) { + mlx5_core_warn(mdev, "attach underlay qp to ft failed, %d\n", err); + goto err_reset_qp; + } - mutex_unlock(&priv->state_lock); + err = mlx5e_open_channels(epriv, &epriv->channels); + if (err) + goto err_remove_fs_underlay_qp; + + mlx5e_refresh_tirs(epriv, false); + mlx5e_activate_priv_channels(epriv); + mlx5e_timestamp_set(epriv); + + mutex_unlock(&epriv->state_lock); return 0; +err_remove_fs_underlay_qp: + mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn); +err_reset_qp: + mlx5i_uninit_underlay_qp(epriv); err_clear_state_opened_flag: - clear_bit(MLX5E_STATE_OPENED, &priv->state); - mutex_unlock(&priv->state_lock); + clear_bit(MLX5E_STATE_OPENED, 
&epriv->state); + mutex_unlock(&epriv->state_lock); return err; } static int mlx5i_close(struct net_device *netdev) { - struct mlx5e_priv *priv = mlx5i_epriv(netdev); + struct mlx5e_priv *epriv = mlx5i_epriv(netdev); + struct mlx5i_priv *ipriv = epriv->ppriv; + struct mlx5_core_dev *mdev = epriv->mdev; /* May already be CLOSED in case a previous configuration operation * (e.g RX/TX queue size change) that involves close&open failed. */ - mutex_lock(&priv->state_lock); + mutex_lock(&epriv->state_lock); - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + if (!test_bit(MLX5E_STATE_OPENED, &epriv->state)) goto unlock; - clear_bit(MLX5E_STATE_OPENED, &priv->state); + clear_bit(MLX5E_STATE_OPENED, &epriv->state); - netif_carrier_off(priv->netdev); - mlx5e_deactivate_priv_channels(priv); - mlx5e_close_channels(&priv->channels); + netif_carrier_off(epriv->netdev); + mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn); + mlx5i_uninit_underlay_qp(epriv); + mlx5e_deactivate_priv_channels(epriv); + mlx5e_close_channels(&epriv->channels); unlock: - mutex_unlock(&priv->state_lock); + mutex_unlock(&epriv->state_lock); return 0; } -- cgit v1.2.3 From da34f1a85b78c2220dac1ce4f4c4595dd0cab5a9 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Wed, 13 Sep 2017 12:17:50 +0300 Subject: net/mlx5e: IPoIB, Support for setting PKEY index to underlay QP Added a function to set the PKEY index in the IPoIB device driver using the already present set_id function. The PKEY index is attached to the QP during state modification. Signed-off-by: Alex Vesker Reviewed-by: Erez Shitrit --- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 9 +++++++++ drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 1 + 2 files changed, 10 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 00f0e6a038bb..679c1f9af642 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -123,6 +123,7 @@ static int mlx5i_init_underlay_qp(struct mlx5e_priv *priv) context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11); context->pri_path.port = 1; + context->pri_path.pkey_index = cpu_to_be16(ipriv->pkey_index); context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY); ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp); @@ -529,6 +530,13 @@ static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb, return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey); } +static void mlx5i_set_pkey_index(struct net_device *netdev, int id) +{ + struct mlx5i_priv *ipriv = netdev_priv(netdev); + + ipriv->pkey_index = (u16)id; +} + static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev) { if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_IB) @@ -593,6 +601,7 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, rn->send = mlx5i_xmit; rn->attach_mcast = mlx5i_attach_mcast; rn->detach_mcast = mlx5i_detach_mcast; + rn->set_id = mlx5i_set_pkey_index; return netdev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h index a0f405f520f7..9a729883c3b3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h @@ -50,6 +50,7 @@ struct mlx5i_priv { struct rdma_netdev rn; /* keep this first */ struct mlx5_core_qp qp; u32 qkey; + u16 pkey_index; char *mlx5e_priv[0]; }; -- cgit v1.2.3 From 
7e7f4780c3402bd181eea82ca6395013623e4fbf Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Thu, 14 Sep 2017 10:27:25 +0300 Subject: net/mlx5e: IPoIB, Use hash-table to map between QPN and child netdev This change is needed for PKEY support, since the RQs are shared between the child interface and the parent. The parent is responsible for NAPI and the processing of RX completions. Using the dqpn in the completion descriptor, we set the corresponding child IPoIB netdevice on the SKB. The mapping between the dqpn and the netdevice is done using a hash table (HT); each mlx5 IPoIB interface registers its mapping on creation. Signed-off-by: Alex Vesker Reviewed-by: Erez Shitrit --- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 20 ++- .../net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 16 +++ .../net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 12 ++ .../ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 136 +++++++++++++++++++++ 5 files changed, 184 insertions(+), 2 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index d9621b2152d3..100fe4ecad9b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -22,7 +22,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o en_rep.o en_tc. mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o -mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o +mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib_vlan.o mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ en_accel/ipsec_stats.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 7e3bfe62ef6e..2c3f2e9b6983 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1163,11 +1163,25 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, u32 cqe_bcnt, struct sk_buff *skb) { - struct net_device *netdev = rq->netdev; + struct net_device *netdev; char *pseudo_header; + u32 qpn; u8 *dgid; u8 g; + qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff; + netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn); + + /* No mapping present, cannot process SKB. 
This might happen if a child + * interface is going down while having unprocessed CQEs on parent RQ + */ + if (unlikely(!netdev)) { + /* TODO: add drop counters support */ + skb->dev = NULL; + pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn); + return; + } + g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET; if ((!g) || dgid[0] != 0xff) @@ -1230,6 +1244,10 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) goto wq_free_wqe; mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); + if (unlikely(!skb->dev)) { + dev_kfree_skb_any(skb); + goto wq_free_wqe; + } napi_gro_receive(rq->cq.napi, skb); wq_free_wqe: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 679c1f9af642..c479fe54a6ca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -382,6 +382,9 @@ static int mlx5i_dev_init(struct net_device *dev) dev->dev_addr[2] = (ipriv->qp.qpn >> 8) & 0xff; dev->dev_addr[3] = (ipriv->qp.qpn) & 0xff; + /* Add QPN to net-device mapping to HT */ + mlx5i_pkey_add_qpn(dev, ipriv->qp.qpn); + return 0; } @@ -402,8 +405,12 @@ static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } } static void mlx5i_dev_cleanup(struct net_device *dev) { struct mlx5e_priv *priv = mlx5i_epriv(dev); + struct mlx5i_priv *ipriv = priv->ppriv; mlx5i_uninit_underlay_qp(priv); + + /* Delete QPN to net-device mapping from HT */ + mlx5i_pkey_del_qpn(dev, ipriv->qp.qpn); } static int mlx5i_open(struct net_device *netdev) @@ -590,6 +597,12 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, if (!epriv->wq) goto err_free_netdev; + err = mlx5i_pkey_qpn_ht_init(netdev); + if (err) { + mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n"); + goto destroy_wq; + } + profile->init(mdev, netdev, profile, ipriv); mlx5e_attach_netdev(epriv); @@ -605,6 +618,8 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, return netdev; +destroy_wq: + destroy_workqueue(epriv->wq); err_free_netdev: free_netdev(netdev); free_mdev_resources: @@ -623,6 +638,7 @@ void mlx5_rdma_netdev_free(struct net_device *netdev) mlx5e_detach_netdev(priv); profile->cleanup(priv); destroy_workqueue(priv->wq); + mlx5i_pkey_qpn_ht_cleanup(netdev); free_netdev(netdev); mlx5e_destroy_mdev_resources(mdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h index 9a729883c3b3..e313f6d90729 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h @@ -51,9 +51,21 @@ struct mlx5i_priv { struct mlx5_core_qp qp; u32 qkey; u16 pkey_index; + struct mlx5i_pkey_qpn_ht *qpn_htbl; char *mlx5e_priv[0]; }; +/* Allocate/Free underlay QPN to net-device hash table */ +int mlx5i_pkey_qpn_ht_init(struct net_device *netdev); +void mlx5i_pkey_qpn_ht_cleanup(struct net_device *netdev); + +/* Add/Remove an underlay QPN to net-device mapping to/from the hash table */ +int mlx5i_pkey_add_qpn(struct net_device *netdev, u32 qpn); +int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn); + +/* Get the net-device corresponding to the given underlay QPN */ +struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn); + /* Extract mlx5e_priv from IPoIB netdev */ #define mlx5i_epriv(netdev) ((void *)(((struct mlx5i_priv *)netdev_priv(netdev))->mlx5e_priv)) diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c new file mode 100644 index 000000000000..e4d39aa1f552 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2017, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include "ipoib.h" + +#define MLX5I_MAX_LOG_PKEY_SUP 7 + +struct qpn_to_netdev { + struct net_device *netdev; + struct hlist_node hlist; + u32 underlay_qpn; +}; + +struct mlx5i_pkey_qpn_ht { + struct hlist_head buckets[1 << MLX5I_MAX_LOG_PKEY_SUP]; + spinlock_t ht_lock; /* Synchronise with NAPI */ +}; + +int mlx5i_pkey_qpn_ht_init(struct net_device *netdev) +{ + struct mlx5i_priv *ipriv = netdev_priv(netdev); + struct mlx5i_pkey_qpn_ht *qpn_htbl; + + qpn_htbl = kzalloc(sizeof(*qpn_htbl), GFP_KERNEL); + if (!qpn_htbl) + return -ENOMEM; + + ipriv->qpn_htbl = qpn_htbl; + spin_lock_init(&qpn_htbl->ht_lock); + + return 0; +} + +void mlx5i_pkey_qpn_ht_cleanup(struct net_device *netdev) +{ + struct mlx5i_priv *ipriv = netdev_priv(netdev); + + kfree(ipriv->qpn_htbl); +} + +static struct qpn_to_netdev *mlx5i_find_qpn_to_netdev_node(struct hlist_head *buckets, + u32 qpn) +{ + struct hlist_head *h = &buckets[hash_32(qpn, MLX5I_MAX_LOG_PKEY_SUP)]; + struct qpn_to_netdev *node; + + hlist_for_each_entry(node, h, hlist) { + if (node->underlay_qpn == qpn) + return node; + } + + return NULL; +} + +int mlx5i_pkey_add_qpn(struct net_device *netdev, u32 qpn) +{ + struct mlx5i_priv *ipriv = netdev_priv(netdev); + struct mlx5i_pkey_qpn_ht *ht = ipriv->qpn_htbl; + u8 key = hash_32(qpn, MLX5I_MAX_LOG_PKEY_SUP); + struct qpn_to_netdev *new_node; + + new_node = kzalloc(sizeof(*new_node), GFP_KERNEL); + if (!new_node) + return -ENOMEM; + + new_node->netdev = netdev; + new_node->underlay_qpn = qpn; + spin_lock_bh(&ht->ht_lock); + hlist_add_head(&new_node->hlist, &ht->buckets[key]); + spin_unlock_bh(&ht->ht_lock); + + return 0; +} + +int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn) +{ + struct mlx5e_priv *epriv = mlx5i_epriv(netdev); + struct mlx5i_priv *ipriv = epriv->ppriv; + struct mlx5i_pkey_qpn_ht *ht = ipriv->qpn_htbl; + struct qpn_to_netdev 
*node; + + node = mlx5i_find_qpn_to_netdev_node(ht->buckets, qpn); + if (!node) { + mlx5_core_warn(epriv->mdev, "QPN to netdev delete from HT failed\n"); + return -EINVAL; + } + + spin_lock_bh(&ht->ht_lock); + hlist_del_init(&node->hlist); + spin_unlock_bh(&ht->ht_lock); + kfree(node); + + return 0; +} + +struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn) +{ + struct mlx5i_priv *ipriv = netdev_priv(netdev); + struct qpn_to_netdev *node; + + node = mlx5i_find_qpn_to_netdev_node(ipriv->qpn_htbl->buckets, qpn); + if (!node) + return NULL; + + return node->netdev; +} -- cgit v1.2.3 From 4c6c615e3f308aee26277abebc7d4ffcd9a6abe2 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Thu, 14 Sep 2017 14:08:39 +0300 Subject: net/mlx5e: IPoIB, Add PKEY child interface nic profile The child interface profile will be called to support child-interface-specific behaviour. The child code is lean compared to the parent since the RX channels are shared between the interfaces. Creating a separate profile for child and parent makes for smoother code with better ability for future expansion. The profile struct is exposed to the parent using a getter function. Signed-off-by: Alex Vesker Reviewed-by: Erez Shitrit --- .../net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 12 ++-- .../net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 13 ++++ .../ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 83 ++++++++++++++++++++++ 3 files changed, 102 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index c479fe54a6ca..196771cc599e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -70,10 +70,10 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev, } /* Called directly after IPoIB netdevice was created to initialize SW structs */ -static void mlx5i_init(struct mlx5_core_dev *mdev, - struct net_device *netdev, - const struct mlx5e_profile *profile, - void *ppriv) +void mlx5i_init(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, + void *ppriv) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); @@ -169,7 +169,7 @@ static void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv) #define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2 -static int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) +int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) { u32 *in = NULL; void *addr_path; @@ -203,7 +203,7 @@ out: return ret; } -static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) +void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp) { mlx5_core_destroy_qp(mdev, qp); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h index e313f6d90729..c9895f7a2358 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h @@ -55,6 +55,10 @@ struct mlx5i_priv { char *mlx5e_priv[0]; }; +/* Underlay QP create/destroy functions */ +int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp); +void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp); + /* Allocate/Free underlay QPN to net-device hash table */ int mlx5i_pkey_qpn_ht_init(struct net_device *netdev); void 
mlx5i_pkey_qpn_ht_cleanup(struct net_device *netdev); @@ -66,6 +70,15 @@ int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn); /* Get the net-device corresponding to the given underlay QPN */ struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn); +/* Parent profile functions */ +void mlx5i_init(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, + void *ppriv); + +/* Get child interface nic profile */ +const struct mlx5e_profile *mlx5i_pkey_get_profile(void); + /* Extract mlx5e_priv from IPoIB netdev */ #define mlx5i_epriv(netdev) ((void *)(((struct mlx5i_priv *)netdev_priv(netdev))->mlx5e_priv)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index e4d39aa1f552..17c508d98dbb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -134,3 +134,86 @@ struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn) return node->netdev; } + +/* Called directly after IPoIB netdevice was created to initialize SW structs */ +static void mlx5i_pkey_init(struct mlx5_core_dev *mdev, + struct net_device *netdev, + const struct mlx5e_profile *profile, + void *ppriv) +{ + struct mlx5e_priv *priv = mlx5i_epriv(netdev); + + mlx5i_init(mdev, netdev, profile, ppriv); + + /* Override parent ndo */ + netdev->netdev_ops = NULL; + + /* Currently no ethtool support */ + netdev->ethtool_ops = NULL; + + /* Use dummy rqs */ + priv->channels.params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; +} + +/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */ +static void mlx5i_pkey_cleanup(struct mlx5e_priv *priv) +{ + /* Do nothing .. 
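mlx5i_pkey_init() allocates nothing, so there is nothing to release here 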
*/ +} + +static int mlx5i_pkey_init_tx(struct mlx5e_priv *priv) +{ + struct mlx5i_priv *ipriv = priv->ppriv; + int err; + + err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp); + if (err) { + mlx5_core_warn(priv->mdev, "create child underlay QP failed, %d\n", err); + return err; + } + + return 0; +} + +static void mlx5i_pkey_cleanup_tx(struct mlx5e_priv *priv) +{ + struct mlx5i_priv *ipriv = priv->ppriv; + + mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp); +} + +static int mlx5i_pkey_init_rx(struct mlx5e_priv *priv) +{ + /* Since the rx resources are shared between child and parent, the + * parent interface is taking care of rx resource allocation and init + */ + return 0; +} + +static void mlx5i_pkey_cleanup_rx(struct mlx5e_priv *priv) +{ + /* Since the rx resources are shared between child and parent, the + * parent interface is taking care of rx resource free and de-init + */ +} + +static const struct mlx5e_profile mlx5i_pkey_nic_profile = { + .init = mlx5i_pkey_init, + .cleanup = mlx5i_pkey_cleanup, + .init_tx = mlx5i_pkey_init_tx, + .cleanup_tx = mlx5i_pkey_cleanup_tx, + .init_rx = mlx5i_pkey_init_rx, + .cleanup_rx = mlx5i_pkey_cleanup_rx, + .enable = NULL, + .disable = NULL, + .update_stats = NULL, + .max_nch = mlx5e_get_max_num_channels, + .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe, + .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */ + .max_tc = MLX5I_MAX_NUM_TC, +}; + +const struct mlx5e_profile *mlx5i_pkey_get_profile(void) +{ + return &mlx5i_pkey_nic_profile; +} -- cgit v1.2.3 From af98cebcb3e66d349173c33c0aaef352d108a081 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Thu, 14 Sep 2017 16:33:35 +0300 Subject: net/mlx5e: IPoIB, Add PKEY child interface ndos Child interface ndos will be called to support child interface specific behaviour.

ndo_init flow:
- Acquire shared QPN to net-device HT from parent
- Continue with the same flow as parent interface

ndo_open flow:
- Initialize child underlay QP and connect to shared FT
- Create child send TIS
- Open child send channels

Signed-off-by: Alex Vesker Reviewed-by: Erez Shitrit --- .../net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 10 +- .../net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 8 ++ .../ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 133 ++++++++++++++++++++- 3 files changed, 144 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 196771cc599e..70706eb70d3e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -40,8 +40,6 @@ static int mlx5i_open(struct net_device *netdev); static int mlx5i_close(struct net_device *netdev); -static int mlx5i_dev_init(struct net_device *dev); -static void mlx5i_dev_cleanup(struct net_device *dev); static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu); static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); @@ -108,7 +106,7 @@ static void mlx5i_cleanup(struct mlx5e_priv *priv) /* Do nothing .. 
*/ } -static int mlx5i_init_underlay_qp(struct mlx5e_priv *priv) +int mlx5i_init_underlay_qp(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; struct mlx5i_priv *ipriv = priv->ppriv; @@ -154,7 +152,7 @@ err_qp_modify_to_err: return ret; } -static void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv) +void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv) { struct mlx5i_priv *ipriv = priv->ppriv; struct mlx5_core_dev *mdev = priv->mdev; @@ -372,7 +370,7 @@ out: return err; } -static int mlx5i_dev_init(struct net_device *dev) +int mlx5i_dev_init(struct net_device *dev) { struct mlx5e_priv *priv = mlx5i_epriv(dev); struct mlx5i_priv *ipriv = priv->ppriv; @@ -402,7 +400,7 @@ static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } } -static void mlx5i_dev_cleanup(struct net_device *dev) +void mlx5i_dev_cleanup(struct net_device *dev) { struct mlx5e_priv *priv = mlx5i_epriv(dev); struct mlx5i_priv *ipriv = priv->ppriv; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h index c9895f7a2358..80c0cfee7164 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h @@ -59,6 +59,10 @@ struct mlx5i_priv { int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp); void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp); +/* Underlay QP state modification init/uninit functions */ +int mlx5i_init_underlay_qp(struct mlx5e_priv *priv); +void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv); + /* Allocate/Free underlay QPN to net-device hash table */ int mlx5i_pkey_qpn_ht_init(struct net_device *netdev); void mlx5i_pkey_qpn_ht_cleanup(struct net_device *netdev); @@ -70,6 +74,10 @@ int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn); /* Get the net-device corresponding to the given underlay QPN */ struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn); +/* Shared ndo functions */ +int mlx5i_dev_init(struct net_device *dev); +void mlx5i_dev_cleanup(struct net_device *dev); + /* Parent profile functions */ void mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index 17c508d98dbb..d99bec6855de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -135,6 +135,137 @@ struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn) return node->netdev; } +static int mlx5i_pkey_open(struct net_device *netdev); +static int mlx5i_pkey_close(struct net_device *netdev); +static int mlx5i_pkey_dev_init(struct net_device *dev); +static void mlx5i_pkey_dev_cleanup(struct net_device *netdev); +static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu); + +static const struct net_device_ops mlx5i_pkey_netdev_ops = { + .ndo_open = mlx5i_pkey_open, + .ndo_stop = mlx5i_pkey_close, + .ndo_init = mlx5i_pkey_dev_init, + .ndo_uninit = mlx5i_pkey_dev_cleanup, + .ndo_change_mtu = mlx5i_pkey_change_mtu, +}; + +/* Child NDOs */ +static int mlx5i_pkey_dev_init(struct net_device *dev) +{ + struct mlx5e_priv *priv = mlx5i_epriv(dev); + struct mlx5i_priv *ipriv, *parent_ipriv; + struct net_device *parent_dev; + int parent_ifindex; + + ipriv = priv->ppriv; + + /* Get QPN to netdevice hash table from parent */ + parent_ifindex = 
dev->netdev_ops->ndo_get_iflink(dev); + parent_dev = dev_get_by_index(dev_net(dev), parent_ifindex); + if (!parent_dev) { + mlx5_core_warn(priv->mdev, "failed to get parent device\n"); + return -EINVAL; + } + + parent_ipriv = netdev_priv(parent_dev); + ipriv->qpn_htbl = parent_ipriv->qpn_htbl; + dev_put(parent_dev); + + return mlx5i_dev_init(dev); +} + +static void mlx5i_pkey_dev_cleanup(struct net_device *netdev) +{ + return mlx5i_dev_cleanup(netdev); +} + +static int mlx5i_pkey_open(struct net_device *netdev) +{ + struct mlx5e_priv *epriv = mlx5i_epriv(netdev); + struct mlx5i_priv *ipriv = epriv->ppriv; + struct mlx5_core_dev *mdev = epriv->mdev; + int err; + + mutex_lock(&epriv->state_lock); + + set_bit(MLX5E_STATE_OPENED, &epriv->state); + + err = mlx5i_init_underlay_qp(epriv); + if (err) { + mlx5_core_warn(mdev, "prepare child underlay qp state failed, %d\n", err); + goto err_release_lock; + } + + err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn); + if (err) { + mlx5_core_warn(mdev, "attach child underlay qp to ft failed, %d\n", err); + goto err_uninit_underlay_qp; + } + + err = mlx5e_create_tis(mdev, 0 /* tc */, ipriv->qp.qpn, &epriv->tisn[0]); + if (err) { + mlx5_core_warn(mdev, "create child tis failed, %d\n", err); + goto err_remove_rx_underlay_qp; + } + + err = mlx5e_open_channels(epriv, &epriv->channels); + if (err) { + mlx5_core_warn(mdev, "opening child channels failed, %d\n", err); + goto err_clear_state_opened_flag; + } + mlx5e_refresh_tirs(epriv, false); + mlx5e_activate_priv_channels(epriv); + mutex_unlock(&epriv->state_lock); + + return 0; + +err_clear_state_opened_flag: + mlx5e_destroy_tis(mdev, epriv->tisn[0]); +err_remove_rx_underlay_qp: + mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn); +err_uninit_underlay_qp: + mlx5i_uninit_underlay_qp(epriv); +err_release_lock: + clear_bit(MLX5E_STATE_OPENED, &epriv->state); + mutex_unlock(&epriv->state_lock); + return err; +} + +static int mlx5i_pkey_close(struct net_device *netdev) +{ + struct mlx5e_priv *priv = mlx5i_epriv(netdev); + struct mlx5i_priv *ipriv = priv->ppriv; + struct mlx5_core_dev *mdev = priv->mdev; + + mutex_lock(&priv->state_lock); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + goto unlock; + + clear_bit(MLX5E_STATE_OPENED, &priv->state); + + netif_carrier_off(priv->netdev); + mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn); + mlx5i_uninit_underlay_qp(priv); + mlx5e_deactivate_priv_channels(priv); + mlx5e_close_channels(&priv->channels); + mlx5e_destroy_tis(mdev, priv->tisn[0]); +unlock: + mutex_unlock(&priv->state_lock); + return 0; +} + +static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct mlx5e_priv *priv = mlx5i_epriv(netdev); + + mutex_lock(&priv->state_lock); + netdev->mtu = new_mtu; + mutex_unlock(&priv->state_lock); + + return 0; +} + /* Called directly after IPoIB netdevice was created to initialize SW structs */ static void mlx5i_pkey_init(struct mlx5_core_dev *mdev, struct net_device *netdev, @@ -146,7 +277,7 @@ static void mlx5i_pkey_init(struct mlx5_core_dev *mdev, mlx5i_init(mdev, netdev, profile, ppriv); /* Override parent ndo */ - netdev->netdev_ops = NULL; + netdev->netdev_ops = &mlx5i_pkey_netdev_ops; /* Currently no ethtool support */ netdev->ethtool_ops = NULL; -- cgit v1.2.3 From 6a910233c1eb19673dd0f37f1d72d7cdc419e176 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Thu, 14 Sep 2017 18:02:31 +0300 Subject: net/mlx5e: IPoIB, Add PKEY child interface ethtool ops Similar to VLAN interfaces, child interfaces have limited ethtool support. 
In the current code, the main limitation that prevents ethtool configuration on child interfaces is the shared resources, which are managed by the parent. Signed-off-by: Alex Vesker Reviewed-by: Erez Shitrit --- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 5 +++++ drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 4 ++-- 3 files changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index 43c126c63955..6f338a9219c8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -250,3 +250,8 @@ const struct ethtool_ops mlx5i_ethtool_ops = { .get_link_ksettings = mlx5i_get_link_ksettings, .get_link = ethtool_op_get_link, }; + +const struct ethtool_ops mlx5i_pkey_ethtool_ops = { + .get_drvinfo = mlx5i_get_drvinfo, + .get_link = ethtool_op_get_link, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h index 80c0cfee7164..a50c1a19550e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h @@ -39,6 +39,7 @@ #define MLX5I_MAX_NUM_TC 1 extern const struct ethtool_ops mlx5i_ethtool_ops; +extern const struct ethtool_ops mlx5i_pkey_ethtool_ops; #define MLX5_IB_GRH_BYTES 40 #define MLX5_IPOIB_ENCAP_LEN 4 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index d99bec6855de..531b02cc979b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -279,8 +279,8 @@ static void mlx5i_pkey_init(struct mlx5_core_dev *mdev, /* Override parent ndo */ netdev->netdev_ops = &mlx5i_pkey_netdev_ops; - /* Currently no ethtool support */ - netdev->ethtool_ops = NULL; + /* Set child limited ethtool support */ + netdev->ethtool_ops = &mlx5i_pkey_ethtool_ops; /* Use dummy rqs */ priv->channels.params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE; -- cgit v1.2.3 From b5ae577741bec22b584fa704076ccd8221cad19d Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Thu, 14 Sep 2017 18:22:50 +0300 Subject: net/mlx5e: IPoIB, Modify rdma netdev allocate and free to support PKEY Resources such as the FT, QPN HT and mdev resources should be allocated only by the parent netdev. Shared resources are allocated and freed by the parent interface since the parent is always present and created before the IPoIB PKEY sub-interface. 
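In rough pseudo-form, the allocation path below now branches as follows (an illustrative sketch of the mlx5_rdma_netdev_alloc() hunks in this patch; the PD test is the heuristic the patch itself flags with a TODO):

	/* A non-zero PD means the parent already created the shared mdev resources */
	sub_interface = (mdev->mlx5e_res.pdn != 0);
	profile = sub_interface ? mlx5i_pkey_get_profile() : &mlx5i_nic_profile;

	if (!sub_interface) {
		/* Parent only: QPN-to-netdev hash table and shared mdev resources */
		err = mlx5i_pkey_qpn_ht_init(netdev);
		if (!err)
			err = mlx5e_create_mdev_resources(mdev);
	}
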
Signed-off-by: Alex Vesker Reviewed-by: Erez Shitrit --- .../net/ethernet/mellanox/mlx5/core/en_common.c | 1 + .../net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 52 ++++++++++++++-------- .../net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 1 + 3 files changed, 36 insertions(+), 18 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index ece3fb147e3e..157d02917237 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -134,6 +134,7 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev) mlx5_core_destroy_mkey(mdev, &res->mkey); mlx5_core_dealloc_transport_domain(mdev, res->td.tdn); mlx5_core_dealloc_pd(mdev, res->pdn); + memset(res, 0, sizeof(*res)); } int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 70706eb70d3e..abf270d7f556 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -560,12 +560,13 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, const char *name, void (*setup)(struct net_device *)) { - const struct mlx5e_profile *profile = &mlx5i_nic_profile; - int nch = profile->max_nch(mdev); + const struct mlx5e_profile *profile; struct net_device *netdev; struct mlx5i_priv *ipriv; struct mlx5e_priv *epriv; struct rdma_netdev *rn; + bool sub_interface; + int nch; int err; if (mlx5i_check_required_hca_cap(mdev)) { @@ -573,10 +574,15 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, return ERR_PTR(-EOPNOTSUPP); } - /* This function should only be called once per mdev */ - err = mlx5e_create_mdev_resources(mdev); - if (err) - return NULL; + /* TODO: Need to find a better way to check if child device*/ + sub_interface = (mdev->mlx5e_res.pdn != 0); + + if (sub_interface) + profile = mlx5i_pkey_get_profile(); + else + profile = &mlx5i_nic_profile; + + nch = profile->max_nch(mdev); netdev = alloc_netdev_mqs(sizeof(struct mlx5i_priv) + sizeof(struct mlx5e_priv), name, NET_NAME_UNKNOWN, @@ -585,7 +591,7 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, nch); if (!netdev) { mlx5_core_warn(mdev, "alloc_netdev_mqs failed\n"); - goto free_mdev_resources; + return NULL; } ipriv = netdev_priv(netdev); @@ -595,10 +601,18 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, if (!epriv->wq) goto err_free_netdev; - err = mlx5i_pkey_qpn_ht_init(netdev); - if (err) { - mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n"); - goto destroy_wq; + ipriv->sub_interface = sub_interface; + if (!ipriv->sub_interface) { + err = mlx5i_pkey_qpn_ht_init(netdev); + if (err) { + mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n"); + goto destroy_wq; + } + + /* This should only be called once per mdev */ + err = mlx5e_create_mdev_resources(mdev); + if (err) + goto destroy_ht; } profile->init(mdev, netdev, profile, ipriv); @@ -616,12 +630,12 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev, return netdev; +destroy_ht: + mlx5i_pkey_qpn_ht_cleanup(netdev); destroy_wq: destroy_workqueue(epriv->wq); err_free_netdev: free_netdev(netdev); -free_mdev_resources: - mlx5e_destroy_mdev_resources(mdev); return NULL; } @@ -629,16 +643,18 @@ EXPORT_SYMBOL(mlx5_rdma_netdev_alloc); void 
mlx5_rdma_netdev_free(struct net_device *netdev) { - struct mlx5e_priv *priv = mlx5i_epriv(netdev); + struct mlx5e_priv *priv = mlx5i_epriv(netdev); + struct mlx5i_priv *ipriv = priv->ppriv; const struct mlx5e_profile *profile = priv->profile; - struct mlx5_core_dev *mdev = priv->mdev; mlx5e_detach_netdev(priv); profile->cleanup(priv); destroy_workqueue(priv->wq); - mlx5i_pkey_qpn_ht_cleanup(netdev); - free_netdev(netdev); - mlx5e_destroy_mdev_resources(mdev); + if (!ipriv->sub_interface) { + mlx5i_pkey_qpn_ht_cleanup(netdev); + mlx5e_destroy_mdev_resources(priv->mdev); + } + free_netdev(netdev); } EXPORT_SYMBOL(mlx5_rdma_netdev_free); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h index a50c1a19550e..49008022c306 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h @@ -50,6 +50,7 @@ extern const struct ethtool_ops mlx5i_pkey_ethtool_ops; struct mlx5i_priv { struct rdma_netdev rn; /* keep this first */ struct mlx5_core_qp qp; + bool sub_interface; u32 qkey; u16 pkey_index; struct mlx5i_pkey_qpn_ht *qpn_htbl; -- cgit v1.2.3 From 4c7787ba3a1f583dba6ff3420817229f0056df84 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 13 Oct 2017 03:50:35 +0100 Subject: nfp: Explicitly include linux/bug.h Today's -next build encountered an error due to a missing definition of WARN_ON(), caused by some header reorganization removing an implicit inclusion of linux/bug.h. Fix this with an explicit inclusion. Signed-off-by: Mark Brown Acked-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_app.c | 1 + drivers/net/ethernet/netronome/nfp/nfp_asm.h | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c index 82c290763529..5d9e2eba5b49 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c @@ -31,6 +31,7 @@ * SOFTWARE. */ +#include <linux/bug.h> #include #include diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index c26aa7e4a839..86e7daee6099 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -35,6 +35,7 @@ #define __NFP_ASM_H__ 1 #include +#include <linux/bug.h> #include #define REG_NONE 0 -- cgit v1.2.3 From ad75b7d32f2517a6cc92a5d70569c33455157453 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Fri, 13 Oct 2017 18:48:13 +0530 Subject: cxgb4: implement ethtool dump data operations Implement operations to set/get dump data via ethtool. Also add template header that precedes dump data, which helps in decoding and extracting the dump data. Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/Makefile | 2 +- drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 33 ++++++++++ .../net/ethernet/chelsio/cxgb4/cudbg_lib_common.h | 65 +++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 3 + drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 73 ++++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h | 32 ++++++++++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 56 ++++++++++++++++- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 3 + 8 files changed, 265 insertions(+), 2 deletions(-) create mode 100644 drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h create mode 100644 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h create mode 100644 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c create mode 100644 drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile index fecd7aab673b..4c6041f45630 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/Makefile +++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile @@ -6,7 +6,7 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \ cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \ - cxgb4_ptp.o cxgb4_tc_flower.o + cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h new file mode 100644 index 000000000000..ebaa5b7063cf --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2017 Chelsio Communications. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#ifndef __CUDBG_IF_H__ +#define __CUDBG_IF_H__ + +#define CUDBG_MAJOR_VERSION 1 +#define CUDBG_MINOR_VERSION 14 + +enum cudbg_dbg_entity_type { + CUDBG_MAX_ENTITY = 70, +}; + +struct cudbg_init { + struct adapter *adap; /* Pointer to adapter structure */ + void *outbuf; /* Output buffer */ + u32 outbuf_size; /* Output buffer size */ +}; +#endif /* __CUDBG_IF_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h new file mode 100644 index 000000000000..eb1b36b72455 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h @@ -0,0 +1,65 @@ +/* + * Copyright (C) 2017 Chelsio Communications. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#ifndef __CUDBG_LIB_COMMON_H__ +#define __CUDBG_LIB_COMMON_H__ + +#define CUDBG_SIGNATURE 67856866 /* CUDB in ascii */ + +enum cudbg_dump_type { + CUDBG_DUMP_TYPE_MINI = 1, +}; + +enum cudbg_compression_type { + CUDBG_COMPRESSION_NONE = 1, +}; + +struct cudbg_hdr { + u32 signature; + u32 hdr_len; + u16 major_ver; + u16 minor_ver; + u32 data_len; + u32 hdr_flags; + u16 max_entities; + u8 chip_ver; + u8 dump_type:3; + u8 reserved1:1; + u8 compress_type:4; + u32 reserved[8]; +}; + +struct cudbg_entity_hdr { + u32 entity_type; + u32 start_offset; + u32 size; + int hdr_flags; + u32 sys_warn; + u32 sys_err; + u8 num_pad; + u8 flag; /* bit 0 is used to indicate ext data */ + u8 reserved1[2]; + u32 next_ext_offset; /* pointer to next extended entity meta data */ + u32 reserved[5]; +}; + +struct cudbg_buffer { + u32 size; + u32 offset; + char *data; +}; +#endif /* __CUDBG_LIB_COMMON_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 0db3ab6ad094..a749602fdc41 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -909,6 +909,9 @@ struct adapter { /* TC flower offload */ DECLARE_HASHTABLE(flower_anymatch_tbl, 9); struct timer_list flower_stats_timer; + + /* Ethtool Dump */ + struct ethtool_dump eth_dump; }; /* Support for "sched-class" command to allow a TX Scheduling Class to be diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c new file mode 100644 index 000000000000..a808150de208 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2017 Chelsio Communications. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + */ + +#include "cxgb4.h" +#include "cxgb4_cudbg.h" + +u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag) +{ + return 0; +} + +int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size, + u32 flag) +{ + struct cudbg_init cudbg_init = { 0 }; + struct cudbg_buffer dbg_buff = { 0 }; + u32 size, min_size, total_size = 0; + struct cudbg_hdr *cudbg_hdr; + + size = *buf_size; + + cudbg_init.adap = adap; + cudbg_init.outbuf = buf; + cudbg_init.outbuf_size = size; + + dbg_buff.data = buf; + dbg_buff.size = size; + dbg_buff.offset = 0; + + cudbg_hdr = (struct cudbg_hdr *)buf; + cudbg_hdr->signature = CUDBG_SIGNATURE; + cudbg_hdr->hdr_len = sizeof(struct cudbg_hdr); + cudbg_hdr->major_ver = CUDBG_MAJOR_VERSION; + cudbg_hdr->minor_ver = CUDBG_MINOR_VERSION; + cudbg_hdr->max_entities = CUDBG_MAX_ENTITY; + cudbg_hdr->chip_ver = adap->params.chip; + cudbg_hdr->dump_type = CUDBG_DUMP_TYPE_MINI; + cudbg_hdr->compress_type = CUDBG_COMPRESSION_NONE; + + min_size = sizeof(struct cudbg_hdr) + + sizeof(struct cudbg_entity_hdr) * + cudbg_hdr->max_entities; + if (size < min_size) + return -ENOMEM; + + dbg_buff.offset += min_size; + total_size = dbg_buff.offset; + + cudbg_hdr->data_len = total_size; + *buf_size = total_size; + return 0; +} + +void cxgb4_init_ethtool_dump(struct adapter *adapter) +{ + adapter->eth_dump.flag = CXGB4_ETH_DUMP_NONE; + adapter->eth_dump.version = adapter->params.fw_vers; + adapter->eth_dump.len = 0; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h new file mode 100644 index 000000000000..8c5dd6794f81 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2017 Chelsio Communications. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + */ + +#ifndef __CXGB4_CUDBG_H__ +#define __CXGB4_CUDBG_H__ + +#include "cudbg_if.h" +#include "cudbg_lib_common.h" + +enum CXGB4_ETHTOOL_DUMP_FLAGS { + CXGB4_ETH_DUMP_NONE = ETH_FW_DUMP_DISABLE, +}; + +u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag); +int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size, + u32 flag); +void cxgb4_init_ethtool_dump(struct adapter *adapter); +#endif /* __CXGB4_CUDBG_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index a71af1e587e2..796eb051cb2f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -21,6 +21,7 @@ #include "cxgb4.h" #include "t4_regs.h" #include "t4fw_api.h" +#include "cxgb4_cudbg.h" #define EEPROM_MAGIC 0x38E2F10C @@ -1374,6 +1375,56 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, return -EOPNOTSUPP; } +static int set_dump(struct net_device *dev, struct ethtool_dump *eth_dump) +{ + struct adapter *adapter = netdev2adap(dev); + u32 len = 0; + + len = sizeof(struct cudbg_hdr) + + sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY; + len += cxgb4_get_dump_length(adapter, eth_dump->flag); + + adapter->eth_dump.flag = eth_dump->flag; + adapter->eth_dump.len = len; + return 0; +} + +static int get_dump_flag(struct net_device *dev, struct ethtool_dump *eth_dump) +{ + struct adapter *adapter = netdev2adap(dev); + + eth_dump->flag = adapter->eth_dump.flag; + eth_dump->len = adapter->eth_dump.len; + eth_dump->version = adapter->eth_dump.version; + return 0; +} + +static int get_dump_data(struct net_device *dev, struct ethtool_dump *eth_dump, + void *buf) +{ + struct adapter *adapter = netdev2adap(dev); + u32 len = 0; + int ret = 0; + + if (adapter->eth_dump.flag == CXGB4_ETH_DUMP_NONE) + return -ENOENT; + + len = sizeof(struct cudbg_hdr) + + sizeof(struct cudbg_entity_hdr) * CUDBG_MAX_ENTITY; + len += cxgb4_get_dump_length(adapter, adapter->eth_dump.flag); + if (eth_dump->len < len) + return -ENOMEM; + + ret = cxgb4_cudbg_collect(adapter, buf, &len, adapter->eth_dump.flag); + if (ret) + return ret; + + eth_dump->flag = adapter->eth_dump.flag; + eth_dump->len = len; + eth_dump->version = adapter->eth_dump.version; + return 0; +} + static const struct ethtool_ops cxgb_ethtool_ops = { .get_link_ksettings = get_link_ksettings, .set_link_ksettings = set_link_ksettings, @@ -1404,7 +1455,10 @@ static const struct ethtool_ops cxgb_ethtool_ops = { .get_rxfh = get_rss_table, .set_rxfh = set_rss_table, .flash_device = set_flash, - .get_ts_info = get_ts_info + .get_ts_info = get_ts_info, + .set_dump = set_dump, + .get_dump_flag = get_dump_flag, + .get_dump_data = get_dump_data, }; void cxgb4_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index fe4cbe22d5d7..70c395d18087 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -81,6 +81,7 @@ #include "cxgb4_tc_u32.h" #include "cxgb4_tc_flower.h" #include "cxgb4_ptp.h" +#include "cxgb4_cudbg.h" char cxgb4_driver_name[] = KBUILD_MODNAME; @@ -5035,6 +5036,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) cxgb4_set_ethtool_ops(netdev); } + cxgb4_init_ethtool_dump(adapter); + pci_set_drvdata(pdev, adapter); if (adapter->flags & FW_OK) { -- cgit v1.2.3 From a7975a2f9a7984de9b9b318da9d1826033db32c7 Mon Sep 17 00:00:00 2001 
From: Rahul Lakkireddy Date: Fri, 13 Oct 2017 18:48:14 +0530 Subject: cxgb4: collect register dump Add the base infrastructure to collect dump entities. Collect the register dump and update the template header accordingly. Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/Makefile | 3 +- drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c | 54 +++++++++++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 5 ++ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 79 ++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h | 28 +++++++ .../net/ethernet/chelsio/cxgb4/cudbg_lib_common.h | 13 +++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 94 +++++++++++++++++++++- drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h | 11 +++ 8 files changed, 285 insertions(+), 2 deletions(-) create mode 100644 drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c create mode 100644 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c create mode 100644 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile index 4c6041f45630..70d454379996 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/Makefile +++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile @@ -6,7 +6,8 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \ cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \ - cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o + cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o \ + cudbg_common.o cudbg_lib.o cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c new file mode 100644 index 000000000000..f78ba1743b5a --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2017 Chelsio Communications. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#include "cxgb4.h" +#include "cudbg_if.h" +#include "cudbg_lib_common.h" + +int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size, + struct cudbg_buffer *pin_buff) +{ + u32 offset; + + offset = pdbg_buff->offset; + if (offset + size > pdbg_buff->size) + return CUDBG_STATUS_NO_MEM; + + pin_buff->data = (char *)pdbg_buff->data + offset; + pin_buff->offset = offset; + pin_buff->size = size; + pdbg_buff->size -= size; + return 0; +} + +void cudbg_put_buff(struct cudbg_buffer *pin_buff, + struct cudbg_buffer *pdbg_buff) +{ + pdbg_buff->size += pin_buff->size; + pin_buff->data = NULL; + pin_buff->offset = 0; + pin_buff->size = 0; +} + +void cudbg_update_buff(struct cudbg_buffer *pin_buff, + struct cudbg_buffer *pout_buff) +{ + /* We already write to buffer provided by ethtool, so just + * increment offset to next free space. 
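+ * No data is copied here; the collector has already written the entity in place.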
+ */ + pout_buff->offset += pin_buff->size; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index ebaa5b7063cf..73725a8666df 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -18,10 +18,15 @@ #ifndef __CUDBG_IF_H__ #define __CUDBG_IF_H__ +/* Error codes */ +#define CUDBG_STATUS_NO_MEM -19 +#define CUDBG_SYSTEM_ERROR -29 + #define CUDBG_MAJOR_VERSION 1 #define CUDBG_MINOR_VERSION 14 enum cudbg_dbg_entity_type { + CUDBG_REG_DUMP = 1, CUDBG_MAX_ENTITY = 70, }; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c new file mode 100644 index 000000000000..b37d8f7825dd --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2017 Chelsio Communications. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#include "cxgb4.h" +#include "cudbg_if.h" +#include "cudbg_lib_common.h" +#include "cudbg_lib.h" + +static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff, + struct cudbg_buffer *dbg_buff) +{ + cudbg_update_buff(pin_buff, dbg_buff); + cudbg_put_buff(pin_buff, dbg_buff); +} + +/* This function will add additional padding bytes into debug_buffer to make it + * 4 byte aligned. + */ +void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff, + struct cudbg_entity_hdr *entity_hdr) +{ + u8 zero_buf[4] = {0}; + u8 padding, remain; + + remain = (dbg_buff->offset - entity_hdr->start_offset) % 4; + padding = 4 - remain; + if (remain) { + memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf, + padding); + dbg_buff->offset += padding; + entity_hdr->num_pad = padding; + } + entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset; +} + +struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i) +{ + struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf; + + return (struct cudbg_entity_hdr *) + ((char *)outbuf + cudbg_hdr->hdr_len + + (sizeof(struct cudbg_entity_hdr) * (i - 1))); +} + +int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + u32 buf_size = 0; + int rc = 0; + + if (is_t4(padap->params.chip)) + buf_size = T4_REGMAP_SIZE; + else if (is_t5(padap->params.chip) || is_t6(padap->params.chip)) + buf_size = T5_REGMAP_SIZE; + + rc = cudbg_get_buff(dbg_buff, buf_size, &temp_buff); + if (rc) + return rc; + t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size); + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h new file mode 100644 index 000000000000..5b0a0e964601 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2017 Chelsio Communications. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#ifndef __CUDBG_LIB_H__ +#define __CUDBG_LIB_H__ + +int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); + +struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i); +void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff, + struct cudbg_entity_hdr *entity_hdr); +#endif /* __CUDBG_LIB_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h index eb1b36b72455..422a5ceedd2e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h @@ -62,4 +62,17 @@ struct cudbg_buffer { u32 offset; char *data; }; + +struct cudbg_error { + int sys_err; + int sys_warn; + int app_err; +}; + +int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size, + struct cudbg_buffer *pin_buff); +void cudbg_put_buff(struct cudbg_buffer *pin_buff, + struct cudbg_buffer *pdbg_buff); +void cudbg_update_buff(struct cudbg_buffer *pin_buff, + struct cudbg_buffer *pout_buff); #endif /* __CUDBG_LIB_COMMON_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index a808150de208..4ec322eec68c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -18,9 +18,94 @@ #include "cxgb4.h" #include "cxgb4_cudbg.h" +static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { + { CUDBG_REG_DUMP, cudbg_collect_reg_dump }, +}; + +static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) +{ + u32 len = 0; + + switch (entity) { + case CUDBG_REG_DUMP: + switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { + case CHELSIO_T4: + len = T4_REGMAP_SIZE; + break; + case CHELSIO_T5: + case CHELSIO_T6: + len = T5_REGMAP_SIZE; + break; + default: + break; + } + break; + default: + break; + } + + return len; +} + u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag) { - return 0; + u32 i, entity; + u32 len = 0; + + if (flag & CXGB4_ETH_DUMP_HW) { + for (i = 0; i < ARRAY_SIZE(cxgb4_collect_hw_dump); i++) { + entity = cxgb4_collect_hw_dump[i].entity; + len += cxgb4_get_entity_length(adap, entity); + } + } + + return len; +} + +static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + const struct cxgb4_collect_entity *e_arr, + u32 arr_size, void *buf, u32 *tot_size) +{ + struct adapter *adap = pdbg_init->adap; + struct cudbg_error cudbg_err = { 0 }; + struct cudbg_entity_hdr *entity_hdr; + u32 entity_size, i; + u32 total_size = 0; + int ret; + + for (i = 0; i < arr_size; i++) { + const struct cxgb4_collect_entity *e = &e_arr[i]; + + /* Skip entities that won't fit in output buffer */ + entity_size = cxgb4_get_entity_length(adap, e->entity); + if (entity_size > + pdbg_init->outbuf_size - *tot_size - total_size) + continue; + + entity_hdr = cudbg_get_entity_hdr(buf, 
e->entity); + entity_hdr->entity_type = e->entity; + entity_hdr->start_offset = dbg_buff->offset; + memset(&cudbg_err, 0, sizeof(struct cudbg_error)); + ret = e->collect_cb(pdbg_init, dbg_buff, &cudbg_err); + if (ret) { + entity_hdr->size = 0; + dbg_buff->offset = entity_hdr->start_offset; + } else { + cudbg_align_debug_buffer(dbg_buff, entity_hdr); + } + + /* Log error and continue with next entity */ + if (cudbg_err.sys_err) + ret = CUDBG_SYSTEM_ERROR; + + entity_hdr->hdr_flags = ret; + entity_hdr->sys_err = cudbg_err.sys_err; + entity_hdr->sys_warn = cudbg_err.sys_warn; + total_size += entity_hdr->size; + } + + *tot_size += total_size; } int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size, @@ -60,6 +145,13 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size, dbg_buff.offset += min_size; total_size = dbg_buff.offset; + if (flag & CXGB4_ETH_DUMP_HW) + cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff, + cxgb4_collect_hw_dump, + ARRAY_SIZE(cxgb4_collect_hw_dump), + buf, + &total_size); + cudbg_hdr->data_len = total_size; *buf_size = total_size; return 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h index 8c5dd6794f81..7369a7e22b89 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h @@ -20,9 +20,20 @@ #include "cudbg_if.h" #include "cudbg_lib_common.h" +#include "cudbg_lib.h" + +typedef int (*cudbg_collect_callback_t)(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); + +struct cxgb4_collect_entity { + enum cudbg_dbg_entity_type entity; + cudbg_collect_callback_t collect_cb; +}; enum CXGB4_ETHTOOL_DUMP_FLAGS { CXGB4_ETH_DUMP_NONE = ETH_FW_DUMP_DISABLE, + CXGB4_ETH_DUMP_HW = (1 << 1), /* various FW and HW dumps */ }; u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag); -- cgit v1.2.3 From b33af022e57996dc818ec960cbdf0f07cb5130d8 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Fri, 13 Oct 2017 18:48:15 +0530 Subject: cxgb4: collect on-chip memory dump Collect EDC0 and EDC1 memory dump. Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 29 +++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 8 ++ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 136 +++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h | 6 + .../net/ethernet/chelsio/cxgb4/cudbg_lib_common.h | 3 + drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 38 +++++- drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h | 1 + 7 files changed, 220 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h new file mode 100644 index 000000000000..71a426dd22f5 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2017 Chelsio Communications. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +#ifndef __CUDBG_ENTITY_H__ +#define __CUDBG_ENTITY_H__ + +#define EDC0_FLAG 3 +#define EDC1_FLAG 4 + +struct card_mem { + u16 size_edc0; + u16 size_edc1; + u16 mem_flag; +}; +#endif /* __CUDBG_ENTITY_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index 73725a8666df..8bcea985af77 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -20,6 +20,7 @@ /* Error codes */ #define CUDBG_STATUS_NO_MEM -19 +#define CUDBG_STATUS_ENTITY_NOT_FOUND -24 #define CUDBG_SYSTEM_ERROR -29 #define CUDBG_MAJOR_VERSION 1 @@ -27,6 +28,8 @@ enum cudbg_dbg_entity_type { CUDBG_REG_DUMP = 1, + CUDBG_EDC0 = 18, + CUDBG_EDC1 = 19, CUDBG_MAX_ENTITY = 70, }; @@ -35,4 +38,9 @@ struct cudbg_init { void *outbuf; /* Output buffer */ u32 outbuf_size; /* Output buffer size */ }; + +static inline unsigned int cudbg_mbytes_to_bytes(unsigned int size) +{ + return size * 1024 * 1024; +} #endif /* __CUDBG_IF_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index b37d8f7825dd..fb0e97e6a6a0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -15,10 +15,12 @@ * */ +#include "t4_regs.h" #include "cxgb4.h" #include "cudbg_if.h" #include "cudbg_lib_common.h" #include "cudbg_lib.h" +#include "cudbg_entity.h" static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff, struct cudbg_buffer *dbg_buff) @@ -27,6 +29,16 @@ static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff, cudbg_put_buff(pin_buff, dbg_buff); } +static int is_fw_attached(struct cudbg_init *pdbg_init) +{ + struct adapter *padap = pdbg_init->adap; + + if (!(padap->flags & FW_OK) || padap->use_bd) + return 0; + + return 1; +} + /* This function will add additional padding bytes into debug_buffer to make it * 4 byte aligned. 
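* For example, an entity whose data ends 45 bytes past its start gets * three zero bytes of padding (recorded in num_pad), so the next * entity header starts on a 4-byte boundary.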
*/ @@ -77,3 +89,127 @@ int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init, cudbg_write_and_release_buff(&temp_buff, dbg_buff); return rc; } + +static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, u8 mem_type, + unsigned long tot_len, + struct cudbg_error *cudbg_err) +{ + unsigned long bytes, bytes_left, bytes_read = 0; + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + int rc = 0; + + bytes_left = tot_len; + while (bytes_left > 0) { + bytes = min_t(unsigned long, bytes_left, + (unsigned long)CUDBG_CHUNK_SIZE); + rc = cudbg_get_buff(dbg_buff, bytes, &temp_buff); + if (rc) + return rc; + spin_lock(&padap->win0_lock); + rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type, + bytes_read, bytes, + (__be32 *)temp_buff.data, + 1); + spin_unlock(&padap->win0_lock); + if (rc) { + cudbg_err->sys_err = rc; + cudbg_put_buff(&temp_buff, dbg_buff); + return rc; + } + bytes_left -= bytes; + bytes_read += bytes; + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + } + return rc; +} + +static void cudbg_collect_mem_info(struct cudbg_init *pdbg_init, + struct card_mem *mem_info) +{ + struct adapter *padap = pdbg_init->adap; + u32 value; + + value = t4_read_reg(padap, MA_EDRAM0_BAR_A); + value = EDRAM0_SIZE_G(value); + mem_info->size_edc0 = (u16)value; + + value = t4_read_reg(padap, MA_EDRAM1_BAR_A); + value = EDRAM1_SIZE_G(value); + mem_info->size_edc1 = (u16)value; + + value = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A); + if (value & EDRAM0_ENABLE_F) + mem_info->mem_flag |= (1 << EDC0_FLAG); + if (value & EDRAM1_ENABLE_F) + mem_info->mem_flag |= (1 << EDC1_FLAG); +} + +static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + int rc; + + if (is_fw_attached(pdbg_init)) { + /* Flush uP dcache before reading edcX/mcX */ + rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH); + if (rc) + cudbg_err->sys_warn = rc; + } +} + +static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err, + u8 mem_type) +{ + struct card_mem mem_info = {0}; + unsigned long flag, size; + int rc; + + cudbg_t4_fwcache(pdbg_init, cudbg_err); + cudbg_collect_mem_info(pdbg_init, &mem_info); + switch (mem_type) { + case MEM_EDC0: + flag = (1 << EDC0_FLAG); + size = cudbg_mbytes_to_bytes(mem_info.size_edc0); + break; + case MEM_EDC1: + flag = (1 << EDC1_FLAG); + size = cudbg_mbytes_to_bytes(mem_info.size_edc1); + break; + default: + rc = CUDBG_STATUS_ENTITY_NOT_FOUND; + goto err; + } + + if (mem_info.mem_flag & flag) { + rc = cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, + size, cudbg_err); + if (rc) + goto err; + } else { + rc = CUDBG_STATUS_ENTITY_NOT_FOUND; + goto err; + } +err: + return rc; +} + +int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err, + MEM_EDC0); +} + +int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err, + MEM_EDC1); +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index 5b0a0e964601..f7be5090b172 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -21,6 +21,12 @@ int 
cudbg_collect_reg_dump(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i); void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h index 422a5ceedd2e..b150c5d1f7c0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h @@ -69,6 +69,9 @@ struct cudbg_error { int app_err; }; +#define CDUMP_MAX_COMP_BUF_SIZE ((64 * 1024) - 1) +#define CUDBG_CHUNK_SIZE ((CDUMP_MAX_COMP_BUF_SIZE / 1024) * 1024) + int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size, struct cudbg_buffer *pin_buff); void cudbg_put_buff(struct cudbg_buffer *pin_buff, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 4ec322eec68c..286d172a9c19 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -15,16 +15,22 @@ * */ +#include "t4_regs.h" #include "cxgb4.h" #include "cxgb4_cudbg.h" +static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = { + { CUDBG_EDC0, cudbg_collect_edc0_meminfo }, + { CUDBG_EDC1, cudbg_collect_edc1_meminfo }, +}; + static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { { CUDBG_REG_DUMP, cudbg_collect_reg_dump }, }; static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) { - u32 len = 0; + u32 value, len = 0; switch (entity) { case CUDBG_REG_DUMP: @@ -40,6 +46,22 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) break; } break; + case CUDBG_EDC0: + value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); + if (value & EDRAM0_ENABLE_F) { + value = t4_read_reg(adap, MA_EDRAM0_BAR_A); + len = EDRAM0_SIZE_G(value); + } + len = cudbg_mbytes_to_bytes(len); + break; + case CUDBG_EDC1: + value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); + if (value & EDRAM1_ENABLE_F) { + value = t4_read_reg(adap, MA_EDRAM1_BAR_A); + len = EDRAM1_SIZE_G(value); + } + len = cudbg_mbytes_to_bytes(len); + break; default: break; } @@ -59,6 +81,13 @@ u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag) } } + if (flag & CXGB4_ETH_DUMP_MEM) { + for (i = 0; i < ARRAY_SIZE(cxgb4_collect_mem_dump); i++) { + entity = cxgb4_collect_mem_dump[i].entity; + len += cxgb4_get_entity_length(adap, entity); + } + } + return len; } @@ -152,6 +181,13 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size, buf, &total_size); + if (flag & CXGB4_ETH_DUMP_MEM) + cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff, + cxgb4_collect_mem_dump, + ARRAY_SIZE(cxgb4_collect_mem_dump), + buf, + &total_size); + cudbg_hdr->data_len = total_size; *buf_size = total_size; return 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h index 7369a7e22b89..c099b5aa2214 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h @@ -33,6 +33,7 @@ struct cxgb4_collect_entity { enum CXGB4_ETHTOOL_DUMP_FLAGS { CXGB4_ETH_DUMP_NONE = ETH_FW_DUMP_DISABLE, + CXGB4_ETH_DUMP_MEM = (1 << 0), /* On-Chip 
Memory Dumps */ CXGB4_ETH_DUMP_HW = (1 << 1), /* various FW and HW dumps */ }; -- cgit v1.2.3 From 844d1b6f0ef8051a1ac0403327ab881dd4d101a3 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Fri, 13 Oct 2017 18:48:16 +0530 Subject: cxgb4: collect firmware mbox and device log dump Collect firmware mbox and device logs before collecting the rest of the hardware dumps to snap the firmware state before the mailbox logs are updated by other hardware dumps. Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 6 ++ drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 2 + drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 84 +++++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h | 6 ++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 9 +++ 5 files changed, 107 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index 71a426dd22f5..2b717e700bbc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -26,4 +26,10 @@ struct card_mem { u16 size_edc1; u16 mem_flag; }; + +struct cudbg_mbox_log { + struct mbox_cmd entry; + u32 hi[MBOX_LEN / 8]; + u32 lo[MBOX_LEN / 8]; +}; #endif /* __CUDBG_ENTITY_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index 8bcea985af77..0a37d9b6cd32 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -28,8 +28,10 @@ enum cudbg_dbg_entity_type { CUDBG_REG_DUMP = 1, + CUDBG_DEV_LOG = 2, CUDBG_EDC0 = 18, CUDBG_EDC1 = 19, + CUDBG_MBOX_LOG = 66, CUDBG_MAX_ENTITY = 70, }; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index fb0e97e6a6a0..960635e37a9d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -90,6 +90,45 @@ int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init, return rc; } +int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct devlog_params *dparams; + int rc = 0; + + rc = t4_init_devlog_params(padap); + if (rc < 0) { + cudbg_err->sys_err = rc; + return rc; + } + + dparams = &padap->params.devlog; + rc = cudbg_get_buff(dbg_buff, dparams->size, &temp_buff); + if (rc) + return rc; + + /* Collect FW devlog */ + if (dparams->start != 0) { + spin_lock(&padap->win0_lock); + rc = t4_memory_rw(padap, padap->params.drv_memwin, + dparams->memtype, dparams->start, + dparams->size, + (__be32 *)(char *)temp_buff.data, + 1); + spin_unlock(&padap->win0_lock); + if (rc) { + cudbg_err->sys_err = rc; + cudbg_put_buff(&temp_buff, dbg_buff); + return rc; + } + } + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, u8 mem_type, unsigned long tot_len, @@ -213,3 +252,48 @@ int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init, return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err, MEM_EDC1); } + +int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct 
cudbg_mbox_log *mboxlog = NULL; + struct cudbg_buffer temp_buff = { 0 }; + struct mbox_cmd_log *log = NULL; + struct mbox_cmd *entry; + unsigned int entry_idx; + u16 mbox_cmds; + int i, k, rc; + u64 flit; + u32 size; + + log = padap->mbox_log; + mbox_cmds = padap->mbox_log->size; + size = sizeof(struct cudbg_mbox_log) * mbox_cmds; + rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + if (rc) + return rc; + + mboxlog = (struct cudbg_mbox_log *)temp_buff.data; + for (k = 0; k < mbox_cmds; k++) { + entry_idx = log->cursor + k; + if (entry_idx >= log->size) + entry_idx -= log->size; + + entry = mbox_cmd_log_entry(log, entry_idx); + /* skip over unused entries */ + if (entry->timestamp == 0) + continue; + + memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd)); + for (i = 0; i < MBOX_LEN / 8; i++) { + flit = entry->cmd[i]; + mboxlog->hi[i] = (u32)(flit >> 32); + mboxlog->lo[i] = (u32)flit; + } + mboxlog++; + } + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index f7be5090b172..690591b36d4c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -21,12 +21,18 @@ int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i); void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 286d172a9c19..f8c4f4199ce6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -18,6 +18,7 @@ #include "t4_regs.h" #include "cxgb4.h" #include "cxgb4_cudbg.h" +#include "cudbg_entity.h" static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = { { CUDBG_EDC0, cudbg_collect_edc0_meminfo }, @@ -25,6 +26,8 @@ static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = { }; static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { + { CUDBG_MBOX_LOG, cudbg_collect_mbox_log }, + { CUDBG_DEV_LOG, cudbg_collect_fw_devlog }, { CUDBG_REG_DUMP, cudbg_collect_reg_dump }, }; @@ -46,6 +49,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) break; } break; + case CUDBG_DEV_LOG: + len = adap->params.devlog.size; + break; case CUDBG_EDC0: value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); if (value & EDRAM0_ENABLE_F) { @@ -62,6 +68,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) } len = cudbg_mbytes_to_bytes(len); break; + case CUDBG_MBOX_LOG: + len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size; + break; default: break; } -- cgit v1.2.3 From 5ccf9d049615994349e9b0a1f0d4b9a398b9b0c2 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Fri, 13 Oct 2017 18:48:17 +0530 Subject: cxgb4: update API for TP indirect register access Try to access TP indirect registers via 
firmware first. If this fails, fall back and access them directly. This ensures that the driver and firmware do not conflict with each other while accessing the TP indirect registers. Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 36 ++- drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 13 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 14 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 4 +- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 312 +++++++++++++-------- 5 files changed, 239 insertions(+), 140 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index a749602fdc41..d4032e373927 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1459,7 +1459,7 @@ unsigned int qtimer_val(const struct adapter *adap, int t4_init_devlog_params(struct adapter *adapter); int t4_init_sge_params(struct adapter *adapter); -int t4_init_tp_params(struct adapter *adap); +int t4_init_tp_params(struct adapter *adap, bool sleep_ok); int t4_filter_field_shift(const struct adapter *adap, int filter_sel); int t4_init_rss_mode(struct adapter *adap, int mbox); int t4_init_portinfo(struct port_info *pi, int mbox, @@ -1473,14 +1473,15 @@ int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, unsigned int flags, unsigned int defq); int t4_read_rss(struct adapter *adapter, u16 *entries); -void t4_read_rss_key(struct adapter *adapter, u32 *key); -void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx); +void t4_read_rss_key(struct adapter *adapter, u32 *key, bool sleep_ok); +void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx, + bool sleep_ok); void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, - u32 *valp); + u32 *valp, bool sleep_ok); void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, - u32 *vfl, u32 *vfh); -u32 t4_read_rss_pf_map(struct adapter *adapter); -u32 t4_read_rss_pf_mask(struct adapter *adapter); + u32 *vfl, u32 *vfh, bool sleep_ok); +u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok); +u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok); unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx); unsigned int t4_get_tp_ch_map(struct adapter *adapter, int pidx); @@ -1511,14 +1512,18 @@ void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]); void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, unsigned int mask, unsigned int val); void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr); -void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st); -void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st); -void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st); -void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st); +void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st, + bool sleep_ok); +void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st, + bool sleep_ok); +void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st, + bool sleep_ok); +void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st, + bool sleep_ok); void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, - struct
tp_tcp_stats *v6); + struct tp_tcp_stats *v6, bool sleep_ok); void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx, - struct tp_fcoe_stats *st); + struct tp_fcoe_stats *st, bool sleep_ok); void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, const unsigned short *alpha, const unsigned short *beta); @@ -1627,6 +1632,11 @@ void t4_idma_monitor(struct adapter *adapter, int hz, int ticks); int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf, unsigned int naddr, u8 *addr); +void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs, + u32 start_index, bool sleep_ok); +void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, + u32 start_index, bool sleep_ok); + void t4_uld_mem_free(struct adapter *adap); int t4_uld_mem_alloc(struct adapter *adap); void t4_uld_clean_up(struct adapter *adap); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 76540b0e082d..917663b35603 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -2211,7 +2211,7 @@ static int rss_key_show(struct seq_file *seq, void *v) { u32 key[10]; - t4_read_rss_key(seq->private, key); + t4_read_rss_key(seq->private, key, true); seq_printf(seq, "%08x%08x%08x%08x%08x%08x%08x%08x%08x%08x\n", key[9], key[8], key[7], key[6], key[5], key[4], key[3], key[2], key[1], key[0]); @@ -2248,7 +2248,7 @@ static ssize_t rss_key_write(struct file *file, const char __user *buf, } } - t4_write_rss_key(adap, key, -1); + t4_write_rss_key(adap, key, -1, true); return count; } @@ -2325,12 +2325,13 @@ static int rss_pf_config_open(struct inode *inode, struct file *file) return -ENOMEM; pfconf = (struct rss_pf_conf *)p->data; - rss_pf_map = t4_read_rss_pf_map(adapter); - rss_pf_mask = t4_read_rss_pf_mask(adapter); + rss_pf_map = t4_read_rss_pf_map(adapter, true); + rss_pf_mask = t4_read_rss_pf_mask(adapter, true); for (pf = 0; pf < 8; pf++) { pfconf[pf].rss_pf_map = rss_pf_map; pfconf[pf].rss_pf_mask = rss_pf_mask; - t4_read_rss_pf_config(adapter, pf, &pfconf[pf].rss_pf_config); + t4_read_rss_pf_config(adapter, pf, &pfconf[pf].rss_pf_config, + true); } return 0; } @@ -2393,7 +2394,7 @@ static int rss_vf_config_open(struct inode *inode, struct file *file) vfconf = (struct rss_vf_conf *)p->data; for (vf = 0; vf < vfcount; vf++) { t4_read_rss_vf_config(adapter, vf, &vfconf[vf].rss_vf_vfl, - &vfconf[vf].rss_vf_vfh); + &vfconf[vf].rss_vf_vfh, true); } return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 796eb051cb2f..1b7f6b9ccc8b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -336,10 +336,10 @@ static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s) memset(s, 0, sizeof(*s)); spin_lock(&adap->stats_lock); - t4_tp_get_tcp_stats(adap, &v4, &v6); - t4_tp_get_rdma_stats(adap, &rdma_stats); - t4_get_usm_stats(adap, &usm_stats); - t4_tp_get_err_stats(adap, &err_stats); + t4_tp_get_tcp_stats(adap, &v4, &v6, false); + t4_tp_get_rdma_stats(adap, &rdma_stats, false); + t4_get_usm_stats(adap, &usm_stats, false); + t4_tp_get_err_stats(adap, &err_stats, false); spin_unlock(&adap->stats_lock); s->db_drop = adap->db_stats.db_drop; @@ -389,9 +389,9 @@ static void collect_channel_stats(struct adapter *adap, struct channel_stats *s, memset(s, 0, sizeof(*s)); spin_lock(&adap->stats_lock); - t4_tp_get_cpl_stats(adap, 
&cpl_stats); - t4_tp_get_err_stats(adap, &err_stats); - t4_get_fcoe_stats(adap, i, &fcoe_stats); + t4_tp_get_cpl_stats(adap, &cpl_stats, false); + t4_tp_get_err_stats(adap, &err_stats, false); + t4_get_fcoe_stats(adap, i, &fcoe_stats, false); spin_unlock(&adap->stats_lock); s->cpl_req = cpl_stats.req[i]; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 70c395d18087..8d97ae6039aa 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -1639,7 +1639,7 @@ void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, struct adapter *adap = pci_get_drvdata(pdev); spin_lock(&adap->stats_lock); - t4_tp_get_tcp_stats(adap, v4, v6); + t4_tp_get_tcp_stats(adap, v4, v6, false); spin_unlock(&adap->stats_lock); } EXPORT_SYMBOL(cxgb4_get_tcp_stats); @@ -4077,7 +4077,7 @@ static int adap_init0(struct adapter *adap) } t4_init_sge_params(adap); adap->flags |= FW_OK; - t4_init_tp_params(adap); + t4_init_tp_params(adap, true); return 0; /* diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index b3fd1f457639..0f12bf507d56 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -5052,23 +5052,26 @@ static unsigned int t4_use_ldst(struct adapter *adap) } /** - * t4_fw_tp_pio_rw - Access TP PIO through LDST - * @adap: the adapter - * @vals: where the indirect register values are stored/written - * @nregs: how many indirect registers to read/write - * @start_idx: index of first indirect register to read/write - * @rw: Read (1) or Write (0) + * t4_tp_fw_ldst_rw - Access TP indirect register through LDST + * @adap: the adapter + * @cmd: TP fw ldst address space type + * @vals: where the indirect register values are stored/written + * @nregs: how many indirect registers to read/write + * @start_idx: index of first indirect register to read/write + * @rw: Read (1) or Write (0) + * @sleep_ok: if true we may sleep while awaiting command completion * - * Access TP PIO registers through LDST + * Access TP indirect registers through LDST */ -static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs, - unsigned int start_index, unsigned int rw) +static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals, + unsigned int nregs, unsigned int start_index, + unsigned int rw, bool sleep_ok) { - int ret, i; - int cmd = FW_LDST_ADDRSPC_TP_PIO; + int ret = 0; + unsigned int i; struct fw_ldst_cmd c; - for (i = 0 ; i < nregs; i++) { + for (i = 0; i < nregs; i++) { memset(&c, 0, sizeof(c)); c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) | FW_CMD_REQUEST_F | @@ -5079,26 +5082,127 @@ static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs, c.u.addrval.addr = cpu_to_be32(start_index + i); c.u.addrval.val = rw ? 
0 : cpu_to_be32(vals[i]); - ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); - if (!ret && rw) + ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, + sleep_ok); + if (ret) + return ret; + + if (rw) vals[i] = be32_to_cpu(c.u.addrval.val); } + return 0; +} + +/** + * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor + * @adap: the adapter + * @reg_addr: Address Register + * @reg_data: Data register + * @buff: where the indirect register values are stored/written + * @nregs: how many indirect registers to read/write + * @start_index: index of first indirect register to read/write + * @rw: READ(1) or WRITE(0) + * @sleep_ok: if true we may sleep while awaiting command completion + * + * Read/Write TP indirect registers through LDST if possible. + * Else, use backdoor access + **/ +static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data, + u32 *buff, u32 nregs, u32 start_index, int rw, + bool sleep_ok) +{ + int rc = -EINVAL; + int cmd; + + switch (reg_addr) { + case TP_PIO_ADDR_A: + cmd = FW_LDST_ADDRSPC_TP_PIO; + break; + case TP_MIB_INDEX_A: + cmd = FW_LDST_ADDRSPC_TP_MIB; + break; + default: + goto indirect_access; + } + + if (t4_use_ldst(adap)) + rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw, + sleep_ok); + +indirect_access: + + if (rc) { + if (rw) + t4_read_indirect(adap, reg_addr, reg_data, buff, nregs, + start_index); + else + t4_write_indirect(adap, reg_addr, reg_data, buff, nregs, + start_index); + } +} + +/** + * t4_tp_pio_read - Read TP PIO registers + * @adap: the adapter + * @buff: where the indirect register values are written + * @nregs: how many indirect registers to read + * @start_index: index of first indirect register to read + * @sleep_ok: if true we may sleep while awaiting command completion + * + * Read TP PIO Registers + **/ +void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs, + u32 start_index, bool sleep_ok) +{ + t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs, + start_index, 1, sleep_ok); +} + +/** + * t4_tp_pio_write - Write TP PIO registers + * @adap: the adapter + * @buff: where the indirect register values are stored + * @nregs: how many indirect registers to write + * @start_index: index of first indirect register to write + * @sleep_ok: if true we may sleep while awaiting command completion + * + * Write TP PIO Registers + **/ +static void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs, + u32 start_index, bool sleep_ok) +{ + t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs, + start_index, 0, sleep_ok); +} + +/** + * t4_tp_mib_read - Read TP MIB registers + * @adap: the adapter + * @buff: where the indirect register values are written + * @nregs: how many indirect registers to read + * @start_index: index of first indirect register to read + * @sleep_ok: if true we may sleep while awaiting command completion + * + * Read TP MIB Registers + **/ +void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index, + bool sleep_ok) +{ + t4_tp_indirect_rw(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, buff, nregs, + start_index, 1, sleep_ok); } /** * t4_read_rss_key - read the global RSS key * @adap: the adapter * @key: 10-entry array holding the 320-bit RSS key + * @sleep_ok: if true we may sleep while awaiting command completion * * Reads the global 320-bit RSS key. 
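* The key occupies ten consecutive TP_RSS_SECRET_KEY registers, so a * single ten-register t4_tp_pio_read() below fetches the whole key.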
*/ -void t4_read_rss_key(struct adapter *adap, u32 *key) +void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok) { - if (t4_use_ldst(adap)) - t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1); - else - t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10, - TP_RSS_SECRET_KEY0_A); + t4_tp_pio_read(adap, key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok); } /** @@ -5106,12 +5210,14 @@ void t4_read_rss_key(struct adapter *adap, u32 *key) * @adap: the adapter * @key: 10-entry array holding the 320-bit RSS key * @idx: which RSS key to write + * @sleep_ok: if true we may sleep while awaiting command completion * * Writes one of the RSS keys with the given 320-bit value. If @idx is * 0..15 the corresponding entry in the RSS key table is written, * otherwise the global RSS key is written. */ -void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx) +void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx, + bool sleep_ok) { u8 rss_key_addr_cnt = 16; u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A); @@ -5124,11 +5230,7 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx) (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3)) rss_key_addr_cnt = 32; - if (t4_use_ldst(adap)) - t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0); - else - t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10, - TP_RSS_SECRET_KEY0_A); + t4_tp_pio_write(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok); if (idx >= 0 && idx < rss_key_addr_cnt) { if (rss_key_addr_cnt > 16) @@ -5146,19 +5248,15 @@ void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx) * @adapter: the adapter * @index: the entry in the PF RSS table to read * @valp: where to store the returned value + * @sleep_ok: if true we may sleep while awaiting command completion * * Reads the PF RSS Configuration Table at the specified index and returns * the value found there. */ void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, - u32 *valp) + u32 *valp, bool sleep_ok) { - if (t4_use_ldst(adapter)) - t4_fw_tp_pio_rw(adapter, valp, 1, - TP_RSS_PF0_CONFIG_A + index, 1); - else - t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, - valp, 1, TP_RSS_PF0_CONFIG_A + index); + t4_tp_pio_read(adapter, valp, 1, TP_RSS_PF0_CONFIG_A + index, sleep_ok); } /** @@ -5167,12 +5265,13 @@ void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index, * @index: the entry in the VF RSS table to read * @vfl: where to store the returned VFL * @vfh: where to store the returned VFH + * @sleep_ok: if true we may sleep while awaiting command completion * * Reads the VF RSS Configuration Table at the specified index and returns * the (VFL, VFH) values found there. */ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, - u32 *vfl, u32 *vfh) + u32 *vfl, u32 *vfh, bool sleep_ok) { u32 vrt, mask, data; @@ -5193,50 +5292,37 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index, /* Grab the VFL/VFH values ... 
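* (VFL and VFH are each a single TP PIO register, read back to back).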
*/ - if (t4_use_ldst(adapter)) { - t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1); - t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1); - } else { - t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, - vfl, 1, TP_RSS_VFL_CONFIG_A); - t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, - vfh, 1, TP_RSS_VFH_CONFIG_A); - } + t4_tp_pio_read(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, sleep_ok); + t4_tp_pio_read(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, sleep_ok); } /** * t4_read_rss_pf_map - read PF RSS Map * @adapter: the adapter + * @sleep_ok: if true we may sleep while awaiting command completion * * Reads the PF RSS Map register and returns its value. */ -u32 t4_read_rss_pf_map(struct adapter *adapter) +u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok) { u32 pfmap; - if (t4_use_ldst(adapter)) - t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1); - else - t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, - &pfmap, 1, TP_RSS_PF_MAP_A); + t4_tp_pio_read(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, sleep_ok); return pfmap; } /** * t4_read_rss_pf_mask - read PF RSS Mask * @adapter: the adapter + * @sleep_ok: if true we may sleep while awaiting command completion * * Reads the PF RSS Mask register and returns its value. */ -u32 t4_read_rss_pf_mask(struct adapter *adapter) +u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok) { u32 pfmask; - if (t4_use_ldst(adapter)) - t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1); - else - t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A, - &pfmask, 1, TP_RSS_PF_MSK_A); + t4_tp_pio_read(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, sleep_ok); return pfmask; } @@ -5245,12 +5331,13 @@ u32 t4_read_rss_pf_mask(struct adapter *adapter) * @adap: the adapter * @v4: holds the TCP/IP counter values * @v6: holds the TCP/IPv6 counter values + * @sleep_ok: if true we may sleep while awaiting command completion * * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters. * Either @v4 or @v6 may be %NULL to skip the corresponding stats. */ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, - struct tp_tcp_stats *v6) + struct tp_tcp_stats *v6, bool sleep_ok) { u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1]; @@ -5259,16 +5346,16 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO)) if (v4) { - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, - ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A); + t4_tp_mib_read(adap, val, ARRAY_SIZE(val), + TP_MIB_TCP_OUT_RST_A, sleep_ok); v4->tcp_out_rsts = STAT(OUT_RST); v4->tcp_in_segs = STAT64(IN_SEG); v4->tcp_out_segs = STAT64(OUT_SEG); v4->tcp_retrans_segs = STAT64(RXT_SEG); } if (v6) { - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, - ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A); + t4_tp_mib_read(adap, val, ARRAY_SIZE(val), + TP_MIB_TCP_V6OUT_RST_A, sleep_ok); v6->tcp_out_rsts = STAT(OUT_RST); v6->tcp_in_segs = STAT64(IN_SEG); v6->tcp_out_segs = STAT64(OUT_SEG); @@ -5283,63 +5370,66 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, * t4_tp_get_err_stats - read TP's error MIB counters * @adap: the adapter * @st: holds the counter values + * @sleep_ok: if true we may sleep while awaiting command completion * * Returns the values of TP's error counters. 
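* Each per-channel block below is filled with one TP MIB read of * nchan consecutive counters.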
*/ -void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st) +void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st, + bool sleep_ok) { int nchan = adap->params.arch.nchan; - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->tnl_cong_drops, nchan, TP_MIB_TNL_CNG_DROP_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->ofld_chan_drops, nchan, TP_MIB_OFD_CHN_DROP_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->ofld_vlan_drops, nchan, TP_MIB_OFD_VLN_DROP_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - st->tcp6_in_errs, nchan, TP_MIB_TCP_V6IN_ERR_0_A); - - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, - &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A); + t4_tp_mib_read(adap, st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A, + sleep_ok); + t4_tp_mib_read(adap, st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A, + sleep_ok); + t4_tp_mib_read(adap, st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A, + sleep_ok); + t4_tp_mib_read(adap, st->tnl_cong_drops, nchan, + TP_MIB_TNL_CNG_DROP_0_A, sleep_ok); + t4_tp_mib_read(adap, st->ofld_chan_drops, nchan, + TP_MIB_OFD_CHN_DROP_0_A, sleep_ok); + t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A, + sleep_ok); + t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan, + TP_MIB_OFD_VLN_DROP_0_A, sleep_ok); + t4_tp_mib_read(adap, st->tcp6_in_errs, nchan, + TP_MIB_TCP_V6IN_ERR_0_A, sleep_ok); + t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A, + sleep_ok); } /** * t4_tp_get_cpl_stats - read TP's CPL MIB counters * @adap: the adapter * @st: holds the counter values + * @sleep_ok: if true we may sleep while awaiting command completion * * Returns the values of TP's CPL counters. */ -void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st) +void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st, + bool sleep_ok) { int nchan = adap->params.arch.nchan; - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req, - nchan, TP_MIB_CPL_IN_REQ_0_A); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp, - nchan, TP_MIB_CPL_OUT_RSP_0_A); + t4_tp_mib_read(adap, st->req, nchan, TP_MIB_CPL_IN_REQ_0_A, sleep_ok); + t4_tp_mib_read(adap, st->rsp, nchan, TP_MIB_CPL_OUT_RSP_0_A, sleep_ok); } /** * t4_tp_get_rdma_stats - read TP's RDMA MIB counters * @adap: the adapter * @st: holds the counter values + * @sleep_ok: if true we may sleep while awaiting command completion * * Returns the values of TP's RDMA counters. 
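* (Both counters live in adjacent MIB registers, so the single * two-register read below fills them together.)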
*/ -void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st) +void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st, + bool sleep_ok) { - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->rqe_dfr_pkt, - 2, TP_MIB_RQE_DFR_PKT_A); + t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, TP_MIB_RQE_DFR_PKT_A, + sleep_ok); } /** @@ -5347,20 +5437,24 @@ void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st) * @adap: the adapter * @idx: the port index * @st: holds the counter values + * @sleep_ok: if true we may sleep while awaiting command completion * * Returns the values of TP's FCoE counters for the selected port. */ void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx, - struct tp_fcoe_stats *st) + struct tp_fcoe_stats *st, bool sleep_ok) { u32 val[2]; - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_ddp, - 1, TP_MIB_FCOE_DDP_0_A + idx); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_drop, - 1, TP_MIB_FCOE_DROP_0_A + idx); - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, - 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx); + t4_tp_mib_read(adap, &st->frames_ddp, 1, TP_MIB_FCOE_DDP_0_A + idx, + sleep_ok); + + t4_tp_mib_read(adap, &st->frames_drop, 1, + TP_MIB_FCOE_DROP_0_A + idx, sleep_ok); + + t4_tp_mib_read(adap, val, 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx, + sleep_ok); + st->octets_ddp = ((u64)val[0] << 32) | val[1]; } @@ -5368,15 +5462,16 @@ void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx, * t4_get_usm_stats - read TP's non-TCP DDP MIB counters * @adap: the adapter * @st: holds the counter values + * @sleep_ok: if true we may sleep while awaiting command completion * * Returns the values of TP's counters for non-TCP directly-placed packets. */ -void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st) +void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st, + bool sleep_ok) { u32 val[4]; - t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, 4, - TP_MIB_USM_PKTS_A); + t4_tp_mib_read(adap, val, 4, TP_MIB_USM_PKTS_A, sleep_ok); st->frames = val[0]; st->drops = val[1]; st->octets = ((u64)val[2] << 32) | val[3]; @@ -8663,10 +8758,11 @@ int t4_init_sge_params(struct adapter *adapter) /** * t4_init_tp_params - initialize adap->params.tp * @adap: the adapter + * @sleep_ok: if true we may sleep while awaiting command completion * * Initialize various fields of the adapter's TP Parameters structure. */ -int t4_init_tp_params(struct adapter *adap) +int t4_init_tp_params(struct adapter *adap, bool sleep_ok) { int chan; u32 v; @@ -8682,19 +8778,11 @@ int t4_init_tp_params(struct adapter *adap) /* Cache the adapter's Compressed Filter Mode and global Ingress * Configuration. */ - if (t4_use_ldst(adap)) { - t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1, - TP_VLAN_PRI_MAP_A, 1); - t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1, - TP_INGRESS_CONFIG_A, 1); - } else { - t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, - &adap->params.tp.vlan_pri_map, 1, - TP_VLAN_PRI_MAP_A); - t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, - &adap->params.tp.ingress_config, 1, - TP_INGRESS_CONFIG_A); - } + t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1, + TP_VLAN_PRI_MAP_A, sleep_ok); + t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1, + TP_INGRESS_CONFIG_A, sleep_ok); + /* For T6, cache the adapter's compressed error vector * and the passing of outer header info for encapsulated packets.
*/ -- cgit v1.2.3 From 4359cf33680c3f276c6bba11730836c41d3540a2 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Fri, 13 Oct 2017 18:48:18 +0530 Subject: cxgb4: collect TP dump Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 72 ++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 1 + drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 114 ++++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h | 3 + drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2 + drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 21 +++- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 20 ++++ drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 2 + 8 files changed, 234 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index 2b717e700bbc..a7446fd09366 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -32,4 +32,76 @@ struct cudbg_mbox_log { u32 hi[MBOX_LEN / 8]; u32 lo[MBOX_LEN / 8]; }; + +struct ireg_field { + u32 ireg_addr; + u32 ireg_data; + u32 ireg_local_offset; + u32 ireg_offset_range; +}; + +struct ireg_buf { + struct ireg_field tp_pio; + u32 outbuf[32]; +}; + +#define IREG_NUM_ELEM 4 + +static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = { + {0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */ + {0x7e40, 0x7e44, 0x040, 10}, /* t6_tp_pio_regs_40_to_49 */ + {0x7e40, 0x7e44, 0x050, 10}, /* t6_tp_pio_regs_50_to_59 */ + {0x7e40, 0x7e44, 0x060, 14}, /* t6_tp_pio_regs_60_to_6d */ + {0x7e40, 0x7e44, 0x06F, 1}, /* t6_tp_pio_regs_6f */ + {0x7e40, 0x7e44, 0x070, 6}, /* t6_tp_pio_regs_70_to_75 */ + {0x7e40, 0x7e44, 0x130, 18}, /* t6_tp_pio_regs_130_to_141 */ + {0x7e40, 0x7e44, 0x145, 19}, /* t6_tp_pio_regs_145_to_157 */ + {0x7e40, 0x7e44, 0x160, 1}, /* t6_tp_pio_regs_160 */ + {0x7e40, 0x7e44, 0x230, 25}, /* t6_tp_pio_regs_230_to_248 */ + {0x7e40, 0x7e44, 0x24a, 3}, /* t6_tp_pio_regs_24c */ + {0x7e40, 0x7e44, 0x8C0, 1} /* t6_tp_pio_regs_8c0 */ +}; + +static const u32 t5_tp_pio_array[][IREG_NUM_ELEM] = { + {0x7e40, 0x7e44, 0x020, 28}, /* t5_tp_pio_regs_20_to_3b */ + {0x7e40, 0x7e44, 0x040, 19}, /* t5_tp_pio_regs_40_to_52 */ + {0x7e40, 0x7e44, 0x054, 2}, /* t5_tp_pio_regs_54_to_55 */ + {0x7e40, 0x7e44, 0x060, 13}, /* t5_tp_pio_regs_60_to_6c */ + {0x7e40, 0x7e44, 0x06F, 1}, /* t5_tp_pio_regs_6f */ + {0x7e40, 0x7e44, 0x120, 4}, /* t5_tp_pio_regs_120_to_123 */ + {0x7e40, 0x7e44, 0x12b, 2}, /* t5_tp_pio_regs_12b_to_12c */ + {0x7e40, 0x7e44, 0x12f, 21}, /* t5_tp_pio_regs_12f_to_143 */ + {0x7e40, 0x7e44, 0x145, 19}, /* t5_tp_pio_regs_145_to_157 */ + {0x7e40, 0x7e44, 0x230, 25}, /* t5_tp_pio_regs_230_to_248 */ + {0x7e40, 0x7e44, 0x8C0, 1} /* t5_tp_pio_regs_8c0 */ +}; + +static const u32 t6_tp_tm_pio_array[][IREG_NUM_ELEM] = { + {0x7e18, 0x7e1c, 0x0, 12} +}; + +static const u32 t5_tp_tm_pio_array[][IREG_NUM_ELEM] = { + {0x7e18, 0x7e1c, 0x0, 12} +}; + +static const u32 t6_tp_mib_index_array[6][IREG_NUM_ELEM] = { + {0x7e50, 0x7e54, 0x0, 13}, + {0x7e50, 0x7e54, 0x10, 6}, + {0x7e50, 0x7e54, 0x18, 21}, + {0x7e50, 0x7e54, 0x30, 32}, + {0x7e50, 0x7e54, 0x50, 22}, + {0x7e50, 0x7e54, 0x68, 12} +}; + +static const u32 t5_tp_mib_index_array[9][IREG_NUM_ELEM] = { + {0x7e50, 0x7e54, 0x0, 13}, + {0x7e50, 0x7e54, 0x10, 6}, + {0x7e50, 0x7e54, 0x18, 8}, + {0x7e50, 0x7e54, 0x20, 13}, + {0x7e50, 0x7e54, 0x30, 16}, + {0x7e50, 0x7e54, 0x40, 
16}, + {0x7e50, 0x7e54, 0x50, 16}, + {0x7e50, 0x7e54, 0x60, 6}, + {0x7e50, 0x7e54, 0x68, 4} +}; #endif /* __CUDBG_ENTITY_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index 0a37d9b6cd32..53ea447c9103 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -31,6 +31,7 @@ enum cudbg_dbg_entity_type { CUDBG_DEV_LOG = 2, CUDBG_EDC0 = 18, CUDBG_EDC1 = 19, + CUDBG_TP_INDIRECT = 36, CUDBG_MBOX_LOG = 66, CUDBG_MAX_ENTITY = 70, }; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 960635e37a9d..6efa1de3723c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -253,6 +253,120 @@ int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init, MEM_EDC1); } +int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct ireg_buf *ch_tp_pio; + int i, rc, n = 0; + u32 size; + + if (is_t5(padap->params.chip)) + n = sizeof(t5_tp_pio_array) + + sizeof(t5_tp_tm_pio_array) + + sizeof(t5_tp_mib_index_array); + else + n = sizeof(t6_tp_pio_array) + + sizeof(t6_tp_tm_pio_array) + + sizeof(t6_tp_mib_index_array); + + n = n / (IREG_NUM_ELEM * sizeof(u32)); + size = sizeof(struct ireg_buf) * n; + rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + if (rc) + return rc; + + ch_tp_pio = (struct ireg_buf *)temp_buff.data; + + /* TP_PIO */ + if (is_t5(padap->params.chip)) + n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32)); + else if (is_t6(padap->params.chip)) + n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32)); + + for (i = 0; i < n; i++) { + struct ireg_field *tp_pio = &ch_tp_pio->tp_pio; + u32 *buff = ch_tp_pio->outbuf; + + if (is_t5(padap->params.chip)) { + tp_pio->ireg_addr = t5_tp_pio_array[i][0]; + tp_pio->ireg_data = t5_tp_pio_array[i][1]; + tp_pio->ireg_local_offset = t5_tp_pio_array[i][2]; + tp_pio->ireg_offset_range = t5_tp_pio_array[i][3]; + } else if (is_t6(padap->params.chip)) { + tp_pio->ireg_addr = t6_tp_pio_array[i][0]; + tp_pio->ireg_data = t6_tp_pio_array[i][1]; + tp_pio->ireg_local_offset = t6_tp_pio_array[i][2]; + tp_pio->ireg_offset_range = t6_tp_pio_array[i][3]; + } + t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range, + tp_pio->ireg_local_offset, true); + ch_tp_pio++; + } + + /* TP_TM_PIO */ + if (is_t5(padap->params.chip)) + n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32)); + else if (is_t6(padap->params.chip)) + n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32)); + + for (i = 0; i < n; i++) { + struct ireg_field *tp_pio = &ch_tp_pio->tp_pio; + u32 *buff = ch_tp_pio->outbuf; + + if (is_t5(padap->params.chip)) { + tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0]; + tp_pio->ireg_data = t5_tp_tm_pio_array[i][1]; + tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2]; + tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3]; + } else if (is_t6(padap->params.chip)) { + tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0]; + tp_pio->ireg_data = t6_tp_tm_pio_array[i][1]; + tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2]; + tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3]; + } + t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range, + tp_pio->ireg_local_offset, true); + ch_tp_pio++; + } + + /* TP_MIB_INDEX */ + if (is_t5(padap->params.chip)) + n = 
sizeof(t5_tp_mib_index_array) / + (IREG_NUM_ELEM * sizeof(u32)); + else if (is_t6(padap->params.chip)) + n = sizeof(t6_tp_mib_index_array) / + (IREG_NUM_ELEM * sizeof(u32)); + + for (i = 0; i < n ; i++) { + struct ireg_field *tp_pio = &ch_tp_pio->tp_pio; + u32 *buff = ch_tp_pio->outbuf; + + if (is_t5(padap->params.chip)) { + tp_pio->ireg_addr = t5_tp_mib_index_array[i][0]; + tp_pio->ireg_data = t5_tp_mib_index_array[i][1]; + tp_pio->ireg_local_offset = + t5_tp_mib_index_array[i][2]; + tp_pio->ireg_offset_range = + t5_tp_mib_index_array[i][3]; + } else if (is_t6(padap->params.chip)) { + tp_pio->ireg_addr = t6_tp_mib_index_array[i][0]; + tp_pio->ireg_data = t6_tp_mib_index_array[i][1]; + tp_pio->ireg_local_offset = + t6_tp_mib_index_array[i][2]; + tp_pio->ireg_offset_range = + t6_tp_mib_index_array[i][3]; + } + t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range, + tp_pio->ireg_local_offset, true); + ch_tp_pio++; + } + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index 690591b36d4c..7a927ec71a5f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -30,6 +30,9 @@ int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init, int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index d4032e373927..4eaca05ebd3a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1634,6 +1634,8 @@ int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf, unsigned int naddr, u8 *addr); void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index, bool sleep_ok); +void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs, + u32 start_index, bool sleep_ok); void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index, bool sleep_ok); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index f8c4f4199ce6..7dfee6adc51e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -29,11 +29,12 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { { CUDBG_MBOX_LOG, cudbg_collect_mbox_log }, { CUDBG_DEV_LOG, cudbg_collect_fw_devlog }, { CUDBG_REG_DUMP, cudbg_collect_reg_dump }, + { CUDBG_TP_INDIRECT, cudbg_collect_tp_indirect }, }; static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) { - u32 value, len = 0; + u32 value, n = 0, len = 0; switch (entity) { case CUDBG_REG_DUMP: @@ -68,6 +69,24 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) } len = cudbg_mbytes_to_bytes(len); break; + case CUDBG_TP_INDIRECT: + switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { + case CHELSIO_T5: + n = sizeof(t5_tp_pio_array) + + sizeof(t5_tp_tm_pio_array) + + sizeof(t5_tp_mib_index_array); + break; + case CHELSIO_T6: + n = 
sizeof(t6_tp_pio_array) + + sizeof(t6_tp_tm_pio_array) + + sizeof(t6_tp_mib_index_array); + break; + default: + break; + } + n = n / (IREG_NUM_ELEM * sizeof(u32)); + len = sizeof(struct ireg_buf) * n; + break; case CUDBG_MBOX_LOG: len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size; break; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 0f12bf507d56..8fa40f9e75c4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -5118,6 +5118,9 @@ static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data, case TP_PIO_ADDR_A: cmd = FW_LDST_ADDRSPC_TP_PIO; break; + case TP_TM_PIO_ADDR_A: + cmd = FW_LDST_ADDRSPC_TP_TM_PIO; + break; case TP_MIB_INDEX_A: cmd = FW_LDST_ADDRSPC_TP_MIB; break; @@ -5175,6 +5178,23 @@ static void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs, start_index, 0, sleep_ok); } +/** + * t4_tp_tm_pio_read - Read TP TM PIO registers + * @adap: the adapter + * @buff: where the indirect register values are written + * @nregs: how many indirect registers to read + * @start_index: index of first indirect register to read + * @sleep_ok: if true we may sleep while awaiting command completion + * + * Read TP TM PIO Registers + **/ +void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs, + u32 start_index, bool sleep_ok) +{ + t4_tp_indirect_rw(adap, TP_TM_PIO_ADDR_A, TP_TM_PIO_DATA_A, buff, + nregs, start_index, 1, sleep_ok); +} + /** * t4_tp_mib_read - Read TP MIB registers * @adap: the adapter diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index dac90837842b..82614e078f50 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1447,6 +1447,8 @@ #define LKPTBLQUEUE0_M 0x3ffU #define LKPTBLQUEUE0_G(x) (((x) >> LKPTBLQUEUE0_S) & LKPTBLQUEUE0_M) +#define TP_TM_PIO_ADDR_A 0x7e18 +#define TP_TM_PIO_DATA_A 0x7e1c #define TP_PIO_ADDR_A 0x7e40 #define TP_PIO_DATA_A 0x7e44 #define TP_MIB_INDEX_A 0x7e50 -- cgit v1.2.3 From 270d39bf324ecdb9ab3f9c521e6b7fd9cc6c27b8 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Fri, 13 Oct 2017 18:48:19 +0530 Subject: cxgb4: collect hardware module dumps Collect SGE, PCIE, PM, UP CIM, MA and HMA dumps. Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
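A note on buffer sizing that applies to this patch and to the preceding TP dump collector alike: every row of the register tables holds IREG_NUM_ELEM u32 values, so the number of register blocks to collect falls out of dividing the table's byte size by the byte size of one row. A standalone sketch of that computation, using a made-up two-row table rather than one of the driver's own:

/* Sketch: deriving the block count from a table whose rows each hold
 * IREG_NUM_ELEM u32 values.  The table contents here are invented
 * purely for illustration.
 */
#include <stdio.h>

#define IREG_NUM_ELEM 4
typedef unsigned int u32;

static const u32 example_array[][IREG_NUM_ELEM] = {
	{0x7e40, 0x7e44, 0x020, 28},
	{0x7e40, 0x7e44, 0x040, 10},
};

int main(void)
{
	/* sizeof(example_array) is the whole table in bytes; one row is
	 * IREG_NUM_ELEM * sizeof(u32) bytes, so the quotient is the row
	 * count.
	 */
	size_t n = sizeof(example_array) / (IREG_NUM_ELEM * sizeof(u32));

	printf("blocks to collect: %zu\n", n);	/* prints 2 */
	return 0;
}

Each collector then multiplies n by sizeof(struct ireg_buf) to size its destination buffer.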
Miller --- drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 74 ++++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 6 + drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 289 ++++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h | 18 ++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 35 +++ 5 files changed, 422 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index a7446fd09366..d7f3392f618f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -104,4 +104,78 @@ static const u32 t5_tp_mib_index_array[9][IREG_NUM_ELEM] = { {0x7e50, 0x7e54, 0x60, 6}, {0x7e50, 0x7e54, 0x68, 4} }; + +static const u32 t5_sge_dbg_index_array[2][IREG_NUM_ELEM] = { + {0x10cc, 0x10d0, 0x0, 16}, + {0x10cc, 0x10d4, 0x0, 16}, +}; + +static const u32 t5_pcie_pdbg_array[][IREG_NUM_ELEM] = { + {0x5a04, 0x5a0c, 0x00, 0x20}, /* t5_pcie_pdbg_regs_00_to_20 */ + {0x5a04, 0x5a0c, 0x21, 0x20}, /* t5_pcie_pdbg_regs_21_to_40 */ + {0x5a04, 0x5a0c, 0x41, 0x10}, /* t5_pcie_pdbg_regs_41_to_50 */ +}; + +static const u32 t5_pcie_cdbg_array[][IREG_NUM_ELEM] = { + {0x5a10, 0x5a18, 0x00, 0x20}, /* t5_pcie_cdbg_regs_00_to_20 */ + {0x5a10, 0x5a18, 0x21, 0x18}, /* t5_pcie_cdbg_regs_21_to_37 */ +}; + +static const u32 t5_pm_rx_array[][IREG_NUM_ELEM] = { + {0x8FD0, 0x8FD4, 0x10000, 0x20}, /* t5_pm_rx_regs_10000_to_10020 */ + {0x8FD0, 0x8FD4, 0x10021, 0x0D}, /* t5_pm_rx_regs_10021_to_1002c */ +}; + +static const u32 t5_pm_tx_array[][IREG_NUM_ELEM] = { + {0x8FF0, 0x8FF4, 0x10000, 0x20}, /* t5_pm_tx_regs_10000_to_10020 */ + {0x8FF0, 0x8FF4, 0x10021, 0x1D}, /* t5_pm_tx_regs_10021_to_1003c */ +}; + +static const u32 t6_ma_ireg_array[][IREG_NUM_ELEM] = { + {0x78f8, 0x78fc, 0xa000, 23}, /* t6_ma_regs_a000_to_a016 */ + {0x78f8, 0x78fc, 0xa400, 30}, /* t6_ma_regs_a400_to_a41e */ + {0x78f8, 0x78fc, 0xa800, 20} /* t6_ma_regs_a800_to_a813 */ +}; + +static const u32 t6_ma_ireg_array2[][IREG_NUM_ELEM] = { + {0x78f8, 0x78fc, 0xe400, 17}, /* t6_ma_regs_e400_to_e600 */ + {0x78f8, 0x78fc, 0xe640, 13} /* t6_ma_regs_e640_to_e7c0 */ +}; + +static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM] = { + {0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */ + {0x7b50, 0x7b54, 0x2080, 0x1d}, /* up_cim_2080_to_20fc */ + {0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */ + {0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */ + {0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */ + {0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */ + {0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */ + {0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */ + {0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */ + {0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */ + {0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */ + {0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */ + {0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */ + +}; + +static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM] = { + {0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */ + {0x7b50, 0x7b54, 0x2080, 0x19}, /* up_cim_2080_to_20ec */ + {0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */ + {0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */ + {0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */ + {0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */ + {0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */ + {0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */ + {0x7b50, 0x7b54, 0x260, 0x2}, 
/* up_cim_260_to_264 */ + {0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */ + {0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */ + {0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */ + {0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */ +}; + +static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = { + {0x51320, 0x51324, 0xa000, 32} /* t6_hma_regs_a000_to_a01f */ +}; #endif /* __CUDBG_ENTITY_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index 53ea447c9103..01d282081b2d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -32,7 +32,13 @@ enum cudbg_dbg_entity_type { CUDBG_EDC0 = 18, CUDBG_EDC1 = 19, CUDBG_TP_INDIRECT = 36, + CUDBG_SGE_INDIRECT = 37, + CUDBG_PCIE_INDIRECT = 50, + CUDBG_PM_INDIRECT = 51, + CUDBG_MA_INDIRECT = 61, + CUDBG_UP_CIM_INDIRECT = 64, CUDBG_MBOX_LOG = 66, + CUDBG_HMA_INDIRECT = 67, CUDBG_MAX_ENTITY = 70, }; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 6efa1de3723c..0149f1ca9f51 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -367,6 +367,258 @@ int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init, return rc; } +int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct ireg_buf *ch_sge_dbg; + int i, rc; + + rc = cudbg_get_buff(dbg_buff, sizeof(*ch_sge_dbg) * 2, &temp_buff); + if (rc) + return rc; + + ch_sge_dbg = (struct ireg_buf *)temp_buff.data; + for (i = 0; i < 2; i++) { + struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio; + u32 *buff = ch_sge_dbg->outbuf; + + sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0]; + sge_pio->ireg_data = t5_sge_dbg_index_array[i][1]; + sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2]; + sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3]; + t4_read_indirect(padap, + sge_pio->ireg_addr, + sge_pio->ireg_data, + buff, + sge_pio->ireg_offset_range, + sge_pio->ireg_local_offset); + ch_sge_dbg++; + } + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + +int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct ireg_buf *ch_pcie; + int i, rc, n; + u32 size; + + n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32)); + size = sizeof(struct ireg_buf) * n * 2; + rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + if (rc) + return rc; + + ch_pcie = (struct ireg_buf *)temp_buff.data; + /* PCIE_PDBG */ + for (i = 0; i < n; i++) { + struct ireg_field *pcie_pio = &ch_pcie->tp_pio; + u32 *buff = ch_pcie->outbuf; + + pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0]; + pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1]; + pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2]; + pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3]; + t4_read_indirect(padap, + pcie_pio->ireg_addr, + pcie_pio->ireg_data, + buff, + pcie_pio->ireg_offset_range, + pcie_pio->ireg_local_offset); + ch_pcie++; + } + + /* PCIE_CDBG */ + n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32)); + for (i = 0; i < n; i++) { + struct ireg_field *pcie_pio = &ch_pcie->tp_pio; + u32 *buff = ch_pcie->outbuf; + + pcie_pio->ireg_addr = 
t5_pcie_cdbg_array[i][0]; + pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1]; + pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2]; + pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3]; + t4_read_indirect(padap, + pcie_pio->ireg_addr, + pcie_pio->ireg_data, + buff, + pcie_pio->ireg_offset_range, + pcie_pio->ireg_local_offset); + ch_pcie++; + } + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + +int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct ireg_buf *ch_pm; + int i, rc, n; + u32 size; + + n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32)); + size = sizeof(struct ireg_buf) * n * 2; + rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + if (rc) + return rc; + + ch_pm = (struct ireg_buf *)temp_buff.data; + /* PM_RX */ + for (i = 0; i < n; i++) { + struct ireg_field *pm_pio = &ch_pm->tp_pio; + u32 *buff = ch_pm->outbuf; + + pm_pio->ireg_addr = t5_pm_rx_array[i][0]; + pm_pio->ireg_data = t5_pm_rx_array[i][1]; + pm_pio->ireg_local_offset = t5_pm_rx_array[i][2]; + pm_pio->ireg_offset_range = t5_pm_rx_array[i][3]; + t4_read_indirect(padap, + pm_pio->ireg_addr, + pm_pio->ireg_data, + buff, + pm_pio->ireg_offset_range, + pm_pio->ireg_local_offset); + ch_pm++; + } + + /* PM_TX */ + n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32)); + for (i = 0; i < n; i++) { + struct ireg_field *pm_pio = &ch_pm->tp_pio; + u32 *buff = ch_pm->outbuf; + + pm_pio->ireg_addr = t5_pm_tx_array[i][0]; + pm_pio->ireg_data = t5_pm_tx_array[i][1]; + pm_pio->ireg_local_offset = t5_pm_tx_array[i][2]; + pm_pio->ireg_offset_range = t5_pm_tx_array[i][3]; + t4_read_indirect(padap, + pm_pio->ireg_addr, + pm_pio->ireg_data, + buff, + pm_pio->ireg_offset_range, + pm_pio->ireg_local_offset); + ch_pm++; + } + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + +int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct ireg_buf *ma_indr; + int i, rc, n; + u32 size, j; + + if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6) + return CUDBG_STATUS_ENTITY_NOT_FOUND; + + n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32)); + size = sizeof(struct ireg_buf) * n * 2; + rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + if (rc) + return rc; + + ma_indr = (struct ireg_buf *)temp_buff.data; + for (i = 0; i < n; i++) { + struct ireg_field *ma_fli = &ma_indr->tp_pio; + u32 *buff = ma_indr->outbuf; + + ma_fli->ireg_addr = t6_ma_ireg_array[i][0]; + ma_fli->ireg_data = t6_ma_ireg_array[i][1]; + ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2]; + ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3]; + t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data, + buff, ma_fli->ireg_offset_range, + ma_fli->ireg_local_offset); + ma_indr++; + } + + n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32)); + for (i = 0; i < n; i++) { + struct ireg_field *ma_fli = &ma_indr->tp_pio; + u32 *buff = ma_indr->outbuf; + + ma_fli->ireg_addr = t6_ma_ireg_array2[i][0]; + ma_fli->ireg_data = t6_ma_ireg_array2[i][1]; + ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2]; + for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) { + t4_read_indirect(padap, ma_fli->ireg_addr, + ma_fli->ireg_data, buff, 1, + ma_fli->ireg_local_offset); + buff++; + 
ma_fli->ireg_local_offset += 0x20; + } + ma_indr++; + } + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + +int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct ireg_buf *up_cim; + int i, rc, n; + u32 size; + + n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32)); + size = sizeof(struct ireg_buf) * n; + rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + if (rc) + return rc; + + up_cim = (struct ireg_buf *)temp_buff.data; + for (i = 0; i < n; i++) { + struct ireg_field *up_cim_reg = &up_cim->tp_pio; + u32 *buff = up_cim->outbuf; + + if (is_t5(padap->params.chip)) { + up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0]; + up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1]; + up_cim_reg->ireg_local_offset = + t5_up_cim_reg_array[i][2]; + up_cim_reg->ireg_offset_range = + t5_up_cim_reg_array[i][3]; + } else if (is_t6(padap->params.chip)) { + up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0]; + up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1]; + up_cim_reg->ireg_local_offset = + t6_up_cim_reg_array[i][2]; + up_cim_reg->ireg_offset_range = + t6_up_cim_reg_array[i][3]; + } + + rc = t4_cim_read(padap, up_cim_reg->ireg_local_offset, + up_cim_reg->ireg_offset_range, buff); + if (rc) { + cudbg_put_buff(&temp_buff, dbg_buff); + return rc; + } + up_cim++; + } + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) @@ -411,3 +663,40 @@ int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init, cudbg_write_and_release_buff(&temp_buff, dbg_buff); return rc; } + +int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct ireg_buf *hma_indr; + int i, rc, n; + u32 size; + + if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6) + return CUDBG_STATUS_ENTITY_NOT_FOUND; + + n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32)); + size = sizeof(struct ireg_buf) * n; + rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + if (rc) + return rc; + + hma_indr = (struct ireg_buf *)temp_buff.data; + for (i = 0; i < n; i++) { + struct ireg_field *hma_fli = &hma_indr->tp_pio; + u32 *buff = hma_indr->outbuf; + + hma_fli->ireg_addr = t6_hma_ireg_array[i][0]; + hma_fli->ireg_data = t6_hma_ireg_array[i][1]; + hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2]; + hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3]; + t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data, + buff, hma_fli->ireg_offset_range, + hma_fli->ireg_local_offset); + hma_indr++; + } + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index 7a927ec71a5f..4838d823750f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -33,9 +33,27 @@ int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init, int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int 
cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i); void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 7dfee6adc51e..1f6d800dd1be 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -30,6 +30,12 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { { CUDBG_DEV_LOG, cudbg_collect_fw_devlog }, { CUDBG_REG_DUMP, cudbg_collect_reg_dump }, { CUDBG_TP_INDIRECT, cudbg_collect_tp_indirect }, + { CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect }, + { CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect }, + { CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect }, + { CUDBG_MA_INDIRECT, cudbg_collect_ma_indirect }, + { CUDBG_UP_CIM_INDIRECT, cudbg_collect_up_cim_indirect }, + { CUDBG_HMA_INDIRECT, cudbg_collect_hma_indirect }, }; static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) @@ -87,9 +93,38 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) n = n / (IREG_NUM_ELEM * sizeof(u32)); len = sizeof(struct ireg_buf) * n; break; + case CUDBG_SGE_INDIRECT: + len = sizeof(struct ireg_buf) * 2; + break; + case CUDBG_PCIE_INDIRECT: + n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32)); + len = sizeof(struct ireg_buf) * n * 2; + break; + case CUDBG_PM_INDIRECT: + n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32)); + len = sizeof(struct ireg_buf) * n * 2; + break; + case CUDBG_MA_INDIRECT: + if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { + n = sizeof(t6_ma_ireg_array) / + (IREG_NUM_ELEM * sizeof(u32)); + len = sizeof(struct ireg_buf) * n * 2; + } + break; + case CUDBG_UP_CIM_INDIRECT: + n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32)); + len = sizeof(struct ireg_buf) * n; + break; case CUDBG_MBOX_LOG: len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size; break; + case CUDBG_HMA_INDIRECT: + if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { + n = sizeof(t6_hma_ireg_array) / + (IREG_NUM_ELEM * sizeof(u32)); + len = sizeof(struct ireg_buf) * n; + } + break; default: break; } -- cgit v1.2.3 From 7c075ce221cf10a7aaef96b002d1d4c5dc715822 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Fri, 13 Oct 2017 18:48:20 +0530 Subject: cxgb4: collect IBQ and OBQ dumps Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
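On how the queue buffers in this patch are sized: every CIM queue entry is four 32-bit words. IBQ depths are fixed, so the IBQ buffer size is exact; OBQ depths vary per queue, so the OBQ path allocates a worst case of six units and, after the read, trims temp_buff.size to the number of words t4_read_cim_obq() actually returned. A sketch of the two computations; the depth values are assumptions for illustration, not quoted from t4_hw.h:

/* Illustrative sizing helpers.  CIM_IBQ_SIZE and CIM_OBQ_SIZE stand in
 * for the real constants, with assumed values.
 */
#include <stddef.h>

typedef unsigned int u32;

#define CIM_IBQ_SIZE 128	/* assumed IBQ depth, in entries */
#define CIM_OBQ_SIZE 128	/* assumed OBQ unit depth, in entries */

static size_t cim_ibq_bytes(void)
{
	return CIM_IBQ_SIZE * 4 * sizeof(u32);		/* exact */
}

static size_t cim_obq_bytes_upper_bound(void)
{
	return 6 * CIM_OBQ_SIZE * 4 * sizeof(u32);	/* worst case */
}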
Miller --- drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 14 ++ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 165 +++++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h | 42 ++++++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 32 +++++ 4 files changed, 253 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index 01d282081b2d..9b8005e67811 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -29,10 +29,24 @@ enum cudbg_dbg_entity_type { CUDBG_REG_DUMP = 1, CUDBG_DEV_LOG = 2, + CUDBG_CIM_IBQ_TP0 = 6, + CUDBG_CIM_IBQ_TP1 = 7, + CUDBG_CIM_IBQ_ULP = 8, + CUDBG_CIM_IBQ_SGE0 = 9, + CUDBG_CIM_IBQ_SGE1 = 10, + CUDBG_CIM_IBQ_NCSI = 11, + CUDBG_CIM_OBQ_ULP0 = 12, + CUDBG_CIM_OBQ_ULP1 = 13, + CUDBG_CIM_OBQ_ULP2 = 14, + CUDBG_CIM_OBQ_ULP3 = 15, + CUDBG_CIM_OBQ_SGE = 16, + CUDBG_CIM_OBQ_NCSI = 17, CUDBG_EDC0 = 18, CUDBG_EDC1 = 19, CUDBG_TP_INDIRECT = 36, CUDBG_SGE_INDIRECT = 37, + CUDBG_CIM_OBQ_RXQ0 = 47, + CUDBG_CIM_OBQ_RXQ1 = 48, CUDBG_PCIE_INDIRECT = 50, CUDBG_PM_INDIRECT = 51, CUDBG_MA_INDIRECT = 61, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 0149f1ca9f51..c451b2e42a6c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -129,6 +129,171 @@ int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init, return rc; } +static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err, int qid) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + int no_of_read_words, rc = 0; + u32 qsize; + + /* collect CIM IBQ */ + qsize = CIM_IBQ_SIZE * 4 * sizeof(u32); + rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff); + if (rc) + return rc; + + /* t4_read_cim_ibq will return no. 
of read words or error */ + no_of_read_words = t4_read_cim_ibq(padap, qid, + (u32 *)((u32 *)temp_buff.data + + temp_buff.offset), qsize); + /* no_of_read_words is less than or equal to 0 means error */ + if (no_of_read_words <= 0) { + if (!no_of_read_words) + rc = CUDBG_SYSTEM_ERROR; + else + rc = no_of_read_words; + cudbg_err->sys_err = rc; + cudbg_put_buff(&temp_buff, dbg_buff); + return rc; + } + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + +int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0); +} + +int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1); +} + +int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2); +} + +int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3); +} + +int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4); +} + +int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5); +} + +static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err, int qid) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + int no_of_read_words, rc = 0; + u32 qsize; + + /* collect CIM OBQ */ + qsize = 6 * CIM_OBQ_SIZE * 4 * sizeof(u32); + rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff); + if (rc) + return rc; + + /* t4_read_cim_obq will return no. 
of read words or error */ + no_of_read_words = t4_read_cim_obq(padap, qid, + (u32 *)((u32 *)temp_buff.data + + temp_buff.offset), qsize); + /* no_of_read_words is less than or equal to 0 means error */ + if (no_of_read_words <= 0) { + if (!no_of_read_words) + rc = CUDBG_SYSTEM_ERROR; + else + rc = no_of_read_words; + cudbg_err->sys_err = rc; + cudbg_put_buff(&temp_buff, dbg_buff); + return rc; + } + temp_buff.size = no_of_read_words * 4; + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + +int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0); +} + +int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1); +} + +int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2); +} + +int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3); +} + +int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4); +} + +int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5); +} + +int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6); +} + +int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7); +} + static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, u8 mem_type, unsigned long tot_len, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index 4838d823750f..c4440c1d0142 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -24,6 +24,42 @@ int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init, int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init, + struct cudbg_buffer 
*dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); @@ -36,6 +72,12 @@ int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init, int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 1f6d800dd1be..9d97080a9d17 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -29,8 +29,22 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { { CUDBG_MBOX_LOG, cudbg_collect_mbox_log }, { CUDBG_DEV_LOG, cudbg_collect_fw_devlog }, { CUDBG_REG_DUMP, cudbg_collect_reg_dump }, + { CUDBG_CIM_IBQ_TP0, cudbg_collect_cim_ibq_tp0 }, + { CUDBG_CIM_IBQ_TP1, cudbg_collect_cim_ibq_tp1 }, + { CUDBG_CIM_IBQ_ULP, cudbg_collect_cim_ibq_ulp }, + { CUDBG_CIM_IBQ_SGE0, cudbg_collect_cim_ibq_sge0 }, + { CUDBG_CIM_IBQ_SGE1, cudbg_collect_cim_ibq_sge1 }, + { CUDBG_CIM_IBQ_NCSI, cudbg_collect_cim_ibq_ncsi }, + { CUDBG_CIM_OBQ_ULP0, cudbg_collect_cim_obq_ulp0 }, + { CUDBG_CIM_OBQ_ULP1, cudbg_collect_cim_obq_ulp1 }, + { CUDBG_CIM_OBQ_ULP2, cudbg_collect_cim_obq_ulp2 }, + { CUDBG_CIM_OBQ_ULP3, cudbg_collect_cim_obq_ulp3 }, + { CUDBG_CIM_OBQ_SGE, cudbg_collect_cim_obq_sge }, + { CUDBG_CIM_OBQ_NCSI, cudbg_collect_cim_obq_ncsi }, { CUDBG_TP_INDIRECT, cudbg_collect_tp_indirect }, { CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect }, + { CUDBG_CIM_OBQ_RXQ0, cudbg_collect_obq_sge_rx_q0 }, + { CUDBG_CIM_OBQ_RXQ1, cudbg_collect_obq_sge_rx_q1 }, { CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect }, { CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect }, { CUDBG_MA_INDIRECT, cudbg_collect_ma_indirect }, @@ -59,6 +73,24 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) case CUDBG_DEV_LOG: len = adap->params.devlog.size; break; + case CUDBG_CIM_IBQ_TP0: + case CUDBG_CIM_IBQ_TP1: + case CUDBG_CIM_IBQ_ULP: + case CUDBG_CIM_IBQ_SGE0: + case CUDBG_CIM_IBQ_SGE1: + case CUDBG_CIM_IBQ_NCSI: + len = CIM_IBQ_SIZE * 4 * sizeof(u32); + break; + case CUDBG_CIM_OBQ_ULP0: + case CUDBG_CIM_OBQ_ULP1: + case CUDBG_CIM_OBQ_ULP2: + case CUDBG_CIM_OBQ_ULP3: + case CUDBG_CIM_OBQ_SGE: + case CUDBG_CIM_OBQ_NCSI: + case CUDBG_CIM_OBQ_RXQ0: + case CUDBG_CIM_OBQ_RXQ1: + len = 6 * CIM_OBQ_SIZE * 4 * sizeof(u32); + break; case CUDBG_EDC0: value = 
t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); if (value & EDRAM0_ENABLE_F) { -- cgit v1.2.3 From 5dc874252faa818426480a7c00fa05738fe05402 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 13 Oct 2017 17:29:00 +0100 Subject: cxgb4: fix missing break in switch and indent return statements The break statement for the Macronix case is missing and will fall through to the Winbond case and re-assign the size setting. Fix this by adding the missing break statement. Also correctly indent the return statements. Detected by CoverityScan, CID#1458020 ("Missing break in switch") Fixes: 96ac18f14a5a ("cxgb4: Add support for new flash parts") Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 8fa40f9e75c4..006414758f65 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -8404,7 +8404,7 @@ static int t4_get_flash_params(struct adapter *adap) default: dev_err(adap->pdev_dev, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n", flashid, density); - return -EINVAL; + return -EINVAL; } break; } @@ -8423,8 +8423,9 @@ static int t4_get_flash_params(struct adapter *adap) default: dev_err(adap->pdev_dev, "Macronix Flash Part has bad size, ID = %#x, Density code = %#x\n", flashid, density); - return -EINVAL; + return -EINVAL; } + break; } case 0xef: { /* Winbond */ /* This Density -> Size decoding table is taken from Winbond @@ -8441,7 +8442,7 @@ static int t4_get_flash_params(struct adapter *adap) default: dev_err(adap->pdev_dev, "Winbond Flash Part has bad size, ID = %#x, Density code = %#x\n", flashid, density); - return -EINVAL; + return -EINVAL; } break; } -- cgit v1.2.3 From 2a600d97cbb2a9311e6b42547d37e0eca9b9d6d6 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Sat, 14 Oct 2017 17:04:40 +0300 Subject: pch_gbe: Switch to new PCI IRQ allocation API This removes custom flag handling. Signed-off-by: Andy Shevchenko Signed-off-by: David S. 
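The conversion below replaces the open-coded pci_enable_msi() call and the have_msi flag juggling with the managed vector API. A minimal sketch of the resulting flow, with a placeholder handler and device name; only the PCI core calls are the real API:

/* Sketch of the pci_alloc_irq_vectors() flow adopted below.
 * PCI_IRQ_ALL_TYPES lets the PCI core fall back from MSI-X to MSI to
 * legacy INTx; pci_irq_vector() then maps vector 0 to the Linux IRQ
 * number that request_irq() expects.
 */
#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t demo_intr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_request_irq(struct pci_dev *pdev, void *cookie)
{
	int irq, err;

	err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (err < 0)
		return err;

	irq = pci_irq_vector(pdev, 0);
	err = request_irq(irq, demo_intr, IRQF_SHARED, "demo", cookie);
	if (err)
		pci_free_irq_vectors(pdev);	/* undo on failure */
	return err;
}

static void demo_free_irq(struct pci_dev *pdev, void *cookie)
{
	free_irq(pci_irq_vector(pdev, 0), cookie);
	pci_free_irq_vectors(pdev);		/* teardown mirrors setup */
}

Storing the result of pci_irq_vector() once, as the patch does in adapter->irq, avoids repeating the lookup in the disable and synchronize paths.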
Miller --- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 3 +- .../net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 42 +++++++++------------- 2 files changed, 17 insertions(+), 28 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h index 8d710a3b4db0..697e29dd4bd3 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h @@ -613,7 +613,6 @@ struct pch_gbe_privdata { * @rx_ring: Pointer of Rx descriptor ring structure * @rx_buffer_len: Receive buffer length * @tx_queue_len: Transmit queue length - * @have_msi: PCI MSI mode flag * @pch_gbe_privdata: PCI Device ID driver_data */ @@ -623,6 +622,7 @@ struct pch_gbe_adapter { atomic_t irq_sem; struct net_device *netdev; struct pci_dev *pdev; + int irq; struct net_device *polling_netdev; struct napi_struct napi; struct pch_gbe_hw hw; @@ -637,7 +637,6 @@ struct pch_gbe_adapter { struct pch_gbe_rx_ring *rx_ring; unsigned long rx_buffer_len; unsigned long tx_queue_len; - bool have_msi; bool rx_stop_flag; int hwts_tx_en; int hwts_rx_en; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index 5ae9681a2da7..457ee80307ea 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -781,11 +781,8 @@ static void pch_gbe_free_irq(struct pch_gbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; - free_irq(adapter->pdev->irq, netdev); - if (adapter->have_msi) { - pci_disable_msi(adapter->pdev); - netdev_dbg(netdev, "call pci_disable_msi\n"); - } + free_irq(adapter->irq, netdev); + pci_free_irq_vectors(adapter->pdev); } /** @@ -799,7 +796,7 @@ static void pch_gbe_irq_disable(struct pch_gbe_adapter *adapter) atomic_inc(&adapter->irq_sem); iowrite32(0, &hw->reg->INT_EN); ioread32(&hw->reg->INT_ST); - synchronize_irq(adapter->pdev->irq); + synchronize_irq(adapter->irq); netdev_dbg(adapter->netdev, "INT_EN reg : 0x%08x\n", ioread32(&hw->reg->INT_EN)); @@ -1903,30 +1900,23 @@ static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter) { struct net_device *netdev = adapter->netdev; int err; - int flags; - flags = IRQF_SHARED; - adapter->have_msi = false; - err = pci_enable_msi(adapter->pdev); - netdev_dbg(netdev, "call pci_enable_msi\n"); - if (err) { - netdev_dbg(netdev, "call pci_enable_msi - Error: %d\n", err); - } else { - flags = 0; - adapter->have_msi = true; - } - err = request_irq(adapter->pdev->irq, &pch_gbe_intr, - flags, netdev->name, netdev); + err = pci_alloc_irq_vectors(adapter->pdev, 1, 1, PCI_IRQ_ALL_TYPES); + if (err < 0) + return err; + + adapter->irq = pci_irq_vector(adapter->pdev, 0); + + err = request_irq(adapter->irq, &pch_gbe_intr, IRQF_SHARED, + netdev->name, netdev); if (err) netdev_err(netdev, "Unable to allocate interrupt Error: %d\n", err); - netdev_dbg(netdev, - "adapter->have_msi : %d flags : 0x%04x return : 0x%04x\n", - adapter->have_msi, flags, err); + netdev_dbg(netdev, "have_msi : %d return : 0x%04x\n", + pci_dev_msi_enabled(adapter->pdev), err); return err; } - /** * pch_gbe_up - Up GbE network device * @adapter: Board private structure @@ -2399,9 +2389,9 @@ static void pch_gbe_netpoll(struct net_device *netdev) { struct pch_gbe_adapter *adapter = netdev_priv(netdev); - disable_irq(adapter->pdev->irq); - pch_gbe_intr(adapter->pdev->irq, netdev); - enable_irq(adapter->pdev->irq); + disable_irq(adapter->irq); + 
pch_gbe_intr(adapter->irq, netdev); + enable_irq(adapter->irq); } #endif -- cgit v1.2.3 From 00fb3a7c7c14dd28feb0372a8458267e058add66 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 16 Oct 2017 13:32:36 +0200 Subject: net: systemport: add NET_DSA dependency The notifier causes a link error when NET_DSA is a loadable module: drivers/net/ethernet/broadcom/bcmsysport.o: In function `bcm_sysport_remove': bcmsysport.c:(.text+0x1582): undefined reference to `unregister_dsa_notifier' drivers/net/ethernet/broadcom/bcmsysport.o: In function `bcm_sysport_probe': bcmsysport.c:(.text+0x278d): undefined reference to `register_dsa_notifier' This adds a dependency that forces the systemport driver to be a loadable module as well when that happens, but otherwise allows it to be built normally when DSA is either built-in or completely disabled. Fixes: d156576362c0 ("net: systemport: Establish lower/upper queue mapping") Signed-off-by: Arnd Bergmann Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 67134ece1107..af75156919ed 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -184,6 +184,7 @@ config BGMAC_PLATFORM config SYSTEMPORT tristate "Broadcom SYSTEMPORT internal MAC support" depends on OF + depends on NET_DSA || !NET_DSA select MII select PHYLIB select FIXED_PHY -- cgit v1.2.3 From c30f5d012edf755959c44d71757fbf4648ad75a8 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 16 Oct 2017 16:26:35 +0200 Subject: mlxsw: spectrum: Move netdevice NB to struct mlxsw_sp So far, all netdevice notifications that the driver cared about were related to its own ports, and mlxsw_sp could be retrieved from the netdevice's private data. For IP-in-IP offloading however, the driver cares about events on foreign netdevices, and getting at mlxsw_sp or router data structures from the handler is inconvenient. Therefore move the netdevice notifier blocks from global scope to struct mlxsw_sp to allow retrieval from the notifier block pointer itself. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 23 ++++++++++++++++------- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 1 + 2 files changed, 17 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 321988ac57cc..83f9c2564f61 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3667,6 +3667,9 @@ static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core) return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl); } +static int mlxsw_sp_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr); + static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info) { @@ -3736,6 +3739,16 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_router_init; } + /* Initialize netdevice notifier after router is initialized, so that + * the event handler can use router structures.
+ */ + mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event; + err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n"); + goto err_netdev_notifier; + } + err = mlxsw_sp_span_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n"); @@ -3769,6 +3782,8 @@ err_dpipe_init: err_acl_init: mlxsw_sp_span_fini(mlxsw_sp); err_span_init: + unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); +err_netdev_notifier: mlxsw_sp_router_fini(mlxsw_sp); err_router_init: mlxsw_sp_afa_fini(mlxsw_sp); @@ -3795,6 +3810,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_dpipe_fini(mlxsw_sp); mlxsw_sp_acl_fini(mlxsw_sp); mlxsw_sp_span_fini(mlxsw_sp); + unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); mlxsw_sp_router_fini(mlxsw_sp); mlxsw_sp_afa_fini(mlxsw_sp); mlxsw_sp_counter_pool_fini(mlxsw_sp); @@ -4501,10 +4517,6 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused, return notifier_from_errno(err); } -static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = { - .notifier_call = mlxsw_sp_netdevice_event, -}; - static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = { .notifier_call = mlxsw_sp_inetaddr_event, .priority = 10, /* Must be called before FIB notifier block */ @@ -4532,7 +4544,6 @@ static int __init mlxsw_sp_module_init(void) { int err; - register_netdevice_notifier(&mlxsw_sp_netdevice_nb); register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); register_netevent_notifier(&mlxsw_sp_router_netevent_nb); @@ -4553,7 +4564,6 @@ err_core_driver_register: unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb); unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); - unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); return err; } @@ -4564,7 +4574,6 @@ static void __exit mlxsw_sp_module_exit(void) unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb); unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); - unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); } module_init(mlxsw_sp_module_init); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 8e45183dc9bb..e1a0157c0b94 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -161,6 +161,7 @@ struct mlxsw_sp { struct { DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE); } kvdl; + struct notifier_block netdevice_nb; struct mlxsw_sp_counter_pool *counter_pool; struct { -- cgit v1.2.3 From 6698c168bf48cb85505d7f6e77f0091a83aa497e Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 16 Oct 2017 16:26:36 +0200 Subject: mlxsw: spectrum_router: Move mlxsw_sp_netdev_ipip_type() Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
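On the notifier relocation in the patch just above: with the notifier_block embedded in struct mlxsw_sp, the handler recovers its driver state with container_of(). The pattern in isolation, with a reduced example structure:

/* Sketch of recovering driver state from an embedded notifier_block.
 * struct demo_driver is a stand-in; the arithmetic is done entirely
 * by container_of() from the member's offset.
 */
#include <linux/kernel.h>
#include <linux/notifier.h>

struct demo_driver {
	int some_state;
	struct notifier_block netdevice_nb;
};

static int demo_netdevice_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	/* nb points at the member inside struct demo_driver; step back
	 * by the member offset to reach the containing object.
	 */
	struct demo_driver *drv = container_of(nb, struct demo_driver,
					       netdevice_nb);

	return drv->some_state ? NOTIFY_OK : NOTIFY_DONE;
}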
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 38 +++++++++++----------- 1 file changed, 19 insertions(+), 19 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 6a356f4b99a3..c5e574bf3e08 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1295,6 +1295,25 @@ mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, return NULL; } +static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *dev, + enum mlxsw_sp_ipip_type *p_type) +{ + struct mlxsw_sp_router *router = mlxsw_sp->router; + const struct mlxsw_sp_ipip_ops *ipip_ops; + enum mlxsw_sp_ipip_type ipipt; + + for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) { + ipip_ops = router->ipip_ops_arr[ipipt]; + if (dev->type == ipip_ops->dev_type) { + if (p_type) + *p_type = ipipt; + return true; + } + } + return false; +} + struct mlxsw_sp_neigh_key { struct neighbour *n; }; @@ -2785,25 +2804,6 @@ static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp, neigh_release(n); } -static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp, - const struct net_device *dev, - enum mlxsw_sp_ipip_type *p_type) -{ - struct mlxsw_sp_router *router = mlxsw_sp->router; - const struct mlxsw_sp_ipip_ops *ipip_ops; - enum mlxsw_sp_ipip_type ipipt; - - for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) { - ipip_ops = router->ipip_ops_arr[ipipt]; - if (dev->type == ipip_ops->dev_type) { - if (p_type) - *p_type = ipipt; - return true; - } - } - return false; -} - static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp, enum mlxsw_sp_ipip_type ipipt, struct mlxsw_sp_nexthop *nh, -- cgit v1.2.3 From 0063587d358733008423c80302cb7b077be8e237 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 16 Oct 2017 16:26:37 +0200 Subject: mlxsw: spectrum: Support decap-only IP-in-IP tunnels Current code for offloading IP-in-IP tunneling assumes that there is no decap without encap. But that's never true for IPv6 overlays, and is not true for IPv4 ones either, if net.ipv4.conf.*.rp_filter is unset. To support decap-only tunnels, an IPIP entry is now created as soon as an offloadable tunneling device is created. When that netdevice is up'd, a decap route is looked up and possibly offloaded. Thus decap is not handled implicitly as part of mlxsw_sp_ipip_entry_get() call anymore, but needs to be done explicitly after the get, if desired. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
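The core of the diff below is an event-to-action mapping for tunnel netdevices: entries are created and destroyed eagerly on register/unregister, and the decap route is promoted or demoted on up/down. A reduced sketch, in which the four helpers are placeholder stubs for the mlxsw_sp_netdevice_ipip_*_event() functions the patch adds:

/* Sketch of the netdevice-event dispatch for tunnel devices.  Only
 * the event-to-action mapping is shown; the helpers are stubs.
 */
#include <linux/netdevice.h>

static int create_entry(struct net_device *dev)   { return 0; }
static void destroy_entry(struct net_device *dev) { }
static int promote_decap(struct net_device *dev)  { return 0; }
static void demote_decap(struct net_device *dev)  { }

static int demo_ipip_event(struct net_device *dev, unsigned long event)
{
	switch (event) {
	case NETDEV_REGISTER:	/* create the IPIP entry eagerly */
		return create_entry(dev);
	case NETDEV_UNREGISTER:	/* tear the entry down again */
		destroy_entry(dev);
		return 0;
	case NETDEV_UP:		/* offload the decap route, if any */
		return promote_decap(dev);
	case NETDEV_DOWN:	/* stop offloading the decap route */
		demote_decap(dev);
		return 0;
	}
	return 0;
}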
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 8 +- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 6 ++ .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 105 +++++++++++++++++++-- 3 files changed, 109 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 83f9c2564f61..c3ae650fbe5e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -4497,13 +4497,17 @@ static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr) return netif_is_l3_master(info->upper_dev); } -static int mlxsw_sp_netdevice_event(struct notifier_block *unused, +static int mlxsw_sp_netdevice_event(struct notifier_block *nb, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct mlxsw_sp *mlxsw_sp; int err = 0; - if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU) + mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); + if (mlxsw_sp_netdev_is_ipip(mlxsw_sp, dev)) + err = mlxsw_sp_netdevice_ipip_event(mlxsw_sp, dev, event); + else if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU) err = mlxsw_sp_netdevice_router_port_event(dev); else if (mlxsw_sp_is_vrf_event(event, ptr)) err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index e1a0157c0b94..a4f21afd7f00 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -395,6 +395,12 @@ int mlxsw_sp_inet6addr_event(struct notifier_block *unused, unsigned long event, void *ptr); int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, struct netdev_notifier_changeupper_info *info); +bool mlxsw_sp_netdev_is_ipip(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *dev); +int +mlxsw_sp_netdevice_ipip_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *l3_dev, + unsigned long event); void mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index c5e574bf3e08..db834220a2fe 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1206,7 +1206,6 @@ mlxsw_sp_ipip_entry_get(struct mlxsw_sp *mlxsw_sp, { u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev); struct mlxsw_sp_router *router = mlxsw_sp->router; - struct mlxsw_sp_fib_entry *decap_fib_entry; struct mlxsw_sp_ipip_entry *ipip_entry; enum mlxsw_sp_l3proto ul_proto; union mlxsw_sp_l3addr saddr; @@ -1231,11 +1230,6 @@ mlxsw_sp_ipip_entry_get(struct mlxsw_sp *mlxsw_sp, if (IS_ERR(ipip_entry)) return ipip_entry; - decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry); - if (decap_fib_entry) - mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry, - decap_fib_entry); - list_add_tail(&ipip_entry->ipip_list_node, &mlxsw_sp->router->ipip_list); @@ -1250,8 +1244,6 @@ mlxsw_sp_ipip_entry_put(struct mlxsw_sp *mlxsw_sp, { if (--ipip_entry->ref_count == 0) { list_del(&ipip_entry->ipip_list_node); - if (ipip_entry->decap_fib_entry) - mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry); mlxsw_sp_ipip_entry_destroy(ipip_entry); } } @@ -1314,6 +1306,103 @@ static bool 
mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp, return false; } +bool mlxsw_sp_netdev_is_ipip(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *dev) +{ + return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL); +} + +static struct mlxsw_sp_ipip_entry * +mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp, + const struct net_device *ol_dev) +{ + struct mlxsw_sp_ipip_entry *ipip_entry; + + list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list, + ipip_list_node) + if (ipip_entry->ol_dev == ol_dev) + return ipip_entry; + + return NULL; +} + +static int mlxsw_sp_netdevice_ipip_reg_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *ol_dev) +{ + struct mlxsw_sp_router *router = mlxsw_sp->router; + struct mlxsw_sp_ipip_entry *ipip_entry; + enum mlxsw_sp_ipip_type ipipt; + + mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt); + if (router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, ol_dev, + MLXSW_SP_L3_PROTO_IPV4) || + router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, ol_dev, + MLXSW_SP_L3_PROTO_IPV6)) { + ipip_entry = mlxsw_sp_ipip_entry_get(mlxsw_sp, ipipt, ol_dev); + if (IS_ERR(ipip_entry)) + return PTR_ERR(ipip_entry); + } + + return 0; +} + +static void mlxsw_sp_netdevice_ipip_unreg_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *ol_dev) +{ + struct mlxsw_sp_ipip_entry *ipip_entry; + + ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev); + if (ipip_entry) + mlxsw_sp_ipip_entry_put(mlxsw_sp, ipip_entry); +} + +static int mlxsw_sp_netdevice_ipip_up_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *ol_dev) +{ + struct mlxsw_sp_fib_entry *decap_fib_entry; + struct mlxsw_sp_ipip_entry *ipip_entry; + + ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev); + if (ipip_entry) { + decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, + ipip_entry); + if (decap_fib_entry) + mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry, + decap_fib_entry); + } + + return 0; +} + +static void mlxsw_sp_netdevice_ipip_down_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *ol_dev) +{ + struct mlxsw_sp_ipip_entry *ipip_entry; + + ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev); + if (ipip_entry && ipip_entry->decap_fib_entry) + mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry); +} + +int mlxsw_sp_netdevice_ipip_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *ol_dev, + unsigned long event) +{ + switch (event) { + case NETDEV_REGISTER: + return mlxsw_sp_netdevice_ipip_reg_event(mlxsw_sp, ol_dev); + case NETDEV_UNREGISTER: + mlxsw_sp_netdevice_ipip_unreg_event(mlxsw_sp, ol_dev); + return 0; + case NETDEV_UP: + return mlxsw_sp_netdevice_ipip_up_event(mlxsw_sp, ol_dev); + case NETDEV_DOWN: + mlxsw_sp_netdevice_ipip_down_event(mlxsw_sp, ol_dev); + return 0; + } + return 0; +} + struct mlxsw_sp_neigh_key { struct neighbour *n; }; -- cgit v1.2.3 From f63ce4e54a424d9f99bad2ba099c972a07eab517 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 16 Oct 2017 16:26:38 +0200 Subject: mlxsw: spectrum: Support IPIP overlay VRF migration IPIP entries are created as soon as an offloadable device is created. That means that when such a device is later moved to a different VRF, the loopback device that backs the tunnel is wrong. Thus when an offloadable encapsulating netdevice moves from one VRF to another, make sure that the loopback is updated as necessary. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
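The subtle part of the change below is ordering: RIFs cannot be edited in place, so the backing loopback must be destroyed and recreated, and the decap route has to be demoted first so that no RALUE register references a RIF mid-teardown. The sequence, reduced to a sketch in which every name is a placeholder for the corresponding mlxsw_sp helper:

/* Sketch of the recreate-under-demotion ordering; the point is the
 * sequence of operations, not the types.
 */
#include <linux/err.h>

struct demo_lb { int rif_index; };
struct demo_entry { struct demo_lb *ol_lb; };

static void demote_decap(struct demo_entry *e) { }
static void promote_decap(struct demo_entry *e) { }
static void destroy_loopback(struct demo_lb *lb) { }
static struct demo_lb *create_loopback(struct demo_entry *e)
{
	return ERR_PTR(-ENOMEM);	/* stub */
}

static int demo_migrate_vrf(struct demo_entry *entry, bool dev_is_up)
{
	struct demo_lb *new_lb;

	demote_decap(entry);		/* drop the RALUE user first */

	new_lb = create_loopback(entry); /* build the RIF in the new VRF */
	if (IS_ERR(new_lb))
		return PTR_ERR(new_lb);	/* old RIF is still intact */

	destroy_loopback(entry->ol_lb);	/* now safe to drop the old RIF */
	entry->ol_lb = new_lb;

	if (dev_is_up)
		promote_decap(entry);	/* re-offload on the new RIF */
	return 0;
}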
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2 +- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 3 +- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 47 +++++++++++++++++++++- 3 files changed, 49 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index c3ae650fbe5e..e1e11c726c16 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -4506,7 +4506,7 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb, mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); if (mlxsw_sp_netdev_is_ipip(mlxsw_sp, dev)) - err = mlxsw_sp_netdevice_ipip_event(mlxsw_sp, dev, event); + err = mlxsw_sp_netdevice_ipip_event(mlxsw_sp, dev, event, ptr); else if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU) err = mlxsw_sp_netdevice_router_port_event(dev); else if (mlxsw_sp_is_vrf_event(event, ptr)) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index a4f21afd7f00..28feb745a38a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -400,7 +400,8 @@ bool mlxsw_sp_netdev_is_ipip(const struct mlxsw_sp *mlxsw_sp, int mlxsw_sp_netdevice_ipip_event(struct mlxsw_sp *mlxsw_sp, struct net_device *l3_dev, - unsigned long event); + unsigned long event, + struct netdev_notifier_changeupper_info *info); void mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index db834220a2fe..082cf00eaadb 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1384,9 +1384,49 @@ static void mlxsw_sp_netdevice_ipip_down_event(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry); } +static int mlxsw_sp_netdevice_ipip_vrf_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *ol_dev) +{ + struct mlxsw_sp_fib_entry *decap_fib_entry; + struct mlxsw_sp_ipip_entry *ipip_entry; + struct mlxsw_sp_rif_ipip_lb *lb_rif; + + ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev); + if (!ipip_entry) + return 0; + + /* When a tunneling device is moved to a different VRF, we need to + * update the backing loopback. Since RIFs can't be edited, we need to + * destroy and recreate it. That might create a window of opportunity + * where RALUE and RATR registers end up referencing a RIF that's + * already gone. RATRs are handled by the RIF destroy, and to take care + * of RALUE, demote the decap route back. 
+ */ + if (ipip_entry->decap_fib_entry) + mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry); + + lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipip_entry->ipipt, + ol_dev); + if (IS_ERR(lb_rif)) + return PTR_ERR(lb_rif); + mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common); + ipip_entry->ol_lb = lb_rif; + + if (ol_dev->flags & IFF_UP) { + decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, + ipip_entry); + if (decap_fib_entry) + mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry, + decap_fib_entry); + } + + return 0; +} + int mlxsw_sp_netdevice_ipip_event(struct mlxsw_sp *mlxsw_sp, struct net_device *ol_dev, - unsigned long event) + unsigned long event, + struct netdev_notifier_changeupper_info *info) { switch (event) { case NETDEV_REGISTER: @@ -1399,6 +1439,11 @@ int mlxsw_sp_netdevice_ipip_event(struct mlxsw_sp *mlxsw_sp, case NETDEV_DOWN: mlxsw_sp_netdevice_ipip_down_event(mlxsw_sp, ol_dev); return 0; + case NETDEV_CHANGEUPPER: + if (netif_is_l3_master(info->upper_dev)) + return mlxsw_sp_netdevice_ipip_vrf_event(mlxsw_sp, + ol_dev); + return 0; } return 0; } -- cgit v1.2.3 From 4cccb737d2fd0d78b939a97b5ac1831b9a27d4c0 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 16 Oct 2017 16:26:39 +0200 Subject: mlxsw: spectrum: Drop refcounting of IPIP entries Formerly, IPIP entries were created lazily by next hops that referenced an offloadable IP-in-IP netdevice. However now that they are created eagerly as a reaction to events on such netdevices, the reference counting is useless. Hence drop it. The routes whose next hops reference an offloaded IP-in-IP netdevice actually linger around a bit after their device is unregistered. However, mlxsw_sp_ipip_entry_destroy() also destroys the backing loopback, and mlxsw_sp_rif_destroy() transitively (via mlxsw_sp_nexthop_rif_gone_sync()) calls mlxsw_sp_nexthop_ipip_fini(), which unlinks the IPIP entry from a next hop. Thus no dangling pointers are left behind for the brief window after netdevice is gone, but routes not yet. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_ipip.h | 1 - .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 49 +++++++++------------- 2 files changed, 20 insertions(+), 30 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h index 1c2db831d83b..6fb49129ce87 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h @@ -47,7 +47,6 @@ struct mlxsw_sp_ipip_entry { enum mlxsw_sp_ipip_type ipipt; struct net_device *ol_dev; /* Overlay. */ struct mlxsw_sp_rif_ipip_lb *ol_lb; - unsigned int ref_count; /* Number of next hops using the tunnel. 
*/ struct mlxsw_sp_fib_entry *decap_fib_entry; struct list_head ipip_list_node; }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 082cf00eaadb..3330120f2f8e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1002,9 +1002,8 @@ err_ol_ipip_lb_create: } static void -mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp_ipip_entry *ipip_entry) +mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry) { - WARN_ON(ipip_entry->ref_count > 0); mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common); kfree(ipip_entry); } @@ -1200,9 +1199,9 @@ mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp, } static struct mlxsw_sp_ipip_entry * -mlxsw_sp_ipip_entry_get(struct mlxsw_sp *mlxsw_sp, - enum mlxsw_sp_ipip_type ipipt, - struct net_device *ol_dev) +mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_ipip_type ipipt, + struct net_device *ol_dev) { u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev); struct mlxsw_sp_router *router = mlxsw_sp->router; @@ -1210,15 +1209,12 @@ mlxsw_sp_ipip_entry_get(struct mlxsw_sp *mlxsw_sp, enum mlxsw_sp_l3proto ul_proto; union mlxsw_sp_l3addr saddr; + /* The configuration where several tunnels have the same local address + * in the same underlay table needs special treatment in the HW. That is + * currently not implemented in the driver. + */ list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list, ipip_list_node) { - if (ipip_entry->ol_dev == ol_dev) - goto inc_ref_count; - - /* The configuration where several tunnels have the same local - * address in the same underlay table needs special treatment in - * the HW. That is currently not implemented in the driver. 
- */ ul_proto = router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto; saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev); if (mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr, @@ -1233,19 +1229,15 @@ mlxsw_sp_ipip_entry_get(struct mlxsw_sp *mlxsw_sp, list_add_tail(&ipip_entry->ipip_list_node, &mlxsw_sp->router->ipip_list); -inc_ref_count: - ++ipip_entry->ref_count; return ipip_entry; } static void -mlxsw_sp_ipip_entry_put(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_ipip_entry *ipip_entry) +mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry) { - if (--ipip_entry->ref_count == 0) { - list_del(&ipip_entry->ipip_list_node); - mlxsw_sp_ipip_entry_destroy(ipip_entry); - } + list_del(&ipip_entry->ipip_list_node); + mlxsw_sp_ipip_entry_dealloc(ipip_entry); } static bool @@ -1338,7 +1330,8 @@ static int mlxsw_sp_netdevice_ipip_reg_event(struct mlxsw_sp *mlxsw_sp, MLXSW_SP_L3_PROTO_IPV4) || router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6)) { - ipip_entry = mlxsw_sp_ipip_entry_get(mlxsw_sp, ipipt, ol_dev); + ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt, + ol_dev); if (IS_ERR(ipip_entry)) return PTR_ERR(ipip_entry); } @@ -1353,7 +1346,7 @@ static void mlxsw_sp_netdevice_ipip_unreg_event(struct mlxsw_sp *mlxsw_sp, ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev); if (ipip_entry) - mlxsw_sp_ipip_entry_put(mlxsw_sp, ipip_entry); + mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry); } static int mlxsw_sp_netdevice_ipip_up_event(struct mlxsw_sp *mlxsw_sp, @@ -2939,16 +2932,15 @@ static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp, } static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp, - enum mlxsw_sp_ipip_type ipipt, struct mlxsw_sp_nexthop *nh, struct net_device *ol_dev) { if (!nh->nh_grp->gateway || nh->ipip_entry) return 0; - nh->ipip_entry = mlxsw_sp_ipip_entry_get(mlxsw_sp, ipipt, ol_dev); - if (IS_ERR(nh->ipip_entry)) - return PTR_ERR(nh->ipip_entry); + nh->ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev); + if (!nh->ipip_entry) + return -ENOENT; __mlxsw_sp_nexthop_neigh_update(nh, false); return 0; @@ -2963,7 +2955,6 @@ static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp, return; __mlxsw_sp_nexthop_neigh_update(nh, true); - mlxsw_sp_ipip_entry_put(mlxsw_sp, ipip_entry); nh->ipip_entry = NULL; } @@ -3007,7 +2998,7 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp, router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev, MLXSW_SP_L3_PROTO_IPV4)) { nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; - err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev); + err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev); if (err) return err; mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common); @@ -4269,7 +4260,7 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp, router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev, MLXSW_SP_L3_PROTO_IPV6)) { nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; - err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev); + err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev); if (err) return err; mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common); -- cgit v1.2.3 From 1c142e1c639bcbcb5b5db210d8fa4d2ecef6037e Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Thu, 5 Oct 2017 14:53:31 -0700 Subject: i40e: rename 'cmd' variables in ethtool interface After the switch to the new ethtool API, ethtool passes us ethtool_ksettings structs instead of ethtool_command structs, however we were 
still referring to them as 'cmd' variables. This renames them to 'ks' variables which makes the code easier to understand. Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 145 +++++++++++++------------ 1 file changed, 74 insertions(+), 71 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 72d5f2cdf419..06514a76ff91 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -378,12 +378,12 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, /** * i40e_get_settings_link_up - Get the Link settings for when link is up * @hw: hw structure - * @ecmd: ethtool command to fill in + * @ks: ethtool ksettings to fill in * @netdev: network interface device structure - * + * @pf: pointer to physical function struct **/ static void i40e_get_settings_link_up(struct i40e_hw *hw, - struct ethtool_link_ksettings *cmd, + struct ethtool_link_ksettings *ks, struct net_device *netdev, struct i40e_pf *pf) { @@ -394,9 +394,9 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, u32 supported, advertising; ethtool_convert_link_mode_to_legacy_u32(&supported, - cmd->link_modes.supported); + ks->link_modes.supported); ethtool_convert_link_mode_to_legacy_u32(&advertising, - cmd->link_modes.advertising); + ks->link_modes.advertising); /* Initialize supported and advertised settings based on phy settings */ switch (hw_link_info->phy_type) { @@ -528,48 +528,49 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, /* Set speed and duplex */ switch (link_speed) { case I40E_LINK_SPEED_40GB: - cmd->base.speed = SPEED_40000; + ks->base.speed = SPEED_40000; break; case I40E_LINK_SPEED_25GB: #ifdef SPEED_25000 - cmd->base.speed = SPEED_25000; + ks->base.speed = SPEED_25000; #else netdev_info(netdev, "Speed is 25G, display not supported by this version of ethtool.\n"); #endif break; case I40E_LINK_SPEED_20GB: - cmd->base.speed = SPEED_20000; + ks->base.speed = SPEED_20000; break; case I40E_LINK_SPEED_10GB: - cmd->base.speed = SPEED_10000; + ks->base.speed = SPEED_10000; break; case I40E_LINK_SPEED_1GB: - cmd->base.speed = SPEED_1000; + ks->base.speed = SPEED_1000; break; case I40E_LINK_SPEED_100MB: - cmd->base.speed = SPEED_100; + ks->base.speed = SPEED_100; break; default: break; } - cmd->base.duplex = DUPLEX_FULL; + ks->base.duplex = DUPLEX_FULL; - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + ethtool_convert_legacy_u32_to_link_mode(ks->link_modes.supported, supported); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + ethtool_convert_legacy_u32_to_link_mode(ks->link_modes.advertising, advertising); } /** * i40e_get_settings_link_down - Get the Link settings for when link is down * @hw: hw structure - * @ecmd: ethtool command to fill in + * @ks: ethtool ksettings to fill in + * @pf: pointer to physical function struct * * Reports link settings that can be determined when link is down **/ static void i40e_get_settings_link_down(struct i40e_hw *hw, - struct ethtool_link_ksettings *cmd, + struct ethtool_link_ksettings *ks, struct i40e_pf *pf) { u32 supported, advertising; @@ -579,25 +580,25 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw, */ i40e_phy_type_to_ethtool(pf, &supported, &advertising); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + 
ethtool_convert_legacy_u32_to_link_mode(ks->link_modes.supported, supported); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + ethtool_convert_legacy_u32_to_link_mode(ks->link_modes.advertising, advertising); /* With no link speed and duplex are unknown */ - cmd->base.speed = SPEED_UNKNOWN; - cmd->base.duplex = DUPLEX_UNKNOWN; + ks->base.speed = SPEED_UNKNOWN; + ks->base.duplex = DUPLEX_UNKNOWN; } /** - * i40e_get_settings - Get Link Speed and Duplex settings + * i40e_get_link_ksettings - Get Link Speed and Duplex settings * @netdev: network interface device structure - * @ecmd: ethtool command + * @ks: ethtool ksettings * * Reports speed/duplex settings based on media_type **/ static int i40e_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *cmd) + struct ethtool_link_ksettings *ks) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; @@ -607,74 +608,74 @@ static int i40e_get_link_ksettings(struct net_device *netdev, u32 advertising; if (link_up) - i40e_get_settings_link_up(hw, cmd, netdev, pf); + i40e_get_settings_link_up(hw, ks, netdev, pf); else - i40e_get_settings_link_down(hw, cmd, pf); + i40e_get_settings_link_down(hw, ks, pf); /* Now set the settings that don't rely on link being up/down */ /* Set autoneg settings */ - cmd->base.autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? - AUTONEG_ENABLE : AUTONEG_DISABLE); + ks->base.autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? + AUTONEG_ENABLE : AUTONEG_DISABLE); switch (hw->phy.media_type) { case I40E_MEDIA_TYPE_BACKPLANE: - ethtool_link_ksettings_add_link_mode(cmd, supported, + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); - ethtool_link_ksettings_add_link_mode(cmd, supported, + ethtool_link_ksettings_add_link_mode(ks, supported, Backplane); - ethtool_link_ksettings_add_link_mode(cmd, advertising, + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); - ethtool_link_ksettings_add_link_mode(cmd, advertising, + ethtool_link_ksettings_add_link_mode(ks, advertising, Backplane); - cmd->base.port = PORT_NONE; + ks->base.port = PORT_NONE; break; case I40E_MEDIA_TYPE_BASET: - ethtool_link_ksettings_add_link_mode(cmd, supported, TP); - ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); - cmd->base.port = PORT_TP; + ethtool_link_ksettings_add_link_mode(ks, supported, TP); + ethtool_link_ksettings_add_link_mode(ks, advertising, TP); + ks->base.port = PORT_TP; break; case I40E_MEDIA_TYPE_DA: case I40E_MEDIA_TYPE_CX4: - ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); - ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); - cmd->base.port = PORT_DA; + ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE); + ks->base.port = PORT_DA; break; case I40E_MEDIA_TYPE_FIBER: - ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); - cmd->base.port = PORT_FIBRE; + ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE); + ks->base.port = PORT_FIBRE; break; case I40E_MEDIA_TYPE_UNKNOWN: default: - cmd->base.port = PORT_OTHER; + ks->base.port = PORT_OTHER; break; } /* Set flow control settings */ - ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + ethtool_link_ksettings_add_link_mode(ks, supported, Pause); switch (hw->fc.requested_mode) { case I40E_FC_FULL: - ethtool_link_ksettings_add_link_mode(cmd, advertising, + ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); break; case 
I40E_FC_TX_PAUSE: - ethtool_link_ksettings_add_link_mode(cmd, advertising, + ethtool_link_ksettings_add_link_mode(ks, advertising, Asym_Pause); break; case I40E_FC_RX_PAUSE: - ethtool_link_ksettings_add_link_mode(cmd, advertising, + ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); - ethtool_link_ksettings_add_link_mode(cmd, advertising, + ethtool_link_ksettings_add_link_mode(ks, advertising, Asym_Pause); break; default: ethtool_convert_link_mode_to_legacy_u32( - &advertising, cmd->link_modes.advertising); + &advertising, ks->link_modes.advertising); advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); ethtool_convert_legacy_u32_to_link_mode( - cmd->link_modes.advertising, advertising); + ks->link_modes.advertising, advertising); break; } @@ -682,14 +683,14 @@ static int i40e_get_link_ksettings(struct net_device *netdev, } /** - * i40e_set_settings - Set Speed and Duplex + * i40e_set_link_ksettings - Set Speed and Duplex * @netdev: network interface device structure - * @ecmd: ethtool command + * @ks: ethtool ksettings * * Set speed/duplex per media_types advertised/forced **/ static int i40e_set_link_ksettings(struct net_device *netdev, - const struct ethtool_link_ksettings *cmd) + const struct ethtool_link_ksettings *ks) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_aq_get_phy_abilities_resp abilities; @@ -697,8 +698,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev, struct i40e_pf *pf = np->vsi->back; struct i40e_vsi *vsi = np->vsi; struct i40e_hw *hw = &pf->hw; - struct ethtool_link_ksettings safe_cmd; - struct ethtool_link_ksettings copy_cmd; + struct ethtool_link_ksettings safe_ks; + struct ethtool_link_ksettings copy_ks; i40e_status status = 0; bool change = false; int timeout = 50; @@ -733,31 +734,31 @@ static int i40e_set_link_ksettings(struct net_device *netdev, return -EOPNOTSUPP; } - /* copy the cmd to copy_cmd to avoid modifying the origin */ - memcpy(©_cmd, cmd, sizeof(struct ethtool_link_ksettings)); + /* copy the ksettings to copy_ks to avoid modifying the origin */ + memcpy(©_ks, ks, sizeof(struct ethtool_link_ksettings)); /* get our own copy of the bits to check against */ - memset(&safe_cmd, 0, sizeof(struct ethtool_link_ksettings)); - i40e_get_link_ksettings(netdev, &safe_cmd); + memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings)); + i40e_get_link_ksettings(netdev, &safe_ks); - /* save autoneg and speed out of cmd */ - autoneg = cmd->base.autoneg; + /* save autoneg and speed out of ksettings */ + autoneg = ks->base.autoneg; ethtool_convert_link_mode_to_legacy_u32(&advertise, - cmd->link_modes.advertising); + ks->link_modes.advertising); /* set autoneg and speed back to what they currently are */ - copy_cmd.base.autoneg = safe_cmd.base.autoneg; + copy_ks.base.autoneg = safe_ks.base.autoneg; ethtool_convert_link_mode_to_legacy_u32( - &tmp, safe_cmd.link_modes.advertising); + &tmp, safe_ks.link_modes.advertising); ethtool_convert_legacy_u32_to_link_mode( - copy_cmd.link_modes.advertising, tmp); + copy_ks.link_modes.advertising, tmp); - copy_cmd.base.cmd = safe_cmd.base.cmd; + copy_ks.base.cmd = safe_ks.base.cmd; - /* If copy_cmd and safe_cmd are not the same now, then they are + /* If copy_ks and safe_ks are not the same now, then they are * trying to set something that we do not support */ - if (memcmp(©_cmd, &safe_cmd, sizeof(struct ethtool_link_ksettings))) + if (memcmp(©_ks, &safe_ks, sizeof(struct ethtool_link_ksettings))) return -EOPNOTSUPP; while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { @@ 
-786,8 +787,9 @@ static int i40e_set_link_ksettings(struct net_device *netdev, /* If autoneg was not already enabled */ if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) { /* If autoneg is not supported, return error */ - if (!ethtool_link_ksettings_test_link_mode( - &safe_cmd, supported, Autoneg)) { + if (!ethtool_link_ksettings_test_link_mode(&safe_ks, + supported, + Autoneg)) { netdev_info(netdev, "Autoneg not supported on this phy\n"); err = -EINVAL; goto done; @@ -803,8 +805,9 @@ static int i40e_set_link_ksettings(struct net_device *netdev, /* If autoneg is supported 10GBASE_T is the only PHY * that can disable it, so otherwise return error */ - if (ethtool_link_ksettings_test_link_mode( - &safe_cmd, supported, Autoneg) && + if (ethtool_link_ksettings_test_link_mode(&safe_ks, + supported, + Autoneg) && hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) { netdev_info(netdev, "Autoneg cannot be disabled on this phy\n"); @@ -819,7 +822,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev, } ethtool_convert_link_mode_to_legacy_u32(&tmp, - safe_cmd.link_modes.supported); + safe_ks.link_modes.supported); if (advertise & ~tmp) { err = -EINVAL; goto done; -- cgit v1.2.3 From c6faca730dc01391e08011945fab5e67b09cdd05 Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Thu, 5 Oct 2017 14:53:32 -0700 Subject: i40e: remove ifdef SPEED_25000 This 'ifdef' doesn't accomplish anything so remove it. Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 06514a76ff91..c250116e5e22 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -531,12 +531,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, ks->base.speed = SPEED_40000; break; case I40E_LINK_SPEED_25GB: -#ifdef SPEED_25000 ks->base.speed = SPEED_25000; -#else - netdev_info(netdev, - "Speed is 25G, display not supported by this version of ethtool.\n"); -#endif break; case I40E_LINK_SPEED_20GB: ks->base.speed = SPEED_20000; -- cgit v1.2.3 From 21675bdc214b34d2ce4e30396e9ff36f0e61ae93 Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Thu, 5 Oct 2017 14:53:33 -0700 Subject: i40e: add function header for i40e_get_rxfh Someone left this poor little function naked with no header. This dresses it up in a proper function header it deserves. Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index c250116e5e22..f4a70ef3f2e0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -3968,6 +3968,16 @@ static u32 i40e_get_rxfh_indir_size(struct net_device *netdev) return I40E_HLUT_ARRAY_SIZE; } +/** + * i40e_get_rxfh - get the rx flow hash indirection table + * @netdev: network interface device structure + * @indir: indirection table + * @key: hash key + * @hfunc: hash function + * + * Reads the indirection table directly from the hardware. Returns 0 on + * success. 
+ **/ static int i40e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) { -- cgit v1.2.3 From 5f434994ba94f7f02c1f47a9dd13204d1fbc9686 Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Thu, 5 Oct 2017 14:53:34 -0700 Subject: i40e: fix clearing link masks in i40e_get_link_ksettings This fixes two issues in i40e_get_link_ksettings. It adds calls to ethtool_link_ksettings_zero_link_mode to make sure advertising and supported link masks are cleared before we start setting bits in them. This also replaces some funky bit manipulations with a much nicer call to ethtool_link_ksettings_del_link_mode when removing link modes. Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index f4a70ef3f2e0..fe0b2327de5b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -600,7 +600,9 @@ static int i40e_get_link_ksettings(struct net_device *netdev, struct i40e_hw *hw = &pf->hw; struct i40e_link_status *hw_link_info = &hw->phy.link_info; bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; - u32 advertising; + + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); if (link_up) i40e_get_settings_link_up(hw, ks, netdev, pf); @@ -664,13 +666,9 @@ static int i40e_get_link_ksettings(struct net_device *netdev, Asym_Pause); break; default: - ethtool_convert_link_mode_to_legacy_u32( - &advertising, ks->link_modes.advertising); - - advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); - - ethtool_convert_legacy_u32_to_link_mode( - ks->link_modes.advertising, advertising); + ethtool_link_ksettings_del_link_mode(ks, advertising, Pause); + ethtool_link_ksettings_del_link_mode(ks, advertising, + Asym_Pause); break; } -- cgit v1.2.3 From 52e2d02e42e9a9ac299b61a1b1acbac06fe7949d Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Thu, 5 Oct 2017 14:53:35 -0700 Subject: i40e: fix i40e_phy_type_to_ethtool function header The function header erroneously listed 'phy_types' as a parameter. The correct parameter is 'pf'. Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index fe0b2327de5b..a137675c1426 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -253,7 +253,7 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf) /** * i40e_phy_type_to_ethtool - convert the phy_types to ethtool link modes - * @phy_types: PHY types to convert + * @pf: PF struct with phy_types * @supported: pointer to the ethtool supported variable to fill in * @advertising: pointer to the ethtool advertising variable to fill in * -- cgit v1.2.3 From 91a5c44722c077e30c5ee2b22c5a460d9694ea1d Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Thu, 5 Oct 2017 14:53:36 -0700 Subject: i40e: fix comment typo Someone forgot a word in this comment and it's confusing without it. 
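The link-mask fixes above all lean on the ethtool_link_ksettings helper macros instead of the legacy u32 bitmaps. A minimal sketch of that pattern, assuming a hypothetical fill function and an arbitrary choice of modes (neither is taken from the i40e driver):

#include <linux/ethtool.h>

/* Hypothetical helper: populate the supported/advertising masks with the
 * bitmap-based macros. Zeroing the masks first avoids the stale-bit
 * problem the i40e_get_link_ksettings fix above addresses.
 */
static void example_fill_link_modes(struct ethtool_link_ksettings *ks)
{
	ethtool_link_ksettings_zero_link_mode(ks, supported);
	ethtool_link_ksettings_zero_link_mode(ks, advertising);

	ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
	ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);

	/* Removing a mode no longer needs a convert-to-legacy-u32 round
	 * trip; one macro call clears the bit directly.
	 */
	ethtool_link_ksettings_del_link_mode(ks, advertising, Asym_Pause);
}

Each macro resolves to a bit operation on the ETHTOOL_LINK_MODE_<mode>_BIT position of the named mask, which is why the new API is not limited to 32 link modes.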
Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index a137675c1426..e40fb559dacb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -516,8 +516,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, } /* Now that we've worked out everything that could be supported by the - * current PHY type, get what is supported by the NVM and them to - * get what is truly supported + * current PHY type, get what is supported by the NVM and intersect + * them to get what is truly supported */ i40e_phy_type_to_ethtool(pf, &e_supported, &e_advertising); -- cgit v1.2.3 From a03af69f5c2813e3c76630d043716a3b685c9d30 Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Thu, 5 Oct 2017 14:53:37 -0700 Subject: i40e: fix whitespace issues in i40e_ethtool.c There's a number of minor incidental whitespace issues in this file. This addresses most of the ones I could find. Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 44 +++++++++++--------------- 1 file changed, 18 insertions(+), 26 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index e40fb559dacb..89ab398a7d30 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -511,7 +511,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, break; default: /* if we got here and link is up something bad is afoot */ - netdev_info(netdev, "WARNING: Link is up but PHY type 0x%x is not recognized.\n", + netdev_info(netdev, + "WARNING: Link is up but PHY type 0x%x is not recognized.\n", hw_link_info->phy_type); } @@ -614,14 +615,12 @@ static int i40e_get_link_ksettings(struct net_device *netdev, ks->base.autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE); + /* Set media type settings */ switch (hw->phy.media_type) { case I40E_MEDIA_TYPE_BACKPLANE: - ethtool_link_ksettings_add_link_mode(ks, supported, - Autoneg); - ethtool_link_ksettings_add_link_mode(ks, supported, - Backplane); - ethtool_link_ksettings_add_link_mode(ks, advertising, - Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, Backplane); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); ethtool_link_ksettings_add_link_mode(ks, advertising, Backplane); ks->base.port = PORT_NONE; @@ -652,16 +651,14 @@ static int i40e_get_link_ksettings(struct net_device *netdev, switch (hw->fc.requested_mode) { case I40E_FC_FULL: - ethtool_link_ksettings_add_link_mode(ks, advertising, - Pause); + ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); break; case I40E_FC_TX_PAUSE: ethtool_link_ksettings_add_link_mode(ks, advertising, Asym_Pause); break; case I40E_FC_RX_PAUSE: - ethtool_link_ksettings_add_link_mode(ks, advertising, - Pause); + ethtool_link_ksettings_add_link_mode(ks, advertising, Pause); ethtool_link_ksettings_add_link_mode(ks, advertising, Asym_Pause); break; @@ -708,17 +705,14 @@ static int i40e_set_link_ksettings(struct net_device *netdev, i40e_partition_setting_complaint(pf); return -EOPNOTSUPP; } - if (vsi != pf->vsi[pf->lan_vsi]) return -EOPNOTSUPP; - if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET && hw->phy.media_type != I40E_MEDIA_TYPE_FIBER && hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE && hw->phy.media_type != I40E_MEDIA_TYPE_DA && hw->phy.link_info.link_info & I40E_AQ_LINK_UP) return -EOPNOTSUPP; - if (hw->device_id == I40E_DEV_ID_KX_B || hw->device_id == I40E_DEV_ID_KX_C || hw->device_id == I40E_DEV_ID_20G_KR2 || @@ -844,7 +838,6 @@ static int i40e_set_link_ksettings(struct net_device *netdev, */ if (!config.link_speed) config.link_speed = abilities.link_speed; - if (change || (abilities.link_speed != config.link_speed)) { /* copy over the rest of the abilities */ config.phy_type = abilities.phy_type; @@ -872,7 +865,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev, /* make the aq call */ status = i40e_aq_set_phy_config(hw, &config, NULL); if (status) { - netdev_info(netdev, "Set phy config failed, err %s aq_err %s\n", + netdev_info(netdev, + "Set phy config failed, err %s aq_err %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); err = -EAGAIN; @@ -881,7 +875,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev, status = i40e_update_link_info(hw); if (status) - netdev_dbg(netdev, "Updating link info failed with err %s aq_err %s\n", + netdev_dbg(netdev, + "Updating link info failed with err %s aq_err %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); @@ -2072,14 +2067,13 @@ static int __i40e_get_coalesce(struct net_device *netdev, ec->tx_max_coalesced_frames_irq = vsi->work_limit; ec->rx_max_coalesced_frames_irq = vsi->work_limit; - /* rx and tx usecs has per queue value. If user doesn't specify the queue, - * return queue 0's value to represent. + /* rx and tx usecs has per queue value. If user doesn't specify the + * queue, return queue 0's value to represent. 
*/ - if (queue < 0) { + if (queue < 0) queue = 0; - } else if (queue >= vsi->num_queue_pairs) { + else if (queue >= vsi->num_queue_pairs) return -EINVAL; - } rx_ring = vsi->rx_rings[queue]; tx_ring = vsi->tx_rings[queue]; @@ -2093,7 +2087,6 @@ static int __i40e_get_coalesce(struct net_device *netdev, ec->rx_coalesce_usecs = rx_ring->rx_itr_setting & ~I40E_ITR_DYNAMIC; ec->tx_coalesce_usecs = tx_ring->tx_itr_setting & ~I40E_ITR_DYNAMIC; - /* we use the _usecs_high to store/set the interrupt rate limit * that the hardware supports, that almost but not quite * fits the original intent of the ethtool variable, @@ -2143,7 +2136,6 @@ static int i40e_get_per_queue_coalesce(struct net_device *netdev, u32 queue, * * Change the ITR settings for a specific queue. **/ - static void i40e_set_itr_per_queue(struct i40e_vsi *vsi, struct ethtool_coalesce *ec, int queue) @@ -2265,8 +2257,8 @@ static int __i40e_set_coalesce(struct net_device *netdev, vsi->int_rate_limit); } - /* rx and tx usecs has per queue value. If user doesn't specify the queue, - * apply to all queues. + /* rx and tx usecs has per queue value. If user doesn't specify the + * queue, apply to all queues. */ if (queue < 0) { for (i = 0; i < vsi->num_queue_pairs; i++) -- cgit v1.2.3 From 6987bd25e2be49ec0c25c6c15ba2bcb6327f9ed4 Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Thu, 5 Oct 2017 14:53:38 -0700 Subject: i40e: group autoneg PHY types together This separates the setting of autoneg in i40e_phy_types_to_ethtool into its own conditional. Doing this adds clarity as what PHYs support/advertise autoneg and makes it easier to add new PHY types in the future. This also fixes an issue on devices with CRT_RETIMER where advertising autoneg was being set, but supported autoneg was not. Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 85 +++++++++++++------------- 1 file changed, 41 insertions(+), 44 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 89ab398a7d30..30deae77e745 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -268,9 +268,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, *advertising = 0x0; if (phy_types & I40E_CAP_PHY_TYPE_SGMII) { - *supported |= SUPPORTED_Autoneg | - SUPPORTED_1000baseT_Full; - *advertising |= ADVERTISED_Autoneg; + *supported |= SUPPORTED_1000baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) *advertising |= ADVERTISED_1000baseT_Full; if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) { @@ -289,9 +287,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, phy_types & I40E_CAP_PHY_TYPE_10GBASE_T || phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR || phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) { - *supported |= SUPPORTED_Autoneg | - SUPPORTED_10000baseT_Full; - *advertising |= ADVERTISED_Autoneg; + *supported |= SUPPORTED_10000baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) *advertising |= ADVERTISED_10000baseT_Full; } @@ -301,16 +297,12 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, *supported |= SUPPORTED_40000baseCR4_Full; if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU || phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) { - *supported |= SUPPORTED_Autoneg | - SUPPORTED_40000baseCR4_Full; - *advertising |= ADVERTISED_Autoneg; + *supported |= 
SUPPORTED_40000baseCR4_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_40GB) *advertising |= ADVERTISED_40000baseCR4_Full; } if (phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) { - *supported |= SUPPORTED_Autoneg | - SUPPORTED_100baseT_Full; - *advertising |= ADVERTISED_Autoneg; + *supported |= SUPPORTED_100baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) *advertising |= ADVERTISED_100baseT_Full; } @@ -318,9 +310,7 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX || phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX || phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) { - *supported |= SUPPORTED_Autoneg | - SUPPORTED_1000baseT_Full; - *advertising |= ADVERTISED_Autoneg; + *supported |= SUPPORTED_1000baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) *advertising |= ADVERTISED_1000baseT_Full; } @@ -329,47 +319,54 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4) *supported |= SUPPORTED_40000baseLR4_Full; if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) { - *supported |= SUPPORTED_40000baseKR4_Full | - SUPPORTED_Autoneg; - *advertising |= ADVERTISED_40000baseKR4_Full | - ADVERTISED_Autoneg; + *supported |= SUPPORTED_40000baseKR4_Full; + *advertising |= ADVERTISED_40000baseKR4_Full; } if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) { - *supported |= SUPPORTED_20000baseKR2_Full | - SUPPORTED_Autoneg; - *advertising |= ADVERTISED_Autoneg; + *supported |= SUPPORTED_20000baseKR2_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_20GB) *advertising |= ADVERTISED_20000baseKR2_Full; } - if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) { - if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) - *supported |= SUPPORTED_10000baseKR_Full | - SUPPORTED_Autoneg; - *advertising |= ADVERTISED_Autoneg; - if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) - if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) - *advertising |= ADVERTISED_10000baseKR_Full; - } if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) { - *supported |= SUPPORTED_10000baseKX4_Full | - SUPPORTED_Autoneg; - *advertising |= ADVERTISED_Autoneg; + *supported |= SUPPORTED_10000baseKX4_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) *advertising |= ADVERTISED_10000baseKX4_Full; } - if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) { - if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) - *supported |= SUPPORTED_1000baseKX_Full | - SUPPORTED_Autoneg; - *advertising |= ADVERTISED_Autoneg; + if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR && + !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) { + *supported |= SUPPORTED_10000baseKR_Full; + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + *advertising |= ADVERTISED_10000baseKR_Full; + } + if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX && + !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) { + *supported |= SUPPORTED_1000baseKX_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) - if (!(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) - *advertising |= ADVERTISED_1000baseKX_Full; + *advertising |= ADVERTISED_1000baseKX_Full; } - if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR || - phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR || + /* Autoneg PHY types */ + if (phy_types & I40E_CAP_PHY_TYPE_SGMII || + phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4 || + phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU || + phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4 || phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR || - phy_types & 
I40E_CAP_PHY_TYPE_25GBASE_LR) { + phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR || + phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR || + phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR || + phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2 || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_T || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4 || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 || + phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL || + phy_types & I40E_CAP_PHY_TYPE_1000BASE_T || + phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX || + phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX || + phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX || + phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) { *supported |= SUPPORTED_Autoneg; *advertising |= ADVERTISED_Autoneg; } -- cgit v1.2.3 From 211b4c140a9de0a672a8f5c3cbaa3639ef507205 Mon Sep 17 00:00:00 2001 From: Sudheer Mogilappagari Date: Thu, 5 Oct 2017 14:53:39 -0700 Subject: i40e: Add new PHY types for 25G AOC and ACC support This patch adds support for 25G Active Optical Cables (AOC) and Active Copper Cables (ACC) PHY types. Signed-off-by: Sudheer Mogilappagari Signed-off-by: Krzysztof Malek Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 4 ++++ drivers/net/ethernet/intel/i40e/i40e_common.c | 2 ++ drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 2 ++ drivers/net/ethernet/intel/i40e/i40e_type.h | 4 ++++ drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 4 ++++ 5 files changed, 16 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index a8f65aed5421..6a5db1b33fa2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1771,6 +1771,8 @@ enum i40e_aq_phy_type { I40E_PHY_TYPE_25GBASE_CR = 0x20, I40E_PHY_TYPE_25GBASE_SR = 0x21, I40E_PHY_TYPE_25GBASE_LR = 0x22, + I40E_PHY_TYPE_25GBASE_AOC = 0x23, + I40E_PHY_TYPE_25GBASE_ACC = 0x24, I40E_PHY_TYPE_MAX, I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD, I40E_PHY_TYPE_EMPTY = 0xFE, @@ -1831,6 +1833,8 @@ struct i40e_aq_get_phy_abilities_resp { #define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02 #define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04 #define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08 +#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10 +#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20 u8 fec_cfg_curr_mod_ext_info; #define I40E_AQ_ENABLE_FEC_KR 0x01 #define I40E_AQ_ENABLE_FEC_RS 0x02 diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index aeb497258f20..8d0ee006606b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1180,6 +1180,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw) case I40E_PHY_TYPE_40GBASE_AOC: case I40E_PHY_TYPE_10GBASE_AOC: case I40E_PHY_TYPE_25GBASE_CR: + case I40E_PHY_TYPE_25GBASE_AOC: + case I40E_PHY_TYPE_25GBASE_ACC: media = I40E_MEDIA_TYPE_DA; break; case I40E_PHY_TYPE_1000BASE_KX: diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 30deae77e745..a4210ccdaa5f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -502,6 +502,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, case 
I40E_PHY_TYPE_25GBASE_CR: case I40E_PHY_TYPE_25GBASE_SR: case I40E_PHY_TYPE_25GBASE_LR: + case I40E_PHY_TYPE_25GBASE_AOC: + case I40E_PHY_TYPE_25GBASE_ACC: supported = SUPPORTED_Autoneg; advertising = ADVERTISED_Autoneg; /* TODO: add speeds when ethtool is ready to support*/ diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 0410fcbdbb94..17a99b53acd9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -271,6 +271,10 @@ struct i40e_phy_info { I40E_PHY_TYPE_OFFSET) #define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \ I40E_PHY_TYPE_OFFSET) +#define I40E_CAP_PHY_TYPE_25GBASE_AOC BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC + \ + I40E_PHY_TYPE_OFFSET) +#define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \ + I40E_PHY_TYPE_OFFSET) #define I40E_HW_CAP_MAX_GPIO 30 /* Capabilities of a PF or a VF or the whole device */ struct i40e_hw_capabilities { diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index 60c892f559b9..463e331a70a9 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -1767,6 +1767,8 @@ enum i40e_aq_phy_type { I40E_PHY_TYPE_25GBASE_CR = 0x20, I40E_PHY_TYPE_25GBASE_SR = 0x21, I40E_PHY_TYPE_25GBASE_LR = 0x22, + I40E_PHY_TYPE_25GBASE_AOC = 0x23, + I40E_PHY_TYPE_25GBASE_ACC = 0x24, I40E_PHY_TYPE_MAX, I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD, I40E_PHY_TYPE_EMPTY = 0xFE, @@ -1827,6 +1829,8 @@ struct i40e_aq_get_phy_abilities_resp { #define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02 #define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04 #define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08 +#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10 +#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20 u8 fec_cfg_curr_mod_ext_info; #define I40E_AQ_ENABLE_FEC_KR 0x01 #define I40E_AQ_ENABLE_FEC_RS 0x02 -- cgit v1.2.3 From 1eaae5198e0db1f523fa3432ded69247aa33bf20 Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Thu, 5 Oct 2017 14:53:41 -0700 Subject: i40e: convert i40e_phy_type_to_ethtool to new API We are still largely using the old ethtool API macros. This is problematic because eventually they will be removed and they only support 32 bits of PHY types. This overhauls i40e_phy_type_to_ethtool to use only the new API. Doing this also allows us to provide much better support for newer 25G and 10G PHY types which is included here as well. The remaining usages of the old ethtool API will be addressed in other patches in the series. 
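The 32-bit ceiling is the crux of this conversion: the legacy SUPPORTED_*/ADVERTISED_* values are u32 masks, so 25G modes such as 25000baseKR_Full have no bit to occupy, while the link-mode bitmaps are sized by __ETHTOOL_LINK_MODE_MASK_NBITS and can keep growing. A minimal sketch of the express-then-intersect flow the patch adopts; the wrapper function is an illustrative assumption, not i40e code, though ethtool_intersect_link_masks is the helper the diff below actually calls:

#include <linux/ethtool.h>

/* Illustrative sketch: advertise a 25G mode (inexpressible in the legacy
 * u32 masks) and trim it against a capability mask, mirroring the
 * intersect step this patch adds to i40e_get_settings_link_up.
 */
static void example_report_25g(struct ethtool_link_ksettings *ks,
			       struct ethtool_link_ksettings *cap)
{
	ethtool_link_ksettings_add_link_mode(ks, supported,
					     25000baseKR_Full);
	ethtool_link_ksettings_add_link_mode(ks, advertising,
					     25000baseKR_Full);

	/* Keep only the modes present in both masks; anything the
	 * capability mask lacks is cleared from ks.
	 */
	ethtool_intersect_link_masks(ks, cap);
}

In the patch itself the capability mask is filled by i40e_phy_type_to_ethtool() from the device's reported PHY types before the intersection.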
Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 204 +++++++++++++++++-------- 1 file changed, 140 insertions(+), 64 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index a4210ccdaa5f..0cef8aa85c1d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -254,95 +254,180 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf) /** * i40e_phy_type_to_ethtool - convert the phy_types to ethtool link modes * @pf: PF struct with phy_types - * @supported: pointer to the ethtool supported variable to fill in - * @advertising: pointer to the ethtool advertising variable to fill in + * @ks: ethtool link ksettings struct to fill out * **/ -static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, - u32 *advertising) +static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, + struct ethtool_link_ksettings *ks) { struct i40e_link_status *hw_link_info = &pf->hw.phy.link_info; u64 phy_types = pf->hw.phy.phy_types; - *supported = 0x0; - *advertising = 0x0; + ethtool_link_ksettings_zero_link_mode(ks, supported); + ethtool_link_ksettings_zero_link_mode(ks, advertising); if (phy_types & I40E_CAP_PHY_TYPE_SGMII) { - *supported |= SUPPORTED_1000baseT_Full; + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) - *advertising |= ADVERTISED_1000baseT_Full; + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) { - *supported |= SUPPORTED_100baseT_Full; - *advertising |= ADVERTISED_100baseT_Full; + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100baseT_Full); } } if (phy_types & I40E_CAP_PHY_TYPE_XAUI || phy_types & I40E_CAP_PHY_TYPE_XFI || phy_types & I40E_CAP_PHY_TYPE_SFI || phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU || - phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC) - *supported |= SUPPORTED_10000baseT_Full; - if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU || - phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 || - phy_types & I40E_CAP_PHY_TYPE_10GBASE_T || - phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR || - phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) { - *supported |= SUPPORTED_10000baseT_Full; + phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) - *advertising |= ADVERTISED_10000baseT_Full; + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); + } + if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_T) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); } if (phy_types & I40E_CAP_PHY_TYPE_XLAUI || phy_types & I40E_CAP_PHY_TYPE_XLPPI || phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC) - *supported |= SUPPORTED_40000baseCR4_Full; + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseCR4_Full); if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU || phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) { - *supported |= SUPPORTED_40000baseCR4_Full; + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseCR4_Full); if 
(hw_link_info->requested_speeds & I40E_LINK_SPEED_40GB) - *advertising |= ADVERTISED_40000baseCR4_Full; + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseCR4_Full); } if (phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) { - *supported |= SUPPORTED_100baseT_Full; + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) - *advertising |= ADVERTISED_100baseT_Full; + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100baseT_Full); } - if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T || - phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX || - phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX || - phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) { - *supported |= SUPPORTED_1000baseT_Full; + if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) - *advertising |= ADVERTISED_1000baseT_Full; + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); } if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4) - *supported |= SUPPORTED_40000baseSR4_Full; + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseSR4_Full); if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4) - *supported |= SUPPORTED_40000baseLR4_Full; + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseLR4_Full); if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) { - *supported |= SUPPORTED_40000baseKR4_Full; - *advertising |= ADVERTISED_40000baseKR4_Full; + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseLR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseLR4_Full); } if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) { - *supported |= SUPPORTED_20000baseKR2_Full; + ethtool_link_ksettings_add_link_mode(ks, supported, + 20000baseKR2_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_20GB) - *advertising |= ADVERTISED_20000baseKR2_Full; + ethtool_link_ksettings_add_link_mode(ks, advertising, + 20000baseKR2_Full); } if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) { - *supported |= SUPPORTED_10000baseKX4_Full; + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseKX4_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) - *advertising |= ADVERTISED_10000baseKX4_Full; + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKX4_Full); } if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR && !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) { - *supported |= SUPPORTED_10000baseKR_Full; + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseKR_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) - *advertising |= ADVERTISED_10000baseKR_Full; + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKR_Full); } if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX && !(pf->hw_features & I40E_HW_HAVE_CRT_RETIMER)) { - *supported |= SUPPORTED_1000baseKX_Full; + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseKX_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) - *advertising |= ADVERTISED_1000baseKX_Full; + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseKX_Full); + } + /* need to add 25G PHY types */ + if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseKR_Full); + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + } + if (phy_types & 
I40E_CAP_PHY_TYPE_25GBASE_CR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseCR_Full); + } + if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_SR || + phy_types & I40E_CAP_PHY_TYPE_25GBASE_LR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseSR_Full); + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseSR_Full); + } + if (phy_types & I40E_CAP_PHY_TYPE_25GBASE_AOC || + phy_types & I40E_CAP_PHY_TYPE_25GBASE_ACC) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseCR_Full); + } + /* need to add new 10G PHY types */ + if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 || + phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseCR_Full); + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseCR_Full); + } + if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseSR_Full); + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseSR_Full); + } + if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLR_Full); + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseLR_Full); + } + if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX || + phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX || + phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) { + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseX_Full); + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseX_Full); } /* Autoneg PHY types */ if (phy_types & I40E_CAP_PHY_TYPE_SGMII || @@ -367,8 +452,10 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported, phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX || phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX || phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) { - *supported |= SUPPORTED_Autoneg; - *advertising |= ADVERTISED_Autoneg; + ethtool_link_ksettings_add_link_mode(ks, supported, + Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + Autoneg); } } @@ -385,9 +472,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, struct i40e_pf *pf) { struct i40e_link_status *hw_link_info = &hw->phy.link_info; + struct ethtool_link_ksettings cap_ksettings; u32 link_speed = hw_link_info->link_speed; - u32 e_advertising = 0x0; - u32 e_supported = 0x0; u32 supported, advertising; ethtool_convert_link_mode_to_legacy_u32(&supported, @@ -519,11 +605,13 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, * current PHY type, get what is supported by the NVM and intersect * them to get what is truly supported */ - i40e_phy_type_to_ethtool(pf, &e_supported, - &e_advertising); - - supported = supported & e_supported; - advertising = advertising & e_advertising; + ethtool_convert_legacy_u32_to_link_mode(ks->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(ks->link_modes.advertising, + advertising); + memset(&cap_ksettings, 0, 
sizeof(struct ethtool_link_ksettings)); + i40e_phy_type_to_ethtool(pf, &cap_ksettings); + ethtool_intersect_link_masks(ks, &cap_ksettings); /* Set speed and duplex */ switch (link_speed) { @@ -549,11 +637,6 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, break; } ks->base.duplex = DUPLEX_FULL; - - ethtool_convert_legacy_u32_to_link_mode(ks->link_modes.supported, - supported); - ethtool_convert_legacy_u32_to_link_mode(ks->link_modes.advertising, - advertising); } /** @@ -568,17 +651,10 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw, struct ethtool_link_ksettings *ks, struct i40e_pf *pf) { - u32 supported, advertising; - /* link is down and the driver needs to fall back on * supported phy types to figure out what info to display */ - i40e_phy_type_to_ethtool(pf, &supported, &advertising); - - ethtool_convert_legacy_u32_to_link_mode(ks->link_modes.supported, - supported); - ethtool_convert_legacy_u32_to_link_mode(ks->link_modes.advertising, - advertising); + i40e_phy_type_to_ethtool(pf, ks); /* With no link speed and duplex are unknown */ ks->base.speed = SPEED_UNKNOWN; -- cgit v1.2.3 From 79f04a3aba91531a3b979f6ebd846367a664638f Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Thu, 5 Oct 2017 14:53:42 -0700 Subject: i40e: convert i40e_get_settings_link_up to new API This removes references to old ethtool API macros and functions in i40e_get_settings_link_up as part of the process of converting to the new API. The new API also allows us to provide more explicit support for new 25G and 10G PHY types so some of the PHY types have been adjusted where necessary as well. Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 187 +++++++++++++++++-------- 1 file changed, 125 insertions(+), 62 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 0cef8aa85c1d..913ba91fac6c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -474,125 +474,192 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, struct i40e_link_status *hw_link_info = &hw->phy.link_info; struct ethtool_link_ksettings cap_ksettings; u32 link_speed = hw_link_info->link_speed; - u32 supported, advertising; - - ethtool_convert_link_mode_to_legacy_u32(&supported, - ks->link_modes.supported); - ethtool_convert_link_mode_to_legacy_u32(&advertising, - ks->link_modes.advertising); /* Initialize supported and advertised settings based on phy settings */ switch (hw_link_info->phy_type) { case I40E_PHY_TYPE_40GBASE_CR4: case I40E_PHY_TYPE_40GBASE_CR4_CU: - supported = SUPPORTED_Autoneg | - SUPPORTED_40000baseCR4_Full; - advertising = ADVERTISED_Autoneg | - ADVERTISED_40000baseCR4_Full; + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseCR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseCR4_Full); break; case I40E_PHY_TYPE_XLAUI: case I40E_PHY_TYPE_XLPPI: case I40E_PHY_TYPE_40GBASE_AOC: - supported = SUPPORTED_40000baseCR4_Full; + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseCR4_Full); break; case I40E_PHY_TYPE_40GBASE_SR4: - supported = SUPPORTED_40000baseSR4_Full; + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseSR4_Full); break; case I40E_PHY_TYPE_40GBASE_LR4: - 
supported = SUPPORTED_40000baseLR4_Full; + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseLR4_Full); break; + case I40E_PHY_TYPE_25GBASE_SR: + case I40E_PHY_TYPE_25GBASE_LR: case I40E_PHY_TYPE_10GBASE_SR: case I40E_PHY_TYPE_10GBASE_LR: case I40E_PHY_TYPE_1000BASE_SX: case I40E_PHY_TYPE_1000BASE_LX: - supported = SUPPORTED_10000baseT_Full; + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseLR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseLR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseX_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseX_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); if (hw_link_info->module_type[2] & I40E_MODULE_TYPE_1000BASE_SX || hw_link_info->module_type[2] & I40E_MODULE_TYPE_1000BASE_LX) { - supported |= SUPPORTED_1000baseT_Full; + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) - advertising |= ADVERTISED_1000baseT_Full; + ethtool_link_ksettings_add_link_mode( + ks, advertising, 1000baseT_Full); } if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) - advertising |= ADVERTISED_10000baseT_Full; + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); break; case I40E_PHY_TYPE_10GBASE_T: case I40E_PHY_TYPE_1000BASE_T: case I40E_PHY_TYPE_100BASE_TX: - supported = SUPPORTED_Autoneg | - SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_100baseT_Full; - advertising = ADVERTISED_Autoneg; + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) - advertising |= ADVERTISED_10000baseT_Full; + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) - advertising |= ADVERTISED_1000baseT_Full; + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) - advertising |= ADVERTISED_100baseT_Full; + ethtool_link_ksettings_add_link_mode(ks, advertising, + 100baseT_Full); break; case I40E_PHY_TYPE_1000BASE_T_OPTICAL: - supported = SUPPORTED_Autoneg | - SUPPORTED_1000baseT_Full; - advertising = ADVERTISED_Autoneg | - ADVERTISED_1000baseT_Full; + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); break; case I40E_PHY_TYPE_10GBASE_CR1_CU: case I40E_PHY_TYPE_10GBASE_CR1: - supported = SUPPORTED_Autoneg | - SUPPORTED_10000baseT_Full; - advertising = ADVERTISED_Autoneg | - 
ADVERTISED_10000baseT_Full; + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); break; case I40E_PHY_TYPE_XAUI: case I40E_PHY_TYPE_XFI: case I40E_PHY_TYPE_SFI: case I40E_PHY_TYPE_10GBASE_SFPP_CU: case I40E_PHY_TYPE_10GBASE_AOC: - supported = SUPPORTED_10000baseT_Full; - advertising = SUPPORTED_10000baseT_Full; + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseT_Full); + if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseT_Full); break; case I40E_PHY_TYPE_SGMII: - supported = SUPPORTED_Autoneg | - SUPPORTED_1000baseT_Full; + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) - advertising |= ADVERTISED_1000baseT_Full; + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseT_Full); if (pf->hw_features & I40E_HW_100M_SGMII_CAPABLE) { - supported |= SUPPORTED_100baseT_Full; + ethtool_link_ksettings_add_link_mode(ks, supported, + 100baseT_Full); if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) - advertising |= ADVERTISED_100baseT_Full; + ethtool_link_ksettings_add_link_mode( + ks, advertising, 100baseT_Full); } break; case I40E_PHY_TYPE_40GBASE_KR4: + case I40E_PHY_TYPE_25GBASE_KR: case I40E_PHY_TYPE_20GBASE_KR2: case I40E_PHY_TYPE_10GBASE_KR: case I40E_PHY_TYPE_10GBASE_KX4: case I40E_PHY_TYPE_1000BASE_KX: - supported |= SUPPORTED_40000baseKR4_Full | - SUPPORTED_20000baseKR2_Full | - SUPPORTED_10000baseKR_Full | - SUPPORTED_10000baseKX4_Full | - SUPPORTED_1000baseKX_Full | - SUPPORTED_Autoneg; - advertising |= ADVERTISED_40000baseKR4_Full | - ADVERTISED_20000baseKR2_Full | - ADVERTISED_10000baseKR_Full | - ADVERTISED_10000baseKX4_Full | - ADVERTISED_1000baseKX_Full | - ADVERTISED_Autoneg; + ethtool_link_ksettings_add_link_mode(ks, supported, + 40000baseKR4_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 20000baseKR2_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseKX4_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 1000baseKX_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 40000baseKR4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 20000baseKR2_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseKX4_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 1000baseKX_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); break; - case I40E_PHY_TYPE_25GBASE_KR: case I40E_PHY_TYPE_25GBASE_CR: - case I40E_PHY_TYPE_25GBASE_SR: - case I40E_PHY_TYPE_25GBASE_LR: + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 
25000baseCR_Full); + break; case I40E_PHY_TYPE_25GBASE_AOC: case I40E_PHY_TYPE_25GBASE_ACC: - supported = SUPPORTED_Autoneg; - advertising = ADVERTISED_Autoneg; - /* TODO: add speeds when ethtool is ready to support*/ + ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, + 25000baseCR_Full); + + ethtool_link_ksettings_add_link_mode(ks, advertising, + 25000baseCR_Full); + ethtool_link_ksettings_add_link_mode(ks, supported, + 10000baseCR_Full); + ethtool_link_ksettings_add_link_mode(ks, advertising, + 10000baseCR_Full); break; default: /* if we got here and link is up something bad is afoot */ @@ -605,10 +672,6 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, * current PHY type, get what is supported by the NVM and intersect * them to get what is truly supported */ - ethtool_convert_legacy_u32_to_link_mode(ks->link_modes.supported, - supported); - ethtool_convert_legacy_u32_to_link_mode(ks->link_modes.advertising, - advertising); memset(&cap_ksettings, 0, sizeof(struct ethtool_link_ksettings)); i40e_phy_type_to_ethtool(pf, &cap_ksettings); ethtool_intersect_link_masks(ks, &cap_ksettings); -- cgit v1.2.3 From 636b62d778302149216fad6aaa5e8d84c934a794 Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Thu, 5 Oct 2017 14:53:43 -0700 Subject: i40e: rename 'change' variable to 'autoneg_changed' This variable isn't actually very descriptive and makes the code a bit confusing as to what it is being used for. This patch enhances the variable with the longer name, 'autoneg_changed', which makes it clear we are concerned with autoneg changing in this context. Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 913ba91fac6c..9c70555bf49c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -822,14 +822,14 @@ static int i40e_set_link_ksettings(struct net_device *netdev, { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_aq_get_phy_abilities_resp abilities; + struct ethtool_link_ksettings safe_ks; + struct ethtool_link_ksettings copy_ks; struct i40e_aq_set_phy_config config; struct i40e_pf *pf = np->vsi->back; struct i40e_vsi *vsi = np->vsi; struct i40e_hw *hw = &pf->hw; - struct ethtool_link_ksettings safe_ks; - struct ethtool_link_ksettings copy_ks; + bool autoneg_changed = false; i40e_status status = 0; - bool change = false; int timeout = 50; int err = 0; u32 autoneg; @@ -922,7 +922,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev, /* Autoneg is allowed to change */ config.abilities = abilities.abilities | I40E_AQ_PHY_ENABLE_AN; - change = true; + autoneg_changed = true; } } else { /* If autoneg is currently enabled */ @@ -942,7 +942,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev, /* Autoneg is allowed to change */ config.abilities = abilities.abilities & ~I40E_AQ_PHY_ENABLE_AN; - change = true; + autoneg_changed = true; } } @@ -976,7 +976,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev, */ if (!config.link_speed) config.link_speed = abilities.link_speed; - if (change || (abilities.link_speed != config.link_speed)) { + if (autoneg_changed || 
abilities.link_speed != config.link_speed) { /* copy over the rest of the abilities */ config.phy_type = abilities.phy_type; config.phy_type_ext = abilities.phy_type_ext; -- cgit v1.2.3 From cee919959b6193325f34223370650402dee34e30 Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Thu, 5 Oct 2017 14:53:44 -0700 Subject: i40e: convert i40e_set_link_ksettings to new API This finishes off the conversion to the new ethtool API by removing the old macros being used in i40e_set_link_ksettings and replacing them with shiny new ones. This conversion also allows us to provide link speed support for new 25G and 10G macros which is included here as well. Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 93 ++++++++++++++++---------- 1 file changed, 57 insertions(+), 36 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 9c70555bf49c..9eb618799a30 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -832,9 +832,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev, i40e_status status = 0; int timeout = 50; int err = 0; - u32 autoneg; - u32 advertise; - u32 tmp; + u8 autoneg; /* Changing port settings is not supported if this isn't the * port's controlling PF */ @@ -862,28 +860,34 @@ static int i40e_set_link_ksettings(struct net_device *netdev, /* copy the ksettings to copy_ks to avoid modifying the origin */ memcpy(&copy_ks, ks, sizeof(struct ethtool_link_ksettings)); + /* save autoneg out of ksettings */ + autoneg = copy_ks.base.autoneg; + + memset(&safe_ks, 0, sizeof(safe_ks)); + /* Get link modes supported by hardware and check against modes * requested by the user. Return an error if unsupported mode was set. */ + i40e_phy_type_to_ethtool(pf, &safe_ks); + if (!bitmap_subset(copy_ks.link_modes.advertising, + safe_ks.link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS)) + return -EINVAL; + /* get our own copy of the bits to check against */ memset(&safe_ks, 0, sizeof(struct ethtool_link_ksettings)); + safe_ks.base.cmd = copy_ks.base.cmd; + safe_ks.base.link_mode_masks_nwords = + copy_ks.base.link_mode_masks_nwords; i40e_get_link_ksettings(netdev, &safe_ks); - /* save autoneg and speed out of ksettings */ - autoneg = ks->base.autoneg; - ethtool_convert_link_mode_to_legacy_u32(&advertise, - ks->link_modes.advertising); - - /* set autoneg and speed back to what they currently are */ + /* set autoneg back to what it currently is */ copy_ks.base.autoneg = safe_ks.base.autoneg; - ethtool_convert_link_mode_to_legacy_u32( - &tmp, safe_ks.link_modes.advertising); - ethtool_convert_legacy_u32_to_link_mode( - copy_ks.link_modes.advertising, tmp); - copy_ks.base.cmd = safe_ks.base.cmd; - - /* If copy_ks and safe_ks are not the same now, then they are - * trying to set something that we do not support + /* If copy_ks.base and safe_ks.base are not the same now, then they are + * trying to set something that we do not support.
*/ - if (memcmp(&copy_ks, &safe_ks, sizeof(struct ethtool_link_ksettings))) + if (memcmp(&copy_ks.base, &safe_ks.base, + sizeof(struct ethtool_link_settings))) return -EOPNOTSUPP; while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { @@ -946,28 +950,45 @@ } } - ethtool_convert_link_mode_to_legacy_u32(&tmp, - safe_ks.link_modes.supported); - if (advertise & ~tmp) { - err = -EINVAL; - goto done; - } - - if (advertise & ADVERTISED_100baseT_Full) + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 100baseT_Full)) config.link_speed |= I40E_LINK_SPEED_100MB; - if (advertise & ADVERTISED_1000baseT_Full || - advertise & ADVERTISED_1000baseKX_Full) + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseT_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseX_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseKX_Full)) config.link_speed |= I40E_LINK_SPEED_1GB; - if (advertise & ADVERTISED_10000baseT_Full || - advertise & ADVERTISED_10000baseKX4_Full || - advertise & ADVERTISED_10000baseKR_Full) + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseT_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseKX4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseKR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseCR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 10000baseSR_Full)) config.link_speed |= I40E_LINK_SPEED_10GB; - if (advertise & ADVERTISED_20000baseKR2_Full) + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 20000baseKR2_Full)) config.link_speed |= I40E_LINK_SPEED_20GB; - if (advertise & ADVERTISED_40000baseKR4_Full || - advertise & ADVERTISED_40000baseCR4_Full || - advertise & ADVERTISED_40000baseSR4_Full || - advertise & ADVERTISED_40000baseLR4_Full) + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 25000baseCR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 25000baseKR_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 25000baseSR_Full)) + config.link_speed |= I40E_LINK_SPEED_25GB; + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseKR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseCR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseSR4_Full) || + ethtool_link_ksettings_test_link_mode(ks, advertising, + 40000baseLR4_Full)) config.link_speed |= I40E_LINK_SPEED_40GB; /* If speed didn't get set, set it to what it currently is. -- cgit v1.2.3 From 6c32e0d9fdd56a7af54512aff700e20d85563499 Mon Sep 17 00:00:00 2001 From: Alan Brady Date: Mon, 9 Oct 2017 15:48:45 -0700 Subject: i40e: fix u64 division usage Commit 52eb1ff93e98 ("i40e: Add support setting TC max bandwidth rates") and commit 1ea6f21ae530 ("i40e: Refactor VF BW rate limiting") add some needed functionality for TC bandwidth rate limiting. Unfortunately they introduce several usages of unsigned 64-bit division which needs to be handled specially by the kernel to support all architectures.
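As background, a minimal sketch of the u64-division pattern the fix adopts; the helper name rate_to_credits and the literal 50 Mbps divisor are illustrative only, not lifted verbatim from the driver:

#include <linux/types.h>
#include <linux/math64.h>	/* div_u64() */
#include <asm/div64.h>		/* do_div() */

/*
 * Sketch only: on 32-bit kernels a plain u64 division compiles to a
 * libgcc helper (__udivdi3) that the kernel does not link against, so
 * do_div() or div_u64() must be used instead. do_div(n, base) divides
 * n in place by a 32-bit base and evaluates to the remainder.
 */
static u64 rate_to_credits(u64 max_tx_rate)	/* hypothetical helper */
{
	u64 credits = max_tx_rate;

	do_div(credits, 50);	/* Tx credits are in units of 50 Mbps */
	return credits;		/* equivalently: div_u64(max_tx_rate, 50) */
}
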
Fixes: 52eb1ff93e98 ("i40e: Add support setting TC max bandwidth rates") Fixes: 1ea6f21ae530 ("i40e: Refactor VF BW rate limiting") Signed-off-by: Alan Brady Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 3 +- drivers/net/ethernet/intel/i40e/i40e_main.c | 58 ++++++++++++++++++++--------- 2 files changed, 42 insertions(+), 19 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 266e1dc5e786..eb017763646d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -130,7 +130,8 @@ /* BW rate limiting */ #define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */ -#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* accumulate 4 credits max */ +#define I40E_BW_MBPS_DIVISOR 125000 /* rate / (1000000 / 8) Mbps */ +#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* accumulate 4 credits max */ /* driver state flags */ enum i40e_state_t { diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index bb31d53c4923..1252aaf92fd3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5442,6 +5442,7 @@ int i40e_get_link_speed(struct i40e_vsi *vsi) int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate) { struct i40e_pf *pf = vsi->back; + u64 credits = 0; int speed = 0; int ret = 0; @@ -5459,8 +5460,9 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate) } /* Tx rate credits are in values of 50Mbps, 0 is disabled */ - ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, - max_tx_rate / I40E_BW_CREDIT_DIVISOR, + credits = max_tx_rate; + do_div(credits, I40E_BW_CREDIT_DIVISOR); + ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits, I40E_MAX_BW_INACTIVE_ACCUM, NULL); if (ret) dev_err(&pf->pdev->dev, @@ -6063,13 +6065,17 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi, /* configure VSI for BW limit */ if (ch->max_tx_rate) { + u64 credits = ch->max_tx_rate; + if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate)) return -EINVAL; + do_div(credits, I40E_BW_CREDIT_DIVISOR); dev_dbg(&pf->pdev->dev, "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", ch->max_tx_rate, - ch->max_tx_rate / I40E_BW_CREDIT_DIVISOR, ch->seid); + credits, + ch->seid); } /* in case of VF, this will be main SRIOV VSI */ @@ -6090,6 +6096,7 @@ int i40e_create_queue_channel(struct i40e_vsi *vsi, static int i40e_configure_queue_channels(struct i40e_vsi *vsi) { struct i40e_channel *ch; + u64 max_rate = 0; int ret = 0, i; /* Create app vsi with the TCs. 
Main VSI with TC0 is already set up */ @@ -6110,8 +6117,9 @@ static int i40e_configure_queue_channels(struct i40e_vsi *vsi) /* Bandwidth limit through tc interface is in bytes/s, * change to Mbit/s */ - ch->max_tx_rate = - vsi->mqprio_qopt.max_rate[i] / (1000000 / 8); + max_rate = vsi->mqprio_qopt.max_rate[i]; + do_div(max_rate, I40E_BW_MBPS_DIVISOR); + ch->max_tx_rate = max_rate; list_add_tail(&ch->list, &vsi->ch_list); @@ -6540,6 +6548,7 @@ static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi, struct tc_mqprio_qopt_offload *mqprio_qopt) { u64 sum_max_rate = 0; + u64 max_rate = 0; int i; if (mqprio_qopt->qopt.offset[0] != 0 || @@ -6554,7 +6563,9 @@ static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi, "Invalid min tx rate (greater than 0) specified\n"); return -EINVAL; } - sum_max_rate += (mqprio_qopt->max_rate[i] / (1000000 / 8)); + max_rate = mqprio_qopt->max_rate[i]; + do_div(max_rate, I40E_BW_MBPS_DIVISOR); + sum_max_rate += max_rate; if (i >= mqprio_qopt->qopt.num_tc - 1) break; @@ -6698,14 +6709,18 @@ config_tc: if (pf->flags & I40E_FLAG_TC_MQPRIO) { if (vsi->mqprio_qopt.max_rate[0]) { - u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0] / - (1000000 / 8); + u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; + + do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); if (!ret) { + u64 credits = max_tx_rate; + + do_div(credits, I40E_BW_CREDIT_DIVISOR); dev_dbg(&vsi->back->pdev->dev, "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", max_tx_rate, - max_tx_rate / I40E_BW_CREDIT_DIVISOR, + credits, vsi->seid); } else { need_reset = true; @@ -8166,14 +8181,17 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi) return ret; } if (ch->max_tx_rate) { + u64 credits = ch->max_tx_rate; + if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate)) return -EINVAL; + do_div(credits, I40E_BW_CREDIT_DIVISOR); dev_dbg(&vsi->back->pdev->dev, "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", ch->max_tx_rate, - ch->max_tx_rate / I40E_BW_CREDIT_DIVISOR, + credits, ch->seid); } } @@ -8446,17 +8464,21 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) } if (vsi->mqprio_qopt.max_rate[0]) { - u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0] / (1000000 / 8); + u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; + u64 credits = 0; + do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR); ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); - if (!ret) - dev_dbg(&vsi->back->pdev->dev, - "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", - max_tx_rate, - max_tx_rate / I40E_BW_CREDIT_DIVISOR, - vsi->seid); - else + if (ret) goto end_unlock; + + credits = max_tx_rate; + do_div(credits, I40E_BW_CREDIT_DIVISOR); + dev_dbg(&vsi->back->pdev->dev, + "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", + max_tx_rate, + credits, + vsi->seid); } /* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs -- cgit v1.2.3 From 4b70c62b9eafcee0505b440732d2e00c50f3085d Mon Sep 17 00:00:00 2001 From: Joel Stanley Date: Fri, 13 Oct 2017 12:16:38 +0800 Subject: net: ftgmac100: Request clock and set speed According to the ASPEED datasheet, gigabit speeds require a clock of 100MHz or higher. Other speeds require 25MHz or higher. This patch configures a 100MHz clock if the system has a direct-attached PHY, or 25MHz if the system is running NC-SI which is limited to 100Mbit.
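For reference, a minimal sketch of the common clock framework sequence this patch relies on; the helper name, the bare error handling, and the rate parameter are illustrative assumptions, not code from the patch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/*
 * Sketch only: devm_clk_get() ties the clock's lifetime to the device,
 * clk_prepare_enable() prepares and enables it in one call, and
 * clk_set_rate() asks the clock framework for the target frequency
 * (rounded to what the hardware supports). Hypothetical helper name.
 */
static int example_setup_mac_clk(struct device *dev, unsigned long rate_hz)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, NULL);	/* first (unnamed) clock of the device */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	return clk_set_rate(clk, rate_hz);	/* e.g. 100000000 for gigabit */
}
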
There appear to be no other upstream users of the FTGMAC100 driver, so it is hard to know the clocking requirements of other platforms. Therefore a conservative approach was taken with enabling clocks. If the platform is not ASPEED, both requesting the clock and configuring the speed are skipped. Signed-off-by: Joel Stanley Tested-by: Andrew Jeffery Signed-off-by: David S. Miller --- drivers/net/ethernet/faraday/ftgmac100.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 9ed8e4b81530..78db8e62a83f 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -21,6 +21,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include #include @@ -59,6 +60,9 @@ /* Min number of tx ring entries before stopping queue */ #define TX_THRESHOLD (MAX_SKB_FRAGS + 1) +#define FTGMAC_100MHZ 100000000 +#define FTGMAC_25MHZ 25000000 + struct ftgmac100 { /* Registers */ struct resource *res; @@ -96,6 +100,7 @@ struct ftgmac100 { struct napi_struct napi; struct work_struct reset_task; struct mii_bus *mii_bus; + struct clk *clk; /* Link management */ int cur_speed; @@ -1734,6 +1739,22 @@ static void ftgmac100_ncsi_handler(struct ncsi_dev *nd) nd->link_up ? "up" : "down"); } +static void ftgmac100_setup_clk(struct ftgmac100 *priv) +{ + priv->clk = devm_clk_get(priv->dev, NULL); + if (IS_ERR(priv->clk)) + return; + + clk_prepare_enable(priv->clk); + + /* Aspeed specifies a 100MHz clock is required for up to + * 1000Mbit link speeds. As NCSI is limited to 100Mbit, 25MHz + * is sufficient + */ + clk_set_rate(priv->clk, priv->use_ncsi ? FTGMAC_25MHZ : + FTGMAC_100MHZ); +} + static int ftgmac100_probe(struct platform_device *pdev) { struct resource *res; @@ -1830,6 +1851,9 @@ static int ftgmac100_probe(struct platform_device *pdev) goto err_setup_mdio; } + if (priv->is_aspeed) + ftgmac100_setup_clk(priv); + /* Default ring sizes */ priv->rx_q_entries = priv->new_rx_q_entries = DEF_RX_QUEUE_ENTRIES; priv->tx_q_entries = priv->new_tx_q_entries = DEF_TX_QUEUE_ENTRIES; @@ -1883,6 +1907,8 @@ static int ftgmac100_remove(struct platform_device *pdev) unregister_netdev(netdev); + clk_disable_unprepare(priv->clk); + /* There's a small chance the reset task will have been re-queued, * during stop, make sure it's gone before we free the structure. */ -- cgit v1.2.3 From 0010e3f8b3537b8e7c8a8e7249f9d184e92df1a5 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 16 Oct 2017 17:28:50 -0700 Subject: net/ti/tlan: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: Samuel Chessman Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S.
Miller --- drivers/net/ethernet/ti/tlan.c | 32 +++++++++++++------------------- 1 file changed, 13 insertions(+), 19 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index c8d53d8c83ee..8f53d762fbc4 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -172,7 +172,8 @@ static u32 tlan_handle_tx_eoc(struct net_device *, u16); static u32 tlan_handle_status_check(struct net_device *, u16); static u32 tlan_handle_rx_eoc(struct net_device *, u16); -static void tlan_timer(unsigned long); +static void tlan_timer(struct timer_list *t); +static void tlan_phy_monitor(struct timer_list *t); static void tlan_reset_lists(struct net_device *); static void tlan_free_lists(struct net_device *); @@ -190,7 +191,6 @@ static void tlan_phy_power_up(struct net_device *); static void tlan_phy_reset(struct net_device *); static void tlan_phy_start_link(struct net_device *); static void tlan_phy_finish_auto_neg(struct net_device *); -static void tlan_phy_monitor(unsigned long); /* static int tlan_phy_nop(struct net_device *); @@ -254,11 +254,10 @@ tlan_set_timer(struct net_device *dev, u32 ticks, u32 type) spin_unlock_irqrestore(&priv->lock, flags); return; } - priv->timer.function = tlan_timer; + priv->timer.function = (TIMER_FUNC_TYPE)tlan_timer; if (!in_irq()) spin_unlock_irqrestore(&priv->lock, flags); - priv->timer.data = (unsigned long) dev; priv->timer_set_at = jiffies; priv->timer_type = type; mod_timer(&priv->timer, jiffies + ticks); @@ -926,8 +925,8 @@ static int tlan_open(struct net_device *dev) return err; } - init_timer(&priv->timer); - init_timer(&priv->media_timer); + timer_setup(&priv->timer, NULL, 0); + timer_setup(&priv->media_timer, tlan_phy_monitor, 0); tlan_start(dev); @@ -1426,8 +1425,7 @@ static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int) tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT); if (priv->timer.function == NULL) { - priv->timer.function = tlan_timer; - priv->timer.data = (unsigned long) dev; + priv->timer.function = (TIMER_FUNC_TYPE)tlan_timer; priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY; priv->timer_set_at = jiffies; priv->timer_type = TLAN_TIMER_ACTIVITY; @@ -1578,8 +1576,7 @@ drop_and_reuse: tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT); if (priv->timer.function == NULL) { - priv->timer.function = tlan_timer; - priv->timer.data = (unsigned long) dev; + priv->timer.function = (TIMER_FUNC_TYPE)tlan_timer; priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY; priv->timer_set_at = jiffies; priv->timer_type = TLAN_TIMER_ACTIVITY; @@ -1836,10 +1833,10 @@ ThunderLAN driver timer function * **************************************************************/ -static void tlan_timer(unsigned long data) +static void tlan_timer(struct timer_list *t) { - struct net_device *dev = (struct net_device *) data; - struct tlan_priv *priv = netdev_priv(dev); + struct tlan_priv *priv = from_timer(priv, t, timer); + struct net_device *dev = priv->dev; u32 elapsed; unsigned long flags = 0; @@ -1872,7 +1869,6 @@ static void tlan_timer(unsigned long data) tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK); } else { - priv->timer.function = tlan_timer; priv->timer.expires = priv->timer_set_at + TLAN_TIMER_ACT_DELAY; spin_unlock_irqrestore(&priv->lock, flags); @@ -2317,8 +2313,6 @@ tlan_finish_reset(struct net_device *dev) } else netdev_info(dev, "Link active\n"); /* Enabling link beat monitoring */ - 
priv->media_timer.function = tlan_phy_monitor; - priv->media_timer.data = (unsigned long) dev; priv->media_timer.expires = jiffies + HZ; add_timer(&priv->media_timer); } @@ -2763,10 +2757,10 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev) * *******************************************************************/ -static void tlan_phy_monitor(unsigned long data) +static void tlan_phy_monitor(struct timer_list *t) { - struct net_device *dev = (struct net_device *) data; - struct tlan_priv *priv = netdev_priv(dev); + struct tlan_priv *priv = from_timer(priv, t, media_timer); + struct net_device *dev = priv->dev; u16 phy; u16 phy_status; -- cgit v1.2.3 From eb8c6b5b4402581a805a5d8a736a7058c4c5abb7 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 16 Oct 2017 17:28:57 -0700 Subject: ethernet/broadcom: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() helper to pass the timer pointer explicitly. Cc: Florian Fainelli Cc: bcm-kernel-feedback-list@broadcom.com Cc: "David S. Miller" Cc: Arnd Bergmann Cc: Jarod Wilson Cc: netdev@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index f6bc13fe8a99..d9346e2ac720 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -295,16 +295,13 @@ static int bcm_enet_refill_rx(struct net_device *dev) /* * timer callback to defer refill rx queue in case we're OOM */ -static void bcm_enet_refill_rx_timer(unsigned long data) +static void bcm_enet_refill_rx_timer(struct timer_list *t) { - struct net_device *dev; - struct bcm_enet_priv *priv; - - dev = (struct net_device *)data; - priv = netdev_priv(dev); + struct bcm_enet_priv *priv = from_timer(priv, t, rx_timeout); + struct net_device *dev = priv->net_dev; spin_lock(&priv->rx_lock); - bcm_enet_refill_rx((struct net_device *)data); + bcm_enet_refill_rx(dev); spin_unlock(&priv->rx_lock); } @@ -1860,8 +1857,7 @@ static int bcm_enet_probe(struct platform_device *pdev) spin_lock_init(&priv->rx_lock); /* init rx timeout (used for oom) */ - setup_timer(&priv->rx_timeout, bcm_enet_refill_rx_timer, - (unsigned long)dev); + timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0); /* init the mib update lock&work */ mutex_init(&priv->mib_update_lock); @@ -2015,9 +2011,9 @@ static inline int bcm_enet_port_is_rgmii(int portid) /* * enet sw PHY polling */ -static void swphy_poll_timer(unsigned long data) +static void swphy_poll_timer(struct timer_list *t) { - struct bcm_enet_priv *priv = (struct bcm_enet_priv *)data; + struct bcm_enet_priv *priv = from_timer(priv, t, swphy_poll); unsigned int i; for (i = 0; i < priv->num_ports; i++) { @@ -2326,7 +2322,7 @@ static int bcm_enetsw_open(struct net_device *dev) } /* start phy polling timer */ - setup_timer(&priv->swphy_poll, swphy_poll_timer, (unsigned long)priv); + timer_setup(&priv->swphy_poll, swphy_poll_timer, 0); mod_timer(&priv->swphy_poll, jiffies); return 0; @@ -2743,9 +2739,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev) spin_lock_init(&priv->rx_lock); /* init rx timeout (used for oom) */ - init_timer(&priv->rx_timeout); - 
priv->rx_timeout.function = bcm_enet_refill_rx_timer; - priv->rx_timeout.data = (unsigned long)dev; + timer_setup(&priv->rx_timeout, bcm_enet_refill_rx_timer, 0); /* register netdevice */ dev->netdev_ops = &bcm_enetsw_ops; -- cgit v1.2.3 From 41fce7034bf39721a30d7e6f2cb479808d7aea78 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 16 Oct 2017 17:28:58 -0700 Subject: net: tulip: de2104x: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: "David S. Miller" Cc: "yuval.shaia@oracle.com" Cc: Tobias Klauser Cc: Jarod Wilson Cc: Philippe Reynes Cc: netdev@vger.kernel.org Cc: linux-parisc@vger.kernel.org Signed-off-by: Kees Cook Reviewed-by: Tobias Klauser Signed-off-by: David S. Miller --- drivers/net/ethernet/dec/tulip/de2104x.c | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index c87b8cc42963..13430f75496c 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -333,8 +333,8 @@ static void de_set_rx_mode (struct net_device *dev); static void de_tx (struct de_private *de); static void de_clean_rings (struct de_private *de); static void de_media_interrupt (struct de_private *de, u32 status); -static void de21040_media_timer (unsigned long data); -static void de21041_media_timer (unsigned long data); +static void de21040_media_timer (struct timer_list *t); +static void de21041_media_timer (struct timer_list *t); static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media); @@ -959,9 +959,9 @@ static void de_next_media (struct de_private *de, const u32 *media, } } -static void de21040_media_timer (unsigned long data) +static void de21040_media_timer (struct timer_list *t) { - struct de_private *de = (struct de_private *) data; + struct de_private *de = from_timer(de, t, media_timer); struct net_device *dev = de->dev; u32 status = dr32(SIAStatus); unsigned int carrier; @@ -1040,9 +1040,9 @@ static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media) return 1; } -static void de21041_media_timer (unsigned long data) +static void de21041_media_timer (struct timer_list *t) { - struct de_private *de = (struct de_private *) data; + struct de_private *de = from_timer(de, t, media_timer); struct net_device *dev = de->dev; u32 status = dr32(SIAStatus); unsigned int carrier; @@ -1999,12 +1999,9 @@ static int de_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug); de->board_idx = board_idx; spin_lock_init (&de->lock); - init_timer(&de->media_timer); - if (de->de21040) - de->media_timer.function = de21040_media_timer; - else - de->media_timer.function = de21041_media_timer; - de->media_timer.data = (unsigned long) de; + timer_setup(&de->media_timer, + de->de21040 ? 
de21040_media_timer : de21041_media_timer, + 0); netif_carrier_off(dev); -- cgit v1.2.3 From abec4be3ee68c8572adb39da68fbfd86e63daa84 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 16 Oct 2017 17:29:00 -0700 Subject: net: ethernet: stmmac: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: Giuseppe Cavallaro Cc: Alexandre Torgue Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Acked-by: Giuseppe Cavallaro Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c index 6a9c954492f2..8b50afcdb52d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c @@ -118,10 +118,9 @@ int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs) return ret; } -static void pcs_link_timer_callback(unsigned long data) +static void pcs_link_timer_callback(struct tse_pcs *pcs) { u16 val = 0; - struct tse_pcs *pcs = (struct tse_pcs *)data; void __iomem *tse_pcs_base = pcs->tse_pcs_base; void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base; @@ -138,12 +137,11 @@ static void pcs_link_timer_callback(unsigned long data) } } -static void auto_nego_timer_callback(unsigned long data) +static void auto_nego_timer_callback(struct tse_pcs *pcs) { u16 val = 0; u16 speed = 0; u16 duplex = 0; - struct tse_pcs *pcs = (struct tse_pcs *)data; void __iomem *tse_pcs_base = pcs->tse_pcs_base; void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base; @@ -201,14 +199,14 @@ static void auto_nego_timer_callback(unsigned long data) } } -static void aneg_link_timer_callback(unsigned long data) +static void aneg_link_timer_callback(struct timer_list *t) { - struct tse_pcs *pcs = (struct tse_pcs *)data; + struct tse_pcs *pcs = from_timer(pcs, t, aneg_link_timer); if (pcs->autoneg == AUTONEG_ENABLE) - auto_nego_timer_callback(data); + auto_nego_timer_callback(pcs); else if (pcs->autoneg == AUTONEG_DISABLE) - pcs_link_timer_callback(data); + pcs_link_timer_callback(pcs); } void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev, @@ -237,8 +235,8 @@ void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev, tse_pcs_reset(tse_pcs_base, pcs); - setup_timer(&pcs->aneg_link_timer, - aneg_link_timer_callback, (unsigned long)pcs); + timer_setup(&pcs->aneg_link_timer, aneg_link_timer_callback, + 0); mod_timer(&pcs->aneg_link_timer, jiffies + msecs_to_jiffies(AUTONEGO_LINK_TIMER)); } else if (phy_dev->autoneg == AUTONEG_DISABLE) { @@ -270,8 +268,8 @@ void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev, tse_pcs_reset(tse_pcs_base, pcs); - setup_timer(&pcs->aneg_link_timer, - aneg_link_timer_callback, (unsigned long)pcs); + timer_setup(&pcs->aneg_link_timer, aneg_link_timer_callback, + 0); mod_timer(&pcs->aneg_link_timer, jiffies + msecs_to_jiffies(AUTONEGO_LINK_TIMER)); } -- cgit v1.2.3 From 5a3a8962035277210098a6b498680769a8eb36b6 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 16 Oct 2017 17:29:02 -0700 Subject: net: vxge: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new 
timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: Jon Mason Cc: "David S. Miller" Cc: Miroslav Lichvar Cc: Jarod Wilson Cc: Eric Dumazet Cc: stephen hemminger Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/neterion/vxge/vxge-main.c | 12 ++++++------ drivers/net/ethernet/neterion/vxge/vxge-main.h | 8 +++----- 2 files changed, 9 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 50ea69d88480..5d5b9855e24e 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -2597,9 +2597,9 @@ INTA_MODE: return VXGE_HW_OK; } -static void vxge_poll_vp_reset(unsigned long data) +static void vxge_poll_vp_reset(struct timer_list *t) { - struct vxgedev *vdev = (struct vxgedev *)data; + struct vxgedev *vdev = from_timer(vdev, t, vp_reset_timer); int i, j = 0; for (i = 0; i < vdev->no_of_vpath; i++) { @@ -2616,9 +2616,9 @@ static void vxge_poll_vp_reset(unsigned long data) mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2); } -static void vxge_poll_vp_lockup(unsigned long data) +static void vxge_poll_vp_lockup(struct timer_list *t) { - struct vxgedev *vdev = (struct vxgedev *)data; + struct vxgedev *vdev = from_timer(vdev, t, vp_lockup_timer); enum vxge_hw_status status = VXGE_HW_OK; struct vxge_vpath *vpath; struct vxge_ring *ring; @@ -2858,12 +2858,12 @@ static int vxge_open(struct net_device *dev) vdev->config.rx_pause_enable); if (vdev->vp_reset_timer.function == NULL) - vxge_os_timer(&vdev->vp_reset_timer, vxge_poll_vp_reset, vdev, + vxge_os_timer(&vdev->vp_reset_timer, vxge_poll_vp_reset, HZ / 2); /* There is no need to check for RxD leak and RxD lookup on Titan1A */ if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL) - vxge_os_timer(&vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev, + vxge_os_timer(&vdev->vp_lockup_timer, vxge_poll_vp_lockup, HZ / 2); set_bit(__VXGE_STATE_CARD_UP, &vdev->state); diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h index 3a79d93b8445..59a57ff5e96a 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.h +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h @@ -417,12 +417,10 @@ struct vxge_tx_priv { module_param(p, int, 0) static inline -void vxge_os_timer(struct timer_list *timer, void (*func)(unsigned long data), - struct vxgedev *vdev, unsigned long timeout) +void vxge_os_timer(struct timer_list *timer, void (*func)(struct timer_list *), + unsigned long timeout) { - init_timer(timer); - timer->function = func; - timer->data = (unsigned long)vdev; + timer_setup(timer, func, 0); mod_timer(timer, jiffies + timeout); } -- cgit v1.2.3 From a8c22a2bbc67d001479696c5696ae11c84116701 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 16 Oct 2017 17:29:05 -0700 Subject: net: tulip: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: "David S. Miller" Cc: David Howells Cc: Jarod Wilson Cc: Stephen Hemminger Cc: Johannes Berg Cc: Eric Dumazet Cc: Philippe Reynes Cc: "yuval.shaia@oracle.com" Cc: netdev@vger.kernel.org Cc: linux-parisc@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. 
Miller --- drivers/net/ethernet/dec/tulip/de4x5.c | 12 ++++++------ drivers/net/ethernet/dec/tulip/dmfe.c | 10 +++++----- drivers/net/ethernet/dec/tulip/interrupt.c | 6 +++--- drivers/net/ethernet/dec/tulip/pnic.c | 6 +++--- drivers/net/ethernet/dec/tulip/pnic2.c | 6 +++--- drivers/net/ethernet/dec/tulip/timer.c | 12 ++++++------ drivers/net/ethernet/dec/tulip/tulip.h | 12 ++++++------ drivers/net/ethernet/dec/tulip/tulip_core.c | 14 ++++++-------- drivers/net/ethernet/dec/tulip/uli526x.c | 10 +++++----- drivers/net/ethernet/dec/tulip/winbond-840.c | 10 +++++----- 10 files changed, 48 insertions(+), 50 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 299812e92db7..a31b4df3e7ff 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -912,7 +912,7 @@ static int de4x5_init(struct net_device *dev); static int de4x5_sw_reset(struct net_device *dev); static int de4x5_rx(struct net_device *dev); static int de4x5_tx(struct net_device *dev); -static void de4x5_ast(struct net_device *dev); +static void de4x5_ast(struct timer_list *t); static int de4x5_txur(struct net_device *dev); static int de4x5_rx_ovfc(struct net_device *dev); @@ -1147,8 +1147,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) lp->timeout = -1; lp->gendev = gendev; spin_lock_init(&lp->lock); - setup_timer(&lp->timer, (void (*)(unsigned long))de4x5_ast, - (unsigned long)dev); + timer_setup(&lp->timer, de4x5_ast, 0); de4x5_parse_params(dev); /* @@ -1741,9 +1740,10 @@ de4x5_tx(struct net_device *dev) } static void -de4x5_ast(struct net_device *dev) +de4x5_ast(struct timer_list *t) { - struct de4x5_private *lp = netdev_priv(dev); + struct de4x5_private *lp = from_timer(lp, t, timer); + struct net_device *dev = dev_get_drvdata(lp->gendev); int next_tick = DE4X5_AUTOSENSE_MS; int dt; @@ -2369,7 +2369,7 @@ autoconf_media(struct net_device *dev) lp->media = INIT; lp->tcount = 0; - de4x5_ast(dev); + de4x5_ast(&lp->timer); return lp->media; } diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c index 6585f737d08b..17ef7a28873d 100644 --- a/drivers/net/ethernet/dec/tulip/dmfe.c +++ b/drivers/net/ethernet/dec/tulip/dmfe.c @@ -331,7 +331,7 @@ static void dmfe_phy_write_1bit(void __iomem *, u32); static u16 dmfe_phy_read_1bit(void __iomem *); static u8 dmfe_sense_speed(struct dmfe_board_info *); static void dmfe_process_mode(struct dmfe_board_info *); -static void dmfe_timer(unsigned long); +static void dmfe_timer(struct timer_list *); static inline u32 cal_CRC(unsigned char *, unsigned int, u8); static void dmfe_rx_packet(struct net_device *, struct dmfe_board_info *); static void dmfe_free_tx_pkt(struct net_device *, struct dmfe_board_info *); @@ -596,7 +596,7 @@ static int dmfe_open(struct net_device *dev) netif_wake_queue(dev); /* set and active a timer process */ - setup_timer(&db->timer, dmfe_timer, (unsigned long)dev); + timer_setup(&db->timer, dmfe_timer, 0); db->timer.expires = DMFE_TIMER_WUT + HZ * 2; add_timer(&db->timer); @@ -1128,10 +1128,10 @@ static const struct ethtool_ops netdev_ethtool_ops = { * Dynamic media sense, allocate Rx buffer... 
*/ -static void dmfe_timer(unsigned long data) +static void dmfe_timer(struct timer_list *t) { - struct net_device *dev = (struct net_device *)data; - struct dmfe_board_info *db = netdev_priv(dev); + struct dmfe_board_info *db = from_timer(db, t, timer); + struct net_device *dev = pci_get_drvdata(db->pdev); void __iomem *ioaddr = db->ioaddr; u32 tmp_cr8; unsigned char tmp_cr12; diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c index 8df80880ecaa..c1ca0765d56d 100644 --- a/drivers/net/ethernet/dec/tulip/interrupt.c +++ b/drivers/net/ethernet/dec/tulip/interrupt.c @@ -102,10 +102,10 @@ int tulip_refill_rx(struct net_device *dev) #ifdef CONFIG_TULIP_NAPI -void oom_timer(unsigned long data) +void oom_timer(struct timer_list *t) { - struct net_device *dev = (struct net_device *)data; - struct tulip_private *tp = netdev_priv(dev); + struct tulip_private *tp = from_timer(tp, t, oom_timer); + napi_schedule(&tp->napi); } diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c index 7bcccf5cac7a..3fb39e32e1b4 100644 --- a/drivers/net/ethernet/dec/tulip/pnic.c +++ b/drivers/net/ethernet/dec/tulip/pnic.c @@ -84,10 +84,10 @@ void pnic_lnk_change(struct net_device *dev, int csr5) } } -void pnic_timer(unsigned long data) +void pnic_timer(struct timer_list *t) { - struct net_device *dev = (struct net_device *)data; - struct tulip_private *tp = netdev_priv(dev); + struct tulip_private *tp = from_timer(tp, t, timer); + struct net_device *dev = tp->dev; void __iomem *ioaddr = tp->base_addr; int next_tick = 60*HZ; diff --git a/drivers/net/ethernet/dec/tulip/pnic2.c b/drivers/net/ethernet/dec/tulip/pnic2.c index 5895fc43f6e0..412adaa7fdf8 100644 --- a/drivers/net/ethernet/dec/tulip/pnic2.c +++ b/drivers/net/ethernet/dec/tulip/pnic2.c @@ -76,10 +76,10 @@ #include -void pnic2_timer(unsigned long data) +void pnic2_timer(struct timer_list *t) { - struct net_device *dev = (struct net_device *)data; - struct tulip_private *tp = netdev_priv(dev); + struct tulip_private *tp = from_timer(tp, t, timer); + struct net_device *dev = tp->dev; void __iomem *ioaddr = tp->base_addr; int next_tick = 60*HZ; diff --git a/drivers/net/ethernet/dec/tulip/timer.c b/drivers/net/ethernet/dec/tulip/timer.c index 523d9dde50a2..642e9dfc5451 100644 --- a/drivers/net/ethernet/dec/tulip/timer.c +++ b/drivers/net/ethernet/dec/tulip/timer.c @@ -137,10 +137,10 @@ void tulip_media_task(struct work_struct *work) } -void mxic_timer(unsigned long data) +void mxic_timer(struct timer_list *t) { - struct net_device *dev = (struct net_device *)data; - struct tulip_private *tp = netdev_priv(dev); + struct tulip_private *tp = from_timer(tp, t, timer); + struct net_device *dev = tp->dev; void __iomem *ioaddr = tp->base_addr; int next_tick = 60*HZ; @@ -154,10 +154,10 @@ void mxic_timer(unsigned long data) } -void comet_timer(unsigned long data) +void comet_timer(struct timer_list *t) { - struct net_device *dev = (struct net_device *)data; - struct tulip_private *tp = netdev_priv(dev); + struct tulip_private *tp = from_timer(tp, t, timer); + struct net_device *dev = tp->dev; int next_tick = 2*HZ; if (tulip_debug > 1) diff --git a/drivers/net/ethernet/dec/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h index 06660dbc44b7..b458140aeaef 100644 --- a/drivers/net/ethernet/dec/tulip/tulip.h +++ b/drivers/net/ethernet/dec/tulip/tulip.h @@ -43,7 +43,7 @@ struct tulip_chip_table { int io_size; int valid_intrs; /* CSR7 interrupt enable settings */ int flags; - void 
(*media_timer) (unsigned long); + void (*media_timer) (struct timer_list *); work_func_t media_task; }; @@ -476,7 +476,7 @@ void t21142_lnk_change(struct net_device *dev, int csr5); /* PNIC2.c */ void pnic2_lnk_change(struct net_device *dev, int csr5); -void pnic2_timer(unsigned long data); +void pnic2_timer(struct timer_list *t); void pnic2_start_nway(struct net_device *dev); void pnic2_lnk_change(struct net_device *dev, int csr5); @@ -504,19 +504,19 @@ void tulip_find_mii (struct net_device *dev, int board_idx); /* pnic.c */ void pnic_do_nway(struct net_device *dev); void pnic_lnk_change(struct net_device *dev, int csr5); -void pnic_timer(unsigned long data); +void pnic_timer(struct timer_list *t); /* timer.c */ void tulip_media_task(struct work_struct *work); -void mxic_timer(unsigned long data); -void comet_timer(unsigned long data); +void mxic_timer(struct timer_list *t); +void comet_timer(struct timer_list *t); /* tulip_core.c */ extern int tulip_debug; extern const char * const medianame[]; extern const char tulip_media_cap[]; extern const struct tulip_chip_table tulip_tbl[]; -void oom_timer(unsigned long data); +void oom_timer(struct timer_list *t); extern u8 t21040_csr13[]; static inline void tulip_start_rxtx(struct tulip_private *tp) diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 851b6d1f5a42..00d02a0967d0 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -123,10 +123,10 @@ int tulip_debug = TULIP_DEBUG; int tulip_debug = 1; #endif -static void tulip_timer(unsigned long data) +static void tulip_timer(struct timer_list *t) { - struct net_device *dev = (struct net_device *)data; - struct tulip_private *tp = netdev_priv(dev); + struct tulip_private *tp = from_timer(tp, t, timer); + struct net_device *dev = tp->dev; if (netif_running(dev)) schedule_work(&tp->media_work); @@ -505,7 +505,7 @@ media_picked: tp->timer.expires = RUN_AT(next_tick); add_timer(&tp->timer); #ifdef CONFIG_TULIP_NAPI - setup_timer(&tp->oom_timer, oom_timer, (unsigned long)dev); + timer_setup(&tp->oom_timer, oom_timer, 0); #endif } @@ -780,8 +780,7 @@ static void tulip_down (struct net_device *dev) spin_unlock_irqrestore (&tp->lock, flags); - setup_timer(&tp->timer, tulip_tbl[tp->chip_id].media_timer, - (unsigned long)dev); + timer_setup(&tp->timer, tulip_tbl[tp->chip_id].media_timer, 0); dev->if_port = tp->saved_if_port; @@ -1470,8 +1469,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->csr0 = csr0; spin_lock_init(&tp->lock); spin_lock_init(&tp->mii_lock); - setup_timer(&tp->timer, tulip_tbl[tp->chip_id].media_timer, - (unsigned long)dev); + timer_setup(&tp->timer, tulip_tbl[tp->chip_id].media_timer, 0); INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task); diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c index 5fbbc0caba99..488a744084c9 100644 --- a/drivers/net/ethernet/dec/tulip/uli526x.c +++ b/drivers/net/ethernet/dec/tulip/uli526x.c @@ -241,7 +241,7 @@ static void phy_write_1bit(struct uli526x_board_info *db, u32); static u16 phy_read_1bit(struct uli526x_board_info *db); static u8 uli526x_sense_speed(struct uli526x_board_info *); static void uli526x_process_mode(struct uli526x_board_info *); -static void uli526x_timer(unsigned long); +static void uli526x_timer(struct timer_list *t); static void uli526x_rx_packet(struct net_device *, struct uli526x_board_info *); static void 
uli526x_free_tx_pkt(struct net_device *, struct uli526x_board_info *); static void uli526x_reuse_skb(struct uli526x_board_info *, struct sk_buff *); @@ -491,7 +491,7 @@ static int uli526x_open(struct net_device *dev) netif_wake_queue(dev); /* set and active a timer process */ - setup_timer(&db->timer, uli526x_timer, (unsigned long)dev); + timer_setup(&db->timer, uli526x_timer, 0); db->timer.expires = ULI526X_TIMER_WUT + HZ * 2; add_timer(&db->timer); @@ -1021,10 +1021,10 @@ static const struct ethtool_ops netdev_ethtool_ops = { * Dynamic media sense, allocate Rx buffer... */ -static void uli526x_timer(unsigned long data) +static void uli526x_timer(struct timer_list *t) { - struct net_device *dev = (struct net_device *) data; - struct uli526x_board_info *db = netdev_priv(dev); + struct uli526x_board_info *db = from_timer(db, t, timer); + struct net_device *dev = pci_get_drvdata(db->pdev); struct uli_phy_ops *phy = &db->phy; void __iomem *ioaddr = db->ioaddr; unsigned long flags; diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 6f88d687b6d2..70cb2d689c2c 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -327,7 +327,7 @@ static int mdio_read(struct net_device *dev, int phy_id, int location); static void mdio_write(struct net_device *dev, int phy_id, int location, int value); static int netdev_open(struct net_device *dev); static int update_link(struct net_device *dev); -static void netdev_timer(unsigned long data); +static void netdev_timer(struct timer_list *t); static void init_rxtx_rings(struct net_device *dev); static void free_rxtx_rings(struct netdev_private *np); static void init_registers(struct net_device *dev); @@ -655,7 +655,7 @@ static int netdev_open(struct net_device *dev) netdev_dbg(dev, "Done netdev_open()\n"); /* Set the timer to check for link beat. */ - setup_timer(&np->timer, netdev_timer, (unsigned long)dev); + timer_setup(&np->timer, netdev_timer, 0); np->timer.expires = jiffies + 1*HZ; add_timer(&np->timer); return 0; @@ -772,10 +772,10 @@ static inline void update_csr6(struct net_device *dev, int new) np->mii_if.full_duplex = 1; } -static void netdev_timer(unsigned long data) +static void netdev_timer(struct timer_list *t) { - struct net_device *dev = (struct net_device *)data; - struct netdev_private *np = netdev_priv(dev); + struct netdev_private *np = from_timer(np, t, timer); + struct net_device *dev = pci_get_drvdata(np->pci_dev); void __iomem *ioaddr = np->base_addr; if (debug > 2) -- cgit v1.2.3 From 7974c0f3622b21825160a37d28b7b1f35ee4cbac Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 16 Oct 2017 17:29:07 -0700 Subject: drivers/net/3com: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: Steffen Klassert Cc: "David S. Miller" Cc: Jarod Wilson Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. 
Miller --- drivers/net/ethernet/3com/3c574_cs.c | 12 +++++------- drivers/net/ethernet/3com/3c589_cs.c | 10 +++++----- drivers/net/ethernet/3com/3c59x.c | 20 ++++++++++---------- 3 files changed, 20 insertions(+), 22 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c index 47c844cc9d27..48bc7fa0258c 100644 --- a/drivers/net/ethernet/3com/3c574_cs.c +++ b/drivers/net/ethernet/3com/3c574_cs.c @@ -225,7 +225,7 @@ static unsigned short read_eeprom(unsigned int ioaddr, int index); static void tc574_wait_for_completion(struct net_device *dev, int cmd); static void tc574_reset(struct net_device *dev); -static void media_check(unsigned long arg); +static void media_check(struct timer_list *t); static int el3_open(struct net_device *dev); static netdev_tx_t el3_start_xmit(struct sk_buff *skb, struct net_device *dev); @@ -377,7 +377,7 @@ static int tc574_config(struct pcmcia_device *link) lp->autoselect = config & Autoselect ? 1 : 0; } - init_timer(&lp->media); + timer_setup(&lp->media, media_check, 0); { int phy; @@ -681,8 +681,6 @@ static int el3_open(struct net_device *dev) netif_start_queue(dev); tc574_reset(dev); - lp->media.function = media_check; - lp->media.data = (unsigned long) dev; lp->media.expires = jiffies + HZ; add_timer(&lp->media); @@ -859,10 +857,10 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id) (and as a last resort, poll the NIC for events), and to monitor the MII, reporting changes in cable status. */ -static void media_check(unsigned long arg) +static void media_check(struct timer_list *t) { - struct net_device *dev = (struct net_device *) arg; - struct el3_private *lp = netdev_priv(dev); + struct el3_private *lp = from_timer(lp, t, media); + struct net_device *dev = lp->p_dev->priv; unsigned int ioaddr = dev->base_addr; unsigned long flags; unsigned short /* cable, */ media, partner; diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c index e28254a00599..2b2695311bda 100644 --- a/drivers/net/ethernet/3com/3c589_cs.c +++ b/drivers/net/ethernet/3com/3c589_cs.c @@ -163,7 +163,7 @@ static void tc589_release(struct pcmcia_device *link); static u16 read_eeprom(unsigned int ioaddr, int index); static void tc589_reset(struct net_device *dev); -static void media_check(unsigned long arg); +static void media_check(struct timer_list *t); static int el3_config(struct net_device *dev, struct ifmap *map); static int el3_open(struct net_device *dev); static netdev_tx_t el3_start_xmit(struct sk_buff *skb, @@ -517,7 +517,7 @@ static int el3_open(struct net_device *dev) netif_start_queue(dev); tc589_reset(dev); - setup_timer(&lp->media, media_check, (unsigned long)dev); + timer_setup(&lp->media, media_check, 0); mod_timer(&lp->media, jiffies + HZ); dev_dbg(&link->dev, "%s: opened, status %4.4x.\n", @@ -676,10 +676,10 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id) return IRQ_RETVAL(handled); } -static void media_check(unsigned long arg) +static void media_check(struct timer_list *t) { - struct net_device *dev = (struct net_device *)(arg); - struct el3_private *lp = netdev_priv(dev); + struct el3_private *lp = from_timer(lp, t, media); + struct net_device *dev = lp->p_dev->priv; unsigned int ioaddr = dev->base_addr; u16 media, errs; unsigned long flags; diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 402d9090ad29..f4e13a7014bd 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c 
@@ -759,8 +759,8 @@ static int vortex_open(struct net_device *dev); static void mdio_sync(struct vortex_private *vp, int bits); static int mdio_read(struct net_device *dev, int phy_id, int location); static void mdio_write(struct net_device *vp, int phy_id, int location, int value); -static void vortex_timer(unsigned long arg); -static void rx_oom_timer(unsigned long arg); +static void vortex_timer(struct timer_list *t); +static void rx_oom_timer(struct timer_list *t); static netdev_tx_t vortex_start_xmit(struct sk_buff *skb, struct net_device *dev); static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb, @@ -1599,9 +1599,9 @@ vortex_up(struct net_device *dev) dev->name, media_tbl[dev->if_port].name); } - setup_timer(&vp->timer, vortex_timer, (unsigned long)dev); + timer_setup(&vp->timer, vortex_timer, 0); mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait)); - setup_timer(&vp->rx_oom_timer, rx_oom_timer, (unsigned long)dev); + timer_setup(&vp->rx_oom_timer, rx_oom_timer, 0); if (vortex_debug > 1) pr_debug("%s: Initial media type %s.\n", @@ -1784,10 +1784,10 @@ out: } static void -vortex_timer(unsigned long data) +vortex_timer(struct timer_list *t) { - struct net_device *dev = (struct net_device *)data; - struct vortex_private *vp = netdev_priv(dev); + struct vortex_private *vp = from_timer(vp, t, timer); + struct net_device *dev = vp->mii.dev; void __iomem *ioaddr = vp->ioaddr; int next_tick = 60*HZ; int ok = 0; @@ -2687,10 +2687,10 @@ boomerang_rx(struct net_device *dev) * for some memory. Otherwise there is no way to restart the rx process. */ static void -rx_oom_timer(unsigned long arg) +rx_oom_timer(struct timer_list *t) { - struct net_device *dev = (struct net_device *)arg; - struct vortex_private *vp = netdev_priv(dev); + struct vortex_private *vp = from_timer(vp, t, rx_oom_timer); + struct net_device *dev = vp->mii.dev; spin_lock_irq(&vp->lock); if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */ -- cgit v1.2.3 From cacd2b3fb9815ec29b778342a8aaa80edc0f98d3 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 16 Oct 2017 17:29:08 -0700 Subject: chelsio: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: "David S. Miller" Cc: Johannes Berg Cc: Eric Dumazet Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb/sge.c | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c index 75e439918700..30de26ef3da4 100644 --- a/drivers/net/ethernet/chelsio/cxgb/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb/sge.c @@ -1882,10 +1882,10 @@ send: /* * Callback for the Tx buffer reclaim timer. Runs with softirqs disabled. 
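The chelsio hunk below also folds the choice of callback into the setup call: where the old code did init_timer() and then assigned one of two functions to .function, timer_setup() takes the callback directly, so the selection becomes a ternary at the call site and both candidates must share the new prototype. A hedged sketch of that pattern with invented demo_* names (the real sge/adapter layout is in the diff that follows):

#include <linux/timer.h>

struct demo_sge {
        struct timer_list espibug_timer;
        int nports;
};

static void demo_espibug(struct timer_list *t)
{
        struct demo_sge *sge = from_timer(sge, t, espibug_timer);

        /* single-port workaround would run here */
        mod_timer(&sge->espibug_timer, jiffies + 1);
}

static void demo_espibug_t204(struct timer_list *t)
{
        struct demo_sge *sge = from_timer(sge, t, espibug_timer);

        /* T204 multi-port workaround would run here */
        mod_timer(&sge->espibug_timer, jiffies + 1);
}

static void demo_create(struct demo_sge *sge)
{
        /* both callbacks have identical prototypes, so the choice
         * can be made inline instead of via later .function writes */
        timer_setup(&sge->espibug_timer,
                    sge->nports > 1 ? demo_espibug_t204 : demo_espibug, 0);
}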
*/ -static void sge_tx_reclaim_cb(unsigned long data) +static void sge_tx_reclaim_cb(struct timer_list *t) { int i; - struct sge *sge = (struct sge *)data; + struct sge *sge = from_timer(sge, t, tx_reclaim_timer); for (i = 0; i < SGE_CMDQ_N; ++i) { struct cmdQ *q = &sge->cmdQ[i]; @@ -1978,10 +1978,10 @@ void t1_sge_start(struct sge *sge) /* * Callback for the T2 ESPI 'stuck packet feature' workaround */ -static void espibug_workaround_t204(unsigned long data) +static void espibug_workaround_t204(struct timer_list *t) { - struct adapter *adapter = (struct adapter *)data; - struct sge *sge = adapter->sge; + struct sge *sge = from_timer(sge, t, espibug_timer); + struct adapter *adapter = sge->adapter; unsigned int nports = adapter->params.nports; u32 seop[MAX_NPORTS]; @@ -2021,10 +2021,10 @@ static void espibug_workaround_t204(unsigned long data) mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout); } -static void espibug_workaround(unsigned long data) +static void espibug_workaround(struct timer_list *t) { - struct adapter *adapter = (struct adapter *)data; - struct sge *sge = adapter->sge; + struct sge *sge = from_timer(sge, t, espibug_timer); + struct adapter *adapter = sge->adapter; if (netif_running(adapter->port[0].dev)) { struct sk_buff *skb = sge->espibug_skb[0]; @@ -2075,18 +2075,15 @@ struct sge *t1_sge_create(struct adapter *adapter, struct sge_params *p) goto nomem_port; } - setup_timer(&sge->tx_reclaim_timer, sge_tx_reclaim_cb, - (unsigned long)sge); + timer_setup(&sge->tx_reclaim_timer, sge_tx_reclaim_cb, 0); if (is_T2(sge->adapter)) { - init_timer(&sge->espibug_timer); + timer_setup(&sge->espibug_timer, + adapter->params.nports > 1 ? espibug_workaround_t204 : espibug_workaround, + 0); - if (adapter->params.nports > 1) { + if (adapter->params.nports > 1) tx_sched_init(sge); - sge->espibug_timer.function = espibug_workaround_t204; - } else - sge->espibug_timer.function = espibug_workaround; - sge->espibug_timer.data = (unsigned long)sge->adapter; sge->espibug_timeout = 1; /* for T204, every 10ms */ -- cgit v1.2.3 From 495ad9864f17bb2f4e085f9398328d14bfdb4d88 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 16 Oct 2017 17:29:09 -0700 Subject: net: amd8111e: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: "David S. Miller" Cc: Eric Dumazet Cc: Jarod Wilson Cc: Philippe Reynes Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S.
Miller --- drivers/net/ethernet/amd/amd8111e.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 7f22af6e37e0..358f7ab77c70 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1669,9 +1669,9 @@ static int amd8111e_resume(struct pci_dev *pci_dev) return 0; } -static void amd8111e_config_ipg(struct net_device *dev) +static void amd8111e_config_ipg(struct timer_list *t) { - struct amd8111e_priv *lp = netdev_priv(dev); + struct amd8111e_priv *lp = from_timer(lp, t, ipg_data.ipg_timer); struct ipg_info *ipg_data = &lp->ipg_data; void __iomem *mmio = lp->mmio; unsigned int prev_col_cnt = ipg_data->col_cnt; @@ -1883,8 +1883,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev, /* Initialize software ipg timer */ if(lp->options & OPTION_DYN_IPG_ENABLE){ - setup_timer(&lp->ipg_data.ipg_timer, - (void *)&amd8111e_config_ipg, (unsigned long)dev); + timer_setup(&lp->ipg_data.ipg_timer, amd8111e_config_ipg, 0); lp->ipg_data.ipg_timer.expires = jiffies + IPG_CONVERGE_JIFFIES; lp->ipg_data.ipg = DEFAULT_IPG; -- cgit v1.2.3 From b09064b78f396ed0840895b11e8bd90e8380afd5 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 16 Oct 2017 17:29:10 -0700 Subject: bna: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: Rasesh Mody Cc: Sudarsana Kalluru Cc: Dept-GELinuxNICDev@cavium.com Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/brocade/bna/bnad.c | 43 +++++++++++++++------------------ 1 file changed, 19 insertions(+), 24 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index 6e13c937d715..a843076597ec 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -1693,9 +1693,9 @@ err_return: /* Timer callbacks */ /* a) IOC timer */ static void -bnad_ioc_timeout(unsigned long data) +bnad_ioc_timeout(struct timer_list *t) { - struct bnad *bnad = (struct bnad *)data; + struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer); unsigned long flags; spin_lock_irqsave(&bnad->bna_lock, flags); @@ -1704,9 +1704,9 @@ bnad_ioc_timeout(unsigned long data) } static void -bnad_ioc_hb_check(unsigned long data) +bnad_ioc_hb_check(struct timer_list *t) { - struct bnad *bnad = (struct bnad *)data; + struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer); unsigned long flags; spin_lock_irqsave(&bnad->bna_lock, flags); @@ -1715,9 +1715,9 @@ bnad_ioc_hb_check(unsigned long data) } static void -bnad_iocpf_timeout(unsigned long data) +bnad_iocpf_timeout(struct timer_list *t) { - struct bnad *bnad = (struct bnad *)data; + struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer); unsigned long flags; spin_lock_irqsave(&bnad->bna_lock, flags); @@ -1726,9 +1726,9 @@ bnad_iocpf_timeout(unsigned long data) } static void -bnad_iocpf_sem_timeout(unsigned long data) +bnad_iocpf_sem_timeout(struct timer_list *t) { - struct bnad *bnad = (struct bnad *)data; + struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer); unsigned long flags; spin_lock_irqsave(&bnad->bna_lock, flags); @@ -1748,9 +1748,9 @@ bnad_iocpf_sem_timeout(unsigned long data) /* b) 
Dynamic Interrupt Moderation Timer */ static void -bnad_dim_timeout(unsigned long data) +bnad_dim_timeout(struct timer_list *t) { - struct bnad *bnad = (struct bnad *)data; + struct bnad *bnad = from_timer(bnad, t, dim_timer); struct bnad_rx_info *rx_info; struct bnad_rx_ctrl *rx_ctrl; int i, j; @@ -1781,9 +1781,9 @@ bnad_dim_timeout(unsigned long data) /* c) Statistics Timer */ static void -bnad_stats_timeout(unsigned long data) +bnad_stats_timeout(struct timer_list *t) { - struct bnad *bnad = (struct bnad *)data; + struct bnad *bnad = from_timer(bnad, t, stats_timer); unsigned long flags; if (!netif_running(bnad->netdev) || @@ -1804,8 +1804,7 @@ bnad_dim_timer_start(struct bnad *bnad) { if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) { - setup_timer(&bnad->dim_timer, bnad_dim_timeout, - (unsigned long)bnad); + timer_setup(&bnad->dim_timer, bnad_dim_timeout, 0); set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags); mod_timer(&bnad->dim_timer, jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ)); @@ -1823,8 +1822,7 @@ bnad_stats_timer_start(struct bnad *bnad) spin_lock_irqsave(&bnad->bna_lock, flags); if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) { - setup_timer(&bnad->stats_timer, bnad_stats_timeout, - (unsigned long)bnad); + timer_setup(&bnad->stats_timer, bnad_stats_timeout, 0); mod_timer(&bnad->stats_timer, jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ)); } @@ -3692,14 +3690,11 @@ bnad_pci_probe(struct pci_dev *pdev, goto res_free; /* Set up timers */ - setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, - (unsigned long)bnad); - setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, - (unsigned long)bnad); - setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, - (unsigned long)bnad); - setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout, - (unsigned long)bnad); + timer_setup(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, 0); + timer_setup(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, 0); + timer_setup(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, 0); + timer_setup(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout, + 0); /* * Start the chip -- cgit v1.2.3 From 6fa35bd0e9e4e0d1ba3b6a6c7968b8c926317507 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 16 Oct 2017 17:29:11 -0700 Subject: net: dl2k: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: "David S. Miller" Cc: Jarod Wilson Cc: Tobias Klauser Cc: Philippe Reynes Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Reviewed-by: Tobias Klauser Signed-off-by: David S. 
Miller --- drivers/net/ethernet/dlink/dl2k.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 778f974e2928..a2f6758d38dd 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -68,7 +68,7 @@ static const int max_intrloop = 50; static const int multicast_filter_limit = 0x40; static int rio_open (struct net_device *dev); -static void rio_timer (unsigned long data); +static void rio_timer (struct timer_list *t); static void rio_tx_timeout (struct net_device *dev); static netdev_tx_t start_xmit (struct sk_buff *skb, struct net_device *dev); static irqreturn_t rio_interrupt (int irq, void *dev_instance); @@ -644,7 +644,7 @@ static int rio_open(struct net_device *dev) return i; } - setup_timer(&np->timer, rio_timer, (unsigned long)dev); + timer_setup(&np->timer, rio_timer, 0); np->timer.expires = jiffies + 1 * HZ; add_timer(&np->timer); @@ -655,10 +655,10 @@ static int rio_open(struct net_device *dev) } static void -rio_timer (unsigned long data) +rio_timer (struct timer_list *t) { - struct net_device *dev = (struct net_device *)data; - struct netdev_private *np = netdev_priv(dev); + struct netdev_private *np = from_timer(np, t, timer); + struct net_device *dev = pci_get_drvdata(np->pdev); unsigned int entry; int next_tick = 1*HZ; unsigned long flags; -- cgit v1.2.3 From 11dd894e4afa7995d8e4bd6008cbd79840c3a8bd Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 16 Oct 2017 17:29:12 -0700 Subject: net: ksz884x: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: "David S. Miller" Cc: Johannes Berg Cc: Jarod Wilson Cc: Masahiro Yamada Cc: Philippe Reynes Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/micrel/ksz884x.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index e798fbe08600..52207508744c 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -4338,11 +4338,11 @@ static void ksz_stop_timer(struct ksz_timer_info *info) } static void ksz_init_timer(struct ksz_timer_info *info, int period, - void (*function)(unsigned long), void *data) + void (*function)(struct timer_list *)) { info->max = 0; info->period = period; - setup_timer(&info->timer, function, (unsigned long)data); + timer_setup(&info->timer, function, 0); } static void ksz_update_timer(struct ksz_timer_info *info) @@ -6689,9 +6689,9 @@ static void mib_read_work(struct work_struct *work) } } -static void mib_monitor(unsigned long ptr) +static void mib_monitor(struct timer_list *t) { - struct dev_info *hw_priv = (struct dev_info *) ptr; + struct dev_info *hw_priv = from_timer(hw_priv, t, mib_timer_info.timer); mib_read_work(&hw_priv->mib_read); @@ -6716,10 +6716,10 @@ static void mib_monitor(unsigned long ptr) * * This routine is run in a kernel timer to monitor the network device. 
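from_timer() also copes with timers buried one level down, as in the bnad hunks above (bna.ioceth.ioc.ioc_timer) and the ksz884x ksz_timer_info wrapper here: since it is container_of() underneath, the third argument is simply the full member path. A small sketch with invented names:

#include <linux/timer.h>

struct demo_timer_info {
        struct timer_list timer;
        unsigned long period;           /* in jiffies */
};

struct demo_hw_priv {
        struct demo_timer_info mib_timer_info; /* timer nested in a sub-struct */
};

static void demo_mib_monitor(struct timer_list *t)
{
        /* the member path may cross struct boundaries */
        struct demo_hw_priv *hw_priv =
                from_timer(hw_priv, t, mib_timer_info.timer);

        mod_timer(&hw_priv->mib_timer_info.timer,
                  jiffies + hw_priv->mib_timer_info.period);
}

This is also why the ksz_init_timer() helper in the hunk above can drop its void *data parameter entirely.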
*/ -static void dev_monitor(unsigned long ptr) +static void dev_monitor(struct timer_list *t) { - struct net_device *dev = (struct net_device *) ptr; - struct dev_priv *priv = netdev_priv(dev); + struct dev_priv *priv = from_timer(priv, t, monitor_timer_info.timer); + struct net_device *dev = priv->mii_if.dev; struct dev_info *hw_priv = priv->adapter; struct ksz_hw *hw = &hw_priv->hw; struct ksz_port *port = &priv->port; @@ -6789,7 +6789,7 @@ static int __init netdev_init(struct net_device *dev) /* 500 ms timeout */ ksz_init_timer(&priv->monitor_timer_info, 500 * HZ / 1000, - dev_monitor, dev); + dev_monitor); /* 500 ms timeout */ dev->watchdog_timeo = HZ / 2; @@ -7065,7 +7065,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id) /* 500 ms timeout */ ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000, - mib_monitor, hw_priv); + mib_monitor); for (i = 0; i < hw->dev_count; i++) { dev = alloc_etherdev(sizeof(struct dev_priv)); -- cgit v1.2.3 From d99356797a8f3abaa57e13c5d1f50e4392eca037 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 16 Oct 2017 17:29:13 -0700 Subject: forcedeth: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: "David S. Miller" Cc: Zhu Yanjun Cc: Philippe Reynes Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/nvidia/forcedeth.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index a235e8881af9..88128ce61471 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -1884,10 +1884,9 @@ packet_dropped: } /* If rx bufs are exhausted called after 50ms to attempt to refresh */ -static void nv_do_rx_refill(unsigned long data) +static void nv_do_rx_refill(struct timer_list *t) { - struct net_device *dev = (struct net_device *) data; - struct fe_priv *np = netdev_priv(dev); + struct fe_priv *np = from_timer(np, t, oom_kick); /* Just reschedule NAPI rx processing */ napi_schedule(&np->napi); @@ -4065,10 +4064,10 @@ static void nv_free_irq(struct net_device *dev) } } -static void nv_do_nic_poll(unsigned long data) +static void nv_do_nic_poll(struct timer_list *t) { - struct net_device *dev = (struct net_device *) data; - struct fe_priv *np = netdev_priv(dev); + struct fe_priv *np = from_timer(np, t, nic_poll); + struct net_device *dev = np->dev; u8 __iomem *base = get_hwbase(dev); u32 mask = 0; unsigned long flags; @@ -4176,16 +4175,18 @@ static void nv_do_nic_poll(unsigned long data) #ifdef CONFIG_NET_POLL_CONTROLLER static void nv_poll_controller(struct net_device *dev) { - nv_do_nic_poll((unsigned long) dev); + struct fe_priv *np = netdev_priv(dev); + + nv_do_nic_poll(&np->nic_poll); } #endif -static void nv_do_stats_poll(unsigned long data) +static void nv_do_stats_poll(struct timer_list *t) __acquires(&netdev_priv(dev)->hwstats_lock) __releases(&netdev_priv(dev)->hwstats_lock) { - struct net_device *dev = (struct net_device *) data; - struct fe_priv *np = netdev_priv(dev); + struct fe_priv *np = from_timer(np, t, stats_poll); + struct net_device *dev = np->dev; /* If lock is currently taken, the stats are being refreshed * and hence fresh enough */ @@ -5631,10 +5632,9 @@ static int nv_probe(struct 
pci_dev *pci_dev, const struct pci_device_id *id) u64_stats_init(&np->swstats_rx_syncp); u64_stats_init(&np->swstats_tx_syncp); - setup_timer(&np->oom_kick, nv_do_rx_refill, (unsigned long)dev); - setup_timer(&np->nic_poll, nv_do_nic_poll, (unsigned long)dev); - setup_deferrable_timer(&np->stats_poll, nv_do_stats_poll, - (unsigned long)dev); + timer_setup(&np->oom_kick, nv_do_rx_refill, 0); + timer_setup(&np->nic_poll, nv_do_nic_poll, 0); + timer_setup(&np->stats_poll, nv_do_stats_poll, TIMER_DEFERRABLE); err = pci_enable_device(pci_dev); if (err) -- cgit v1.2.3 From de892f8f2cc8176368a490e0778fd2de15e9bfda Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 16 Oct 2017 17:29:27 -0700 Subject: net: ethernet: apple: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: "David S. Miller" Cc: Johannes Berg Cc: Jarod Wilson Cc: Rob Herring Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/apple/bmac.c | 12 +++++------- drivers/net/ethernet/apple/mace.c | 12 +++++------- 2 files changed, 10 insertions(+), 14 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c index eac740c476ce..5a655d289dd5 100644 --- a/drivers/net/ethernet/apple/bmac.c +++ b/drivers/net/ethernet/apple/bmac.c @@ -157,7 +157,7 @@ static irqreturn_t bmac_misc_intr(int irq, void *dev_id); static irqreturn_t bmac_txdma_intr(int irq, void *dev_id); static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id); static void bmac_set_timeout(struct net_device *dev); -static void bmac_tx_timeout(unsigned long data); +static void bmac_tx_timeout(struct timer_list *t); static int bmac_output(struct sk_buff *skb, struct net_device *dev); static void bmac_start(struct net_device *dev); @@ -555,8 +555,6 @@ static inline void bmac_set_timeout(struct net_device *dev) if (bp->timeout_active) del_timer(&bp->tx_timeout); bp->tx_timeout.expires = jiffies + TX_TIMEOUT; - bp->tx_timeout.function = bmac_tx_timeout; - bp->tx_timeout.data = (unsigned long) dev; add_timer(&bp->tx_timeout); bp->timeout_active = 1; spin_unlock_irqrestore(&bp->lock, flags); @@ -1321,7 +1319,7 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match) bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1); skb_queue_head_init(bp->queue); - init_timer(&bp->tx_timeout); + timer_setup(&bp->tx_timeout, bmac_tx_timeout, 0); ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev); if (ret) { @@ -1471,10 +1469,10 @@ bmac_output(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -static void bmac_tx_timeout(unsigned long data) +static void bmac_tx_timeout(struct timer_list *t) { - struct net_device *dev = (struct net_device *) data; - struct bmac_data *bp = netdev_priv(dev); + struct bmac_data *bp = from_timer(bp, t, tx_timeout); + struct net_device *dev = macio_get_drvdata(bp->mdev); volatile struct dbdma_regs __iomem *td = bp->tx_dma; volatile struct dbdma_regs __iomem *rd = bp->rx_dma; volatile struct dbdma_cmd *cp; diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c index e58b157b7d7c..0b5429d76bcf 100644 --- a/drivers/net/ethernet/apple/mace.c +++ b/drivers/net/ethernet/apple/mace.c @@ -86,7 +86,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id); static irqreturn_t 
mace_txdma_intr(int irq, void *dev_id); static irqreturn_t mace_rxdma_intr(int irq, void *dev_id); static void mace_set_timeout(struct net_device *dev); -static void mace_tx_timeout(unsigned long data); +static void mace_tx_timeout(struct timer_list *t); static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma); static inline void mace_clean_rings(struct mace_data *mp); static void __mace_set_address(struct net_device *dev, void *addr); @@ -196,7 +196,7 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match) memset((char *) mp->tx_cmds, 0, (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd)); - init_timer(&mp->tx_timeout); + timer_setup(&mp->tx_timeout, mace_tx_timeout, 0); spin_lock_init(&mp->lock); mp->timeout_active = 0; @@ -521,8 +521,6 @@ static inline void mace_set_timeout(struct net_device *dev) if (mp->timeout_active) del_timer(&mp->tx_timeout); mp->tx_timeout.expires = jiffies + TX_TIMEOUT; - mp->tx_timeout.function = mace_tx_timeout; - mp->tx_timeout.data = (unsigned long) dev; add_timer(&mp->tx_timeout); mp->timeout_active = 1; } @@ -801,10 +799,10 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static void mace_tx_timeout(unsigned long data) +static void mace_tx_timeout(struct timer_list *t) { - struct net_device *dev = (struct net_device *) data; - struct mace_data *mp = netdev_priv(dev); + struct mace_data *mp = from_timer(mp, t, tx_timeout); + struct net_device *dev = macio_get_drvdata(mp->mdev); volatile struct mace __iomem *mb = mp->mace; volatile struct dbdma_regs __iomem *td = mp->tx_dma; volatile struct dbdma_regs __iomem *rd = mp->rx_dma; -- cgit v1.2.3 From 0822c5d94e10d9790e82bdfea20a10f0884bca54 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 16 Oct 2017 17:29:28 -0700 Subject: net: ethernet: sun: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: "David S. Miller" Cc: Philippe Reynes Cc: Jarod Wilson Cc: Shannon Nelson Cc: Rob Herring Cc: chris hyser Cc: Tushar Dave Cc: Tobias Klauser Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Acked-by: Shannon Nelson Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sun/cassini.c | 7 ++++--- drivers/net/ethernet/sun/ldmvsw.c | 3 +-- drivers/net/ethernet/sun/niu.c | 10 ++++------ drivers/net/ethernet/sun/sunbmac.c | 10 ++++------ drivers/net/ethernet/sun/sungem.c | 6 +++--- drivers/net/ethernet/sun/sunhme.c | 10 ++++------ drivers/net/ethernet/sun/sunvnet.c | 3 +-- drivers/net/ethernet/sun/sunvnet_common.c | 4 ++-- drivers/net/ethernet/sun/sunvnet_common.h | 2 +- 9 files changed, 24 insertions(+), 31 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index a74d78f64af9..113bd57e2ea0 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -4079,9 +4079,9 @@ done: #endif } -static void cas_link_timer(unsigned long data) +static void cas_link_timer(struct timer_list *t) { - struct cas *cp = (struct cas *) data; + struct cas *cp = from_timer(cp, t, link_timer); int mask, pending = 0, reset = 0; unsigned long flags; @@ -5039,7 +5039,8 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) spin_lock_init(&cp->stat_lock[N_TX_RINGS]); mutex_init(&cp->pm_mutex); - setup_timer(&cp->link_timer, cas_link_timer, (unsigned long)cp); + timer_setup(&cp->link_timer, cas_link_timer, 0); + #if 1 /* Just in case the implementation of atomic operations * change so that an explicit initialization is necessary. diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index 5feeaa9f0a9e..5ea037672e6f 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -363,8 +363,7 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) list_add_rcu(&port->list, &vp->port_list); spin_unlock_irqrestore(&vp->lock, flags); - setup_timer(&port->clean_timer, sunvnet_clean_timer_expire_common, - (unsigned long)port); + timer_setup(&port->clean_timer, sunvnet_clean_timer_expire_common, 0); err = register_netdev(dev); if (err) { diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index bde19b307d0d..ab502ee35fb2 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -2221,9 +2221,9 @@ static int niu_link_status(struct niu *np, int *link_up_p) return err; } -static void niu_timer(unsigned long __opaque) +static void niu_timer(struct timer_list *t) { - struct niu *np = (struct niu *) __opaque; + struct niu *np = from_timer(np, t, timer); unsigned long off; int err, link_up; @@ -6123,7 +6123,7 @@ static int niu_open(struct net_device *dev) err = niu_init_hw(np); if (!err) { - setup_timer(&np->timer, niu_timer, (unsigned long)np); + timer_setup(&np->timer, niu_timer, 0); np->timer.expires = jiffies + HZ; err = niu_enable_interrupts(np, 1); @@ -6773,10 +6773,8 @@ static int niu_change_mtu(struct net_device *dev, int new_mtu) err = niu_init_hw(np); if (!err) { - init_timer(&np->timer); + timer_setup(&np->timer, niu_timer, 0); np->timer.expires = jiffies + HZ; - np->timer.data = (unsigned long) np; - np->timer.function = niu_timer; err = niu_enable_interrupts(np, 1); if (err) diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index 3189722110c2..0b1f41f6bceb 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c @@ -523,9 +523,9 @@ static int try_next_permutation(struct bigmac *bp, void __iomem *tregs) return -1; } -static void bigmac_timer(unsigned long data) +static void bigmac_timer(struct timer_list *t) { - struct 
bigmac *bp = (struct bigmac *) data; + struct bigmac *bp = from_timer(bp, t, bigmac_timer); void __iomem *tregs = bp->tregs; int restart_timer = 0; @@ -613,8 +613,6 @@ static void bigmac_begin_auto_negotiation(struct bigmac *bp) bp->timer_state = ltrywait; bp->timer_ticks = 0; bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10; - bp->bigmac_timer.data = (unsigned long) bp; - bp->bigmac_timer.function = bigmac_timer; add_timer(&bp->bigmac_timer); } @@ -921,7 +919,7 @@ static int bigmac_open(struct net_device *dev) printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq); return ret; } - init_timer(&bp->bigmac_timer); + timer_setup(&bp->bigmac_timer, bigmac_timer, 0); ret = bigmac_init_hw(bp, 0); if (ret) free_irq(dev->irq, bp); @@ -1172,7 +1170,7 @@ static int bigmac_ether_init(struct platform_device *op, "board-version", 1); /* Init auto-negotiation timer state. */ - init_timer(&bp->bigmac_timer); + timer_setup(&bp->bigmac_timer, bigmac_timer, 0); bp->timer_state = asleep; bp->timer_ticks = 0; diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index b75ab8f44968..a7afcee3c5ae 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -1496,9 +1496,9 @@ static int gem_mdio_link_not_up(struct gem *gp) } } -static void gem_link_timer(unsigned long data) +static void gem_link_timer(struct timer_list *t) { - struct gem *gp = (struct gem *) data; + struct gem *gp = from_timer(gp, t, link_timer); struct net_device *dev = gp->dev; int restart_aneg = 0; @@ -2910,7 +2910,7 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) gp->msg_enable = DEFAULT_MSG; - setup_timer(&gp->link_timer, gem_link_timer, (unsigned long)gp); + timer_setup(&gp->link_timer, gem_link_timer, 0); INIT_WORK(&gp->reset_task, gem_reset_task); diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 9e983e1d8249..0431f1e5f511 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -685,9 +685,9 @@ static int is_lucent_phy(struct happy_meal *hp) return ret; } -static void happy_meal_timer(unsigned long data) +static void happy_meal_timer(struct timer_list *t) { - struct happy_meal *hp = (struct happy_meal *) data; + struct happy_meal *hp = from_timer(hp, t, happy_timer); void __iomem *tregs = hp->tcvregs; int restart_timer = 0; @@ -1413,8 +1413,6 @@ force_link: hp->timer_ticks = 0; hp->happy_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. 
*/ - hp->happy_timer.data = (unsigned long) hp; - hp->happy_timer.function = happy_meal_timer; add_timer(&hp->happy_timer); } @@ -2819,7 +2817,7 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe) hp->timer_state = asleep; hp->timer_ticks = 0; - init_timer(&hp->happy_timer); + timer_setup(&hp->happy_timer, happy_meal_timer, 0); hp->dev = dev; dev->netdev_ops = &hme_netdev_ops; @@ -3133,7 +3131,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev, hp->timer_state = asleep; hp->timer_ticks = 0; - init_timer(&hp->happy_timer); + timer_setup(&hp->happy_timer, happy_meal_timer, 0); hp->irq = pdev->irq; hp->dev = dev; diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 0b95105f7060..27fb22638885 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -492,8 +492,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) pr_info("%s: PORT ( remote-mac %pM%s )\n", vp->dev->name, port->raddr, switch_port ? " switch-port" : ""); - setup_timer(&port->clean_timer, sunvnet_clean_timer_expire_common, - (unsigned long)port); + timer_setup(&port->clean_timer, sunvnet_clean_timer_expire_common, 0); napi_enable(&port->napi); vio_port_up(&port->vio); diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index ecf456c7b6d1..8aa3ce46bb81 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -1040,9 +1040,9 @@ static inline void vnet_free_skbs(struct sk_buff *skb) } } -void sunvnet_clean_timer_expire_common(unsigned long port0) +void sunvnet_clean_timer_expire_common(struct timer_list *t) { - struct vnet_port *port = (struct vnet_port *)port0; + struct vnet_port *port = from_timer(port, t, clean_timer); struct sk_buff *freeskbs; unsigned pending; diff --git a/drivers/net/ethernet/sun/sunvnet_common.h b/drivers/net/ethernet/sun/sunvnet_common.h index b20d6fa7ef25..656673c31066 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.h +++ b/drivers/net/ethernet/sun/sunvnet_common.h @@ -129,7 +129,7 @@ struct vnet { ((__port)->vsw ? (__port)->dev : (__port)->vp->dev) /* Common funcs */ -void sunvnet_clean_timer_expire_common(unsigned long port0); +void sunvnet_clean_timer_expire_common(struct timer_list *t); int sunvnet_open_common(struct net_device *dev); int sunvnet_close_common(struct net_device *dev); void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp); -- cgit v1.2.3 From 6fd9c53f71862a4797b7ed8a5de80e2c64829f56 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 16 Oct 2017 17:29:29 -0700 Subject: net: seeq: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: Russell King Cc: linux-arm-kernel@lists.infradead.org Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/seeq/ether3.c | 11 ++++++----- drivers/net/ethernet/seeq/ether3.h | 1 + 2 files changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c index 244c1e171017..da4807723a06 100644 --- a/drivers/net/ethernet/seeq/ether3.c +++ b/drivers/net/ethernet/seeq/ether3.c @@ -170,9 +170,11 @@ ether3_setbuffer(struct net_device *dev, buffer_rw_t read, int start) /* * Switch LED off... 
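The seeq ether3 patch just below is the one case in this batch where the private struct had no route back to the net_device that the old data argument used to carry, so the patch adds one. The general recipe, sketched with invented names: add the field, fill it in before the timer can be armed, read it back in the callback.

#include <linux/netdevice.h>
#include <linux/timer.h>

struct demo_priv {
        struct timer_list timer;
        struct net_device *dev;         /* new back-pointer for the callback */
};

static void demo_ledoff(struct timer_list *t)
{
        struct demo_priv *p = from_timer(p, t, timer);
        struct net_device *dev = p->dev;        /* used to arrive as 'data' */

        netdev_dbg(dev, "LED off\n");
}

static void demo_probe(struct net_device *dev, struct demo_priv *p)
{
        p->dev = dev;                   /* must be set before the first arm */
        timer_setup(&p->timer, demo_ledoff, 0);
}

The apple bmac/mace patches above solve the same problem without a new field by leaning on macio_get_drvdata(), and as a bonus stop re-writing .function/.data on every re-arm in the *_set_timeout() helpers.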
*/ -static void ether3_ledoff(unsigned long data) +static void ether3_ledoff(struct timer_list *t) { - struct net_device *dev = (struct net_device *)data; + struct dev_priv *private = from_timer(private, t, timer); + struct net_device *dev = private->dev; + ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2); } @@ -183,8 +185,6 @@ static inline void ether3_ledon(struct net_device *dev) { del_timer(&priv(dev)->timer); priv(dev)->timer.expires = jiffies + HZ / 50; /* leave on for 1/50th second */ - priv(dev)->timer.data = (unsigned long)dev; - priv(dev)->timer.function = ether3_ledoff; add_timer(&priv(dev)->timer); if (priv(dev)->regs.config2 & CFG2_CTRLO) ether3_outw(priv(dev)->regs.config2 &= ~CFG2_CTRLO, REG_CONFIG2); @@ -783,7 +783,8 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id) ether3_addr(dev->dev_addr, ec); - init_timer(&priv(dev)->timer); + priv(dev)->dev = dev; + timer_setup(&priv(dev)->timer, ether3_ledoff, 0); /* Reset card... */ diff --git a/drivers/net/ethernet/seeq/ether3.h b/drivers/net/ethernet/seeq/ether3.h index 2db63b08bdf3..ea2ba286e665 100644 --- a/drivers/net/ethernet/seeq/ether3.h +++ b/drivers/net/ethernet/seeq/ether3.h @@ -165,6 +165,7 @@ struct dev_priv { unsigned char tx_tail; /* buffer nr of transmitting packet */ unsigned int rx_head; /* address to fetch next packet from */ struct timer_list timer; + struct net_device *dev; int broken; /* 0 = ok, 1 = something went wrong */ }; -- cgit v1.2.3 From dfc57004945b34cf83f600c697a54afca1fd15c5 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 16 Oct 2017 17:29:31 -0700 Subject: net/ethernet/sgi: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/sgi/ioc3-eth.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 9c0488e0f08e..18d533fdf14c 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -764,9 +764,9 @@ static inline void ioc3_setup_duplex(struct ioc3_private *ip) ioc3_w_emcr(ip->emcr); } -static void ioc3_timer(unsigned long data) +static void ioc3_timer(struct timer_list *t) { - struct ioc3_private *ip = (struct ioc3_private *) data; + struct ioc3_private *ip = from_timer(ip, t, ioc3_timer); /* Print the link status if it has changed */ mii_check_media(&ip->mii, 1, 0); @@ -818,8 +818,6 @@ out: static void ioc3_mii_start(struct ioc3_private *ip) { ip->ioc3_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec.
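The s2io patch below can delete its S2IO_TIMER_CONF() helper macro outright: the four-step dance it wrapped (init_timer(), assign .function, cast and assign .data, mod_timer()) collapses to two plain calls. Roughly, using the driver's own names but not its exact line breaks:

        /* old, via the macro:
         *   S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
         * new, spelled out: */
        timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0);
        mod_timer(&sp->alarm_timer, jiffies + HZ / 2);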
*/ - ip->ioc3_timer.data = (unsigned long) ip; - ip->ioc3_timer.function = ioc3_timer; add_timer(&ip->ioc3_timer); } @@ -1291,7 +1289,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) #endif spin_lock_init(&ip->ioc3_lock); - init_timer(&ip->ioc3_timer); + timer_setup(&ip->ioc3_timer, ioc3_timer, 0); ioc3_stop(ip); ioc3_init(dev); -- cgit v1.2.3 From e84a2ac9ffa9b1ba211c8982f07cd92f60239c3e Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 16 Oct 2017 17:29:33 -0700 Subject: net: neterion: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: Jon Mason Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/neterion/s2io.c | 13 ++++--------- drivers/net/ethernet/neterion/s2io.h | 2 +- 2 files changed, 5 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 462eda926b1c..b8983e73265a 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -337,12 +337,6 @@ static const char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = { #define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings) #define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN) -#define S2IO_TIMER_CONF(timer, handle, arg, exp) \ - init_timer(&timer); \ - timer.function = handle; \ - timer.data = (unsigned long)arg; \ - mod_timer(&timer, (jiffies + exp)) \ - /* copy mac addr to def_mac_addr array */ static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr) { @@ -4193,9 +4187,9 @@ pci_map_failed: } static void -s2io_alarm_handle(unsigned long data) +s2io_alarm_handle(struct timer_list *t) { - struct s2io_nic *sp = (struct s2io_nic *)data; + struct s2io_nic *sp = from_timer(sp, t, alarm_timer); struct net_device *dev = sp->dev; s2io_handle_errors(dev); @@ -7186,7 +7180,8 @@ static int s2io_card_up(struct s2io_nic *sp) return -ENODEV; } - S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2)); + timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0); + mod_timer(&sp->alarm_timer, jiffies + HZ / 2); set_bit(__S2IO_STATE_CARD_UP, &sp->state); diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h index 6c5997dc8afc..1a24a7218794 100644 --- a/drivers/net/ethernet/neterion/s2io.h +++ b/drivers/net/ethernet/neterion/s2io.h @@ -1094,7 +1094,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget); static int s2io_poll_inta(struct napi_struct *napi, int budget); static void s2io_init_pci(struct s2io_nic * sp); static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr); -static void s2io_alarm_handle(unsigned long data); +static void s2io_alarm_handle(struct timer_list *t); static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id); static irqreturn_t -- cgit v1.2.3 From d039ef68e94eae81c13b0b39a18f0c3455491e4c Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 16 Oct 2017 17:29:34 -0700 Subject: net: hns: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Switches test of .data field to .function, since .data will be going away. Cc: Yisen Zhuang Cc: Salil Mehta Cc: "David S. 
Miller" Cc: lipeng Cc: Lin Yun Sheng Cc: Kejian Yan Cc: Arnd Bergmann Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 7 +++---- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 13 ++++++------- 2 files changed, 9 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 36520634c96a..91565c8fee08 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -2159,9 +2159,9 @@ static void hns_nic_task_schedule(struct hns_nic_priv *priv) (void)schedule_work(&priv->service_task); } -static void hns_nic_service_timer(unsigned long data) +static void hns_nic_service_timer(struct timer_list *t) { - struct hns_nic_priv *priv = (struct hns_nic_priv *)data; + struct hns_nic_priv *priv = from_timer(priv, t, service_timer); (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ); @@ -2451,8 +2451,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev) /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(ndev); - setup_timer(&priv->service_timer, hns_nic_service_timer, - (unsigned long)priv); + timer_setup(&priv->service_timer, hns_nic_service_timer, 0); INIT_WORK(&priv->service_task, hns_nic_service_task); set_bit(NIC_STATE_SERVICE_INITED, &priv->state); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index c322b4534148..6e93943c489a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2350,11 +2350,11 @@ static int hclge_get_status(struct hnae3_handle *handle) return hdev->hw.mac.link; } -static void hclge_service_timer(unsigned long data) +static void hclge_service_timer(struct timer_list *t) { - struct hclge_dev *hdev = (struct hclge_dev *)data; - (void)mod_timer(&hdev->service_timer, jiffies + HZ); + struct hclge_dev *hdev = from_timer(hdev, t, service_timer); + mod_timer(&hdev->service_timer, jiffies + HZ); hclge_task_schedule(hdev); } @@ -3204,7 +3204,7 @@ static int hclge_ae_start(struct hnae3_handle *handle) /* mac enable */ hclge_cfg_mac_mode(hdev, true); clear_bit(HCLGE_STATE_DOWN, &hdev->state); - (void)mod_timer(&hdev->service_timer, jiffies + HZ); + mod_timer(&hdev->service_timer, jiffies + HZ); ret = hclge_mac_start_phy(hdev); if (ret) @@ -4436,8 +4436,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_dcb_ops_set(hdev); - setup_timer(&hdev->service_timer, hclge_service_timer, - (unsigned long)hdev); + timer_setup(&hdev->service_timer, hclge_service_timer, 0); INIT_WORK(&hdev->service_task, hclge_service_task); set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); @@ -4464,7 +4463,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) if (IS_ENABLED(CONFIG_PCI_IOV)) hclge_disable_sriov(hdev); - if (hdev->service_timer.data) + if (hdev->service_timer.function) del_timer_sync(&hdev->service_timer); if (hdev->service_task.func) cancel_work_sync(&hdev->service_task); -- cgit v1.2.3 From 26566eae80512d8a6b52e9d6f880f960893c96b4 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 16 Oct 2017 17:29:35 -0700 Subject: ethernet/intel: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new 
timer_setup() and from_timer() to pass the timer pointer explicitly. Switches test of .data field to .function, since .data will be going away. Cc: Jeff Kirsher Cc: intel-wired-lan@lists.osuosl.org Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/e100.c | 6 +++--- drivers/net/ethernet/intel/e1000e/netdev.c | 14 ++++++-------- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 8 ++++---- drivers/net/ethernet/intel/i40e/i40e_main.c | 8 ++++---- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 8 ++++---- drivers/net/ethernet/intel/igb/igb_main.c | 18 ++++++++---------- drivers/net/ethernet/intel/igbvf/netdev.c | 7 +++---- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 9 ++++----- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 7 +++---- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 8 ++++---- 10 files changed, 43 insertions(+), 50 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 184f11242f56..44b3937f7e81 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -1710,9 +1710,9 @@ static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex) } } -static void e100_watchdog(unsigned long data) +static void e100_watchdog(struct timer_list *t) { - struct nic *nic = (struct nic *)data; + struct nic *nic = from_timer(nic, t, watchdog); struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; u32 speed; @@ -2920,7 +2920,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); - setup_timer(&nic->watchdog, e100_watchdog, (unsigned long)nic); + timer_setup(&nic->watchdog, e100_watchdog, 0); INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index bf8f38f76953..f2f49239b015 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -4823,9 +4823,9 @@ static void e1000e_update_phy_task(struct work_struct *work) * Need to wait a few seconds after link up to get diagnostic information from * the phy **/ -static void e1000_update_phy_info(unsigned long data) +static void e1000_update_phy_info(struct timer_list *t) { - struct e1000_adapter *adapter = (struct e1000_adapter *)data; + struct e1000_adapter *adapter = from_timer(adapter, t, phy_info_timer); if (test_bit(__E1000_DOWN, &adapter->state)) return; @@ -5159,9 +5159,9 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) * e1000_watchdog - Timer Call-back * @data: pointer to adapter cast into an unsigned long **/ -static void e1000_watchdog(unsigned long data) +static void e1000_watchdog(struct timer_list *t) { - struct e1000_adapter *adapter = (struct e1000_adapter *)data; + struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer); /* Do the rest outside of interrupt context */ schedule_work(&adapter->watchdog_task); @@ -7267,10 +7267,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_eeprom; } - setup_timer(&adapter->watchdog_timer, e1000_watchdog, - (unsigned long)adapter); - setup_timer(&adapter->phy_info_timer, e1000_update_phy_info, - (unsigned long)adapter); + timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0); + timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0); INIT_WORK(&adapter->reset_task, e1000_reset_task); INIT_WORK(&adapter->watchdog_task, 
e1000_watchdog_task); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index 1e9ae3197b17..7f605221a686 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -213,9 +213,10 @@ static void fm10k_start_service_event(struct fm10k_intfc *interface) * fm10k_service_timer - Timer Call-back * @data: pointer to interface cast into an unsigned long **/ -static void fm10k_service_timer(unsigned long data) +static void fm10k_service_timer(struct timer_list *t) { - struct fm10k_intfc *interface = (struct fm10k_intfc *)data; + struct fm10k_intfc *interface = from_timer(interface, t, + service_timer); /* Reset the timer */ mod_timer(&interface->service_timer, (HZ * 2) + jiffies); @@ -2315,8 +2316,7 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Initialize service timer and service task late in order to avoid * cleanup issues. */ - setup_timer(&interface->service_timer, &fm10k_service_timer, - (unsigned long)interface); + timer_setup(&interface->service_timer, fm10k_service_timer, 0); INIT_WORK(&interface->service_task, fm10k_service_task); /* Setup the MAC/VLAN queue */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index bb31d53c4923..39989147b30b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8800,9 +8800,9 @@ static void i40e_service_task(struct work_struct *work) * i40e_service_timer - timer callback * @data: pointer to PF struct **/ -static void i40e_service_timer(unsigned long data) +static void i40e_service_timer(struct timer_list *t) { - struct i40e_pf *pf = (struct i40e_pf *)data; + struct i40e_pf *pf = from_timer(pf, t, service_timer); mod_timer(&pf->service_timer, round_jiffies(jiffies + pf->service_timer_period)); @@ -12648,7 +12648,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) #endif /* CONFIG_I40E_DCB */ /* set up periodic task facility */ - setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf); + timer_setup(&pf->service_timer, i40e_service_timer, 0); pf->service_timer_period = HZ; INIT_WORK(&pf->service_task, i40e_service_task); @@ -12972,7 +12972,7 @@ static void i40e_remove(struct pci_dev *pdev) /* no more scheduling of any task */ set_bit(__I40E_SUSPENDED, pf->state); set_bit(__I40E_DOWN, pf->state); - if (pf->service_timer.data) + if (pf->service_timer.function) del_timer_sync(&pf->service_timer); if (pf->service_task.func) cancel_work_sync(&pf->service_task); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 5bcbd46e2f6c..ca2ebdbd24d7 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1594,9 +1594,10 @@ err: * i40evf_watchdog_timer - Periodic call-back timer * @data: pointer to adapter disguised as unsigned long **/ -static void i40evf_watchdog_timer(unsigned long data) +static void i40evf_watchdog_timer(struct timer_list *t) { - struct i40evf_adapter *adapter = (struct i40evf_adapter *)data; + struct i40evf_adapter *adapter = from_timer(adapter, t, + watchdog_timer); schedule_work(&adapter->watchdog_task); /* timer will be rescheduled in watchdog task */ @@ -2748,8 +2749,7 @@ static void i40evf_init_task(struct work_struct *work) ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); } - setup_timer(&adapter->watchdog_timer, 
&i40evf_watchdog_timer, - (unsigned long)adapter); + timer_setup(&adapter->watchdog_timer, i40evf_watchdog_timer, 0); mod_timer(&adapter->watchdog_timer, jiffies + 1); adapter->tx_desc_count = I40EVF_DEFAULT_TXD; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 837d9b46a390..58d01a211367 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -133,8 +133,8 @@ static void igb_clean_all_rx_rings(struct igb_adapter *); static void igb_clean_tx_ring(struct igb_ring *); static void igb_clean_rx_ring(struct igb_ring *); static void igb_set_rx_mode(struct net_device *); -static void igb_update_phy_info(unsigned long); -static void igb_watchdog(unsigned long); +static void igb_update_phy_info(struct timer_list *); +static void igb_watchdog(struct timer_list *); static void igb_watchdog_task(struct work_struct *); static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *); static void igb_get_stats64(struct net_device *dev, @@ -2538,10 +2538,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT); } - setup_timer(&adapter->watchdog_timer, igb_watchdog, - (unsigned long) adapter); - setup_timer(&adapter->phy_info_timer, igb_update_phy_info, - (unsigned long) adapter); + timer_setup(&adapter->watchdog_timer, igb_watchdog, 0); + timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0); INIT_WORK(&adapter->reset_task, igb_reset_task); INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); @@ -4425,9 +4423,9 @@ static void igb_spoof_check(struct igb_adapter *adapter) /* Need to wait a few seconds after link up to get diagnostic information from * the phy */ -static void igb_update_phy_info(unsigned long data) +static void igb_update_phy_info(struct timer_list *t) { - struct igb_adapter *adapter = (struct igb_adapter *) data; + struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer); igb_get_phy_info(&adapter->hw); } @@ -4514,9 +4512,9 @@ static void igb_check_lvmmc(struct igb_adapter *adapter) * igb_watchdog - Timer Call-back * @data: pointer to adapter cast into an unsigned long **/ -static void igb_watchdog(unsigned long data) +static void igb_watchdog(struct timer_list *t) { - struct igb_adapter *adapter = (struct igb_adapter *)data; + struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer); /* Do the rest outside of interrupt context */ schedule_work(&adapter->watchdog_task); } diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 1ed556911b14..713e8df23744 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1915,9 +1915,9 @@ static bool igbvf_has_link(struct igbvf_adapter *adapter) * igbvf_watchdog - Timer Call-back * @data: pointer to adapter cast into an unsigned long **/ -static void igbvf_watchdog(unsigned long data) +static void igbvf_watchdog(struct timer_list *t) { - struct igbvf_adapter *adapter = (struct igbvf_adapter *)data; + struct igbvf_adapter *adapter = from_timer(adapter, t, watchdog_timer); /* Do the rest outside of interrupt context */ schedule_work(&adapter->watchdog_task); @@ -2878,8 +2878,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->addr_len); } - setup_timer(&adapter->watchdog_timer, &igbvf_watchdog, - (unsigned long)adapter); + timer_setup(&adapter->watchdog_timer, igbvf_watchdog, 0); 
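A pattern repeated across the intel drivers in these hunks: the timer callback stays tiny and only kicks a work item, so the slow path runs in process context, and the work handler re-arms the timer. A self-contained sketch with invented demo_* names:

#include <linux/timer.h>
#include <linux/workqueue.h>

struct demo_adapter {
        struct timer_list watchdog_timer;
        struct work_struct watchdog_task;
};

static void demo_watchdog(struct timer_list *t)
{
        struct demo_adapter *adapter = from_timer(adapter, t, watchdog_timer);

        /* do the rest outside of interrupt context */
        schedule_work(&adapter->watchdog_task);
}

static void demo_watchdog_task(struct work_struct *work)
{
        struct demo_adapter *adapter =
                container_of(work, struct demo_adapter, watchdog_task);

        /* slow path would run here; then re-arm two seconds out */
        mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
}

static void demo_probe(struct demo_adapter *adapter)
{
        timer_setup(&adapter->watchdog_timer, demo_watchdog, 0);
        INIT_WORK(&adapter->watchdog_task, demo_watchdog_task);
        mod_timer(&adapter->watchdog_timer, jiffies + HZ);
}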
INIT_WORK(&adapter->reset_task, igbvf_reset_task); INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task); diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 1e6ec2277d54..2353c383f0a7 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -83,7 +83,7 @@ static void ixgb_setup_rctl(struct ixgb_adapter *adapter); static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter); static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter); static void ixgb_set_multi(struct net_device *netdev); -static void ixgb_watchdog(unsigned long data); +static void ixgb_watchdog(struct timer_list *t); static netdev_tx_t ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev); static int ixgb_change_mtu(struct net_device *netdev, int new_mtu); @@ -508,8 +508,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw); - setup_timer(&adapter->watchdog_timer, ixgb_watchdog, - (unsigned long)adapter); + timer_setup(&adapter->watchdog_timer, ixgb_watchdog, 0); INIT_WORK(&adapter->tx_timeout_task, ixgb_tx_timeout_task); @@ -1151,9 +1150,9 @@ alloc_failed: **/ static void -ixgb_watchdog(unsigned long data) +ixgb_watchdog(struct timer_list *t) { - struct ixgb_adapter *adapter = (struct ixgb_adapter *)data; + struct ixgb_adapter *adapter = from_timer(adapter, t, watchdog_timer); struct net_device *netdev = adapter->netdev; struct ixgb_desc_ring *txdr = &adapter->tx_ring; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 7683c14024aa..3e83edd10e23 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7690,9 +7690,9 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) * ixgbe_service_timer - Timer Call-back * @data: pointer to adapter cast into an unsigned long **/ -static void ixgbe_service_timer(unsigned long data) +static void ixgbe_service_timer(struct timer_list *t) { - struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + struct ixgbe_adapter *adapter = from_timer(adapter, t, service_timer); unsigned long next_event_offset; /* poll faster when waiting for link */ @@ -10508,8 +10508,7 @@ skip_sriov: ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); ixgbe_mac_set_default_filter(adapter); - setup_timer(&adapter->service_timer, &ixgbe_service_timer, - (unsigned long) adapter); + timer_setup(&adapter->service_timer, ixgbe_service_timer, 0); if (ixgbe_removed(hw->hw_addr)) { err = -EIO; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 032f8ac06357..12d3601b1d57 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -2747,9 +2747,10 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) * ixgbevf_service_timer - Timer Call-back * @data: pointer to adapter cast into an unsigned long **/ -static void ixgbevf_service_timer(unsigned long data) +static void ixgbevf_service_timer(struct timer_list *t) { - struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; + struct ixgbevf_adapter *adapter = from_timer(adapter, t, + service_timer); /* Reset the timer */ mod_timer(&adapter->service_timer, (HZ * 2) + jiffies); @@ -4120,8 +4121,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_sw_init; } 
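Two loose ends from this series, sketched together with invented names: timer flags now ride through timer_setup()'s third argument (the forcedeth patch above passes TIMER_DEFERRABLE where setup_deferrable_timer() was used), and teardown code that used to ask "was this timer ever set up?" by testing .data now tests .function instead, as the i40e and hclge remove paths above do, since .data is going away.

#include <linux/timer.h>

struct demo_pf {
        struct timer_list service_timer;
};

static void demo_service_poll(struct timer_list *t)
{
        struct demo_pf *pf = from_timer(pf, t, service_timer);

        mod_timer(&pf->service_timer, round_jiffies(jiffies + HZ));
}

static void demo_start(struct demo_pf *pf)
{
        /* flags are the third argument; TIMER_DEFERRABLE lets the
         * timer be batched with other wakeups instead of firing
         * exactly on time */
        timer_setup(&pf->service_timer, demo_service_poll, TIMER_DEFERRABLE);
        mod_timer(&pf->service_timer, jiffies + HZ);
}

static void demo_remove(struct demo_pf *pf)
{
        /* .function is non-NULL iff timer_setup() has run */
        if (pf->service_timer.function)
                del_timer_sync(&pf->service_timer);
}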
- setup_timer(&adapter->service_timer, &ixgbevf_service_timer, - (unsigned long)adapter); + timer_setup(&adapter->service_timer, ixgbevf_service_timer, 0); INIT_WORK(&adapter->service_task, ixgbevf_service_task); set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state); -- cgit v1.2.3 From 7d85b2c8d1ca09040e16a3cd7f019d6a9ddd22d7 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 16 Oct 2017 17:29:39 -0700 Subject: net: fs_enet: Remove unused timer Removes unused timer and its old initialization call. Cc: Pantelis Antoniou Cc: Vitaly Bordug Cc: linuxppc-dev@lists.ozlabs.org Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 2 -- drivers/net/ethernet/freescale/fs_enet/fs_enet.h | 1 - 2 files changed, 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 753259091b22..7892f2f0c6b5 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -1023,8 +1023,6 @@ static int fs_enet_probe(struct platform_device *ofdev) ndev->ethtool_ops = &fs_ethtool_ops; - init_timer(&fep->phy_timer_list); - netif_carrier_off(ndev); ndev->features |= NETIF_F_SG; diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h index 5ce516c8a62a..dd306deb7cf1 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h @@ -137,7 +137,6 @@ struct fs_enet_private { cbd_t __iomem *cur_rx; cbd_t __iomem *cur_tx; int tx_free; - struct timer_list phy_timer_list; const struct phy_info *phy; u32 msg_enable; struct mii_if_info mii_if; -- cgit v1.2.3 From 48167c9ce0b91c068430345bf039c7be23fa2f3f Mon Sep 17 00:00:00 2001 From: Madalin Bucur Date: Mon, 16 Oct 2017 21:36:05 +0300 Subject: fsl/fman: remove of_node The FMan MAC driver allocates a platform device for the Ethernet driver to probe on. Setting pdev->dev.of_node with the MAC node triggers the MAC driver probing of the new platform device. While this fails quickly and does not affect the functionality of the drivers, it is incorrect and must be removed. This was added to address a report that DSA code using of_find_net_device_by_node() is unable to use the DPAA interfaces. Error message seen before this fix: fsl_mac dpaa-ethernet.0: __devm_request_mem_region(mac) failed fsl_mac: probe of dpaa-ethernet.0 failed with error -16 Signed-off-by: Madalin Bucur Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fman/mac.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 387eb4a88b72..9a265f862065 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -623,7 +623,6 @@ static struct platform_device *dpaa_eth_add_device(int fman_id, goto no_mem; } - pdev->dev.of_node = node; pdev->dev.parent = priv->dev; set_dma_ops(&pdev->dev, get_dma_ops(priv->dev)); -- cgit v1.2.3 From 3c38ec67867c0a3fe9eeda68a3d2a945d5d09a24 Mon Sep 17 00:00:00 2001 From: Madalin Bucur Date: Mon, 16 Oct 2017 21:36:06 +0300 Subject: dpaa_eth: move of_phy_connect() to the eth driver Signed-off-by: Madalin Bucur Signed-off-by: David S. 
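For reference, the setup_timer() to timer_setup() conversions in the ixgb, ixgbe and ixgbevf patches above all follow the same mechanical pattern: the callback now receives the struct timer_list itself and recovers its enclosing structure with from_timer(), instead of casting back an unsigned long cookie. A minimal sketch of the pattern, using a hypothetical my_adapter structure rather than code from any of these drivers:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_adapter {
	struct timer_list watchdog_timer;
	/* ... other driver state ... */
};

/* New-style callback: the expired timer is passed in directly */
static void my_watchdog(struct timer_list *t)
{
	struct my_adapter *adapter = from_timer(adapter, t, watchdog_timer);

	/* ... do the periodic work on adapter, then re-arm ... */
	mod_timer(&adapter->watchdog_timer, jiffies + HZ);
}

static void my_timer_init(struct my_adapter *adapter)
{
	/* replaces setup_timer(&timer, callback, (unsigned long)adapter) */
	timer_setup(&adapter->watchdog_timer, my_watchdog, 0);
}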
Miller --- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 48 +++++++++++-- drivers/net/ethernet/freescale/fman/mac.c | 97 ++++++-------------------- drivers/net/ethernet/freescale/fman/mac.h | 5 +- 3 files changed, 66 insertions(+), 84 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 42258060f142..7cf61d62ad5e 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2435,6 +2435,48 @@ static void dpaa_eth_napi_disable(struct dpaa_priv *priv) } } +static void dpaa_adjust_link(struct net_device *net_dev) +{ + struct mac_device *mac_dev; + struct dpaa_priv *priv; + + priv = netdev_priv(net_dev); + mac_dev = priv->mac_dev; + mac_dev->adjust_link(mac_dev); +} + +static int dpaa_phy_init(struct net_device *net_dev) +{ + struct mac_device *mac_dev; + struct phy_device *phy_dev; + struct dpaa_priv *priv; + + priv = netdev_priv(net_dev); + mac_dev = priv->mac_dev; + + phy_dev = of_phy_connect(net_dev, mac_dev->phy_node, + &dpaa_adjust_link, 0, + mac_dev->phy_if); + if (!phy_dev) { + netif_err(priv, ifup, net_dev, "init_phy() failed\n"); + return -ENODEV; + } + + /* Remove any features not supported by the controller */ + phy_dev->supported &= mac_dev->if_support; + + /* Enable the symmetric and asymmetric PAUSE frame advertisements, + * as most of the PHY drivers do not enable them by default. + */ + phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); + phy_dev->advertising = phy_dev->supported; + + mac_dev->phy_dev = phy_dev; + net_dev->phydev = phy_dev; + + return 0; +} + static int dpaa_open(struct net_device *net_dev) { struct mac_device *mac_dev; @@ -2445,12 +2487,8 @@ static int dpaa_open(struct net_device *net_dev) mac_dev = priv->mac_dev; dpaa_eth_napi_enable(priv); - net_dev->phydev = mac_dev->init_phy(net_dev, priv->mac_dev); - if (!net_dev->phydev) { - netif_err(priv, ifup, net_dev, "init_phy() failed\n"); - err = -ENODEV; + if (dpaa_phy_init(net_dev)) goto phy_init_failed; - } for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { err = fman_port_enable(mac_dev->port[i]); diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 9a265f862065..a0a3107c1f45 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -57,9 +57,7 @@ struct mac_priv_s { struct device *dev; void __iomem *vaddr; u8 cell_index; - phy_interface_t phy_if; struct fman *fman; - struct device_node *phy_node; struct device_node *internal_phy_node; /* List of multicast addresses */ struct list_head mc_addr_list; @@ -106,7 +104,7 @@ static void set_fman_mac_params(struct mac_device *mac_dev, resource_size(mac_dev->res)); memcpy(¶ms->addr, mac_dev->addr, sizeof(mac_dev->addr)); params->max_speed = priv->max_speed; - params->phy_if = priv->phy_if; + params->phy_if = mac_dev->phy_if; params->basex_if = false; params->mac_id = priv->cell_index; params->fm = (void *)priv->fman; @@ -419,15 +417,12 @@ void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, } EXPORT_SYMBOL(fman_get_pause_cfg); -static void adjust_link_void(struct net_device *net_dev) +static void adjust_link_void(struct mac_device *mac_dev) { } -static void adjust_link_dtsec(struct net_device *net_dev) +static void adjust_link_dtsec(struct mac_device *mac_dev) { - struct device *dev = net_dev->dev.parent; - struct dpaa_eth_data *eth_data = dev->platform_data; - struct 
mac_device *mac_dev = eth_data->mac_dev; struct phy_device *phy_dev = mac_dev->phy_dev; struct fman_mac *fman_mac; bool rx_pause, tx_pause; @@ -444,14 +439,12 @@ static void adjust_link_dtsec(struct net_device *net_dev) fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause); err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause); if (err < 0) - netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err); + dev_err(mac_dev->priv->dev, "fman_set_mac_active_pause() = %d\n", + err); } -static void adjust_link_memac(struct net_device *net_dev) +static void adjust_link_memac(struct mac_device *mac_dev) { - struct device *dev = net_dev->dev.parent; - struct dpaa_eth_data *eth_data = dev->platform_data; - struct mac_device *mac_dev = eth_data->mac_dev; struct phy_device *phy_dev = mac_dev->phy_dev; struct fman_mac *fman_mac; bool rx_pause, tx_pause; @@ -463,60 +456,12 @@ static void adjust_link_memac(struct net_device *net_dev) fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause); err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause); if (err < 0) - netdev_err(net_dev, "fman_set_mac_active_pause() = %d\n", err); -} - -/* Initializes driver's PHY state, and attaches to the PHY. - * Returns 0 on success. - */ -static struct phy_device *init_phy(struct net_device *net_dev, - struct mac_device *mac_dev, - void (*adj_lnk)(struct net_device *)) -{ - struct phy_device *phy_dev; - struct mac_priv_s *priv = mac_dev->priv; - - phy_dev = of_phy_connect(net_dev, priv->phy_node, adj_lnk, 0, - priv->phy_if); - if (!phy_dev) { - netdev_err(net_dev, "Could not connect to PHY\n"); - return NULL; - } - - /* Remove any features not supported by the controller */ - phy_dev->supported &= mac_dev->if_support; - /* Enable the symmetric and asymmetric PAUSE frame advertisements, - * as most of the PHY drivers do not enable them by default. 
- */ - phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); - phy_dev->advertising = phy_dev->supported; - - mac_dev->phy_dev = phy_dev; - - return phy_dev; -} - -static struct phy_device *dtsec_init_phy(struct net_device *net_dev, - struct mac_device *mac_dev) -{ - return init_phy(net_dev, mac_dev, &adjust_link_dtsec); -} - -static struct phy_device *tgec_init_phy(struct net_device *net_dev, - struct mac_device *mac_dev) -{ - return init_phy(net_dev, mac_dev, adjust_link_void); -} - -static struct phy_device *memac_init_phy(struct net_device *net_dev, - struct mac_device *mac_dev) -{ - return init_phy(net_dev, mac_dev, &adjust_link_memac); + dev_err(mac_dev->priv->dev, "fman_set_mac_active_pause() = %d\n", + err); } static void setup_dtsec(struct mac_device *mac_dev) { - mac_dev->init_phy = dtsec_init_phy; mac_dev->init = dtsec_initialization; mac_dev->set_promisc = dtsec_set_promiscuous; mac_dev->change_addr = dtsec_modify_mac_address; @@ -528,14 +473,13 @@ static void setup_dtsec(struct mac_device *mac_dev) mac_dev->set_multi = set_multi; mac_dev->start = start; mac_dev->stop = stop; - + mac_dev->adjust_link = adjust_link_dtsec; mac_dev->priv->enable = dtsec_enable; mac_dev->priv->disable = dtsec_disable; } static void setup_tgec(struct mac_device *mac_dev) { - mac_dev->init_phy = tgec_init_phy; mac_dev->init = tgec_initialization; mac_dev->set_promisc = tgec_set_promiscuous; mac_dev->change_addr = tgec_modify_mac_address; @@ -547,14 +491,13 @@ static void setup_tgec(struct mac_device *mac_dev) mac_dev->set_multi = set_multi; mac_dev->start = start; mac_dev->stop = stop; - + mac_dev->adjust_link = adjust_link_void; mac_dev->priv->enable = tgec_enable; mac_dev->priv->disable = tgec_disable; } static void setup_memac(struct mac_device *mac_dev) { - mac_dev->init_phy = memac_init_phy; mac_dev->init = memac_initialization; mac_dev->set_promisc = memac_set_promiscuous; mac_dev->change_addr = memac_modify_mac_address; @@ -566,7 +509,7 @@ static void setup_memac(struct mac_device *mac_dev) mac_dev->set_multi = set_multi; mac_dev->start = start; mac_dev->stop = stop; - + mac_dev->adjust_link = adjust_link_memac; mac_dev->priv->enable = memac_enable; mac_dev->priv->disable = memac_disable; } @@ -850,13 +793,13 @@ static int mac_probe(struct platform_device *_of_dev) mac_node); phy_if = PHY_INTERFACE_MODE_SGMII; } - priv->phy_if = phy_if; + mac_dev->phy_if = phy_if; - priv->speed = phy2speed[priv->phy_if]; + priv->speed = phy2speed[mac_dev->phy_if]; priv->max_speed = priv->speed; mac_dev->if_support = DTSEC_SUPPORTED; /* We don't support half-duplex in SGMII mode */ - if (priv->phy_if == PHY_INTERFACE_MODE_SGMII) + if (mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII) mac_dev->if_support &= ~(SUPPORTED_10baseT_Half | SUPPORTED_100baseT_Half); @@ -865,12 +808,12 @@ static int mac_probe(struct platform_device *_of_dev) mac_dev->if_support |= SUPPORTED_1000baseT_Full; /* The 10G interface only supports one mode */ - if (priv->phy_if == PHY_INTERFACE_MODE_XGMII) + if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII) mac_dev->if_support = SUPPORTED_10000baseT_Full; /* Get the rest of the PHY information */ - priv->phy_node = of_parse_phandle(mac_node, "phy-handle", 0); - if (!priv->phy_node && of_phy_is_fixed_link(mac_node)) { + mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0); + if (!mac_dev->phy_node && of_phy_is_fixed_link(mac_node)) { struct phy_device *phy; err = of_phy_register_fixed_link(mac_node); @@ -884,8 +827,8 @@ static int mac_probe(struct platform_device *_of_dev) goto 
_return_dev_set_drvdata; } - priv->phy_node = of_node_get(mac_node); - phy = of_phy_find_device(priv->phy_node); + mac_dev->phy_node = of_node_get(mac_node); + phy = of_phy_find_device(mac_dev->phy_node); if (!phy) { err = -EINVAL; goto _return_dev_set_drvdata; @@ -903,7 +846,7 @@ static int mac_probe(struct platform_device *_of_dev) err = mac_dev->init(mac_dev); if (err < 0) { dev_err(dev, "mac_dev->init() = %d\n", err); - of_node_put(priv->phy_node); + of_node_put(mac_dev->phy_node); goto _return_dev_set_drvdata; } diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h index d7313f0c5135..1ca85a18ab38 100644 --- a/drivers/net/ethernet/freescale/fman/mac.h +++ b/drivers/net/ethernet/freescale/fman/mac.h @@ -50,6 +50,8 @@ struct mac_device { struct fman_port *port[2]; u32 if_support; struct phy_device *phy_dev; + phy_interface_t phy_if; + struct device_node *phy_node; bool autoneg_pause; bool rx_pause_req; @@ -58,11 +60,10 @@ struct mac_device { bool tx_pause_active; bool promisc; - struct phy_device *(*init_phy)(struct net_device *net_dev, - struct mac_device *mac_dev); int (*init)(struct mac_device *mac_dev); int (*start)(struct mac_device *mac_dev); int (*stop)(struct mac_device *mac_dev); + void (*adjust_link)(struct mac_device *mac_dev); int (*set_promisc)(struct fman_mac *mac_dev, bool enable); int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr); int (*set_multi)(struct net_device *net_dev, -- cgit v1.2.3 From c6e26ea8c893687a83c9feda7ab4f89205e19726 Mon Sep 17 00:00:00 2001 From: Madalin Bucur Date: Mon, 16 Oct 2017 21:36:07 +0300 Subject: dpaa_eth: change device used Change device used for DMA mapping to the MAC device that is an of_device, with proper DMA ops. Using this device for the netdevice should also address the issue with DSA scenarios that need the netdevice to be backed by an of_device. Signed-off-by: Madalin Bucur Signed-off-by: David S. 
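The of_phy_connect() move above leaves the FMan MAC driver with a bare mac_device->adjust_link() callback and makes the Ethernet driver own the phylib connection. A condensed sketch of the resulting shape, with hypothetical names and the u32 supported/advertising masks phylib used in this era (before linkmode bitmaps):

#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

/* phylib invokes this on every link state change */
static void my_adjust_link(struct net_device *net_dev)
{
	struct phy_device *phy_dev = net_dev->phydev;

	/* react to phy_dev->link / phy_dev->speed / phy_dev->duplex here */
}

static int my_phy_init(struct net_device *net_dev,
		       struct device_node *phy_node,
		       phy_interface_t phy_if, u32 if_support)
{
	struct phy_device *phy_dev;

	phy_dev = of_phy_connect(net_dev, phy_node, my_adjust_link, 0, phy_if);
	if (!phy_dev)
		return -ENODEV;

	/* drop modes the MAC cannot do, then advertise PAUSE frames */
	phy_dev->supported &= if_support;
	phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
	phy_dev->advertising = phy_dev->supported;

	return 0;
}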
Miller --- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 42 ++++++++------------------ drivers/net/ethernet/freescale/fman/mac.c | 37 +++++++++-------------- drivers/net/ethernet/freescale/fman/mac.h | 1 - 3 files changed, 27 insertions(+), 53 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 7cf61d62ad5e..823aa6597e56 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -385,34 +385,19 @@ out: static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev) { - struct platform_device *of_dev; struct dpaa_eth_data *eth_data; - struct device *dpaa_dev, *dev; - struct device_node *mac_node; + struct device *dpaa_dev; struct mac_device *mac_dev; dpaa_dev = &pdev->dev; eth_data = dpaa_dev->platform_data; - if (!eth_data) + if (!eth_data) { + dev_err(dpaa_dev, "eth_data missing\n"); return ERR_PTR(-ENODEV); - - mac_node = eth_data->mac_node; - - of_dev = of_find_device_by_node(mac_node); - if (!of_dev) { - dev_err(dpaa_dev, "of_find_device_by_node(%pOF) failed\n", - mac_node); - of_node_put(mac_node); - return ERR_PTR(-EINVAL); } - of_node_put(mac_node); - - dev = &of_dev->dev; - - mac_dev = dev_get_drvdata(dev); + mac_dev = eth_data->mac_dev; if (!mac_dev) { - dev_err(dpaa_dev, "dev_get_drvdata(%s) failed\n", - dev_name(dev)); + dev_err(dpaa_dev, "mac_dev missing\n"); return ERR_PTR(-EINVAL); } @@ -2696,7 +2681,13 @@ static int dpaa_eth_probe(struct platform_device *pdev) int err = 0, i, channel; struct device *dev; - dev = &pdev->dev; + /* device used for DMA mapping */ + dev = pdev->dev.parent; + err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40)); + if (err) { + dev_err(dev, "dma_coerce_mask_and_coherent() failed\n"); + return err; + } /* Allocate this early, so we can store relevant information in * the private area @@ -2738,14 +2729,6 @@ static int dpaa_eth_probe(struct platform_device *pdev) priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */ priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */ - /* device used for DMA mapping */ - set_dma_ops(dev, get_dma_ops(&pdev->dev)); - err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40)); - if (err) { - dev_err(dev, "dma_coerce_mask_and_coherent() failed\n"); - goto dev_mask_failed; - } - /* bp init */ for (i = 0; i < DPAA_BPS_NUM; i++) { int err; @@ -2879,7 +2862,6 @@ get_channel_failed: dpaa_bps_free(priv); bp_create_failed: fq_probe_failed: -dev_mask_failed: mac_probe_failed: dev_set_drvdata(dev, NULL); free_netdev(net_dev); diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index a0a3107c1f45..1d6da1ea7bfb 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -542,8 +542,7 @@ static const u16 phy2speed[] = { }; static struct platform_device *dpaa_eth_add_device(int fman_id, - struct mac_device *mac_dev, - struct device_node *node) + struct mac_device *mac_dev) { struct platform_device *pdev; struct dpaa_eth_data data; @@ -556,10 +555,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id, data.mac_dev = mac_dev; data.mac_hw_id = priv->cell_index; data.fman_hw_id = fman_id; - data.mac_node = node; mutex_lock(ð_lock); - pdev = platform_device_alloc("dpaa-ethernet", dpaa_eth_dev_cnt); if (!pdev) { ret = -ENOMEM; @@ -648,9 +645,6 @@ static int mac_probe(struct platform_device *_of_dev) goto _return; } - 
/* Register mac_dev */ - dev_set_drvdata(dev, mac_dev); - INIT_LIST_HEAD(&priv->mc_addr_list); /* Get the FM node */ @@ -659,7 +653,7 @@ static int mac_probe(struct platform_device *_of_dev) dev_err(dev, "of_get_parent(%pOF) failed\n", mac_node); err = -EINVAL; - goto _return_dev_set_drvdata; + goto _return_of_get_parent; } of_dev = of_find_device_by_node(dev_node); @@ -693,7 +687,7 @@ static int mac_probe(struct platform_device *_of_dev) if (err < 0) { dev_err(dev, "of_address_to_resource(%pOF) = %d\n", mac_node, err); - goto _return_dev_set_drvdata; + goto _return_of_get_parent; } mac_dev->res = __devm_request_region(dev, @@ -703,7 +697,7 @@ static int mac_probe(struct platform_device *_of_dev) if (!mac_dev->res) { dev_err(dev, "__devm_request_mem_region(mac) failed\n"); err = -EBUSY; - goto _return_dev_set_drvdata; + goto _return_of_get_parent; } priv->vaddr = devm_ioremap(dev, mac_dev->res->start, @@ -711,7 +705,7 @@ static int mac_probe(struct platform_device *_of_dev) if (!priv->vaddr) { dev_err(dev, "devm_ioremap() failed\n"); err = -EIO; - goto _return_dev_set_drvdata; + goto _return_of_get_parent; } if (!of_device_is_available(mac_node)) { @@ -728,7 +722,7 @@ static int mac_probe(struct platform_device *_of_dev) if (err) { dev_err(dev, "failed to read cell-index for %pOF\n", mac_node); err = -EINVAL; - goto _return_dev_set_drvdata; + goto _return_of_get_parent; } priv->cell_index = (u8)val; @@ -737,7 +731,7 @@ static int mac_probe(struct platform_device *_of_dev) if (!mac_addr) { dev_err(dev, "of_get_mac_address(%pOF) failed\n", mac_node); err = -EINVAL; - goto _return_dev_set_drvdata; + goto _return_of_get_parent; } memcpy(mac_dev->addr, mac_addr, sizeof(mac_dev->addr)); @@ -747,14 +741,14 @@ static int mac_probe(struct platform_device *_of_dev) dev_err(dev, "of_count_phandle_with_args(%pOF, fsl,fman-ports) failed\n", mac_node); err = nph; - goto _return_dev_set_drvdata; + goto _return_of_get_parent; } if (nph != ARRAY_SIZE(mac_dev->port)) { dev_err(dev, "Not supported number of fman-ports handles of mac node %pOF from device tree\n", mac_node); err = -EINVAL; - goto _return_dev_set_drvdata; + goto _return_of_get_parent; } for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { @@ -818,20 +812,20 @@ static int mac_probe(struct platform_device *_of_dev) err = of_phy_register_fixed_link(mac_node); if (err) - goto _return_dev_set_drvdata; + goto _return_of_get_parent; priv->fixed_link = kzalloc(sizeof(*priv->fixed_link), GFP_KERNEL); if (!priv->fixed_link) { err = -ENOMEM; - goto _return_dev_set_drvdata; + goto _return_of_get_parent; } mac_dev->phy_node = of_node_get(mac_node); phy = of_phy_find_device(mac_dev->phy_node); if (!phy) { err = -EINVAL; - goto _return_dev_set_drvdata; + goto _return_of_get_parent; } priv->fixed_link->link = phy->link; @@ -847,7 +841,7 @@ static int mac_probe(struct platform_device *_of_dev) if (err < 0) { dev_err(dev, "mac_dev->init() = %d\n", err); of_node_put(mac_dev->phy_node); - goto _return_dev_set_drvdata; + goto _return_of_get_parent; } /* pause frame autonegotiation enabled */ @@ -868,7 +862,7 @@ static int mac_probe(struct platform_device *_of_dev) mac_dev->addr[0], mac_dev->addr[1], mac_dev->addr[2], mac_dev->addr[3], mac_dev->addr[4], mac_dev->addr[5]); - priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev, mac_node); + priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev); if (IS_ERR(priv->eth_dev)) { dev_err(dev, "failed to add Ethernet platform device for MAC %d\n", priv->cell_index); @@ -879,9 +873,8 @@ static int mac_probe(struct 
platform_device *_of_dev) _return_of_node_put: of_node_put(dev_node); -_return_dev_set_drvdata: +_return_of_get_parent: kfree(priv->fixed_link); - dev_set_drvdata(dev, NULL); _return: return err; } diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h index 1ca85a18ab38..eefb3357e304 100644 --- a/drivers/net/ethernet/freescale/fman/mac.h +++ b/drivers/net/ethernet/freescale/fman/mac.h @@ -83,7 +83,6 @@ struct mac_device { }; struct dpaa_eth_data { - struct device_node *mac_node; struct mac_device *mac_dev; int mac_hw_id; int fman_hw_id; -- cgit v1.2.3 From 8b9b5a2c27e1a7292f1e97e0eb19b0ae603dfa68 Mon Sep 17 00:00:00 2001 From: Madalin Bucur Date: Mon, 16 Oct 2017 21:36:08 +0300 Subject: dpaa_eth: cleanup dpaa_eth_probe() error paths Signed-off-by: Madalin Bucur Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 53 ++++++++++---------------- 1 file changed, 20 insertions(+), 33 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 823aa6597e56..c6b97a1b6e43 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2695,7 +2695,7 @@ static int dpaa_eth_probe(struct platform_device *pdev) net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM); if (!net_dev) { dev_err(dev, "alloc_etherdev_mq() failed\n"); - goto alloc_etherdev_mq_failed; + return -ENOMEM; } /* Do this here, so we can be verbose early */ @@ -2711,7 +2711,7 @@ static int dpaa_eth_probe(struct platform_device *pdev) if (IS_ERR(mac_dev)) { dev_err(dev, "dpaa_mac_dev_get() failed\n"); err = PTR_ERR(mac_dev); - goto mac_probe_failed; + goto free_netdev; } /* If fsl_fm_max_frm is set to a higher value than the all-common 1500, @@ -2735,7 +2735,7 @@ static int dpaa_eth_probe(struct platform_device *pdev) dpaa_bps[i] = dpaa_bp_alloc(dev); if (IS_ERR(dpaa_bps[i])) - return PTR_ERR(dpaa_bps[i]); + goto free_dpaa_bps; /* the raw size of the buffers used for reception */ dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM); /* avoid runtime computations by keeping the usable size here */ @@ -2743,11 +2743,8 @@ static int dpaa_eth_probe(struct platform_device *pdev) dpaa_bps[i]->dev = dev; err = dpaa_bp_alloc_pool(dpaa_bps[i]); - if (err < 0) { - dpaa_bps_free(priv); - priv->dpaa_bps[i] = NULL; - goto bp_create_failed; - } + if (err < 0) + goto free_dpaa_bps; priv->dpaa_bps[i] = dpaa_bps[i]; } @@ -2758,7 +2755,7 @@ static int dpaa_eth_probe(struct platform_device *pdev) err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs); if (err < 0) { dev_err(dev, "dpaa_alloc_all_fqs() failed\n"); - goto fq_probe_failed; + goto free_dpaa_bps; } priv->mac_dev = mac_dev; @@ -2767,7 +2764,7 @@ static int dpaa_eth_probe(struct platform_device *pdev) if (channel < 0) { dev_err(dev, "dpaa_get_channel() failed\n"); err = channel; - goto get_channel_failed; + goto free_dpaa_bps; } priv->channel = (u16)channel; @@ -2787,20 +2784,20 @@ static int dpaa_eth_probe(struct platform_device *pdev) err = dpaa_eth_cgr_init(priv); if (err < 0) { dev_err(dev, "Error initializing CGR\n"); - goto tx_cgr_init_failed; + goto free_dpaa_bps; } err = dpaa_ingress_cgr_init(priv); if (err < 0) { dev_err(dev, "Error initializing ingress CGR\n"); - goto rx_cgr_init_failed; + goto delete_egress_cgr; } /* Add the FQs to the interface, and make them active */ list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, 
list) { err = dpaa_fq_init(dpaa_fq, false); if (err < 0) - goto fq_alloc_failed; + goto free_dpaa_fqs; } priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]); @@ -2810,7 +2807,7 @@ static int dpaa_eth_probe(struct platform_device *pdev) err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs, &priv->buf_layout[0], dev); if (err) - goto init_ports_failed; + goto free_dpaa_fqs; /* Rx traffic distribution based on keygen hashing defaults to on */ priv->keygen_in_use = true; @@ -2819,7 +2816,7 @@ static int dpaa_eth_probe(struct platform_device *pdev) if (!priv->percpu_priv) { dev_err(dev, "devm_alloc_percpu() failed\n"); err = -ENOMEM; - goto alloc_percpu_failed; + goto free_dpaa_fqs; } for_each_possible_cpu(i) { percpu_priv = per_cpu_ptr(priv->percpu_priv, i); @@ -2832,11 +2829,11 @@ static int dpaa_eth_probe(struct platform_device *pdev) /* Initialize NAPI */ err = dpaa_napi_add(net_dev); if (err < 0) - goto napi_add_failed; + goto delete_dpaa_napi; err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout); if (err < 0) - goto netdev_init_failed; + goto delete_dpaa_napi; dpaa_eth_sysfs_init(&net_dev->dev); @@ -2845,31 +2842,21 @@ static int dpaa_eth_probe(struct platform_device *pdev) return 0; -netdev_init_failed: -napi_add_failed: +delete_dpaa_napi: dpaa_napi_del(net_dev); -alloc_percpu_failed: -init_ports_failed: +free_dpaa_fqs: dpaa_fq_free(dev, &priv->dpaa_fq_list); -fq_alloc_failed: qman_delete_cgr_safe(&priv->ingress_cgr); qman_release_cgrid(priv->ingress_cgr.cgrid); -rx_cgr_init_failed: +delete_egress_cgr: qman_delete_cgr_safe(&priv->cgr_data.cgr); qman_release_cgrid(priv->cgr_data.cgr.cgrid); -tx_cgr_init_failed: -get_channel_failed: +free_dpaa_bps: dpaa_bps_free(priv); -bp_create_failed: -fq_probe_failed: -mac_probe_failed: +free_netdev: dev_set_drvdata(dev, NULL); free_netdev(net_dev); -alloc_etherdev_mq_failed: - for (i = 0; i < DPAA_BPS_NUM && dpaa_bps[i]; i++) { - if (atomic_read(&dpaa_bps[i]->refs) == 0) - devm_kfree(dev, dpaa_bps[i]); - } + return err; } -- cgit v1.2.3 From c69fde72bf03d7aa2c8d8ab158cc55835a2c0026 Mon Sep 17 00:00:00 2001 From: Madalin Bucur Date: Mon, 16 Oct 2017 21:36:09 +0300 Subject: fsl/fman: add dpaa in module names This change just renames the FMan driver modules, using a common prefix for the DPAA FMan and DPAA Ethernet drivers. Besides making the names more consistent, this allows writing udev rules that match on either driver name, if needed, using the fsl_dpaa_* prefix. The change of the netdev device required for DSA probing makes the previous rules written using this prefix fail; this change makes them work again, ensuring backwards compatibility for their users. Signed-off-by: Madalin Bucur Signed-off-by: David S.
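The probe error-path cleanup above is an instance of the usual kernel idiom: name each label after the cleanup action it performs, not after the step that failed, so that every failure site jumps to the first undo it needs and the labels fall through in reverse allocation order. A minimal sketch, with hypothetical alloc/release helpers standing in for the driver's buffer pools, frame queues and CGRs:

#include <linux/errno.h>

static int alloc_a(void) { return 0; }	/* stand-in resources */
static int alloc_b(void) { return 0; }
static int alloc_c(void) { return 0; }
static void release_a(void) { }
static void release_b(void) { }

static int my_probe(void)
{
	int err;

	err = alloc_a();
	if (err)
		return err;	/* nothing to undo yet */

	err = alloc_b();
	if (err)
		goto free_a;

	err = alloc_c();
	if (err)
		goto free_b;

	return 0;

free_b:				/* labels named for what they undo... */
	release_b();
free_a:				/* ...and fall through to earlier cleanups */
	release_a();
	return err;
}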
Miller --- drivers/net/ethernet/freescale/fman/Makefile | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fman/Makefile b/drivers/net/ethernet/freescale/fman/Makefile index 2c38119b172c..4ae524a352a2 100644 --- a/drivers/net/ethernet/freescale/fman/Makefile +++ b/drivers/net/ethernet/freescale/fman/Makefile @@ -1,9 +1,9 @@ subdir-ccflags-y += -I$(srctree)/drivers/net/ethernet/freescale/fman -obj-$(CONFIG_FSL_FMAN) += fsl_fman.o -obj-$(CONFIG_FSL_FMAN) += fsl_fman_port.o -obj-$(CONFIG_FSL_FMAN) += fsl_mac.o +obj-$(CONFIG_FSL_FMAN) += fsl_dpaa_fman.o +obj-$(CONFIG_FSL_FMAN) += fsl_dpaa_fman_port.o +obj-$(CONFIG_FSL_FMAN) += fsl_dpaa_mac.o -fsl_fman-objs := fman_muram.o fman.o fman_sp.o fman_keygen.o -fsl_fman_port-objs := fman_port.o -fsl_mac-objs:= mac.o fman_dtsec.o fman_memac.o fman_tgec.o +fsl_dpaa_fman-objs := fman_muram.o fman.o fman_sp.o fman_keygen.o +fsl_dpaa_fman_port-objs := fman_port.o +fsl_dpaa_mac-objs:= mac.o fman_dtsec.o fman_memac.o fman_tgec.o -- cgit v1.2.3 From f1851a69b1f4008a7d29c6e446b3da13ed13b7da Mon Sep 17 00:00:00 2001 From: Madalin Bucur Date: Mon, 16 Oct 2017 21:36:10 +0300 Subject: dpaa_eth: remove obsolete comment The comment has not been valid for a long time now. Signed-off-by: Madalin Bucur Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index c6b97a1b6e43..a8d0be824149 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2449,10 +2449,6 @@ static int dpaa_phy_init(struct net_device *net_dev) /* Remove any features not supported by the controller */ phy_dev->supported &= mac_dev->if_support; - - /* Enable the symmetric and asymmetric PAUSE frame advertisements, - * as most of the PHY drivers do not enable them by default. - */ phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); phy_dev->advertising = phy_dev->supported; -- cgit v1.2.3 From 30d240dfa2e88f7941f72fac9a256358f7d55ad8 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Tue, 17 Oct 2017 14:51:30 +0800 Subject: net: hns3: Add mqprio hardware offload support in hns3 driver When using the tc qdisc, dcb_ops->setup_tc is used to tell the hclge_dcb module to do the TM-related setup. Only the TC_MQPRIO_MODE_CHANNEL offload mode is supported. Signed-off-by: Yunsheng Lin Signed-off-by: David S.
Miller --- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 1 + .../net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c | 47 +++++++++++++++++- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 1 + .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 56 +++++++++++++++------- 4 files changed, 86 insertions(+), 19 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 575f50df340c..3acd8db0a794 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -381,6 +381,7 @@ struct hnae3_dcb_ops { u8 (*setdcbx)(struct hnae3_handle *, u8); int (*map_update)(struct hnae3_handle *); + int (*setup_tc)(struct hnae3_handle *, u8, u8 *); }; struct hnae3_ae_algo { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index 1b30a6f966d8..5018d6633133 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -178,7 +178,8 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) u8 num_tc = 0; int ret; - if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) || + hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE) return -EINVAL; ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed); @@ -228,7 +229,8 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) struct hclge_dev *hdev = vport->back; u8 i, j, pfc_map, *prio_tc; - if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) + if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) || + hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE) return -EINVAL; prio_tc = hdev->tm_info.prio_tc; @@ -257,6 +259,9 @@ static u8 hclge_getdcbx(struct hnae3_handle *h) struct hclge_vport *vport = hclge_get_vport(h); struct hclge_dev *hdev = vport->back; + if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE) + return 0; + return hdev->dcbx_cap; } @@ -276,6 +281,43 @@ static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode) return 0; } +/* Set up TC for hardware offloaded mqprio in channel mode */ +static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc) +{ + struct hclge_vport *vport = hclge_get_vport(h); + struct hclge_dev *hdev = vport->back; + int ret; + + if (hdev->flag & HCLGE_FLAG_DCB_ENABLE) + return -EINVAL; + + if (tc > hdev->tc_max) { + dev_err(&hdev->pdev->dev, + "setup tc failed, tc(%u) > tc_max(%u)\n", + tc, hdev->tc_max); + return -EINVAL; + } + + hclge_tm_schd_info_update(hdev, tc); + + ret = hclge_tm_prio_tc_info_update(hdev, prio_tc); + if (ret) + return ret; + + ret = hclge_tm_init_hw(hdev); + if (ret) + return ret; + + hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE; + + if (tc > 1) + hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE; + else + hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE; + + return 0; +} + static const struct hnae3_dcb_ops hns3_dcb_ops = { .ieee_getets = hclge_ieee_getets, .ieee_setets = hclge_ieee_setets, @@ -284,6 +326,7 @@ static const struct hnae3_dcb_ops hns3_dcb_ops = { .getdcbx = hclge_getdcbx, .setdcbx = hclge_setdcbx, .map_update = hclge_map_update, + .setup_tc = hclge_setup_tc, }; void hclge_dcb_ops_set(struct hclge_dev *hdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index a7c018c7b0ec..bca4430bb7e7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -470,6 
+470,7 @@ struct hclge_dev { #define HCLGE_FLAG_MAIN 0x00000004 #define HCLGE_FLAG_DCB_CAPABLE 0x00000008 #define HCLGE_FLAG_DCB_ENABLE 0x00000010 +#define HCLGE_FLAG_MQPRIO_ENABLE 0x00000020 u32 flag; u32 pkt_buf_size; /* Total pf buf size for tx/rx */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index ba550c1b5b01..8fa4e658b273 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include "hnae3.h" @@ -1186,53 +1187,74 @@ static void hns3_nic_udp_tunnel_del(struct net_device *netdev, } } -static int hns3_setup_tc(struct net_device *netdev, u8 tc) +static int hns3_setup_tc(struct net_device *netdev, void *type_data) { + struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; struct hnae3_handle *h = hns3_get_handle(netdev); struct hnae3_knic_private_info *kinfo = &h->kinfo; + u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map; + u8 tc = mqprio_qopt->qopt.num_tc; + u16 mode = mqprio_qopt->mode; + u8 hw = mqprio_qopt->qopt.hw; + bool if_running; unsigned int i; int ret; + if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && + mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0))) + return -EOPNOTSUPP; + if (tc > HNAE3_MAX_TC) return -EINVAL; - if (kinfo->num_tc == tc) - return 0; - if (!netdev) return -EINVAL; - if (!tc) { - netdev_reset_tc(netdev); - return 0; + if_running = netif_running(netdev); + if (if_running) { + hns3_nic_net_stop(netdev); + msleep(100); } - /* Set num_tc for netdev */ - ret = netdev_set_num_tc(netdev, tc); + ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? + kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP; if (ret) - return ret; + goto out; + + if (tc <= 1) { + netdev_reset_tc(netdev); + } else { + ret = netdev_set_num_tc(netdev, tc); + if (ret) + goto out; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (!kinfo->tc_info[i].enable) + continue; - /* Set per TC queues for the VSI */ - for (i = 0; i < HNAE3_MAX_TC; i++) { - if (kinfo->tc_info[i].enable) netdev_set_tc_queue(netdev, kinfo->tc_info[i].tc, kinfo->tc_info[i].tqp_count, kinfo->tc_info[i].tqp_offset); + } } - return 0; + ret = hns3_nic_set_real_num_queue(netdev); + +out: + if (if_running) + hns3_nic_net_open(netdev); + + return ret; } static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { - struct tc_mqprio_qopt *mqprio = type_data; - if (type != TC_SETUP_MQPRIO) return -EOPNOTSUPP; - return hns3_setup_tc(dev, mqprio->num_tc); + return hns3_setup_tc(dev, type_data); } static int hns3_vlan_rx_add_vid(struct net_device *netdev, -- cgit v1.2.3 From f436baf326ae62aecffbee8572f8bc75394dbaa3 Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Tue, 17 Oct 2017 10:23:25 +0300 Subject: qed: Fix iWARP out of order flow Out of order flow is not working for iWARP. This patch got cut out from initial series that added out of order support for iWARP. Make out of order code common for iWARP and iSCSI. Add new configuration option CONFIG_QED_OOO. Set by qedr and qedi Kconfigs. Fixes: d1abfd0b4ee2 ("qed: Add iWARP out of order support") Signed-off-by: Michal Kalderon Signed-off-by: Manish Rangankar Signed-off-by: Ariel Elior Signed-off-by: David S. 
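The hns3 patch above routes mqprio through the generic ndo_setup_tc entry point and accepts only full hardware offload in channel mode (or a plain reset with num_tc == 0). A minimal sketch of that validation step, assuming a hypothetical program_tc_hw() hook that pushes the prio-to-TC map into hardware:

#include <linux/netdevice.h>
#include <net/pkt_cls.h>

static int program_tc_hw(struct net_device *ndev, u8 num_tc, u8 *prio_tc)
{
	return 0;	/* hypothetical hardware programming hook */
}

static int my_setup_tc(struct net_device *ndev, enum tc_setup_type type,
		       void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	u8 num_tc, hw;

	if (type != TC_SETUP_MQPRIO)
		return -EOPNOTSUPP;

	num_tc = mqprio->qopt.num_tc;
	hw = mqprio->qopt.hw;

	/* only full HW offload in channel mode, or a plain reset */
	if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
	       mqprio->mode == TC_MQPRIO_MODE_CHANNEL) ||
	      (!hw && num_tc == 0)))
		return -EOPNOTSUPP;

	/* then mirror the result with netdev_set_num_tc()/netdev_set_tc_queue() */
	return program_tc_hw(ndev, num_tc, mqprio->qopt.prio_tc_map);
}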
Miller --- drivers/infiniband/hw/qedr/Kconfig | 1 + drivers/net/ethernet/qlogic/Kconfig | 3 +++ drivers/net/ethernet/qlogic/qed/Makefile | 3 ++- drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 7 ++++++- drivers/net/ethernet/qlogic/qed/qed_ooo.c | 16 +++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_ooo.h | 2 +- drivers/scsi/qedi/Kconfig | 1 + 7 files changed, 27 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/infiniband/hw/qedr/Kconfig b/drivers/infiniband/hw/qedr/Kconfig index 6c9f3923e838..60e867d80b88 100644 --- a/drivers/infiniband/hw/qedr/Kconfig +++ b/drivers/infiniband/hw/qedr/Kconfig @@ -2,6 +2,7 @@ config INFINIBAND_QEDR tristate "QLogic RoCE driver" depends on 64BIT && QEDE select QED_LL2 + select QED_OOO select QED_RDMA ---help--- This driver provides low-level InfiniBand over Ethernet diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index c2e24afbaeb2..26ddf092e3ec 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -117,4 +117,7 @@ config QED_ISCSI config QED_FCOE bool +config QED_OOO + bool + endif # NET_VENDOR_QLOGIC diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile index 82dd47068e18..c3c599950574 100644 --- a/drivers/net/ethernet/qlogic/qed/Makefile +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -6,5 +6,6 @@ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o qed-$(CONFIG_QED_LL2) += qed_ll2.o qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o qed_iwarp.o -qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o qed_ooo.o +qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o qed-$(CONFIG_QED_FCOE) += qed_fcoe.o +qed-$(CONFIG_QED_OOO) += qed_ooo.o diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index b2b1f87864ef..409041eab189 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1410,13 +1410,18 @@ int qed_iwarp_alloc(struct qed_hwfn *p_hwfn) INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list); spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock); - return qed_iwarp_prealloc_ep(p_hwfn, true); + rc = qed_iwarp_prealloc_ep(p_hwfn, true); + if (rc) + return rc; + + return qed_ooo_alloc(p_hwfn); } void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn) { struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; + qed_ooo_free(p_hwfn); qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1); kfree(iwarp_info->mpa_bufs); kfree(iwarp_info->partial_fpdus); diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c index 000636530111..6172354b451c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c @@ -103,18 +103,28 @@ int qed_ooo_alloc(struct qed_hwfn *p_hwfn) { u16 max_num_archipelagos = 0, cid_base; struct qed_ooo_info *p_ooo_info; + enum protocol_type proto; u16 max_num_isles = 0; u32 i; - if (p_hwfn->hw_info.personality != QED_PCI_ISCSI) { + switch (p_hwfn->hw_info.personality) { + case QED_PCI_ISCSI: + proto = PROTOCOLID_ISCSI; + break; + case QED_PCI_ETH_RDMA: + case QED_PCI_ETH_IWARP: + proto = PROTOCOLID_IWARP; + break; + default: DP_NOTICE(p_hwfn, "Failed to allocate qed_ooo_info: unknown personality\n"); return -EINVAL; } - max_num_archipelagos = p_hwfn->pf_params.iscsi_pf_params.num_cons; + max_num_archipelagos = (u16)qed_cxt_get_proto_cid_count(p_hwfn, 
proto, + NULL); max_num_isles = QED_MAX_NUM_ISLES + max_num_archipelagos; - cid_base = (u16)qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ISCSI); + cid_base = (u16)qed_cxt_get_proto_cid_start(p_hwfn, proto); if (!max_num_archipelagos) { DP_NOTICE(p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h index e8ed40b848f5..49c4e75b15b1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ooo.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h @@ -83,7 +83,7 @@ struct qed_ooo_info { u16 cid_base; }; -#if IS_ENABLED(CONFIG_QED_ISCSI) +#if IS_ENABLED(CONFIG_QED_OOO) void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn, struct qed_ooo_info *p_ooo_info, struct ooo_opaque *p_cqe); diff --git a/drivers/scsi/qedi/Kconfig b/drivers/scsi/qedi/Kconfig index 2ff753ce6e27..d1db92d24889 100644 --- a/drivers/scsi/qedi/Kconfig +++ b/drivers/scsi/qedi/Kconfig @@ -4,6 +4,7 @@ config QEDI depends on QED select SCSI_ISCSI_ATTRS select QED_LL2 + select QED_OOO select QED_ISCSI select ISCSI_BOOT_SYSFS ---help--- -- cgit v1.2.3 From 88aef2f51c9e3640268aca04a256b8f26cf6bdff Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Tue, 17 Oct 2017 07:33:56 +0000 Subject: net: ena: improve ENA driver boot time. The ENA admin command timeout has a resolution of 100ms. Therefore, when the driver works in polling mode, it sleeps for 100ms each time, and the overall boot time of the ENA driver is ~1.5 sec. To reduce the boot time, this change modifies the granularity of the sleeps to 5ms, which improves the boot time to 220ms. Signed-off-by: Netanel Belgazal Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_com.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index ded29af648c9..bf2de5298005 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -63,6 +63,8 @@ #define ENA_REGS_ADMIN_INTR_MASK 1 +#define ENA_POLL_MS 5 + /*****************************************************************************/ /*****************************************************************************/ /*****************************************************************************/ @@ -533,7 +535,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c goto err; } - msleep(100); + msleep(ENA_POLL_MS); } if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) { @@ -746,6 +748,9 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout, { u32 val, i; + /* Convert timeout from resolution of 100ms to ENA_POLL_MS */ + timeout = (timeout * 100) / ENA_POLL_MS; + for (i = 0; i < timeout; i++) { val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF); @@ -758,8 +763,7 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout, exp_state) return 0; - /* The resolution of the timeout is 100ms */ - msleep(100); + msleep(ENA_POLL_MS); } return -ETIME; @@ -1253,7 +1257,7 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev) spin_lock_irqsave(&admin_queue->q_lock, flags); while (atomic_read(&admin_queue->outstanding_cmds) != 0) { spin_unlock_irqrestore(&admin_queue->q_lock, flags); - msleep(20); + msleep(ENA_POLL_MS); spin_lock_irqsave(&admin_queue->q_lock, flags); } spin_unlock_irqrestore(&admin_queue->q_lock, flags); -- cgit v1.2.3 From dbeaf1e3c24f0e87c8047fc8dcbd8163ab82c8e7 Mon Sep 17 00:00:00 2001 From:
Netanel Belgazal Date: Tue, 17 Oct 2017 07:33:57 +0000 Subject: net: ena: remove legacy suspend/resume support Remove the ena_device_io_suspend/resume() methods. Those methods were intended to be used by the device to trigger suspend/resume, but that mechanism was eventually dropped. Signed-off-by: Netanel Belgazal Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_ethtool.c | 2 -- drivers/net/ethernet/amazon/ena/ena_netdev.c | 50 --------------------------- drivers/net/ethernet/amazon/ena/ena_netdev.h | 4 --- 3 files changed, 56 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index b1212debc2e1..27b8f4618103 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -60,8 +60,6 @@ struct ena_stats { static const struct ena_stats ena_stats_global_strings[] = { ENA_STAT_GLOBAL_ENTRY(tx_timeout), - ENA_STAT_GLOBAL_ENTRY(io_suspend), - ENA_STAT_GLOBAL_ENTRY(io_resume), ENA_STAT_GLOBAL_ENTRY(wd_expired), ENA_STAT_GLOBAL_ENTRY(interface_up), ENA_STAT_GLOBAL_ENTRY(interface_down), diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index f7dc22f65d9f..6d8e1f1325e9 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2361,38 +2361,6 @@ static const struct net_device_ops ena_netdev_ops = { #endif /* CONFIG_NET_POLL_CONTROLLER */ }; -static void ena_device_io_suspend(struct work_struct *work) -{ - struct ena_adapter *adapter = - container_of(work, struct ena_adapter, suspend_io_task); - struct net_device *netdev = adapter->netdev; - - /* ena_napi_disable_all disables only the IO handling. - * We are still subject to AENQ keep alive watchdog. - */ - u64_stats_update_begin(&adapter->syncp); - adapter->dev_stats.io_suspend++; - u64_stats_update_begin(&adapter->syncp); - ena_napi_disable_all(adapter); - netif_tx_lock(netdev); - netif_device_detach(netdev); - netif_tx_unlock(netdev); -} - -static void ena_device_io_resume(struct work_struct *work) -{ - struct ena_adapter *adapter = - container_of(work, struct ena_adapter, resume_io_task); - struct net_device *netdev = adapter->netdev; - - u64_stats_update_begin(&adapter->syncp); - adapter->dev_stats.io_resume++; - u64_stats_update_end(&adapter->syncp); - - netif_device_attach(netdev); - ena_napi_enable_all(adapter); -} - static int ena_device_validate_params(struct ena_adapter *adapter, struct ena_com_dev_get_features_ctx *get_feat_ctx) { @@ -3275,8 +3243,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_rss; } - INIT_WORK(&adapter->suspend_io_task, ena_device_io_suspend); - INIT_WORK(&adapter->resume_io_task, ena_device_io_resume); INIT_WORK(&adapter->reset_task, ena_fw_reset_device); adapter->last_keep_alive_jiffies = jiffies; @@ -3310,8 +3276,6 @@ err_free_msix: err_worker_destroy: ena_com_destroy_interrupt_moderation(ena_dev); del_timer(&adapter->timer_service); - cancel_work_sync(&adapter->suspend_io_task); - cancel_work_sync(&adapter->resume_io_task); err_netdev_destroy: free_netdev(netdev); err_device_destroy: @@ -3381,10 +3345,6 @@ static void ena_remove(struct pci_dev *pdev) cancel_work_sync(&adapter->reset_task); - cancel_work_sync(&adapter->suspend_io_task); - - cancel_work_sync(&adapter->resume_io_task); - /* Reset the device only if the device is running.
*/ if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) ena_com_dev_reset(ena_dev, adapter->reset_reason); @@ -3503,16 +3463,6 @@ static void ena_notification(void *adapter_data, ENA_ADMIN_NOTIFICATION); switch (aenq_e->aenq_common_desc.syndrom) { - case ENA_ADMIN_SUSPEND: - /* Suspend just the IO queues. - * We deliberately don't suspend admin so the timer and - * the keep_alive events should remain. - */ - queue_work(ena_wq, &adapter->suspend_io_task); - break; - case ENA_ADMIN_RESUME: - queue_work(ena_wq, &adapter->resume_io_task); - break; case ENA_ADMIN_UPDATE_HINTS: hints = (struct ena_admin_ena_hw_hints *) (&aenq_e->inline_data_w4); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 29bb5704260b..fb0c98bb9290 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -257,8 +257,6 @@ struct ena_ring { struct ena_stats_dev { u64 tx_timeout; - u64 io_suspend; - u64 io_resume; u64 wd_expired; u64 interface_up; u64 interface_down; @@ -326,8 +324,6 @@ struct ena_adapter { /* timer service */ struct work_struct reset_task; - struct work_struct suspend_io_task; - struct work_struct resume_io_task; struct timer_list timer_service; bool wd_state; -- cgit v1.2.3 From 8c5c7abdeb2dfe4b4b28a48702c2cfa83fac15c9 Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Tue, 17 Oct 2017 07:33:58 +0000 Subject: net: ena: add power management ops to the ENA driver Signed-off-by: Netanel Belgazal Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_ethtool.c | 2 + drivers/net/ethernet/amazon/ena/ena_netdev.c | 117 ++++++++++++++++++++------ drivers/net/ethernet/amazon/ena/ena_netdev.h | 3 + 3 files changed, 95 insertions(+), 27 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 27b8f4618103..897e638a014a 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -60,6 +60,8 @@ struct ena_stats { static const struct ena_stats ena_stats_global_strings[] = { ENA_STAT_GLOBAL_ENTRY(tx_timeout), + ENA_STAT_GLOBAL_ENTRY(suspend), + ENA_STAT_GLOBAL_ENTRY(resume), ENA_STAT_GLOBAL_ENTRY(wd_expired), ENA_STAT_GLOBAL_ENTRY(interface_up), ENA_STAT_GLOBAL_ENTRY(interface_down), diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 6d8e1f1325e9..adc3957df3ab 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2529,38 +2529,31 @@ err_disable_msix: return rc; } -static void ena_fw_reset_device(struct work_struct *work) +static void ena_destroy_device(struct ena_adapter *adapter) { - struct ena_com_dev_get_features_ctx get_feat_ctx; - struct ena_adapter *adapter = - container_of(work, struct ena_adapter, reset_task); struct net_device *netdev = adapter->netdev; struct ena_com_dev *ena_dev = adapter->ena_dev; - struct pci_dev *pdev = adapter->pdev; - bool dev_up, wd_state; - int rc; - - if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { - dev_err(&pdev->dev, - "device reset schedule while reset bit is off\n"); - return; - } + bool dev_up; netif_carrier_off(netdev); del_timer_sync(&adapter->timer_service); - rtnl_lock(); - dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); + adapter->dev_up_before_reset = dev_up; + ena_com_set_admin_running_state(ena_dev, false); - /* After calling ena_close the 
tx queues and the napi - * are disabled so no one can interfere or touch the - * data structures - */ ena_close(netdev); + /* Before releasing the ENA resources, a device reset is required. + * (to prevent the device from accessing them). + * In case the reset flag is set and the device is up, ena_close + * already perform the reset, so it can be skipped. + */ + if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up)) + ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); + ena_free_mgmnt_irq(adapter); ena_disable_msix(adapter); @@ -2574,9 +2567,17 @@ static void ena_fw_reset_device(struct work_struct *work) ena_com_mmio_reg_read_request_destroy(ena_dev); adapter->reset_reason = ENA_REGS_RESET_NORMAL; + clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); +} - /* Finish with the destroy part. Start the init part */ +static int ena_restore_device(struct ena_adapter *adapter) +{ + struct ena_com_dev_get_features_ctx get_feat_ctx; + struct ena_com_dev *ena_dev = adapter->ena_dev; + struct pci_dev *pdev = adapter->pdev; + bool wd_state; + int rc; rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state); if (rc) { @@ -2598,7 +2599,7 @@ static void ena_fw_reset_device(struct work_struct *work) goto err_device_destroy; } /* If the interface was up before the reset bring it up */ - if (dev_up) { + if (adapter->dev_up_before_reset) { rc = ena_up(adapter); if (rc) { dev_err(&pdev->dev, "Failed to create I/O queues\n"); @@ -2607,24 +2608,38 @@ static void ena_fw_reset_device(struct work_struct *work) } mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); - - rtnl_unlock(); - dev_err(&pdev->dev, "Device reset completed successfully\n"); - return; + return rc; err_disable_msix: ena_free_mgmnt_irq(adapter); ena_disable_msix(adapter); err_device_destroy: ena_com_admin_destroy(ena_dev); err: - rtnl_unlock(); - clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); dev_err(&pdev->dev, "Reset attempt failed. 
Can not reset the device\n"); + + return rc; +} + +static void ena_fw_reset_device(struct work_struct *work) +{ + struct ena_adapter *adapter = + container_of(work, struct ena_adapter, reset_task); + struct pci_dev *pdev = adapter->pdev; + + if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { + dev_err(&pdev->dev, + "device reset schedule while reset bit is off\n"); + return; + } + rtnl_lock(); + ena_destroy_device(adapter); + ena_restore_device(adapter); + rtnl_unlock(); } static int check_missing_comp_in_queue(struct ena_adapter *adapter, @@ -3378,11 +3393,59 @@ static void ena_remove(struct pci_dev *pdev) vfree(ena_dev); } +#ifdef CONFIG_PM +/* ena_suspend - PM suspend callback + * @pdev: PCI device information struct + * @state:power state */ +static int ena_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct ena_adapter *adapter = pci_get_drvdata(pdev); + + u64_stats_update_begin(&adapter->syncp); + adapter->dev_stats.suspend++; + u64_stats_update_end(&adapter->syncp); + + rtnl_lock(); + if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { + dev_err(&pdev->dev, + "ignoring device reset request as the device is being suspended\n"); + clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + } + ena_destroy_device(adapter); + rtnl_unlock(); + return 0; +} + +/* ena_resume - PM resume callback + * @pdev: PCI device information struct + * + */ +static int ena_resume(struct pci_dev *pdev) +{ + struct ena_adapter *adapter = pci_get_drvdata(pdev); + int rc; + + u64_stats_update_begin(&adapter->syncp); + adapter->dev_stats.resume++; + u64_stats_update_end(&adapter->syncp); + + rtnl_lock(); + rc = ena_restore_device(adapter); + rtnl_unlock(); + return rc; +} +#endif + static struct pci_driver ena_pci_driver = { .name = DRV_MODULE_NAME, .id_table = ena_pci_tbl, .probe = ena_probe, .remove = ena_remove, +#ifdef CONFIG_PM + .suspend = ena_suspend, + .resume = ena_resume, +#endif .sriov_configure = ena_sriov_configure, }; diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index fb0c98bb9290..7b07bfbf0fe4 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -257,6 +257,8 @@ struct ena_ring { struct ena_stats_dev { u64 tx_timeout; + u64 suspend; + u64 resume; u64 wd_expired; u64 interface_up; u64 interface_down; @@ -327,6 +329,7 @@ struct ena_adapter { struct timer_list timer_service; bool wd_state; + bool dev_up_before_reset; unsigned long last_keep_alive_jiffies; struct u64_stats_sync syncp; -- cgit v1.2.3 From 11095fdb712b1aaa7ffd6ccd86d0c45d29732eec Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Tue, 17 Oct 2017 07:33:59 +0000 Subject: net: ena: add statistics for missed tx packets Add a new statistic to ethtool stats that shows the number of packets without transmit acknowledgement from the ENA device. Signed-off-by: Netanel Belgazal Signed-off-by: David S.
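The power management patch above rests on one refactoring: the firmware-reset worker is split into ena_destroy_device() and ena_restore_device(), and the legacy pci_driver .suspend/.resume hooks then simply call those halves under the RTNL lock. A skeletal sketch of that structure, with hypothetical names:

#include <linux/pci.h>
#include <linux/rtnetlink.h>

struct my_adapter;	/* driver-private state, details elided */

static void my_destroy_device(struct my_adapter *adapter) { }
static int my_restore_device(struct my_adapter *adapter) { return 0; }

static int my_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct my_adapter *adapter = pci_get_drvdata(pdev);

	rtnl_lock();	/* serialize against open/close and FW resets */
	my_destroy_device(adapter);
	rtnl_unlock();
	return 0;
}

static int my_resume(struct pci_dev *pdev)
{
	struct my_adapter *adapter = pci_get_drvdata(pdev);
	int rc;

	rtnl_lock();
	rc = my_restore_device(adapter);
	rtnl_unlock();
	return rc;
}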
Miller --- drivers/net/ethernet/amazon/ena/ena_ethtool.c | 1 + drivers/net/ethernet/amazon/ena/ena_netdev.c | 30 +++++++++++++++------------ drivers/net/ethernet/amazon/ena/ena_netdev.h | 1 + 3 files changed, 19 insertions(+), 13 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 897e638a014a..0d97311a1b26 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -81,6 +81,7 @@ static const struct ena_stats ena_stats_tx_strings[] = { ENA_STAT_TX_ENTRY(doorbells), ENA_STAT_TX_ENTRY(prepare_ctx_err), ENA_STAT_TX_ENTRY(bad_req_id), + ENA_STAT_TX_ENTRY(missed_tx), }; static const struct ena_stats ena_stats_rx_strings[] = { diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index adc3957df3ab..47bdbf9bdefb 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -2648,7 +2648,7 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter, struct ena_tx_buffer *tx_buf; unsigned long last_jiffies; u32 missed_tx = 0; - int i; + int i, rc = 0; for (i = 0; i < tx_ring->ring_size; i++) { tx_buf = &tx_ring->tx_buffer_info[i]; @@ -2662,21 +2662,25 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter, tx_buf->print_once = 1; missed_tx++; - - if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) { - netif_err(adapter, tx_err, adapter->netdev, - "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n", - missed_tx, - adapter->missing_tx_completion_threshold); - adapter->reset_reason = - ENA_REGS_RESET_MISS_TX_CMPL; - set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); - return -EIO; - } } } - return 0; + if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) { + netif_err(adapter, tx_err, adapter->netdev, + "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n", + missed_tx, + adapter->missing_tx_completion_threshold); + adapter->reset_reason = + ENA_REGS_RESET_MISS_TX_CMPL; + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + rc = -EIO; + } + + u64_stats_update_begin(&tx_ring->syncp); + tx_ring->tx_stats.missed_tx = missed_tx; + u64_stats_update_end(&tx_ring->syncp); + + return rc; } static void check_for_missing_tx_completions(struct ena_adapter *adapter) diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 7b07bfbf0fe4..eafc5774dd49 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -185,6 +185,7 @@ struct ena_stats_tx { u64 tx_poll; u64 doorbells; u64 bad_req_id; + u64 missed_tx; }; struct ena_stats_rx { -- cgit v1.2.3 From 58894d5219c5d3fdd72d4166f007df5004817e84 Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Tue, 17 Oct 2017 07:34:00 +0000 Subject: net: ena: add new admin define for future support of IPv6 RSS Signed-off-by: Netanel Belgazal Signed-off-by: David S. 
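The missed_tx counter added above is published through the ring's u64_stats_sync, the standard way to give 32-bit readers a consistent 64-bit value without locking the writer's fast path. A minimal sketch of both sides of that protocol (the syncp must be initialized once with u64_stats_init()):

#include <linux/u64_stats_sync.h>

struct my_tx_stats {
	u64 missed_tx;
	struct u64_stats_sync syncp;	/* u64_stats_init() at setup time */
};

static void my_write_missed_tx(struct my_tx_stats *stats, u32 missed)
{
	u64_stats_update_begin(&stats->syncp);
	stats->missed_tx = missed;
	u64_stats_update_end(&stats->syncp);
}

static u64 my_read_missed_tx(struct my_tx_stats *stats)
{
	unsigned int start;
	u64 val;

	do {	/* retry if a writer raced with the read */
		start = u64_stats_fetch_begin(&stats->syncp);
		val = stats->missed_tx;
	} while (u64_stats_fetch_retry(&stats->syncp, start));

	return val;
}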
Miller --- drivers/net/ethernet/amazon/ena/ena_admin_defs.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h index 305dc1996b4e..4532e574ebcd 100644 --- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h +++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h @@ -627,6 +627,12 @@ enum ena_admin_flow_hash_proto { ENA_ADMIN_RSS_NOT_IP = 7, + /* TCPv6 with extension header */ + ENA_ADMIN_RSS_TCP6_EX = 8, + + /* IPv6 with extension header */ + ENA_ADMIN_RSS_IP6_EX = 9, + ENA_ADMIN_RSS_PROTO_NUM = 16, }; -- cgit v1.2.3 From 046b30718928d616f7decc79c272fdd4f42cc61d Mon Sep 17 00:00:00 2001 From: Netanel Belgazal Date: Tue, 17 Oct 2017 07:34:01 +0000 Subject: net: ena: increase ena driver version to 1.3.0 Signed-off-by: Netanel Belgazal Signed-off-by: David S. Miller --- drivers/net/ethernet/amazon/ena/ena_netdev.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index eafc5774dd49..ed8bd0a579c4 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -44,7 +44,7 @@ #include "ena_eth_com.h" #define DRV_MODULE_VER_MAJOR 1 -#define DRV_MODULE_VER_MINOR 2 +#define DRV_MODULE_VER_MINOR 3 #define DRV_MODULE_VER_SUBMINOR 0 #define DRV_MODULE_NAME "ena" @@ -52,7 +52,7 @@ #define DRV_MODULE_VERSION \ __stringify(DRV_MODULE_VER_MAJOR) "." \ __stringify(DRV_MODULE_VER_MINOR) "." \ - __stringify(DRV_MODULE_VER_SUBMINOR) "k" + __stringify(DRV_MODULE_VER_SUBMINOR) "K" #endif #define DEVICE_NAME "Elastic Network Adapter (ENA)" -- cgit v1.2.3 From 154820563dd4621c78e03e98e70216e832422f8e Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Tue, 17 Oct 2017 12:36:54 -0500 Subject: ibmvnic: Enable scatter-gather support This patch enables scatter gather support. Since there is no HW/FW scatter-gather support at this time, the driver needs to loop through each fragment and copy it to a contiguous, pre-mapped buffer entry. Signed-off-by: Thomas Falcon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 4bc14a901571..b508877397e1 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1204,9 +1204,28 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) offset = index * adapter->req_mtu; dst = tx_pool->long_term_buff.buff + offset; memset(dst, 0, adapter->req_mtu); - skb_copy_from_linear_data(skb, dst, skb->len); data_dma_addr = tx_pool->long_term_buff.addr + offset; + if (skb_shinfo(skb)->nr_frags) { + int cur, i; + + /* Copy the head */ + skb_copy_from_linear_data(skb, dst, skb_headlen(skb)); + cur = skb_headlen(skb); + + /* Copy the frags */ + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + memcpy(dst + cur, + page_address(skb_frag_page(frag)) + + frag->page_offset, skb_frag_size(frag)); + cur += skb_frag_size(frag); + } + } else { + skb_copy_from_linear_data(skb, dst, skb->len); + } + tx_pool->consumer_index = (tx_pool->consumer_index + 1) % adapter->req_tx_entries_per_subcrq; @@ -2948,7 +2967,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) adapter->ip_offload_ctrl.large_rx_ipv4 = 0; adapter->ip_offload_ctrl.large_rx_ipv6 = 0; - adapter->netdev->features = NETIF_F_GSO; + adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO; if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) adapter->netdev->features |= NETIF_F_IP_CSUM; -- cgit v1.2.3 From fdb061056f57e849a05cac072a4998c7f33d797e Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Tue, 17 Oct 2017 12:36:55 -0500 Subject: ibmvnic: Enable TSO support This patch enables TSO support. It includes additional buffers reserved exclusively for large packets. Throughput is greatly increased with TSO enabled, from about 1 Gb/s to 9 Gb/s on our test systems. Signed-off-by: Thomas Falcon Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 56 ++++++++++++++++++++++++++++++++------ drivers/net/ethernet/ibm/ibmvnic.h | 5 ++++ 2 files changed, 53 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index b508877397e1..aedb81c230a6 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -553,6 +553,10 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter) if (rc) return rc; + rc = reset_long_term_buff(adapter, &tx_pool->tso_ltb); + if (rc) + return rc; + memset(tx_pool->tx_buff, 0, adapter->req_tx_entries_per_subcrq * sizeof(struct ibmvnic_tx_buff)); @@ -562,6 +566,7 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter) tx_pool->consumer_index = 0; tx_pool->producer_index = 0; + tx_pool->tso_index = 0; } return 0; @@ -581,6 +586,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter) tx_pool = &adapter->tx_pool[i]; kfree(tx_pool->tx_buff); free_long_term_buff(adapter, &tx_pool->long_term_buff); + free_long_term_buff(adapter, &tx_pool->tso_ltb); kfree(tx_pool->free_map); } @@ -625,6 +631,16 @@ static int init_tx_pools(struct net_device *netdev) return -1; } + /* alloc TSO ltb */ + if (alloc_long_term_buff(adapter, &tx_pool->tso_ltb, + IBMVNIC_TSO_BUFS * + IBMVNIC_TSO_BUF_SZ)) { + release_tx_pools(adapter); + return -1; + } + + tx_pool->tso_index = 0; + tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq, sizeof(int), GFP_KERNEL); if (!tx_pool->free_map) { @@ -1201,10 +1217,21 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs)); index = tx_pool->free_map[tx_pool->consumer_index]; - offset = index * adapter->req_mtu; - dst = tx_pool->long_term_buff.buff + offset; - memset(dst, 0, adapter->req_mtu); - data_dma_addr = tx_pool->long_term_buff.addr + offset; + + if (skb_is_gso(skb)) { + offset = tx_pool->tso_index * IBMVNIC_TSO_BUF_SZ; + dst = tx_pool->tso_ltb.buff + offset; + memset(dst, 0, IBMVNIC_TSO_BUF_SZ); + data_dma_addr = tx_pool->tso_ltb.addr + offset; + tx_pool->tso_index++; + if (tx_pool->tso_index == IBMVNIC_TSO_BUFS) + tx_pool->tso_index = 0; + } else { + offset = index * adapter->req_mtu; + dst = tx_pool->long_term_buff.buff + offset; + memset(dst, 0, adapter->req_mtu); + data_dma_addr = tx_pool->long_term_buff.addr + offset; + } if (skb_shinfo(skb)->nr_frags) { int cur, i; @@ -1245,7 +1272,10 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_crq.v1.n_sge = 1; tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED; tx_crq.v1.correlator = cpu_to_be32(index); - tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id); + if (skb_is_gso(skb)) + tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->tso_ltb.map_id); + else + tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id); tx_crq.v1.sge_len = cpu_to_be32(skb->len); tx_crq.v1.ioba = cpu_to_be64(data_dma_addr); @@ -1270,6 +1300,11 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD; hdrs += 2; } + if (skb_is_gso(skb)) { + tx_crq.v1.flags1 |= IBMVNIC_TX_LSO; + tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); + hdrs += 2; + } /* determine if l2/3/4 headers are sent to firmware */ if ((*hdrs >> 7) & 1 && (skb->protocol == htons(ETH_P_IP) || @@ -2960,10 +2995,10 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) adapter->ip_offload_ctrl.udp_ipv4_chksum = 
buf->udp_ipv4_chksum; adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum; adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum; + adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4; + adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6; - /* large_tx/rx disabled for now, additional features needed */ - adapter->ip_offload_ctrl.large_tx_ipv4 = 0; - adapter->ip_offload_ctrl.large_tx_ipv6 = 0; + /* large_rx disabled for now, additional features needed */ adapter->ip_offload_ctrl.large_rx_ipv4 = 0; adapter->ip_offload_ctrl.large_rx_ipv6 = 0; @@ -2979,6 +3014,11 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) adapter->netdev->features |= NETIF_F_RXCSUM; + if (buf->large_tx_ipv4) + adapter->netdev->features |= NETIF_F_TSO; + if (buf->large_tx_ipv6) + adapter->netdev->features |= NETIF_F_TSO6; + memset(&crq, 0, sizeof(crq)); crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD; diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index d02257ccc377..7aa347a21e78 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -39,6 +39,9 @@ #define IBMVNIC_BUFFS_PER_POOL 100 #define IBMVNIC_MAX_TX_QUEUES 5 +#define IBMVNIC_TSO_BUF_SZ 65536 +#define IBMVNIC_TSO_BUFS 64 + struct ibmvnic_login_buffer { __be32 len; __be32 version; @@ -896,6 +899,8 @@ struct ibmvnic_tx_pool { wait_queue_head_t ibmvnic_tx_comp_q; struct task_struct *work_thread; struct ibmvnic_long_term_buff long_term_buff; + struct ibmvnic_long_term_buff tso_ltb; + int tso_index; }; struct ibmvnic_rx_buff { -- cgit v1.2.3 From aa0bf8510dac901badc6889b208fc0e7d9225924 Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Tue, 17 Oct 2017 12:36:56 -0500 Subject: ibmvnic: Let users change net device features Signed-off-by: Thomas Falcon Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index aedb81c230a6..b991703319f9 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -3019,6 +3019,8 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) if (buf->large_tx_ipv6) adapter->netdev->features |= NETIF_F_TSO6; + adapter->netdev->hw_features |= adapter->netdev->features; + memset(&crq, 0, sizeof(crq)); crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD; -- cgit v1.2.3 From b14bec89042ee6f9a43b437f8133cfcbea140f20 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 17 Oct 2017 13:59:20 -0500 Subject: liquidio: remove unnecessary NULL check before kfree in delete_glists NULL check before freeing functions like kfree is not needed. This issue was detected with the help of Coccinelle. Signed-off-by: Gustavo A. R. Silva Acked-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 2e993ce43b66..e4a112cf4f8e 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -435,8 +435,7 @@ static void delete_glists(struct lio *lio) do { g = (struct octnic_gather *) list_delete_head(&lio->glist[i]); - if (g) - kfree(g); + kfree(g); } while (g); if (lio->glists_virt_base && lio->glists_virt_base[i] && -- cgit v1.2.3 From 48acc9e847ef335f7d3b62926825397c6bf4eab2 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 17 Oct 2017 14:01:45 -0500 Subject: liquidio: mark expected switch fall-through in octeon_destroy_resources In preparation for enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Signed-off-by: Gustavo A. R. Silva Acked-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index e4a112cf4f8e..4c3b5688529b 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -747,7 +747,7 @@ static void octeon_destroy_resources(struct octeon_device *oct) if (lio_wait_for_oq_pkts(oct)) dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); - + /* fall through */ case OCT_DEV_INTR_SET_DONE: /* Disable interrupts */ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); -- cgit v1.2.3 From 6c4b2f7e675cf11587182f51adcf0e129005e2f9 Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Wed, 18 Oct 2017 09:21:27 +0200 Subject: net: sh_eth: rename structures as rcar_gen[12]_* Rename structures describing R-Car SoCs as rcar_gen[12]_* rather than r8a77[79]x_*. This seems a little easier on the eyes and will make things slightly cleaner in a follow-up patch that adds fallback-compatibility strings for these SoCs. Note that R-Car Gen2 and RZ/G1 have many compatible IP blocks. The approach that has been consistently taken for other IP blocks is to name common code, compatibility strings and so on after R-Car Gen2. Also rename sh_eth_set_rate_r8a777x as sh_eth_set_rate_rcar as it is used by the R-Car generations supported by the driver. This patch should have no run-time effect and is compile-tested only. Signed-off-by: Simon Horman Reviewed-by: Geert Uytterhoeven Acked-by: Sergei Shtylyov Signed-off-by: David S.
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index d2e88a30f57b..c9f92fc8555e 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -594,7 +594,7 @@ static struct sh_eth_cpu_data r8a7740_data = { }; /* There is CPU dependent code */ -static void sh_eth_set_rate_r8a777x(struct net_device *ndev) +static void sh_eth_set_rate_rcar(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); @@ -608,10 +608,10 @@ static void sh_eth_set_rate_r8a777x(struct net_device *ndev) } } -/* R8A7778/9 */ -static struct sh_eth_cpu_data r8a777x_data = { +/* R-Car Gen1 */ +static struct sh_eth_cpu_data rcar_gen1_data = { .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate_r8a777x, + .set_rate = sh_eth_set_rate_rcar, .register_type = SH_ETH_REG_FAST_RCAR, @@ -635,10 +635,10 @@ static struct sh_eth_cpu_data r8a777x_data = { .hw_swap = 1, }; -/* R8A7790/1 */ -static struct sh_eth_cpu_data r8a779x_data = { +/* R-Car Gen2 and RZ/G1 */ +static struct sh_eth_cpu_data rcar_gen2_data = { .set_duplex = sh_eth_set_duplex, - .set_rate = sh_eth_set_rate_r8a777x, + .set_rate = sh_eth_set_rate_rcar, .register_type = SH_ETH_REG_FAST_RCAR, @@ -3086,14 +3086,14 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev) static const struct of_device_id sh_eth_match_table[] = { { .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data }, - { .compatible = "renesas,ether-r8a7743", .data = &r8a779x_data }, - { .compatible = "renesas,ether-r8a7745", .data = &r8a779x_data }, - { .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data }, - { .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data }, - { .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data }, - { .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data }, - { .compatible = "renesas,ether-r8a7793", .data = &r8a779x_data }, - { .compatible = "renesas,ether-r8a7794", .data = &r8a779x_data }, + { .compatible = "renesas,ether-r8a7743", .data = &rcar_gen2_data }, + { .compatible = "renesas,ether-r8a7745", .data = &rcar_gen2_data }, + { .compatible = "renesas,ether-r8a7778", .data = &rcar_gen1_data }, + { .compatible = "renesas,ether-r8a7779", .data = &rcar_gen1_data }, + { .compatible = "renesas,ether-r8a7790", .data = &rcar_gen2_data }, + { .compatible = "renesas,ether-r8a7791", .data = &rcar_gen2_data }, + { .compatible = "renesas,ether-r8a7793", .data = &rcar_gen2_data }, + { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data }, { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data }, { } }; -- cgit v1.2.3 From b4804e0c71c144b673b6c53ca4acfcac6eb98704 Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Wed, 18 Oct 2017 09:21:28 +0200 Subject: net: sh_eth: implement R-Car Gen[12] fallback compatibility strings Implement fallback compatibility strings for R-Car Gen 1 and 2. In the case of Renesas R-Car hardware we know that there are generations of SoCs, e.g. Gen 1 and 2. But beyond that it's not clear what the relationship between IP blocks might be. For example, I believe that r8a7790 is older than r8a7791 but that doesn't imply that the latter is a descendant of the former or vice versa.
We can, however, by examining the documentation and behaviour of the hardware at run-time observe that the current driver implementation appears to be compatible with the IP blocks on SoCs within a given generation. For the above reasons, and for convenience when enabling new SoCs, a per-generation fallback compatibility string scheme is being adopted for drivers for Renesas SoCs. Note that R-Car Gen2 and RZ/G1 have many compatible IP blocks. The approach that has been consistently taken for other IP blocks is to name common code, compatibility strings and so on after R-Car Gen2. Signed-off-by: Simon Horman Reviewed-by: Geert Uytterhoeven Acked-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index c9f92fc8555e..7e060aa9fbed 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3095,6 +3095,8 @@ static const struct of_device_id sh_eth_match_table[] = { { .compatible = "renesas,ether-r8a7793", .data = &rcar_gen2_data }, { .compatible = "renesas,ether-r8a7794", .data = &rcar_gen2_data }, { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data }, + { .compatible = "renesas,rcar-gen1-ether", .data = &rcar_gen1_data }, + { .compatible = "renesas,rcar-gen2-ether", .data = &rcar_gen2_data }, { } }; MODULE_DEVICE_TABLE(of, sh_eth_match_table); -- cgit v1.2.3 From bda1e229153fbdd0efd22a14c1c76a28c05d1b27 Mon Sep 17 00:00:00 2001 From: Kumar Sanghvi Date: Wed, 18 Oct 2017 20:49:07 +0530 Subject: cxgb4: add tc flower match support for TOS Add support for matching on IP TOS. Also check that the ethtype value is either IPv4 or IPv6. Signed-off-by: Kumar Sanghvi Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- .../net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 51 +++++++++++++++++++++- 1 file changed, 50 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 92a311767381..647c86ae343d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -147,6 +147,19 @@ static void cxgb4_process_flow_match(struct net_device *dev, fs->mask.fport = cpu_to_be16(mask->src); } + if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) { + struct flow_dissector_key_ip *key, *mask; + + key = skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_IP, + cls->key); + mask = skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_IP, + cls->mask); + fs->val.tos = key->tos; + fs->mask.tos = mask->tos; + } + /* Match only packets coming from the ingress port where this * filter will be created.
*/ @@ -157,16 +170,52 @@ static void cxgb4_process_flow_match(struct net_device *dev, static int cxgb4_validate_flow_match(struct net_device *dev, struct tc_cls_flower_offload *cls) { + u16 ethtype_mask = 0; + u16 ethtype_key = 0; + if (cls->dissector->used_keys & ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_BASIC) | BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | - BIT(FLOW_DISSECTOR_KEY_PORTS))) { + BIT(FLOW_DISSECTOR_KEY_PORTS) | + BIT(FLOW_DISSECTOR_KEY_IP))) { netdev_warn(dev, "Unsupported key used: 0x%x\n", cls->dissector->used_keys); return -EOPNOTSUPP; } + + if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *key = + skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_BASIC, + cls->key); + struct flow_dissector_key_basic *mask = + skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_BASIC, + cls->mask); + ethtype_key = ntohs(key->n_proto); + ethtype_mask = ntohs(mask->n_proto); + } + + if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) { + u16 eth_ip_type = ethtype_key & ethtype_mask; + struct flow_dissector_key_ip *mask; + + if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) { + netdev_err(dev, "IP Key supported only with IPv4/v6"); + return -EINVAL; + } + + mask = skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_IP, + cls->mask); + if (mask->ttl) { + netdev_warn(dev, "ttl match unsupported for offload"); + return -EOPNOTSUPP; + } + } + return 0; } -- cgit v1.2.3 From ad9af3e09cb6b201db56190d92eb3ffe469a0bc4 Mon Sep 17 00:00:00 2001 From: Kumar Sanghvi Date: Wed, 18 Oct 2017 20:49:08 +0530 Subject: cxgb4: add tc flower match support for vlan Add support for matching on vlan tci. Construct vlan tci match param based on vlan-id and vlan-pcp values supplied by tc. Signed-off-by: Kumar Sanghvi Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- .../net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 35 ++++++++++++++++++++++ 1 file changed, 35 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 647c86ae343d..f7554b768e9d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -160,6 +160,40 @@ static void cxgb4_process_flow_match(struct net_device *dev, fs->mask.tos = mask->tos; } + if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_dissector_key_vlan *key, *mask; + u16 vlan_tci, vlan_tci_mask; + + key = skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_VLAN, + cls->key); + mask = skb_flow_dissector_target(cls->dissector, + FLOW_DISSECTOR_KEY_VLAN, + cls->mask); + vlan_tci = key->vlan_id | (key->vlan_priority << + VLAN_PRIO_SHIFT); + vlan_tci_mask = mask->vlan_id | (mask->vlan_priority << + VLAN_PRIO_SHIFT); + fs->val.ivlan = cpu_to_be16(vlan_tci); + fs->mask.ivlan = cpu_to_be16(vlan_tci_mask); + + /* Chelsio adapters use ivlan_vld bit to match vlan packets + * as 802.1Q. Also, when vlan tag is present in packets, + * ethtype match is used then to match on ethtype of inner + * header ie. the header following the vlan header. + * So, set the ivlan_vld based on ethtype info supplied by + * TC for vlan packets if its 802.1Q. And then reset the + * ethtype value else, hw will try to match the supplied + * ethtype value with ethtype of inner header. 
+ */ + if (fs->val.ethtype == ETH_P_8021Q) { + fs->val.ivlan_vld = 1; + fs->mask.ivlan_vld = 1; + fs->val.ethtype = 0; + fs->mask.ethtype = 0; + } + } + /* Match only packets coming from the ingress port where this * filter will be created. */ @@ -179,6 +213,7 @@ static int cxgb4_validate_flow_match(struct net_device *dev, BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | BIT(FLOW_DISSECTOR_KEY_PORTS) | + BIT(FLOW_DISSECTOR_KEY_VLAN) | BIT(FLOW_DISSECTOR_KEY_IP))) { netdev_warn(dev, "Unsupported key used: 0x%x\n", cls->dissector->used_keys); -- cgit v1.2.3 From c39bff47d735e39fdbf59ad56df5581b0cf88c7c Mon Sep 17 00:00:00 2001 From: Kumar Sanghvi Date: Wed, 18 Oct 2017 20:49:09 +0530 Subject: cxgb4: add tc flower support for action PASS Add support for tc flower action PASS. Signed-off-by: Kumar Sanghvi Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index f7554b768e9d..4d4f3af20496 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -263,7 +263,9 @@ static void cxgb4_process_flow_actions(struct net_device *in, tcf_exts_to_list(cls->exts, &actions); list_for_each_entry(a, &actions, list) { - if (is_tcf_gact_shot(a)) { + if (is_tcf_gact_ok(a)) { + fs->action = FILTER_PASS; + } else if (is_tcf_gact_shot(a)) { fs->action = FILTER_DROP; } else if (is_tcf_mirred_egress_redirect(a)) { int ifindex = tcf_mirred_ifindex(a); @@ -306,7 +308,9 @@ static int cxgb4_validate_flow_actions(struct net_device *dev, tcf_exts_to_list(cls->exts, &actions); list_for_each_entry(a, &actions, list) { - if (is_tcf_gact_shot(a)) { + if (is_tcf_gact_ok(a)) { + /* Do nothing */ + } else if (is_tcf_gact_shot(a)) { /* Do nothing */ } else if (is_tcf_mirred_egress_redirect(a)) { struct adapter *adap = netdev2adap(dev); -- cgit v1.2.3 From 27ece1f357b71c63e6e35c645b9c344835d4a129 Mon Sep 17 00:00:00 2001 From: Kumar Sanghvi Date: Wed, 18 Oct 2017 20:49:10 +0530 Subject: cxgb4: add tc flower support for ETH-DMAC rewrite Add support for ETH-DMAC Rewrite via TC-PEDIT action. Also, add check to assert that vlan/eth-dmac rewrite actions are valid only in combination with action egress redirect. Signed-off-by: Kumar Sanghvi Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- .../net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 108 ++++++++++++++++++++- .../net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h | 19 ++++ 2 files changed, 126 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 4d4f3af20496..7c8b0c65934c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -32,8 +32,9 @@ * SOFTWARE. 
*/ -#include #include +#include +#include #include #include "cxgb4.h" @@ -41,6 +42,11 @@ #define STATS_CHECK_PERIOD (HZ / 2) +struct ch_tc_pedit_fields pedits[] = { + PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0), + PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4), +}; + static struct ch_tc_flower_entry *allocate_flower_entry(void) { struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL); @@ -254,6 +260,41 @@ static int cxgb4_validate_flow_match(struct net_device *dev, return 0; } +static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask, + u8 field) +{ + u32 set_val = val & ~mask; + u32 offset; + u8 size; + int i; + + for (i = 0; i < ARRAY_SIZE(pedits); i++) { + if (pedits[i].field == field) { + offset = pedits[i].offset; + size = pedits[i].size; + break; + } + } + memcpy((u8 *)fs + offset, &set_val, size); +} + +static void process_pedit_field(struct ch_filter_specification *fs, u32 val, + u32 mask, u32 offset, u8 htype) +{ + switch (htype) { + case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: + switch (offset) { + case PEDIT_ETH_DMAC_31_0: + fs->newdmac = 1; + offload_pedit(fs, val, mask, ETH_DMAC_31_0); + break; + case PEDIT_ETH_DMAC_47_32_SMAC_15_0: + if (~mask & PEDIT_ETH_DMAC_MASK) + offload_pedit(fs, val, mask, ETH_DMAC_47_32); + } + } +} + static void cxgb4_process_flow_actions(struct net_device *in, struct tc_cls_flower_offload *cls, struct ch_filter_specification *fs) @@ -296,6 +337,21 @@ static void cxgb4_process_flow_actions(struct net_device *in, default: break; } + } else if (is_tcf_pedit(a)) { + u32 mask, val, offset; + int nkeys, i; + u8 htype; + + nkeys = tcf_pedit_nkeys(a); + for (i = 0; i < nkeys; i++) { + htype = tcf_pedit_htype(a, i); + mask = tcf_pedit_mask(a, i); + val = tcf_pedit_val(a, i); + offset = tcf_pedit_offset(a, i); + + process_pedit_field(fs, val, mask, offset, + htype); + } } } } @@ -304,6 +360,9 @@ static int cxgb4_validate_flow_actions(struct net_device *dev, struct tc_cls_flower_offload *cls) { const struct tc_action *a; + bool act_redir = false; + bool act_pedit = false; + bool act_vlan = false; LIST_HEAD(actions); tcf_exts_to_list(cls->exts, &actions); @@ -335,6 +394,7 @@ static int cxgb4_validate_flow_actions(struct net_device *dev, __func__); return -EINVAL; } + act_redir = true; } else if (is_tcf_vlan(a)) { u16 proto = be16_to_cpu(tcf_vlan_push_proto(a)); u32 vlan_action = tcf_vlan_action(a); @@ -355,11 +415,57 @@ static int cxgb4_validate_flow_actions(struct net_device *dev, __func__); return -EOPNOTSUPP; } + act_vlan = true; + } else if (is_tcf_pedit(a)) { + u32 mask, val, offset; + u8 cmd, htype; + int nkeys, i; + + nkeys = tcf_pedit_nkeys(a); + for (i = 0; i < nkeys; i++) { + htype = tcf_pedit_htype(a, i); + cmd = tcf_pedit_cmd(a, i); + mask = tcf_pedit_mask(a, i); + val = tcf_pedit_val(a, i); + offset = tcf_pedit_offset(a, i); + + if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) { + netdev_err(dev, "%s: Unsupported pedit cmd\n", + __func__); + return -EOPNOTSUPP; + } + + switch (htype) { + case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: + switch (offset) { + case PEDIT_ETH_DMAC_31_0: + case PEDIT_ETH_DMAC_47_32_SMAC_15_0: + break; + default: + netdev_err(dev, "%s: Unsupported pedit field\n", + __func__); + return -EOPNOTSUPP; + } + break; + default: + netdev_err(dev, "%s: Unsupported pedit type\n", + __func__); + return -EOPNOTSUPP; + } + } + act_pedit = true; } else { netdev_err(dev, "%s: Unsupported action\n", __func__); return -EOPNOTSUPP; } } + + if ((act_pedit || act_vlan) && !act_redir) { + netdev_err(dev, "%s: pedit/vlan rewrite invalid without 
egress redirect\n", + __func__); + return -EINVAL; + } + return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h index 604feffc752e..7bf6cfa892aa 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h @@ -54,6 +54,25 @@ struct ch_tc_flower_entry { u32 filter_id; }; +enum { + ETH_DMAC_31_0, /* dmac bits 0.. 31 */ + ETH_DMAC_47_32, /* dmac bits 32..47 */ +}; + +struct ch_tc_pedit_fields { + u8 field; + u8 size; + u32 offset; +}; + +#define PEDIT_FIELDS(type, field, size, fs_field, offset) \ + { type## field, size, \ + offsetof(struct ch_filter_specification, fs_field) + (offset) } + +#define PEDIT_ETH_DMAC_MASK 0xffff +#define PEDIT_ETH_DMAC_31_0 0x0 +#define PEDIT_ETH_DMAC_47_32_SMAC_15_0 0x4 + int cxgb4_tc_flower_replace(struct net_device *dev, struct tc_cls_flower_offload *cls); int cxgb4_tc_flower_destroy(struct net_device *dev, -- cgit v1.2.3 From 3bdb376e6944134d0f4d6d65497054a54ef273c9 Mon Sep 17 00:00:00 2001 From: Kumar Sanghvi Date: Wed, 18 Oct 2017 20:49:11 +0530 Subject: cxgb4: introduce SMT ops to prepare for SMAC rewrite support Introduce SMT operations for allocating/removing entries from SMAC table. Make TCAM filters use the SMT ops whenever SMAC rewrite is required. Signed-off-by: Kumar Sanghvi Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/Makefile | 2 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 3 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 107 ++++++++-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 12 ++ drivers/net/ethernet/chelsio/cxgb4/smt.c | 247 ++++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/smt.h | 76 +++++++ drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 44 +++- drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h | 47 ++++ 8 files changed, 519 insertions(+), 19 deletions(-) create mode 100644 drivers/net/ethernet/chelsio/cxgb4/smt.c create mode 100644 drivers/net/ethernet/chelsio/cxgb4/smt.h create mode 100644 drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile index 70d454379996..43c86b74dfb8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/Makefile +++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile @@ -4,7 +4,7 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o -cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \ +cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \ cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \ cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o \ cudbg_common.o cudbg_lib.o diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 4eaca05ebd3a..f7aa3516bf12 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -858,6 +858,7 @@ struct adapter { unsigned int clipt_start; unsigned int clipt_end; struct clip_tbl *clipt; + struct smt_data *smt; struct cxgb4_uld_info *uld; void *uld_handle[CXGB4_ULD_MAX]; unsigned int num_uld; @@ -1098,9 +1099,9 @@ struct filter_entry { u32 locked:1; /* filter is administratively locked */ u32 pending:1; /* filter action is pending firmware reply */ - u32 smtidx:8; /* Source MAC Table index for smac */ struct filter_ctx *ctx; /* Caller's completion hook */ struct l2t_entry *l2t; /* Layer Two Table entry 
for dmac */ + struct smt_entry *smt; /* Source Mac Table entry for smac */ struct net_device *dev; /* Associated net device */ u32 tid; /* This will store the actual tid */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 15361ca2857c..a8084be5e13b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -34,7 +34,9 @@ #include "cxgb4.h" #include "t4_regs.h" +#include "t4_tcb.h" #include "l2t.h" +#include "smt.h" #include "t4fw_api.h" #include "cxgb4_filter.h" @@ -332,6 +334,21 @@ int set_filter_wr(struct adapter *adapter, int fidx) } } + /* If the new filter requires loopback Source MAC rewriting then + * we need to allocate a SMT entry for the filter. + */ + if (f->fs.newsmac) { + f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac); + if (!f->smt) { + if (f->l2t) { + cxgb4_l2t_release(f->l2t); + f->l2t = NULL; + } + kfree_skb(skb); + return -ENOMEM; + } + } + fwr = __skb_put_zero(skb, sizeof(*fwr)); /* It would be nice to put most of the following in t4_hw.c but most @@ -357,7 +374,6 @@ int set_filter_wr(struct adapter *adapter, int fidx) FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) | FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) | FW_FILTER_WR_DMAC_V(f->fs.newdmac) | - FW_FILTER_WR_SMAC_V(f->fs.newsmac) | FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) | FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE || @@ -404,8 +420,6 @@ int set_filter_wr(struct adapter *adapter, int fidx) fwr->lpm = htons(f->fs.mask.lport); fwr->fp = htons(f->fs.val.fport); fwr->fpm = htons(f->fs.mask.fport); - if (f->fs.newsmac) - memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma)); /* Mark the filter as "pending" and ship off the Filter Work Request. * When we get the Work Request Reply we'll clear the pending status. @@ -463,6 +477,9 @@ void clear_filter(struct adapter *adap, struct filter_entry *f) if (f->l2t) cxgb4_l2t_release(f->l2t); + if (f->smt) + cxgb4_smt_release(f->smt); + /* The zeroing of the filter rule below clears the filter valid, * pending, locked flags, l2t pointer, etc. so it's all we need for * this operation. @@ -757,6 +774,62 @@ out: return ret; } +static int set_tcb_field(struct adapter *adap, struct filter_entry *f, + unsigned int ftid, u16 word, u64 mask, u64 val, + int no_reply) +{ + struct cpl_set_tcb_field *req; + struct sk_buff *skb; + + skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + req = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*req)); + memset(req, 0, sizeof(*req)); + INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, ftid); + req->reply_ctrl = htons(REPLY_CHAN_V(0) | + QUEUENO_V(adap->sge.fw_evtq.abs_id) | + NO_REPLY_V(no_reply)); + req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(ftid)); + req->mask = cpu_to_be64(mask); + req->val = cpu_to_be64(val); + set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3); + t4_ofld_send(adap, skb); + return 0; +} + +/* Set one of the t_flags bits in the TCB. + */ +static int set_tcb_tflag(struct adapter *adap, struct filter_entry *f, + unsigned int ftid, unsigned int bit_pos, + unsigned int val, int no_reply) +{ + return set_tcb_field(adap, f, ftid, TCB_T_FLAGS_W, 1ULL << bit_pos, + (unsigned long long)val << bit_pos, no_reply); +} + +static int configure_filter_smac(struct adapter *adap, struct filter_entry *f) +{ + int err; + + /* do a set-tcb for smac-sel and CWR bit.. 
*/ + err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1); + if (err) + goto smac_err; + + err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W, + TCB_SMAC_SEL_V(TCB_SMAC_SEL_M), + TCB_SMAC_SEL_V(f->smt->idx), 1); + if (!err) + return 0; + +smac_err: + dev_err(adap->pdev_dev, "filter %u smac config failed with error %u\n", + f->tid, err); + return err; +} + /* Handle a filter write/deletion reply. */ void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl) { @@ -795,19 +868,23 @@ void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl) clear_filter(adap, f); if (ctx) ctx->result = 0; - } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) { - dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n", - idx); - clear_filter(adap, f); - if (ctx) - ctx->result = -ENOMEM; } else if (ret == FW_FILTER_WR_FLT_ADDED) { - f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff; - f->pending = 0; /* asynchronous setup completed */ - f->valid = 1; - if (ctx) { - ctx->result = 0; - ctx->tid = idx; + int err = 0; + + if (f->fs.newsmac) + err = configure_filter_smac(adap, f); + + if (!err) { + f->pending = 0; /* async setup completed */ + f->valid = 1; + if (ctx) { + ctx->result = 0; + ctx->tid = idx; + } + } else { + clear_filter(adap, f); + if (ctx) + ctx->result = err; } } else { /* Something went wrong. Issue a warning about the diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 8d97ae6039aa..796b37de464f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -77,6 +77,7 @@ #include "cxgb4_debugfs.h" #include "clip_tbl.h" #include "l2t.h" +#include "smt.h" #include "sched.h" #include "cxgb4_tc_u32.h" #include "cxgb4_tc_flower.h" @@ -563,6 +564,10 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, const struct cpl_l2t_write_rpl *p = (void *)rsp; do_l2t_write_rpl(q->adap, p); + } else if (opcode == CPL_SMT_WRITE_RPL) { + const struct cpl_smt_write_rpl *p = (void *)rsp; + + do_smt_write_rpl(q->adap, p); } else if (opcode == CPL_SET_TCB_RPL) { const struct cpl_set_tcb_rpl *p = (void *)rsp; @@ -4641,6 +4646,7 @@ static void free_some_resources(struct adapter *adapter) { unsigned int i; + kvfree(adapter->smt); kvfree(adapter->l2t); t4_cleanup_sched(adapter); kvfree(adapter->tids.tid_tab); @@ -5067,6 +5073,12 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) */ cfg_queues(adapter); + adapter->smt = t4_init_smt(); + if (!adapter->smt) { + /* We tolerate a lack of SMT, giving up some functionality */ + dev_warn(&pdev->dev, "could not allocate SMT, continuing\n"); + } + adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end); if (!adapter->l2t) { /* We tolerate a lack of L2T, giving up some functionality */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.c b/drivers/net/ethernet/chelsio/cxgb4/smt.c new file mode 100644 index 000000000000..7b2207a2a130 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/smt.c @@ -0,0 +1,247 @@ +/* + * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux. + * + * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include "cxgb4.h" +#include "smt.h" +#include "t4_msg.h" +#include "t4fw_api.h" +#include "t4_regs.h" +#include "t4_values.h" + +struct smt_data *t4_init_smt(void) +{ + unsigned int smt_size; + struct smt_data *s; + int i; + + smt_size = SMT_SIZE; + + s = kvzalloc(sizeof(*s) + smt_size * sizeof(struct smt_entry), + GFP_KERNEL); + if (!s) + return NULL; + s->smt_size = smt_size; + rwlock_init(&s->lock); + for (i = 0; i < s->smt_size; ++i) { + s->smtab[i].idx = i; + s->smtab[i].state = SMT_STATE_UNUSED; + memset(&s->smtab[i].src_mac, 0, ETH_ALEN); + spin_lock_init(&s->smtab[i].lock); + atomic_set(&s->smtab[i].refcnt, 0); + } + return s; +} + +static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac) +{ + struct smt_entry *first_free = NULL; + struct smt_entry *e, *end; + + for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) { + if (atomic_read(&e->refcnt) == 0) { + if (!first_free) + first_free = e; + } else { + if (e->state == SMT_STATE_SWITCHING) { + /* This entry is actually in use. See if we can + * re-use it ? 
+ */ + if (memcmp(e->src_mac, smac, ETH_ALEN) == 0) + goto found_reuse; + } + } + } + + if (first_free) { + e = first_free; + goto found; + } + return NULL; + +found: + e->state = SMT_STATE_UNUSED; + +found_reuse: + return e; +} + +static void t4_smte_free(struct smt_entry *e) +{ + spin_lock_bh(&e->lock); + if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */ + e->state = SMT_STATE_UNUSED; + } + spin_unlock_bh(&e->lock); +} + +/** + * @e: smt entry to release + * + * Releases ref count and frees up an smt entry from SMT table + */ +void cxgb4_smt_release(struct smt_entry *e) +{ + if (atomic_dec_and_test(&e->refcnt)) + t4_smte_free(e); +} +EXPORT_SYMBOL(cxgb4_smt_release); + +void do_smt_write_rpl(struct adapter *adap, const struct cpl_smt_write_rpl *rpl) +{ + unsigned int smtidx = TID_TID_G(GET_TID(rpl)); + struct smt_data *s = adap->smt; + + if (unlikely(rpl->status != CPL_ERR_NONE)) { + struct smt_entry *e = &s->smtab[smtidx]; + + dev_err(adap->pdev_dev, + "Unexpected SMT_WRITE_RPL status %u for entry %u\n", + rpl->status, smtidx); + spin_lock(&e->lock); + e->state = SMT_STATE_ERROR; + spin_unlock(&e->lock); + return; + } +} + +static int write_smt_entry(struct adapter *adapter, struct smt_entry *e) +{ + struct cpl_t6_smt_write_req *t6req; + struct smt_data *s = adapter->smt; + struct cpl_smt_write_req *req; + struct sk_buff *skb; + int size; + u8 row; + + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) { + size = sizeof(*req); + skb = alloc_skb(size, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + /* Source MAC Table (SMT) contains 256 SMAC entries + * organized in 128 rows of 2 entries each. + */ + req = (struct cpl_smt_write_req *)__skb_put(skb, size); + INIT_TP_WR(req, 0); + + /* Each row contains an SMAC pair. + * LSB selects the SMAC entry within a row + */ + row = (e->idx >> 1); + if (e->idx & 1) { + req->pfvf1 = 0x0; + memcpy(req->src_mac1, e->src_mac, ETH_ALEN); + + /* fill pfvf0/src_mac0 with entry + * at prev index from smt-tab. 
+ */ + req->pfvf0 = 0x0; + memcpy(req->src_mac0, s->smtab[e->idx - 1].src_mac, + ETH_ALEN); + } else { + req->pfvf0 = 0x0; + memcpy(req->src_mac0, e->src_mac, ETH_ALEN); + + /* fill pfvf1/src_mac1 with entry + * at next index from smt-tab + */ + req->pfvf1 = 0x0; + memcpy(req->src_mac1, s->smtab[e->idx + 1].src_mac, + ETH_ALEN); + } + } else { + size = sizeof(*t6req); + skb = alloc_skb(size, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + /* Source MAC Table (SMT) contains 256 SMAC entries */ + t6req = (struct cpl_t6_smt_write_req *)__skb_put(skb, size); + INIT_TP_WR(t6req, 0); + req = (struct cpl_smt_write_req *)t6req; + + /* fill pfvf0/src_mac0 from smt-tab */ + req->pfvf0 = 0x0; + memcpy(req->src_mac0, s->smtab[e->idx].src_mac, ETH_ALEN); + row = e->idx; + } + + OPCODE_TID(req) = + htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, e->idx | + TID_QID_V(adapter->sge.fw_evtq.abs_id))); + req->params = htonl(SMTW_NORPL_V(0) | + SMTW_IDX_V(row) | + SMTW_OVLAN_IDX_V(0)); + t4_mgmt_tx(adapter, skb); + return 0; +} + +static struct smt_entry *t4_smt_alloc_switching(struct adapter *adap, u16 pfvf, + u8 *smac) +{ + struct smt_data *s = adap->smt; + struct smt_entry *e; + + write_lock_bh(&s->lock); + e = find_or_alloc_smte(s, smac); + if (e) { + spin_lock(&e->lock); + if (!atomic_read(&e->refcnt)) { + atomic_set(&e->refcnt, 1); + e->state = SMT_STATE_SWITCHING; + e->pfvf = pfvf; + memcpy(e->src_mac, smac, ETH_ALEN); + write_smt_entry(adap, e); + } else { + atomic_inc(&e->refcnt); + } + spin_unlock(&e->lock); + } + write_unlock_bh(&s->lock); + return e; +} + +/** + * @dev: net_device pointer + * @smac: MAC address to add to SMT + * Returns pointer to the SMT entry created + * + * Allocates an SMT entry to be used by switching rule of a filter. + */ +struct smt_entry *cxgb4_smt_alloc_switching(struct net_device *dev, u8 *smac) +{ + struct adapter *adap = netdev2adap(dev); + + return t4_smt_alloc_switching(adap, 0x0, smac); +} +EXPORT_SYMBOL(cxgb4_smt_alloc_switching); diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.h b/drivers/net/ethernet/chelsio/cxgb4/smt.h new file mode 100644 index 000000000000..d6c2cc271398 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/smt.h @@ -0,0 +1,76 @@ +/* + * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux. + * + * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __CXGB4_SMT_H +#define __CXGB4_SMT_H + +#include +#include +#include + +struct adapter; +struct cpl_smt_write_rpl; + +/* SMT related handling. Heavily adapted based on l2t ops in l2t.h/l2t.c + */ +enum { + SMT_STATE_SWITCHING, + SMT_STATE_UNUSED, + SMT_STATE_ERROR +}; + +enum { + SMT_SIZE = 256 +}; + +struct smt_entry { + u16 state; + u16 idx; + u16 pfvf; + u8 src_mac[ETH_ALEN]; + atomic_t refcnt; + spinlock_t lock; /* protect smt entry add,removal */ +}; + +struct smt_data { + unsigned int smt_size; + rwlock_t lock; + struct smt_entry smtab[0]; +}; + +struct smt_data *t4_init_smt(void); +struct smt_entry *cxgb4_smt_alloc_switching(struct net_device *dev, u8 *smac); +void cxgb4_smt_release(struct smt_entry *e); +void do_smt_write_rpl(struct adapter *p, const struct cpl_smt_write_rpl *rpl); +#endif /* __CXGB4_SMT_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index b0ff78da8aa2..ce4838d907da 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -50,6 +50,7 @@ enum { CPL_RX_DATA_ACK = 0xD, CPL_TX_PKT = 0xE, CPL_L2T_WRITE_REQ = 0x12, + CPL_SMT_WRITE_REQ = 0x14, CPL_TID_RELEASE = 0x1A, CPL_TX_DATA_ISO = 0x1F, @@ -60,6 +61,7 @@ enum { CPL_PEER_CLOSE = 0x26, CPL_ABORT_REQ_RSS = 0x2B, CPL_ABORT_RPL_RSS = 0x2D, + CPL_SMT_WRITE_RPL = 0x2E, CPL_RX_PHYS_ADDR = 0x30, CPL_CLOSE_CON_RPL = 0x32, @@ -681,8 +683,8 @@ struct cpl_set_tcb_field { }; /* cpl_set_tcb_field.word_cookie fields */ -#define TCB_WORD_S 0 -#define TCB_WORD(x) ((x) << TCB_WORD_S) +#define TCB_WORD_S 0 +#define TCB_WORD_V(x) ((x) << TCB_WORD_S) #define TCB_COOKIE_S 5 #define TCB_COOKIE_M 0x7 @@ -1266,6 +1268,44 @@ struct cpl_l2t_write_rpl { u8 rsvd[3]; }; +struct cpl_smt_write_req { + WR_HDR; + union opcode_tid ot; + __be32 params; + __be16 pfvf1; + u8 src_mac1[6]; + __be16 pfvf0; + u8 src_mac0[6]; +}; + +struct cpl_t6_smt_write_req { + WR_HDR; + union opcode_tid ot; + __be32 params; + __be64 tag; + __be16 pfvf0; + u8 src_mac0[6]; + __be32 local_ip; + __be32 rsvd; +}; + +struct cpl_smt_write_rpl { + union opcode_tid ot; + u8 status; + u8 rsvd[3]; +}; + +/* cpl_smt_{read,write}_req.params fields */ +#define SMTW_OVLAN_IDX_S 16 +#define SMTW_OVLAN_IDX_V(x) ((x) << SMTW_OVLAN_IDX_S) + +#define SMTW_IDX_S 20 +#define SMTW_IDX_V(x) ((x) << SMTW_IDX_S) + +#define SMTW_NORPL_S 31 +#define SMTW_NORPL_V(x) ((x) << SMTW_NORPL_S) +#define SMTW_NORPL_F SMTW_NORPL_V(1U) + struct cpl_rdma_terminate { union opcode_tid ot; __be16 rsvd; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h new file mode 100644 index 000000000000..c1c76663034d --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h @@ -0,0 +1,47 @@ +/* + * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux. + * + * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __T4_TCB_H +#define __T4_TCB_H + +#define TCB_SMAC_SEL_W 0 +#define TCB_SMAC_SEL_S 24 +#define TCB_SMAC_SEL_M 0xffULL +#define TCB_SMAC_SEL_V(x) ((x) << TCB_SMAC_SEL_S) + +#define TCB_T_FLAGS_W 1 + +#define TF_CCTRL_CWR_S 61 + +#endif /* __T4_TCB_H */ -- cgit v1.2.3 From 202187c34c7e3efd9662a25977cddef6e7dec572 Mon Sep 17 00:00:00 2001 From: Kumar Sanghvi Date: Wed, 18 Oct 2017 20:49:12 +0530 Subject: cxgb4: add tc flower support for ETH-SMAC rewrite Adds support for ETH-SMAC rewrite via TC-PEDIT action. Signed-off-by: Kumar Sanghvi Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 10 ++++++++++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h | 3 +++ 2 files changed, 13 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 7c8b0c65934c..34d67a2a86f4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -45,6 +45,8 @@ struct ch_tc_pedit_fields pedits[] = { PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0), PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4), + PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0), + PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2), }; static struct ch_tc_flower_entry *allocate_flower_entry(void) @@ -291,6 +293,13 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, case PEDIT_ETH_DMAC_47_32_SMAC_15_0: if (~mask & PEDIT_ETH_DMAC_MASK) offload_pedit(fs, val, mask, ETH_DMAC_47_32); + else + offload_pedit(fs, val >> 16, mask >> 16, + ETH_SMAC_15_0); + break; + case PEDIT_ETH_SMAC_47_16: + fs->newsmac = 1; + offload_pedit(fs, val, mask, ETH_SMAC_47_16); } } } @@ -440,6 +449,7 @@ static int cxgb4_validate_flow_actions(struct net_device *dev, switch (offset) { case PEDIT_ETH_DMAC_31_0: case PEDIT_ETH_DMAC_47_32_SMAC_15_0: + case PEDIT_ETH_SMAC_47_16: break; default: netdev_err(dev, "%s: Unsupported pedit field\n", diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h index 7bf6cfa892aa..a2acb782918f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h @@ -57,6 +57,8 @@ struct ch_tc_flower_entry { enum { ETH_DMAC_31_0, /* dmac bits 0.. 31 */ ETH_DMAC_47_32, /* dmac bits 32..47 */ + ETH_SMAC_15_0, /* smac bits 0.. 15 */ + ETH_SMAC_47_16, /* smac bits 16..47 */ }; struct ch_tc_pedit_fields { @@ -72,6 +74,7 @@ struct ch_tc_pedit_fields { #define PEDIT_ETH_DMAC_MASK 0xffff #define PEDIT_ETH_DMAC_31_0 0x0 #define PEDIT_ETH_DMAC_47_32_SMAC_15_0 0x4 +#define PEDIT_ETH_SMAC_47_16 0x8 int cxgb4_tc_flower_replace(struct net_device *dev, struct tc_cls_flower_offload *cls); -- cgit v1.2.3 From 0ff909946155ed1af2ec8feed3c1bac485201683 Mon Sep 17 00:00:00 2001 From: Kumar Sanghvi Date: Wed, 18 Oct 2017 20:49:13 +0530 Subject: cxgb4: introduce fw_filter2_wr to prepare for L3/L4 rewrite support Update driver to use new fw_filter2_wr in order to support rewrite of L3/L4 header fields via filters. Query FW_PARAMS_PARAM_DEV_FILTER2_WR to check whether FW supports this new wr. Signed-off-by: Kumar Sanghvi Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 10 +++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 19 +++++- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 10 +++ drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 75 ++++++++++++++++++++++- 4 files changed, 111 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index f7aa3516bf12..6a1c0b1fe8d0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -367,6 +367,7 @@ struct adapter_params { unsigned int max_ird_adapter; /* Max read depth per adapter */ bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */ u8 fw_caps_support; /* 32-bit Port Capabilities */ + bool filter2_wr_support; /* FW support for FILTER2_WR */ /* MPS Buffer Group Map[per Port]. Bit i is set if buffer group i is * used by the Port @@ -1064,10 +1065,19 @@ struct ch_filter_specification { uint32_t newdmac:1; /* rewrite destination MAC address */ uint32_t newsmac:1; /* rewrite source MAC address */ uint32_t newvlan:2; /* rewrite VLAN Tag */ + uint32_t nat_mode:3; /* specify NAT operation mode */ uint8_t dmac[ETH_ALEN]; /* new destination MAC address */ uint8_t smac[ETH_ALEN]; /* new source MAC address */ uint16_t vlan; /* VLAN Tag to insert */ + u8 nat_lip[16]; /* local IP to use after NAT'ing */ + u8 nat_fip[16]; /* foreign IP to use after NAT'ing */ + u16 nat_lport; /* local port to use after NAT'ing */ + u16 nat_fport; /* foreign port to use after NAT'ing */ + + /* reservation for future additions */ + u8 rsvd[24]; + /* Filter rule value/mask pairs. */ struct ch_filter_tuple val; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index a8084be5e13b..89272f29f807 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -313,7 +313,7 @@ static int del_filter_wr(struct adapter *adapter, int fidx) int set_filter_wr(struct adapter *adapter, int fidx) { struct filter_entry *f = &adapter->tids.ftid_tab[fidx]; - struct fw_filter_wr *fwr; + struct fw_filter2_wr *fwr; struct sk_buff *skb; skb = alloc_skb(sizeof(*fwr), GFP_KERNEL); @@ -359,7 +359,10 @@ int set_filter_wr(struct adapter *adapter, int fidx) * filter specification structure but for now it's easiest to simply * put this fairly direct code in line ... */ - fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR)); + if (adapter->params.filter2_wr_support) + fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER2_WR)); + else + fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR)); fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr) / 16)); fwr->tid_to_iq = htonl(FW_FILTER_WR_TID_V(f->tid) | @@ -421,6 +424,18 @@ int set_filter_wr(struct adapter *adapter, int fidx) fwr->fp = htons(f->fs.val.fport); fwr->fpm = htons(f->fs.mask.fport); + if (adapter->params.filter2_wr_support) { + fwr->natmode_to_ulp_type = + FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ? + ULP_MODE_TCPDDP : + ULP_MODE_NONE) | + FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode); + memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip)); + memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip)); + fwr->newlport = htons(f->fs.nat_lport); + fwr->newfport = htons(f->fs.nat_fport); + } + /* Mark the filter as "pending" and ship off the Filter Work Request. * When we get the Work Request Reply we'll clear the pending status. 
*/ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 796b37de464f..c478291db93f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3910,6 +3910,16 @@ static int adap_init0(struct adapter *adap) 1, params, val); adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0); + /* See if FW supports FW_FILTER2 work request */ + if (is_t4(adap->params.chip)) { + adap->params.filter2_wr_support = 0; + } else { + params[0] = FW_PARAM_DEV(FILTER2_WR); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, + 1, params, val); + adap->params.filter2_wr_support = (ret == 0 && val[0] != 0); + } + /* * Get device capabilities so we can determine what resources we need * to manage. diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index ca2756dcefc5..875d4a72b3ef 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -105,7 +105,8 @@ enum fw_wr_opcodes { FW_ISCSI_TX_DATA_WR = 0x45, FW_PTP_TX_PKT_WR = 0x46, FW_CRYPTO_LOOKASIDE_WR = 0X6d, - FW_LASTC2E_WR = 0x70 + FW_LASTC2E_WR = 0x70, + FW_FILTER2_WR = 0x77 }; struct fw_wr_hdr { @@ -201,6 +202,51 @@ struct fw_filter_wr { __u8 sma[6]; }; +struct fw_filter2_wr { + __be32 op_pkd; + __be32 len16_pkd; + __be64 r3; + __be32 tid_to_iq; + __be32 del_filter_to_l2tix; + __be16 ethtype; + __be16 ethtypem; + __u8 frag_to_ovlan_vldm; + __u8 smac_sel; + __be16 rx_chan_rx_rpl_iq; + __be32 maci_to_matchtypem; + __u8 ptcl; + __u8 ptclm; + __u8 ttyp; + __u8 ttypm; + __be16 ivlan; + __be16 ivlanm; + __be16 ovlan; + __be16 ovlanm; + __u8 lip[16]; + __u8 lipm[16]; + __u8 fip[16]; + __u8 fipm[16]; + __be16 lp; + __be16 lpm; + __be16 fp; + __be16 fpm; + __be16 r7; + __u8 sma[6]; + __be16 r8; + __u8 filter_type_swapmac; + __u8 natmode_to_ulp_type; + __be16 newlport; + __be16 newfport; + __u8 newlip[16]; + __u8 newfip[16]; + __be32 natseqcheck; + __be32 r9; + __be64 r10; + __be64 r11; + __be64 r12; + __be64 r13; +}; + #define FW_FILTER_WR_TID_S 12 #define FW_FILTER_WR_TID_M 0xfffff #define FW_FILTER_WR_TID_V(x) ((x) << FW_FILTER_WR_TID_S) @@ -385,6 +431,32 @@ struct fw_filter_wr { #define FW_FILTER_WR_RX_RPL_IQ_G(x) \ (((x) >> FW_FILTER_WR_RX_RPL_IQ_S) & FW_FILTER_WR_RX_RPL_IQ_M) +#define FW_FILTER2_WR_FILTER_TYPE_S 1 +#define FW_FILTER2_WR_FILTER_TYPE_M 0x1 +#define FW_FILTER2_WR_FILTER_TYPE_V(x) ((x) << FW_FILTER2_WR_FILTER_TYPE_S) +#define FW_FILTER2_WR_FILTER_TYPE_G(x) \ + (((x) >> FW_FILTER2_WR_FILTER_TYPE_S) & FW_FILTER2_WR_FILTER_TYPE_M) +#define FW_FILTER2_WR_FILTER_TYPE_F FW_FILTER2_WR_FILTER_TYPE_V(1U) + +#define FW_FILTER2_WR_NATMODE_S 5 +#define FW_FILTER2_WR_NATMODE_M 0x7 +#define FW_FILTER2_WR_NATMODE_V(x) ((x) << FW_FILTER2_WR_NATMODE_S) +#define FW_FILTER2_WR_NATMODE_G(x) \ + (((x) >> FW_FILTER2_WR_NATMODE_S) & FW_FILTER2_WR_NATMODE_M) + +#define FW_FILTER2_WR_NATFLAGCHECK_S 4 +#define FW_FILTER2_WR_NATFLAGCHECK_M 0x1 +#define FW_FILTER2_WR_NATFLAGCHECK_V(x) ((x) << FW_FILTER2_WR_NATFLAGCHECK_S) +#define FW_FILTER2_WR_NATFLAGCHECK_G(x) \ + (((x) >> FW_FILTER2_WR_NATFLAGCHECK_S) & FW_FILTER2_WR_NATFLAGCHECK_M) +#define FW_FILTER2_WR_NATFLAGCHECK_F FW_FILTER2_WR_NATFLAGCHECK_V(1U) + +#define FW_FILTER2_WR_ULP_TYPE_S 0 +#define FW_FILTER2_WR_ULP_TYPE_M 0xf +#define FW_FILTER2_WR_ULP_TYPE_V(x) ((x) << FW_FILTER2_WR_ULP_TYPE_S) +#define FW_FILTER2_WR_ULP_TYPE_G(x) \ + (((x) >> FW_FILTER2_WR_ULP_TYPE_S) & 
FW_FILTER2_WR_ULP_TYPE_M) + #define FW_FILTER_WR_MACI_S 23 #define FW_FILTER_WR_MACI_M 0x1ff #define FW_FILTER_WR_MACI_V(x) ((x) << FW_FILTER_WR_MACI_S) @@ -1127,6 +1199,7 @@ enum fw_params_param_dev { FW_PARAMS_PARAM_DEV_SCFGREV = 0x1A, FW_PARAMS_PARAM_DEV_VPDREV = 0x1B, FW_PARAMS_PARAM_DEV_RI_FR_NSMR_TPTE_WR = 0x1C, + FW_PARAMS_PARAM_DEV_FILTER2_WR = 0x1D, FW_PARAMS_PARAM_DEV_MPSBGMAP = 0x1E, }; -- cgit v1.2.3 From 557ccbf9dfa8de133b9247af42f0c5760bb103f0 Mon Sep 17 00:00:00 2001 From: Kumar Sanghvi Date: Wed, 18 Oct 2017 20:49:14 +0530 Subject: cxgb4: add tc flower support for L3/L4 rewrite Adds support to rewrite L3/L4 fields via TC-PEDIT action. Supported fields for rewrite are: IPv4 src/dst address, IPv6 src/dst address, TCP/UDP sport/dport. Also, process match fields first and then process the action items. Refactor pedit action validation to separate function to avoid excessive code indentation. Signed-off-by: Kumar Sanghvi Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 4 + .../net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 245 +++++++++++++++++---- .../net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h | 32 +++ 3 files changed, 244 insertions(+), 37 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 6a1c0b1fe8d0..92a0b022687e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1097,6 +1097,10 @@ enum { VLAN_REWRITE }; +enum { + NAT_MODE_ALL = 7, /* NAT on entire 4-tuple */ +}; + /* Host shadow copy of ingress filter entry. This is in host native format * and doesn't match the ordering or bit order, etc. of the hardware of the * firmware command. 
The use of bit-field structure elements is purely to diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 34d67a2a86f4..892dfce1fa63 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -47,6 +47,20 @@ struct ch_tc_pedit_fields pedits[] = { PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4), PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0), PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2), + PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0), + PEDIT_FIELDS(IP4_, DST, 4, nat_lip, 0), + PEDIT_FIELDS(IP6_, SRC_31_0, 4, nat_fip, 0), + PEDIT_FIELDS(IP6_, SRC_63_32, 4, nat_fip, 4), + PEDIT_FIELDS(IP6_, SRC_95_64, 4, nat_fip, 8), + PEDIT_FIELDS(IP6_, SRC_127_96, 4, nat_fip, 12), + PEDIT_FIELDS(IP6_, DST_31_0, 4, nat_lip, 0), + PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4), + PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8), + PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12), + PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0), + PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0), + PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0), + PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0), }; static struct ch_tc_flower_entry *allocate_flower_entry(void) @@ -121,6 +135,11 @@ static void cxgb4_process_flow_match(struct net_device *dev, memcpy(&fs->val.fip[0], &key->src, sizeof(key->src)); memcpy(&fs->mask.lip[0], &mask->dst, sizeof(mask->dst)); memcpy(&fs->mask.fip[0], &mask->src, sizeof(mask->src)); + + /* also initialize nat_lip/fip to same values */ + memcpy(&fs->nat_lip[0], &key->dst, sizeof(key->dst)); + memcpy(&fs->nat_fip[0], &key->src, sizeof(key->src)); + } if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { @@ -138,6 +157,10 @@ static void cxgb4_process_flow_match(struct net_device *dev, memcpy(&fs->val.fip[0], key->src.s6_addr, sizeof(key->src)); memcpy(&fs->mask.lip[0], mask->dst.s6_addr, sizeof(mask->dst)); memcpy(&fs->mask.fip[0], mask->src.s6_addr, sizeof(mask->src)); + + /* also initialize nat_lip/fip to same values */ + memcpy(&fs->nat_lip[0], key->dst.s6_addr, sizeof(key->dst)); + memcpy(&fs->nat_fip[0], key->src.s6_addr, sizeof(key->src)); } if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_PORTS)) { @@ -153,6 +176,10 @@ static void cxgb4_process_flow_match(struct net_device *dev, fs->mask.lport = cpu_to_be16(mask->dst); fs->val.fport = cpu_to_be16(key->src); fs->mask.fport = cpu_to_be16(mask->src); + + /* also initialize nat_lport/fport to same values */ + fs->nat_lport = cpu_to_be16(key->dst); + fs->nat_fport = cpu_to_be16(key->src); } if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) { @@ -301,6 +328,70 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val, fs->newsmac = 1; offload_pedit(fs, val, mask, ETH_SMAC_47_16); } + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: + switch (offset) { + case PEDIT_IP4_SRC: + offload_pedit(fs, val, mask, IP4_SRC); + break; + case PEDIT_IP4_DST: + offload_pedit(fs, val, mask, IP4_DST); + } + fs->nat_mode = NAT_MODE_ALL; + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: + switch (offset) { + case PEDIT_IP6_SRC_31_0: + offload_pedit(fs, val, mask, IP6_SRC_31_0); + break; + case PEDIT_IP6_SRC_63_32: + offload_pedit(fs, val, mask, IP6_SRC_63_32); + break; + case PEDIT_IP6_SRC_95_64: + offload_pedit(fs, val, mask, IP6_SRC_95_64); + break; + case PEDIT_IP6_SRC_127_96: + offload_pedit(fs, val, mask, IP6_SRC_127_96); + break; + case PEDIT_IP6_DST_31_0: + offload_pedit(fs, val, mask, IP6_DST_31_0); + break; + case PEDIT_IP6_DST_63_32: + 
offload_pedit(fs, val, mask, IP6_DST_63_32); + break; + case PEDIT_IP6_DST_95_64: + offload_pedit(fs, val, mask, IP6_DST_95_64); + break; + case PEDIT_IP6_DST_127_96: + offload_pedit(fs, val, mask, IP6_DST_127_96); + } + fs->nat_mode = NAT_MODE_ALL; + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: + switch (offset) { + case PEDIT_TCP_SPORT_DPORT: + if (~mask & PEDIT_TCP_UDP_SPORT_MASK) + offload_pedit(fs, cpu_to_be32(val) >> 16, + cpu_to_be32(mask) >> 16, + TCP_SPORT); + else + offload_pedit(fs, cpu_to_be32(val), + cpu_to_be32(mask), TCP_DPORT); + } + fs->nat_mode = NAT_MODE_ALL; + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP: + switch (offset) { + case PEDIT_UDP_SPORT_DPORT: + if (~mask & PEDIT_TCP_UDP_SPORT_MASK) + offload_pedit(fs, cpu_to_be32(val) >> 16, + cpu_to_be32(mask) >> 16, + UDP_SPORT); + else + offload_pedit(fs, cpu_to_be32(val), + cpu_to_be32(mask), UDP_DPORT); + } + fs->nat_mode = NAT_MODE_ALL; } } @@ -365,6 +456,119 @@ static void cxgb4_process_flow_actions(struct net_device *in, } } +static bool valid_l4_mask(u32 mask) +{ + u16 hi, lo; + + /* Either the upper 16-bits (SPORT) OR the lower + * 16-bits (DPORT) can be set, but NOT BOTH. + */ + hi = (mask >> 16) & 0xFFFF; + lo = mask & 0xFFFF; + + return hi && lo ? false : true; +} + +static bool valid_pedit_action(struct net_device *dev, + const struct tc_action *a) +{ + u32 mask, offset; + u8 cmd, htype; + int nkeys, i; + + nkeys = tcf_pedit_nkeys(a); + for (i = 0; i < nkeys; i++) { + htype = tcf_pedit_htype(a, i); + cmd = tcf_pedit_cmd(a, i); + mask = tcf_pedit_mask(a, i); + offset = tcf_pedit_offset(a, i); + + if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) { + netdev_err(dev, "%s: Unsupported pedit cmd\n", + __func__); + return false; + } + + switch (htype) { + case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: + switch (offset) { + case PEDIT_ETH_DMAC_31_0: + case PEDIT_ETH_DMAC_47_32_SMAC_15_0: + case PEDIT_ETH_SMAC_47_16: + break; + default: + netdev_err(dev, "%s: Unsupported pedit field\n", + __func__); + return false; + } + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: + switch (offset) { + case PEDIT_IP4_SRC: + case PEDIT_IP4_DST: + break; + default: + netdev_err(dev, "%s: Unsupported pedit field\n", + __func__); + return false; + } + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: + switch (offset) { + case PEDIT_IP6_SRC_31_0: + case PEDIT_IP6_SRC_63_32: + case PEDIT_IP6_SRC_95_64: + case PEDIT_IP6_SRC_127_96: + case PEDIT_IP6_DST_31_0: + case PEDIT_IP6_DST_63_32: + case PEDIT_IP6_DST_95_64: + case PEDIT_IP6_DST_127_96: + break; + default: + netdev_err(dev, "%s: Unsupported pedit field\n", + __func__); + return false; + } + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: + switch (offset) { + case PEDIT_TCP_SPORT_DPORT: + if (!valid_l4_mask(~mask)) { + netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n", + __func__); + return false; + } + break; + default: + netdev_err(dev, "%s: Unsupported pedit field\n", + __func__); + return false; + } + break; + case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP: + switch (offset) { + case PEDIT_UDP_SPORT_DPORT: + if (!valid_l4_mask(~mask)) { + netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n", + __func__); + return false; + } + break; + default: + netdev_err(dev, "%s: Unsupported pedit field\n", + __func__); + return false; + } + break; + default: + netdev_err(dev, "%s: Unsupported pedit type\n", + __func__); + return false; + } + } + return true; +} + static int cxgb4_validate_flow_actions(struct net_device *dev, struct tc_cls_flower_offload *cls) { @@ -426,43 +630,10 @@ static int 
cxgb4_validate_flow_actions(struct net_device *dev, } act_vlan = true; } else if (is_tcf_pedit(a)) { - u32 mask, val, offset; - u8 cmd, htype; - int nkeys, i; + bool pedit_valid = valid_pedit_action(dev, a); - nkeys = tcf_pedit_nkeys(a); - for (i = 0; i < nkeys; i++) { - htype = tcf_pedit_htype(a, i); - cmd = tcf_pedit_cmd(a, i); - mask = tcf_pedit_mask(a, i); - val = tcf_pedit_val(a, i); - offset = tcf_pedit_offset(a, i); - - if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) { - netdev_err(dev, "%s: Unsupported pedit cmd\n", - __func__); - return -EOPNOTSUPP; - } - - switch (htype) { - case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: - switch (offset) { - case PEDIT_ETH_DMAC_31_0: - case PEDIT_ETH_DMAC_47_32_SMAC_15_0: - case PEDIT_ETH_SMAC_47_16: - break; - default: - netdev_err(dev, "%s: Unsupported pedit field\n", - __func__); - return -EOPNOTSUPP; - } - break; - default: - netdev_err(dev, "%s: Unsupported pedit type\n", - __func__); - return -EOPNOTSUPP; - } - } + if (!pedit_valid) + return -EOPNOTSUPP; act_pedit = true; } else { netdev_err(dev, "%s: Unsupported action\n", __func__); @@ -503,8 +674,8 @@ int cxgb4_tc_flower_replace(struct net_device *dev, fs = &ch_flower->fs; fs->hitcnts = 1; - cxgb4_process_flow_actions(dev, cls, fs); cxgb4_process_flow_match(dev, cls, fs); + cxgb4_process_flow_actions(dev, cls, fs); fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET); if (fidx < 0) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h index a2acb782918f..202d5c9ec303 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h @@ -59,6 +59,25 @@ enum { ETH_DMAC_47_32, /* dmac bits 32..47 */ ETH_SMAC_15_0, /* smac bits 0.. 15 */ ETH_SMAC_47_16, /* smac bits 16..47 */ + + IP4_SRC, /* 32-bit IPv4 src */ + IP4_DST, /* 32-bit IPv4 dst */ + + IP6_SRC_31_0, /* src bits 0.. 31 */ + IP6_SRC_63_32, /* src bits 63.. 32 */ + IP6_SRC_95_64, /* src bits 95.. 64 */ + IP6_SRC_127_96, /* src bits 127..96 */ + + IP6_DST_31_0, /* dst bits 0.. 31 */ + IP6_DST_63_32, /* dst bits 63.. 32 */ + IP6_DST_95_64, /* dst bits 95.. 64 */ + IP6_DST_127_96, /* dst bits 127..96 */ + + TCP_SPORT, /* 16-bit TCP sport */ + TCP_DPORT, /* 16-bit TCP dport */ + + UDP_SPORT, /* 16-bit UDP sport */ + UDP_DPORT, /* 16-bit UDP dport */ }; struct ch_tc_pedit_fields { @@ -72,9 +91,22 @@ struct ch_tc_pedit_fields { offsetof(struct ch_filter_specification, fs_field) + (offset) } #define PEDIT_ETH_DMAC_MASK 0xffff +#define PEDIT_TCP_UDP_SPORT_MASK 0xffff #define PEDIT_ETH_DMAC_31_0 0x0 #define PEDIT_ETH_DMAC_47_32_SMAC_15_0 0x4 #define PEDIT_ETH_SMAC_47_16 0x8 +#define PEDIT_IP4_SRC 0xC +#define PEDIT_IP4_DST 0x10 +#define PEDIT_IP6_SRC_31_0 0x8 +#define PEDIT_IP6_SRC_63_32 0xC +#define PEDIT_IP6_SRC_95_64 0x10 +#define PEDIT_IP6_SRC_127_96 0x14 +#define PEDIT_IP6_DST_31_0 0x18 +#define PEDIT_IP6_DST_63_32 0x1C +#define PEDIT_IP6_DST_95_64 0x20 +#define PEDIT_IP6_DST_127_96 0x24 +#define PEDIT_TCP_SPORT_DPORT 0x0 +#define PEDIT_UDP_SPORT_DPORT 0x0 int cxgb4_tc_flower_replace(struct net_device *dev, struct tc_cls_flower_offload *cls); -- cgit v1.2.3 From 89d5dd2efdb26c78ab83be59390386cc21f8dd71 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Wed, 18 Oct 2017 09:56:55 -0700 Subject: mlxsw: spectrum: router: Add support for address validator notifier Add support for inetaddr_validator and inet6addr_validator. 
The notifiers provide a means for validating ipv4 and ipv6 addresses before the addresses are installed and on failure the error is propagated back to the user. Signed-off-by: David Ahern Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 15 ++++++- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 4 ++ .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 52 ++++++++++++++++++++++ 3 files changed, 70 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index e1e11c726c16..e6519f2906a4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -4521,9 +4521,16 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb, return notifier_from_errno(err); } +static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = { + .notifier_call = mlxsw_sp_inetaddr_valid_event, +}; + static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = { .notifier_call = mlxsw_sp_inetaddr_event, - .priority = 10, /* Must be called before FIB notifier block */ +}; + +static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = { + .notifier_call = mlxsw_sp_inet6addr_valid_event, }; static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = { @@ -4548,7 +4555,9 @@ static int __init mlxsw_sp_module_init(void) { int err; + register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); + register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); register_netevent_notifier(&mlxsw_sp_router_netevent_nb); @@ -4567,7 +4576,9 @@ err_pci_driver_register: err_core_driver_register: unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb); unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); + unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); + unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); return err; } @@ -4577,7 +4588,9 @@ static void __exit mlxsw_sp_module_exit(void) mlxsw_core_driver_unregister(&mlxsw_sp_driver); unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb); unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); + unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); + unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); } module_init(mlxsw_sp_module_init); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 28feb745a38a..2a2472a09d8c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -391,8 +391,12 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused, int mlxsw_sp_netdevice_router_port_event(struct net_device *dev); int mlxsw_sp_inetaddr_event(struct notifier_block *unused, unsigned long event, void *ptr); +int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused, + unsigned long event, void *ptr); int mlxsw_sp_inet6addr_event(struct notifier_block *unused, unsigned long event, void *ptr); +int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused, + unsigned long event, void *ptr); int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, struct 
netdev_notifier_changeupper_info *info); bool mlxsw_sp_netdev_is_ipip(const struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 3330120f2f8e..66bab9ce2881 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -5781,6 +5781,32 @@ int mlxsw_sp_inetaddr_event(struct notifier_block *unused, struct mlxsw_sp_rif *rif; int err = 0; + /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */ + if (event == NETDEV_UP) + goto out; + + mlxsw_sp = mlxsw_sp_lower_get(dev); + if (!mlxsw_sp) + goto out; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!mlxsw_sp_rif_should_config(rif, dev, event)) + goto out; + + err = __mlxsw_sp_inetaddr_event(dev, event); +out: + return notifier_from_errno(err); +} + +int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct in_validator_info *ivi = (struct in_validator_info *) ptr; + struct net_device *dev = ivi->ivi_dev->dev; + struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_rif *rif; + int err = 0; + mlxsw_sp = mlxsw_sp_lower_get(dev); if (!mlxsw_sp) goto out; @@ -5833,6 +5859,10 @@ int mlxsw_sp_inet6addr_event(struct notifier_block *unused, struct mlxsw_sp_inet6addr_event_work *inet6addr_work; struct net_device *dev = if6->idev->dev; + /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */ + if (event == NETDEV_UP) + return NOTIFY_DONE; + if (!mlxsw_sp_port_dev_lower_find_rcu(dev)) return NOTIFY_DONE; @@ -5849,6 +5879,28 @@ int mlxsw_sp_inet6addr_event(struct notifier_block *unused, return NOTIFY_DONE; } +int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr; + struct net_device *dev = i6vi->i6vi_dev->dev; + struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_rif *rif; + int err = 0; + + mlxsw_sp = mlxsw_sp_lower_get(dev); + if (!mlxsw_sp) + goto out; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!mlxsw_sp_rif_should_config(rif, dev, event)) + goto out; + + err = __mlxsw_sp_inetaddr_event(dev, event); +out: + return notifier_from_errno(err); +} + static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index, const char *mac, int mtu) { -- cgit v1.2.3 From f8fa9b4e6da77311791c7150a6ecc9368396df3b Mon Sep 17 00:00:00 2001 From: David Ahern Date: Wed, 18 Oct 2017 09:56:56 -0700 Subject: mlxsw: spectrum_router: Add extack message for RIF and VRF overflow Add extack argument down to mlxsw_sp_rif_create and mlxsw_sp_vr_create to set an error message on RIF or VR overflow. Now on overflow of either resource the user gets an informative message as opposed to failing with EBUSY. Signed-off-by: David Ahern Reviewed-by: Ido Schimmel Signed-off-by: David S. 
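Miller

One point worth making explicit before the diff: extack is threaded through as an optional argument. Paths that originate from netlink (the address validator notifiers, CHANGEUPPER) pass the notifier's extack down, while internal callers pass NULL; NL_SET_ERR_MSG() is a no-op on a NULL extack, so one helper serves both. A condensed sketch of the shape — the names below are generic stand-ins, the real call sites being mlxsw_sp_vr_create() and mlxsw_sp_rif_create() in the diff:

/* Sketch only: on resource exhaustion, attach a human-readable message
 * to the extended ack instead of returning a bare errno; userspace
 * (e.g. iproute2) prints it next to the error code. extack may be NULL.
 */
static struct foo_resource *foo_resource_create(struct foo *ctx,
						struct netlink_ext_ack *extack)
{
	struct foo_resource *res = foo_resource_find_unused(ctx);

	if (!res) {
		NL_SET_ERR_MSG(extack, "Exceeded number of supported resources");
		return ERR_PTR(-EBUSY);
	}
	return res;
}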
--- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 114 +++++++++++++-------- 1 file changed, 69 insertions(+), 45 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 66bab9ce2881..2420f69797a9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -731,14 +731,17 @@ static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr, } static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, - u32 tb_id) + u32 tb_id, + struct netlink_ext_ack *extack) { struct mlxsw_sp_vr *vr; int err; vr = mlxsw_sp_vr_find_unused(mlxsw_sp); - if (!vr) + if (!vr) { + NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers"); return ERR_PTR(-EBUSY); + } vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4); if (IS_ERR(vr->fib4)) return ERR_CAST(vr->fib4); @@ -775,14 +778,15 @@ static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr) vr->fib4 = NULL; } -static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id) +static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, + struct netlink_ext_ack *extack) { struct mlxsw_sp_vr *vr; tb_id = mlxsw_sp_fix_tb_id(tb_id); vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id); if (!vr) - vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id); + vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack); return vr; } @@ -948,7 +952,8 @@ static u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev) static struct mlxsw_sp_rif * mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, - const struct mlxsw_sp_rif_params *params); + const struct mlxsw_sp_rif_params *params, + struct netlink_ext_ack *extack); static struct mlxsw_sp_rif_ipip_lb * mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp, @@ -966,7 +971,7 @@ mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp, .lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev), }; - rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common); + rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, NULL); if (IS_ERR(rif)) return ERR_CAST(rif); return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common); @@ -3836,7 +3841,7 @@ mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr, struct mlxsw_sp_vr *vr; int err; - vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id); + vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL); if (IS_ERR(vr)) return ERR_CAST(vr); fib = mlxsw_sp_vr_fib(vr, proto); @@ -4875,7 +4880,7 @@ static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp, if (mlxsw_sp->router->aborted) return 0; - vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id); + vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL); if (IS_ERR(vr)) return PTR_ERR(vr); @@ -4908,7 +4913,7 @@ mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp, if (mlxsw_sp->router->aborted) return 0; - vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id); + vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL); if (IS_ERR(vr)) return PTR_ERR(vr); @@ -5471,7 +5476,8 @@ const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif) static struct mlxsw_sp_rif * mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, - const struct mlxsw_sp_rif_params *params) + const struct mlxsw_sp_rif_params *params, + struct netlink_ext_ack *extack) { u32 tb_id = l3mdev_fib_table(params->dev); const struct mlxsw_sp_rif_ops *ops; @@ -5485,14 +5491,16 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, type
= mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev); ops = mlxsw_sp->router->rif_ops_arr[type]; - vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN); + vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack); if (IS_ERR(vr)) return ERR_CAST(vr); vr->rif_count++; err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index); - if (err) + if (err) { + NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported router interfaces"); goto err_rif_index_alloc; + } rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev); if (!rif) { @@ -5579,7 +5587,8 @@ mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params, static int mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, - struct net_device *l3_dev) + struct net_device *l3_dev, + struct netlink_ext_ack *extack) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; @@ -5595,7 +5604,7 @@ mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, }; mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan); - rif = mlxsw_sp_rif_create(mlxsw_sp, &params); + rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack); if (IS_ERR(rif)) return PTR_ERR(rif); } @@ -5650,7 +5659,8 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev, struct net_device *port_dev, - unsigned long event, u16 vid) + unsigned long event, u16 vid, + struct netlink_ext_ack *extack) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev); struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; @@ -5662,7 +5672,7 @@ static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev, switch (event) { case NETDEV_UP: return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, - l3_dev); + l3_dev, extack); case NETDEV_DOWN: mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); break; @@ -5672,19 +5682,22 @@ static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev, } static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev, - unsigned long event) + unsigned long event, + struct netlink_ext_ack *extack) { if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev) || netif_is_ovs_port(port_dev)) return 0; - return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1); + return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1, + extack); } static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev, struct net_device *lag_dev, - unsigned long event, u16 vid) + unsigned long event, u16 vid, + struct netlink_ext_ack *extack) { struct net_device *port_dev; struct list_head *iter; @@ -5694,7 +5707,8 @@ static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev, if (mlxsw_sp_port_dev_check(port_dev)) { err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev, port_dev, - event, vid); + event, vid, + extack); if (err) return err; } @@ -5704,16 +5718,19 @@ static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev, } static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev, - unsigned long event) + unsigned long event, + struct netlink_ext_ack *extack) { if (netif_is_bridge_port(lag_dev)) return 0; - return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1); + return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1, + extack); } static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev, - unsigned long event) + unsigned long event, + struct netlink_ext_ack *extack)
{ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev); struct mlxsw_sp_rif_params params = { @@ -5723,7 +5740,7 @@ static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev, switch (event) { case NETDEV_UP: - rif = mlxsw_sp_rif_create(mlxsw_sp, &params); + rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack); if (IS_ERR(rif)) return PTR_ERR(rif); break; @@ -5737,7 +5754,8 @@ static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev, } static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev, - unsigned long event) + unsigned long event, + struct netlink_ext_ack *extack) { struct net_device *real_dev = vlan_dev_real_dev(vlan_dev); u16 vid = vlan_dev_vlan_id(vlan_dev); @@ -5747,27 +5765,28 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev, if (mlxsw_sp_port_dev_check(real_dev)) return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev, - event, vid); + event, vid, extack); else if (netif_is_lag_master(real_dev)) return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event, - vid); + vid, extack); else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev)) - return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event); + return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack); return 0; } static int __mlxsw_sp_inetaddr_event(struct net_device *dev, - unsigned long event) + unsigned long event, + struct netlink_ext_ack *extack) { if (mlxsw_sp_port_dev_check(dev)) - return mlxsw_sp_inetaddr_port_event(dev, event); + return mlxsw_sp_inetaddr_port_event(dev, event, extack); else if (netif_is_lag_master(dev)) - return mlxsw_sp_inetaddr_lag_event(dev, event); + return mlxsw_sp_inetaddr_lag_event(dev, event, extack); else if (netif_is_bridge_master(dev)) - return mlxsw_sp_inetaddr_bridge_event(dev, event); + return mlxsw_sp_inetaddr_bridge_event(dev, event, extack); else if (is_vlan_dev(dev)) - return mlxsw_sp_inetaddr_vlan_event(dev, event); + return mlxsw_sp_inetaddr_vlan_event(dev, event, extack); else return 0; } @@ -5793,7 +5812,7 @@ int mlxsw_sp_inetaddr_event(struct notifier_block *unused, if (!mlxsw_sp_rif_should_config(rif, dev, event)) goto out; - err = __mlxsw_sp_inetaddr_event(dev, event); + err = __mlxsw_sp_inetaddr_event(dev, event, NULL); out: return notifier_from_errno(err); } @@ -5815,7 +5834,7 @@ int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused, if (!mlxsw_sp_rif_should_config(rif, dev, event)) goto out; - err = __mlxsw_sp_inetaddr_event(dev, event); + err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack); out: return notifier_from_errno(err); } @@ -5844,7 +5863,7 @@ static void mlxsw_sp_inet6addr_event_work(struct work_struct *work) if (!mlxsw_sp_rif_should_config(rif, dev, event)) goto out; - __mlxsw_sp_inetaddr_event(dev, event); + __mlxsw_sp_inetaddr_event(dev, event, NULL); out: rtnl_unlock(); dev_put(dev); @@ -5896,7 +5915,7 @@ int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused, if (!mlxsw_sp_rif_should_config(rif, dev, event)) goto out; - err = __mlxsw_sp_inetaddr_event(dev, event); + err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack); out: return notifier_from_errno(err); } @@ -5973,7 +5992,8 @@ err_rif_edit: } static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp, - struct net_device *l3_dev) + struct net_device *l3_dev, + struct netlink_ext_ack *extack) { struct mlxsw_sp_rif *rif; @@ -5982,9 +6002,9 @@ static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp, */ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); if (rif) - __mlxsw_sp_inetaddr_event(l3_dev,
NETDEV_DOWN); + __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack); - return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP); + return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack); } static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp, @@ -5995,7 +6015,7 @@ static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp, rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); if (!rif) return; - __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN); + __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL); } int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, @@ -6011,10 +6031,14 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, case NETDEV_PRECHANGEUPPER: return 0; case NETDEV_CHANGEUPPER: - if (info->linking) - err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev); - else + if (info->linking) { + struct netlink_ext_ack *extack; + + extack = netdev_notifier_info_to_extack(&info->info); + err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack); + } else { mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev); + } break; } @@ -6321,7 +6345,7 @@ mlxsw_sp_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif) struct mlxsw_sp_vr *ul_vr; int err; - ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id); + ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL); if (IS_ERR(ul_vr)) return PTR_ERR(ul_vr); -- cgit v1.2.3 From 3c75f9b1b4de0a14f11b7e71a6bdc30b65648f20 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Wed, 18 Oct 2017 15:01:38 -0700 Subject: spectrum: Convert fib event handlers to use container_of on info arg Use container_of to convert the generic fib_notifier_info into the event specific data structure. Signed-off-by: David Ahern Reviewed-by: Ido Schimmel Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 29 +++++++++++++++++----- 1 file changed, 23 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 2420f69797a9..12d471d2a90b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -5209,25 +5209,35 @@ static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work) static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work, struct fib_notifier_info *info) { + struct fib_entry_notifier_info *fen_info; + struct fib_rule_notifier_info *fr_info; + struct fib_nh_notifier_info *fnh_info; + switch (fib_work->event) { case FIB_EVENT_ENTRY_REPLACE: /* fall through */ case FIB_EVENT_ENTRY_APPEND: /* fall through */ case FIB_EVENT_ENTRY_ADD: /* fall through */ case FIB_EVENT_ENTRY_DEL: - memcpy(&fib_work->fen_info, info, sizeof(fib_work->fen_info)); - /* Take referece on fib_info to prevent it from being + fen_info = container_of(info, struct fib_entry_notifier_info, + info); + fib_work->fen_info = *fen_info; + /* Take reference on fib_info to prevent it from being * freed while work is queued. Release it afterwards. 
*/ fib_info_hold(fib_work->fen_info.fi); break; case FIB_EVENT_RULE_ADD: /* fall through */ case FIB_EVENT_RULE_DEL: - memcpy(&fib_work->fr_info, info, sizeof(fib_work->fr_info)); + fr_info = container_of(info, struct fib_rule_notifier_info, + info); + fib_work->fr_info = *fr_info; fib_rule_get(fib_work->fr_info.rule); break; case FIB_EVENT_NH_ADD: /* fall through */ case FIB_EVENT_NH_DEL: - memcpy(&fib_work->fnh_info, info, sizeof(fib_work->fnh_info)); + fnh_info = container_of(info, struct fib_nh_notifier_info, + info); + fib_work->fnh_info = *fnh_info; fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent); break; } @@ -5236,16 +5246,23 @@ static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work, static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work, struct fib_notifier_info *info) { + struct fib6_entry_notifier_info *fen6_info; + struct fib_rule_notifier_info *fr_info; + switch (fib_work->event) { case FIB_EVENT_ENTRY_REPLACE: /* fall through */ case FIB_EVENT_ENTRY_ADD: /* fall through */ case FIB_EVENT_ENTRY_DEL: - memcpy(&fib_work->fen6_info, info, sizeof(fib_work->fen6_info)); + fen6_info = container_of(info, struct fib6_entry_notifier_info, + info); + fib_work->fen6_info = *fen6_info; rt6_hold(fib_work->fen6_info.rt); break; case FIB_EVENT_RULE_ADD: /* fall through */ case FIB_EVENT_RULE_DEL: - memcpy(&fib_work->fr_info, info, sizeof(fib_work->fr_info)); + fr_info = container_of(info, struct fib_rule_notifier_info, + info); + fib_work->fr_info = *fr_info; fib_rule_get(fib_work->fr_info.rule); break; } -- cgit v1.2.3 From eb49cfaa6bfc164e53db72cde0058ef4768e05a8 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 19 Oct 2017 15:50:37 +0200 Subject: mlxsw: spectrum: Convert ndo_setup_tc offloads to block callbacks Benefit from the newly introduced block callback infrastructure and convert ndo_setup_tc calls for matchall and flower offloads to block callbacks. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
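Miller

This and the remaining conversions in the series all follow one template, so it is worth spelling it out once. The classid-based ingress/egress test disappears because the block's binder type now carries that information, and the driver context travels through cb_priv instead of being re-derived from the netdev. A condensed sketch of the template — "foo" is a generic stand-in, not any one driver's code; mlxsw below is the one driver that also accepts the egress binder type, via ingress and egress variants of the callback:

static int foo_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	struct foo_priv *priv = cb_priv;	/* registered at bind time */

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return foo_setup_tc_cls_flower(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int foo_setup_tc_block(struct net_device *dev,
			      struct tc_block_offload *f)
{
	struct foo_priv *priv = netdev_priv(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, foo_setup_tc_block_cb,
					     priv, priv);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, foo_setup_tc_block_cb, priv);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

The "return 0; /* will be removed after conversion from ndo */" left behind in each ndo_setup_tc keeps the old classifier cases compiling until the ndo path is deleted.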
--- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 82 +++++++++++++++++++------- 1 file changed, 60 insertions(+), 22 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index e6519f2906a4..7b60c75d96f4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1697,17 +1697,9 @@ static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, } static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, - struct tc_cls_matchall_offload *f) + struct tc_cls_matchall_offload *f, + bool ingress) { - bool ingress; - - if (is_classid_clsact_ingress(f->common.classid)) - ingress = true; - else if (is_classid_clsact_egress(f->common.classid)) - ingress = false; - else - return -EOPNOTSUPP; - if (f->common.chain_index) return -EOPNOTSUPP; @@ -1725,17 +1717,9 @@ static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, static int mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port, - struct tc_cls_flower_offload *f) + struct tc_cls_flower_offload *f, + bool ingress) { - bool ingress; - - if (is_classid_clsact_ingress(f->common.classid)) - ingress = true; - else if (is_classid_clsact_egress(f->common.classid)) - ingress = false; - else - return -EOPNOTSUPP; - switch (f->command) { case TC_CLSFLOWER_REPLACE: return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f); @@ -1749,6 +1733,59 @@ mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port, } } +static int mlxsw_sp_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv, bool ingress) +{ + struct mlxsw_sp_port *mlxsw_sp_port = cb_priv; + + switch (type) { + case TC_SETUP_CLSMATCHALL: + return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data, + ingress); + case TC_SETUP_CLSFLOWER: + return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data, + ingress); + default: + return -EOPNOTSUPP; + } +} + +static int mlxsw_sp_setup_tc_block_cb_ig(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, true); +} + +static int mlxsw_sp_setup_tc_block_cb_eg(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + return mlxsw_sp_setup_tc_block_cb(type, type_data, cb_priv, false); +} + +static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_block_offload *f) +{ + tc_setup_cb_t *cb; + + if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + cb = mlxsw_sp_setup_tc_block_cb_ig; + else if (f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) + cb = mlxsw_sp_setup_tc_block_cb_eg; + else + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, cb, mlxsw_sp_port, + mlxsw_sp_port); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, cb, mlxsw_sp_port); + return 0; + default: + return -EOPNOTSUPP; + } +} + static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { @@ -1756,9 +1793,10 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, switch (type) { case TC_SETUP_CLSMATCHALL: - return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data); case TC_SETUP_CLSFLOWER: - return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data); + return 0; /* will be removed after conversion from ndo */ + case TC_SETUP_BLOCK: + return mlxsw_sp_setup_tc_block(mlxsw_sp_port,
type_data); default: return -EOPNOTSUPP; } -- cgit v1.2.3 From d6c862baaf9fb27a385a101c7e86b6112bfd8f07 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 19 Oct 2017 15:50:38 +0200 Subject: mlx5e: Convert ndo_setup_tc offloads to block callbacks Benefit from the newly introduced block callback infrastructure and convert ndo_setup_tc calls for flower offloads to block callbacks. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 4 +- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 45 ++++++++++++++++++++--- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 24 +++++------- 3 files changed, 51 insertions(+), 22 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index ca8845b641c0..e613ce02216d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -1056,8 +1056,8 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv, struct ethtool_flash *flash); -int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, - void *type_data); +int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv); /* mlx5e generic netdev management API */ struct net_device* diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 3a1969a6d509..e8108688a7cf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3083,13 +3083,10 @@ out: } #ifdef CONFIG_MLX5_ESWITCH -static int mlx5e_setup_tc_cls_flower(struct net_device *dev, +static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *cls_flower) { - struct mlx5e_priv *priv = netdev_priv(dev); - - if (!is_classid_clsact_ingress(cls_flower->common.classid) || - cls_flower->common.chain_index) + if (cls_flower->common.chain_index) return -EOPNOTSUPP; switch (cls_flower->command) { @@ -3103,6 +3100,40 @@ static int mlx5e_setup_tc_cls_flower(struct net_device *dev, return -EOPNOTSUPP; } } + +int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct mlx5e_priv *priv = cb_priv; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return mlx5e_setup_tc_cls_flower(priv, type_data); + default: + return -EOPNOTSUPP; + } +} + +static int mlx5e_setup_tc_block(struct net_device *dev, + struct tc_block_offload *f) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb, + priv, priv); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb, + priv); + return 0; + default: + return -EOPNOTSUPP; + } +} #endif int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, @@ -3111,7 +3142,9 @@ int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, switch (type) { #ifdef CONFIG_MLX5_ESWITCH case TC_SETUP_CLSFLOWER: - return mlx5e_setup_tc_cls_flower(dev, type_data); + return 0; /* will be removed after conversion from ndo */ + case TC_SETUP_BLOCK: + return mlx5e_setup_tc_block(dev, type_data); #endif case TC_SETUP_MQPRIO: return mlx5e_setup_tc_mqprio(dev, type_data); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 765fc74fbb1b..4edd92d9e1de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -691,14 +691,6 @@ static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, } } -static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data, - void *cb_priv) -{ - struct net_device *dev = cb_priv; - - return mlx5e_setup_tc(dev, type, type_data); -} - bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; @@ -987,6 +979,7 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) { struct mlx5e_rep_priv *rpriv; struct net_device *netdev; + struct mlx5e_priv *upriv; int err; rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL); @@ -1018,8 +1011,9 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) goto err_detach_netdev; } - err = tc_setup_cb_egdev_register(netdev, mlx5e_rep_setup_tc_cb, - mlx5_eswitch_get_uplink_netdev(esw)); + upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw)); + err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb, + upriv); if (err) goto err_neigh_cleanup; @@ -1033,8 +1027,8 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) return 0; err_egdev_cleanup: - tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb, - mlx5_eswitch_get_uplink_netdev(esw)); + tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb, + upriv); err_neigh_cleanup: mlx5e_rep_neigh_cleanup(rpriv); @@ -1055,10 +1049,12 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep) struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_rep_priv *rpriv = priv->ppriv; void *ppriv = priv->ppriv; + struct mlx5e_priv *upriv; unregister_netdev(rep->netdev); - tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb, - mlx5_eswitch_get_uplink_netdev(esw)); + upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw)); + tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb, + upriv); mlx5e_rep_neigh_cleanup(rpriv); mlx5e_detach_netdev(priv); mlx5e_destroy_netdev(priv); -- cgit v1.2.3 From 9e0fd15dd6c981931a9e9f11dc0c940d17d6e051 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 19 Oct 2017 15:50:39 +0200 Subject: bnxt: Convert ndo_setup_tc offloads to block callbacks Benefit from the newly introduced block callback infrastructure and convert ndo_setup_tc calls for flower offloads to block callbacks. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 37 +++++++++++++++++++---- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 3 +- drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 43 +++++++++++++++++++++++++-- 3 files changed, 73 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 5ba49938ba55..4dde2b816092 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7295,15 +7295,40 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) return 0; } -static int bnxt_setup_flower(struct net_device *dev, - struct tc_cls_flower_offload *cls_flower) +static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) { - struct bnxt *bp = netdev_priv(dev); + struct bnxt *bp = cb_priv; if (BNXT_VF(bp)) return -EOPNOTSUPP; - return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, cls_flower); + switch (type) { + case TC_SETUP_CLSFLOWER: + return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data); + default: + return -EOPNOTSUPP; + } +} + +static int bnxt_setup_tc_block(struct net_device *dev, + struct tc_block_offload *f) +{ + struct bnxt *bp = netdev_priv(dev); + + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb, + bp, bp); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp); + return 0; + default: + return -EOPNOTSUPP; + } } static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, @@ -7311,7 +7336,9 @@ static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, { switch (type) { case TC_SETUP_CLSFLOWER: - return bnxt_setup_flower(dev, type_data); + return 0; /* will be removed after conversion from ndo */ + case TC_SETUP_BLOCK: + return bnxt_setup_tc_block(dev, type_data); case TC_SETUP_MQPRIO: { struct tc_mqprio_qopt *mqprio = type_data; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 4730c048ed9b..a9cb653b4d29 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -748,8 +748,7 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, { int rc = 0; - if (!is_classid_clsact_ingress(cls_flower->common.classid) || - cls_flower->common.chain_index) + if (cls_flower->common.chain_index) return -EOPNOTSUPP; switch (cls_flower->command) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index e75db04c6cdc..cc278d7b56a4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -115,10 +115,11 @@ bnxt_vf_rep_get_stats64(struct net_device *dev, stats->tx_bytes = vf_rep->tx_stats.bytes; } -static int bnxt_vf_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, - void *type_data) +static int bnxt_vf_rep_setup_tc_block_cb(enum tc_setup_type type, + void *type_data, + void *cb_priv) { - struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + struct bnxt_vf_rep *vf_rep = cb_priv; struct bnxt *bp = vf_rep->bp; int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid; @@ -130,6 +131,42 @@ static int bnxt_vf_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, } } +static int bnxt_vf_rep_setup_tc_block(struct net_device *dev, + struct tc_block_offload *f) +{ + struct bnxt_vf_rep *vf_rep = 
netdev_priv(dev); + + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, + bnxt_vf_rep_setup_tc_block_cb, + vf_rep, vf_rep); + return 0; + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, + bnxt_vf_rep_setup_tc_block_cb, vf_rep); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int bnxt_vf_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_CLSFLOWER: + return 0; /* will be removed after conversion from ndo */ + case TC_SETUP_BLOCK: + return bnxt_vf_rep_setup_tc_block(dev, type_data); + default: + return -EOPNOTSUPP; + } +} + struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code) { u16 vf_idx; -- cgit v1.2.3 From cd019e91a837d83f599e0216794bdf1ad6fec053 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 19 Oct 2017 15:50:40 +0200 Subject: cxgb4: Convert ndo_setup_tc offloads to block callbacks Benefit from the newly introduced block callback infrastructure and convert ndo_setup_tc calls for flower and u32 offloads to block callbacks. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 45 +++++++++++++++++++++---- 1 file changed, 39 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index c478291db93f..3cd82aea3bb6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2889,8 +2889,7 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) static int cxgb_setup_tc_flower(struct net_device *dev, struct tc_cls_flower_offload *cls_flower) { - if (!is_classid_clsact_ingress(cls_flower->common.classid) || - cls_flower->common.chain_index) + if (cls_flower->common.chain_index) return -EOPNOTSUPP; switch (cls_flower->command) { @@ -2908,8 +2907,7 @@ static int cxgb_setup_tc_flower(struct net_device *dev, static int cxgb_setup_tc_cls_u32(struct net_device *dev, struct tc_cls_u32_offload *cls_u32) { - if (!is_classid_clsact_ingress(cls_u32->common.classid) || - cls_u32->common.chain_index) + if (cls_u32->common.chain_index) return -EOPNOTSUPP; switch (cls_u32->command) { @@ -2923,9 +2921,10 @@ static int cxgb_setup_tc_cls_u32(struct net_device *dev, } } -static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, - void *type_data) +static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) { + struct net_device *dev = cb_priv; struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); @@ -2946,6 +2945,40 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, } } +static int cxgb_setup_tc_block(struct net_device *dev, + struct tc_block_offload *f) +{ + struct port_info *pi = netdev2pinfo(dev); + + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb, + pi, dev); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_CLSU32: + case TC_SETUP_CLSFLOWER: + return 0; /* will 
be removed after conversion from ndo */ + case TC_SETUP_BLOCK: + return cxgb_setup_tc_block(dev, type_data); + default: + return -EOPNOTSUPP; + } +} + static netdev_features_t cxgb_fix_features(struct net_device *dev, netdev_features_t features) { -- cgit v1.2.3 From 6ea30f8a972c063c1072d7e7accc6a550a36770a Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 19 Oct 2017 15:50:41 +0200 Subject: ixgbe: Convert ndo_setup_tc offloads to block callbacks Benefit from the newly introduced block callback infrastructure and convert ndo_setup_tc calls for u32 offloads to block callbacks. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 45 +++++++++++++++++++++++---- 1 file changed, 39 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 3e83edd10e23..38e01e0c8314 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9365,13 +9365,10 @@ free_jump: return err; } -static int ixgbe_setup_tc_cls_u32(struct net_device *dev, +static int ixgbe_setup_tc_cls_u32(struct ixgbe_adapter *adapter, struct tc_cls_u32_offload *cls_u32) { - struct ixgbe_adapter *adapter = netdev_priv(dev); - - if (!is_classid_clsact_ingress(cls_u32->common.classid) || - cls_u32->common.chain_index) + if (cls_u32->common.chain_index) return -EOPNOTSUPP; switch (cls_u32->command) { @@ -9390,6 +9387,40 @@ static int ixgbe_setup_tc_cls_u32(struct net_device *dev, } } +static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct ixgbe_adapter *adapter = cb_priv; + + switch (type) { + case TC_SETUP_CLSU32: + return ixgbe_setup_tc_cls_u32(adapter, type_data); + default: + return -EOPNOTSUPP; + } +} + +static int ixgbe_setup_tc_block(struct net_device *dev, + struct tc_block_offload *f) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, ixgbe_setup_tc_block_cb, + adapter, adapter); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, ixgbe_setup_tc_block_cb, + adapter); + return 0; + default: + return -EOPNOTSUPP; + } +} + static int ixgbe_setup_tc_mqprio(struct net_device *dev, struct tc_mqprio_qopt *mqprio) { @@ -9402,7 +9433,9 @@ static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type, { switch (type) { case TC_SETUP_CLSU32: - return ixgbe_setup_tc_cls_u32(dev, type_data); + return 0; /* will be removed after conversion from ndo */ + case TC_SETUP_BLOCK: + return ixgbe_setup_tc_block(dev, type_data); case TC_SETUP_MQPRIO: return ixgbe_setup_tc_mqprio(dev, type_data); default: -- cgit v1.2.3 From 855afa0932086851e6c7df39ac3d2cd2247cc8ee Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 19 Oct 2017 15:50:42 +0200 Subject: mlx5e_rep: Convert ndo_setup_tc offloads to block callbacks Benefit from the newly introduced block callback infrastructure and convert ndo_setup_tc calls for flower offloads to block callbacks. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 44 ++++++++++++++++++++---- 1 file changed, 38 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 4edd92d9e1de..f59d81aa8a0c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -659,13 +659,10 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev, } static int -mlx5e_rep_setup_tc_cls_flower(struct net_device *dev, +mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *cls_flower) { - struct mlx5e_priv *priv = netdev_priv(dev); - - if (!is_classid_clsact_ingress(cls_flower->common.classid) || - cls_flower->common.chain_index) + if (cls_flower->common.chain_index) return -EOPNOTSUPP; switch (cls_flower->command) { @@ -680,12 +677,47 @@ mlx5e_rep_setup_tc_cls_flower(struct net_device *dev, } } +static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct mlx5e_priv *priv = cb_priv; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return mlx5e_rep_setup_tc_cls_flower(priv, type_data); + default: + return -EOPNOTSUPP; + } +} + +static int mlx5e_rep_setup_tc_block(struct net_device *dev, + struct tc_block_offload *f) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb, + priv, priv); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv); + return 0; + default: + return -EOPNOTSUPP; + } +} + static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { switch (type) { case TC_SETUP_CLSFLOWER: - return mlx5e_rep_setup_tc_cls_flower(dev, type_data); + return 0; /* will be removed after conversion from ndo */ + case TC_SETUP_BLOCK: + return mlx5e_rep_setup_tc_block(dev, type_data); default: return -EOPNOTSUPP; } -- cgit v1.2.3 From 363fc53b8b5803ce43debc1e66cebaaf509fd20a Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 19 Oct 2017 15:50:43 +0200 Subject: nfp: flower: Convert ndo_setup_tc offloads to block callbacks Benefit from the newly introduced block callback infrastructure and convert ndo_setup_tc calls for flower offloads to block callbacks. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/netronome/nfp/flower/offload.c | 56 ++++++++++++++++++---- 1 file changed, 48 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 6f239c27964e..f8523df827a6 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -449,6 +449,10 @@ static int nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, struct tc_cls_flower_offload *flower) { + if (!eth_proto_is_802_3(flower->common.protocol) || + flower->common.chain_index) + return -EOPNOTSUPP; + switch (flower->command) { case TC_CLSFLOWER_REPLACE: return nfp_flower_add_offload(app, netdev, flower); @@ -461,16 +465,52 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, return -EOPNOTSUPP; } -int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, - enum tc_setup_type type, void *type_data) +static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + struct nfp_net *nn = cb_priv; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return nfp_flower_repr_offload(nn->app, nn->port->netdev, + type_data); + default: + return -EOPNOTSUPP; + } +} + +static int nfp_flower_setup_tc_block(struct net_device *netdev, + struct tc_block_offload *f) { - struct tc_cls_flower_offload *cls_flower = type_data; + struct nfp_net *nn = netdev_priv(netdev); - if (type != TC_SETUP_CLSFLOWER || - !is_classid_clsact_ingress(cls_flower->common.classid) || - !eth_proto_is_802_3(cls_flower->common.protocol) || - cls_flower->common.chain_index) + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; - return nfp_flower_repr_offload(app, netdev, cls_flower); + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, + nfp_flower_setup_tc_block_cb, + nn, nn); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, + nfp_flower_setup_tc_block_cb, + nn); + return 0; + default: + return -EOPNOTSUPP; + } +} + +int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, + enum tc_setup_type type, void *type_data) +{ + switch (type) { + case TC_SETUP_CLSFLOWER: + return 0; /* will be removed after conversion from ndo */ + case TC_SETUP_BLOCK: + return nfp_flower_setup_tc_block(netdev, type_data); + default: + return -EOPNOTSUPP; + } } -- cgit v1.2.3 From 90d97315b3e774450f06c035903fed246781fe35 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 19 Oct 2017 15:50:44 +0200 Subject: nfp: bpf: Convert ndo_setup_tc offloads to block callbacks Benefit from the newly introduced block callback infrastructure and convert ndo_setup_tc calls for bpf offloads to block callbacks. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/bpf/main.c | 54 ++++++++++++++++++++++----- 1 file changed, 45 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 6e74f8db1cc1..64f97b3f8949 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -114,22 +114,58 @@ static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn) kfree(nn->app_priv); } -static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev, - enum tc_setup_type type, void *type_data) +static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) { struct tc_cls_bpf_offload *cls_bpf = type_data; + struct nfp_net *nn = cb_priv; + + switch (type) { + case TC_SETUP_CLSBPF: + if (!nfp_net_ebpf_capable(nn) || + cls_bpf->common.protocol != htons(ETH_P_ALL) || + cls_bpf->common.chain_index) + return -EOPNOTSUPP; + return nfp_net_bpf_offload(nn, cls_bpf); + default: + return -EOPNOTSUPP; + } +} + +static int nfp_bpf_setup_tc_block(struct net_device *netdev, + struct tc_block_offload *f) +{ struct nfp_net *nn = netdev_priv(netdev); - if (type != TC_SETUP_CLSBPF || !nfp_net_ebpf_capable(nn) || - !is_classid_clsact_ingress(cls_bpf->common.classid) || - cls_bpf->common.protocol != htons(ETH_P_ALL) || - cls_bpf->common.chain_index) + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; - if (nn->dp.bpf_offload_xdp) - return -EBUSY; + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, + nfp_bpf_setup_tc_block_cb, + nn, nn); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, + nfp_bpf_setup_tc_block_cb, + nn); + return 0; + default: + return -EOPNOTSUPP; + } +} - return nfp_net_bpf_offload(nn, cls_bpf); +static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev, + enum tc_setup_type type, void *type_data) +{ + switch (type) { + case TC_SETUP_CLSBPF: + return 0; /* will be removed after conversion from ndo */ + case TC_SETUP_BLOCK: + return nfp_bpf_setup_tc_block(netdev, type_data); + default: + return -EOPNOTSUPP; + } } static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn) -- cgit v1.2.3 From 8d26d5636dff9fca30816579910aaa9a55b4d96d Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 19 Oct 2017 15:50:46 +0200 Subject: net: sched: avoid ndo_setup_tc calls for TC_SETUP_CLS* All drivers are converted to use block callbacks for TC_SETUP_CLS*. So it is now safe to remove the calls to ndo_setup_tc from cls_* Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 -- drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 2 -- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 3 --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 -- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 -- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 2 -- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 3 --- drivers/net/ethernet/netronome/nfp/bpf/main.c | 2 -- .../net/ethernet/netronome/nfp/flower/offload.c | 2 -- net/dsa/slave.c | 2 -- net/sched/cls_bpf.c | 14 ---------- net/sched/cls_flower.c | 20 -------------- net/sched/cls_matchall.c | 16 ----------- net/sched/cls_u32.c | 31 ---------------------- 14 files changed, 103 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 4dde2b816092..22a94b16ebfa 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7335,8 +7335,6 @@ static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { switch (type) { - case TC_SETUP_CLSFLOWER: - return 0; /* will be removed after conversion from ndo */ case TC_SETUP_BLOCK: return bnxt_setup_tc_block(dev, type_data); case TC_SETUP_MQPRIO: { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index cc278d7b56a4..6dff5aa57f16 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -158,8 +158,6 @@ static int bnxt_vf_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { switch (type) { - case TC_SETUP_CLSFLOWER: - return 0; /* will be removed after conversion from ndo */ case TC_SETUP_BLOCK: return bnxt_vf_rep_setup_tc_block(dev, type_data); default: diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 3cd82aea3bb6..e16078ddb39f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2969,9 +2969,6 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { switch (type) { - case TC_SETUP_CLSU32: - case TC_SETUP_CLSFLOWER: - return 0; /* will be removed after conversion from ndo */ case TC_SETUP_BLOCK: return cxgb_setup_tc_block(dev, type_data); default: diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 38e01e0c8314..7f503d35eb1c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9432,8 +9432,6 @@ static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { switch (type) { - case TC_SETUP_CLSU32: - return 0; /* will be removed after conversion from ndo */ case TC_SETUP_BLOCK: return ixgbe_setup_tc_block(dev, type_data); case TC_SETUP_MQPRIO: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index e8108688a7cf..560b208c0483 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3141,8 +3141,6 @@ int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, { switch (type) { #ifdef CONFIG_MLX5_ESWITCH - case TC_SETUP_CLSFLOWER: - return 0; /* will be removed after conversion from ndo */ case TC_SETUP_BLOCK: return mlx5e_setup_tc_block(dev, type_data); #endif 
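Once the transitional TC_SETUP_CLS* cases above are deleted, every converted driver in this series reduces to the same shape: dispatch TC_SETUP_BLOCK to a bind/unbind handler, and let the registered callback receive the classifier offloads. A condensed sketch of that pattern follows; the foo_* names and struct foo_priv are placeholders rather than any one driver's code, so treat it as illustrative, not upstream source:

#include <linux/netdevice.h>
#include <net/pkt_cls.h>

struct foo_priv {
	/* whatever per-device state the driver offloads against */
};

/* Placeholder for the driver's real flower offload handler. */
static int foo_setup_tc_cls_flower(struct foo_priv *priv,
				   struct tc_cls_flower_offload *f)
{
	return -EOPNOTSUPP;
}

/* Invoked by the core for every classifier offload on a bound block. */
static int foo_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	struct foo_priv *priv = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return foo_setup_tc_cls_flower(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int foo_setup_tc_block(struct net_device *dev,
			      struct tc_block_offload *f)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* Only clsact ingress blocks are offloadable here. */
	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		/* cb_ident and cb_priv are both priv, as in most drivers above */
		return tcf_block_cb_register(f->block, foo_setup_tc_block_cb,
					     priv, priv);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, foo_setup_tc_block_cb, priv);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int foo_setup_tc(struct net_device *dev, enum tc_setup_type type,
			void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return foo_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

The cb_ident argument is what unregistration matches on; drivers whose callback state differs from their identity (cxgb4 above registers with pi as the ident but dev as the priv) pick the pair so bind and unbind stay unambiguous.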
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index f59d81aa8a0c..0edb7065d811 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -714,8 +714,6 @@ static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { switch (type) { - case TC_SETUP_CLSFLOWER: - return 0; /* will be removed after conversion from ndo */ case TC_SETUP_BLOCK: return mlx5e_rep_setup_tc_block(dev, type_data); default: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 7b60c75d96f4..4d73a6f7759e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1792,9 +1792,6 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); switch (type) { - case TC_SETUP_CLSMATCHALL: - case TC_SETUP_CLSFLOWER: - return 0; /* will be removed after conversion from ndo */ case TC_SETUP_BLOCK: return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data); default: diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 64f97b3f8949..fa0ac90ed956 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -159,8 +159,6 @@ static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev, enum tc_setup_type type, void *type_data) { switch (type) { - case TC_SETUP_CLSBPF: - return 0; /* will be removed after conversion from ndo */ case TC_SETUP_BLOCK: return nfp_bpf_setup_tc_block(netdev, type_data); default: diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index f8523df827a6..c47753fdb55b 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -506,8 +506,6 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, enum tc_setup_type type, void *type_data) { switch (type) { - case TC_SETUP_CLSFLOWER: - return 0; /* will be removed after conversion from ndo */ case TC_SETUP_BLOCK: return nfp_flower_setup_tc_block(netdev, type_data); default: diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 80142918d5d1..d0ae7010ea45 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -846,8 +846,6 @@ static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { switch (type) { - case TC_SETUP_CLSMATCHALL: - return 0; /* will be removed after conversion from ndo */ case TC_SETUP_BLOCK: return dsa_slave_setup_tc_block(dev, type_data); default: diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index e379fdf928bd..0f8b51061c39 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -148,7 +148,6 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog, enum tc_clsbpf_command cmd) { bool addorrep = cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE; - struct net_device *dev = tp->q->dev_queue->dev; struct tcf_block *block = tp->chain->block; bool skip_sw = tc_skip_sw(prog->gen_flags); struct tc_cls_bpf_offload cls_bpf = {}; @@ -162,19 +161,6 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog, cls_bpf.exts_integrated = prog->exts_integrated; cls_bpf.gen_flags = prog->gen_flags; - if (tc_can_offload(dev)) { - err = 
dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSBPF, - &cls_bpf); - if (addorrep) { - if (err) { - if (skip_sw) - return err; - } else { - prog->gen_flags |= TCA_CLS_FLAGS_IN_HW; - } - } - } - err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw); if (addorrep) { if (err < 0) { diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 76b4e0a1c92f..16f58abaa697 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -200,16 +200,12 @@ static void fl_destroy_filter(struct rcu_head *head) static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f) { struct tc_cls_flower_offload cls_flower = {}; - struct net_device *dev = tp->q->dev_queue->dev; struct tcf_block *block = tp->chain->block; tc_cls_common_offload_init(&cls_flower.common, tp); cls_flower.command = TC_CLSFLOWER_DESTROY; cls_flower.cookie = (unsigned long) f; - if (tc_can_offload(dev)) - dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, - &cls_flower); tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER, &cls_flower, false); } @@ -219,7 +215,6 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, struct fl_flow_key *mask, struct cls_fl_filter *f) { - struct net_device *dev = tp->q->dev_queue->dev; struct tc_cls_flower_offload cls_flower = {}; struct tcf_block *block = tp->chain->block; bool skip_sw = tc_skip_sw(f->flags); @@ -233,17 +228,6 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, cls_flower.key = &f->mkey; cls_flower.exts = &f->exts; - if (tc_can_offload(dev)) { - err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, - &cls_flower); - if (err) { - if (skip_sw) - return err; - } else { - f->flags |= TCA_CLS_FLAGS_IN_HW; - } - } - err = tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER, &cls_flower, skip_sw); if (err < 0) { @@ -262,7 +246,6 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f) { struct tc_cls_flower_offload cls_flower = {}; - struct net_device *dev = tp->q->dev_queue->dev; struct tcf_block *block = tp->chain->block; tc_cls_common_offload_init(&cls_flower.common, tp); @@ -270,9 +253,6 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f) cls_flower.cookie = (unsigned long) f; cls_flower.exts = &f->exts; - if (tc_can_offload(dev)) - dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, - &cls_flower); tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER, &cls_flower, false); } diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index 5278534c7e87..70e78d74f6d3 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -54,7 +54,6 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp, struct cls_mall_head *head, unsigned long cookie) { - struct net_device *dev = tp->q->dev_queue->dev; struct tc_cls_matchall_offload cls_mall = {}; struct tcf_block *block = tp->chain->block; @@ -62,9 +61,6 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp, cls_mall.command = TC_CLSMATCHALL_DESTROY; cls_mall.cookie = cookie; - if (tc_can_offload(dev)) - dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSMATCHALL, - &cls_mall); tc_setup_cb_call(block, NULL, TC_SETUP_CLSMATCHALL, &cls_mall, false); } @@ -72,7 +68,6 @@ static int mall_replace_hw_filter(struct tcf_proto *tp, struct cls_mall_head *head, unsigned long cookie) { - struct net_device *dev = tp->q->dev_queue->dev; struct tc_cls_matchall_offload cls_mall = {}; struct tcf_block *block = tp->chain->block; bool skip_sw = tc_skip_sw(head->flags); @@ 
-83,17 +78,6 @@ static int mall_replace_hw_filter(struct tcf_proto *tp, cls_mall.exts = &head->exts; cls_mall.cookie = cookie; - if (tc_can_offload(dev)) { - err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSMATCHALL, - &cls_mall); - if (err) { - if (skip_sw) - return err; - } else { - head->flags |= TCA_CLS_FLAGS_IN_HW; - } - } - err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSMATCHALL, &cls_mall, skip_sw); if (err < 0) { diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index d53da7968eda..9ff17159fb61 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -464,7 +464,6 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key) static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h) { - struct net_device *dev = tp->q->dev_queue->dev; struct tcf_block *block = tp->chain->block; struct tc_cls_u32_offload cls_u32 = {}; @@ -474,15 +473,12 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h) cls_u32.hnode.handle = h->handle; cls_u32.hnode.prio = h->prio; - if (tc_can_offload(dev)) - dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32); tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false); } static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h, u32 flags) { - struct net_device *dev = tp->q->dev_queue->dev; struct tcf_block *block = tp->chain->block; struct tc_cls_u32_offload cls_u32 = {}; bool skip_sw = tc_skip_sw(flags); @@ -495,17 +491,6 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h, cls_u32.hnode.handle = h->handle; cls_u32.hnode.prio = h->prio; - if (tc_can_offload(dev)) { - err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, - &cls_u32); - if (err) { - if (skip_sw) - return err; - } else { - offloaded = true; - } - } - err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw); if (err < 0) { u32_clear_hw_hnode(tp, h); @@ -522,7 +507,6 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h, static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle) { - struct net_device *dev = tp->q->dev_queue->dev; struct tcf_block *block = tp->chain->block; struct tc_cls_u32_offload cls_u32 = {}; @@ -530,15 +514,12 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle) cls_u32.command = TC_CLSU32_DELETE_KNODE; cls_u32.knode.handle = handle; - if (tc_can_offload(dev)) - dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32); tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, false); } static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n, u32 flags) { - struct net_device *dev = tp->q->dev_queue->dev; struct tcf_block *block = tp->chain->block; struct tc_cls_u32_offload cls_u32 = {}; bool skip_sw = tc_skip_sw(flags); @@ -560,18 +541,6 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n, if (n->ht_down) cls_u32.knode.link_handle = n->ht_down->handle; - - if (tc_can_offload(dev)) { - err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, - &cls_u32); - if (err) { - if (skip_sw) - return err; - } else { - n->flags |= TCA_CLS_FLAGS_IN_HW; - } - } - err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw); if (err < 0) { u32_remove_hw_knode(tp, n->handle); -- cgit v1.2.3 From 3c467bf399106030d5a97d844ee119caec04e817 Mon Sep 17 00:00:00 2001 From: Steve Lin Date: Thu, 19 Oct 2017 10:45:56 -0400 Subject: bnxt: Move generic devlink code to new file Moving generic devlink code (registration) out of VF-R code into new bnxt_devlink 
file, in preparation for future work to add additional devlink functionality to bnxt. Signed-off-by: Steve Lin Acked-by: Andy Gospodarek Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/Makefile | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 1 + drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 65 +++++++++++++++++++++++ drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h | 39 ++++++++++++++ drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 53 ++---------------- drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h | 37 ++----------- 6 files changed, 112 insertions(+), 85 deletions(-) create mode 100644 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c create mode 100644 drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile index 457201f409a7..59c8ec9c1cad 100644 --- a/drivers/net/ethernet/broadcom/bnxt/Makefile +++ b/drivers/net/ethernet/broadcom/bnxt/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_BNXT) += bnxt_en.o -bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o +bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_devlink.o bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 22a94b16ebfa..b31bdec26fce 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -61,6 +61,7 @@ #include "bnxt_xdp.h" #include "bnxt_vfr.h" #include "bnxt_tc.h" +#include "bnxt_devlink.h" #define BNXT_TX_TIMEOUT (5 * HZ) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c new file mode 100644 index 000000000000..f3f6aa868d6c --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -0,0 +1,65 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include +#include +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_vfr.h" +#include "bnxt_devlink.h" + +static const struct devlink_ops bnxt_dl_ops = { +#ifdef CONFIG_BNXT_SRIOV + .eswitch_mode_set = bnxt_dl_eswitch_mode_set, + .eswitch_mode_get = bnxt_dl_eswitch_mode_get, +#endif /* CONFIG_BNXT_SRIOV */ +}; + +int bnxt_dl_register(struct bnxt *bp) +{ + struct devlink *dl; + int rc; + + if (!pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) + return 0; + + if (bp->hwrm_spec_code < 0x10800) { + netdev_warn(bp->dev, "Firmware does not support SR-IOV E-Switch SWITCHDEV mode.\n"); + return -ENOTSUPP; + } + + dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl)); + if (!dl) { + netdev_warn(bp->dev, "devlink_alloc failed"); + return -ENOMEM; + } + + bnxt_link_bp_to_dl(bp, dl); + bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + rc = devlink_register(dl, &bp->pdev->dev); + if (rc) { + bnxt_link_bp_to_dl(bp, NULL); + devlink_free(dl); + netdev_warn(bp->dev, "devlink_register failed. 
rc=%d", rc); + return rc; + } + + return 0; +} + +void bnxt_dl_unregister(struct bnxt *bp) +{ + struct devlink *dl = bp->dl; + + if (!dl) + return; + + devlink_unregister(dl); + devlink_free(dl); +} diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h new file mode 100644 index 000000000000..e92a35d8b642 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h @@ -0,0 +1,39 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_DEVLINK_H +#define BNXT_DEVLINK_H + +/* Struct to hold housekeeping info needed by devlink interface */ +struct bnxt_dl { + struct bnxt *bp; /* back ptr to the controlling dev */ +}; + +static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl) +{ + return ((struct bnxt_dl *)devlink_priv(dl))->bp; +} + +/* To clear devlink pointer from bp, pass NULL dl */ +static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl) +{ + bp->dl = dl; + + /* add a back pointer in dl to bp */ + if (dl) { + struct bnxt_dl *bp_dl = devlink_priv(dl); + + bp_dl->bp = bp; + } +} + +int bnxt_dl_register(struct bnxt *bp); +void bnxt_dl_unregister(struct bnxt *bp); + +#endif /* BNXT_DEVLINK_H */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index 6dff5aa57f16..4ae935999ebe 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -16,6 +16,7 @@ #include "bnxt_hsi.h" #include "bnxt.h" #include "bnxt_vfr.h" +#include "bnxt_devlink.h" #include "bnxt_tc.h" #ifdef CONFIG_BNXT_SRIOV @@ -451,7 +452,7 @@ err: } /* Devlink related routines */ -static int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode) +int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode) { struct bnxt *bp = bnxt_get_bp_from_dl(devlink); @@ -459,7 +460,7 @@ static int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode) return 0; } -static int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode) +int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode) { struct bnxt *bp = bnxt_get_bp_from_dl(devlink); int rc = 0; @@ -497,52 +498,4 @@ done: return rc; } -static const struct devlink_ops bnxt_dl_ops = { - .eswitch_mode_set = bnxt_dl_eswitch_mode_set, - .eswitch_mode_get = bnxt_dl_eswitch_mode_get -}; - -int bnxt_dl_register(struct bnxt *bp) -{ - struct devlink *dl; - int rc; - - if (!pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) - return 0; - - if (bp->hwrm_spec_code < 0x10800) { - netdev_warn(bp->dev, "Firmware does not support SR-IOV E-Switch SWITCHDEV mode.\n"); - return -ENOTSUPP; - } - - dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl)); - if (!dl) { - netdev_warn(bp->dev, "devlink_alloc failed"); - return -ENOMEM; - } - - bnxt_link_bp_to_dl(bp, dl); - bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; - rc = devlink_register(dl, &bp->pdev->dev); - if (rc) { - bnxt_link_bp_to_dl(bp, NULL); - devlink_free(dl); - netdev_warn(bp->dev, "devlink_register failed. 
rc=%d", rc); - return rc; - } - - return 0; -} - -void bnxt_dl_unregister(struct bnxt *bp) -{ - struct devlink *dl = bp->dl; - - if (!dl) - return; - - devlink_unregister(dl); - devlink_free(dl); -} - #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h index 7787cd24606a..fb06bbe70e42 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h @@ -14,31 +14,6 @@ #define MAX_CFA_CODE 65536 -/* Struct to hold housekeeping info needed by devlink interface */ -struct bnxt_dl { - struct bnxt *bp; /* back ptr to the controlling dev */ -}; - -static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl) -{ - return ((struct bnxt_dl *)devlink_priv(dl))->bp; -} - -/* To clear devlink pointer from bp, pass NULL dl */ -static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl) -{ - bp->dl = dl; - - /* add a back pointer in dl to bp */ - if (dl) { - struct bnxt_dl *bp_dl = devlink_priv(dl); - - bp_dl->bp = bp; - } -} - -int bnxt_dl_register(struct bnxt *bp); -void bnxt_dl_unregister(struct bnxt *bp); void bnxt_vf_reps_destroy(struct bnxt *bp); void bnxt_vf_reps_close(struct bnxt *bp); void bnxt_vf_reps_open(struct bnxt *bp); @@ -53,16 +28,10 @@ static inline u16 bnxt_vf_rep_get_fid(struct net_device *dev) return bp->pf.vf[vf_rep->vf_idx].fw_fid; } -#else - -static inline int bnxt_dl_register(struct bnxt *bp) -{ - return 0; -} +int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode); +int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode); -static inline void bnxt_dl_unregister(struct bnxt *bp) -{ -} +#else static inline void bnxt_vf_reps_close(struct bnxt *bp) { -- cgit v1.2.3 From d43e5aca8780e4084cad0969c71669cf99dc6030 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Fri, 20 Oct 2017 10:19:21 +0800 Subject: net: hns3: Refactor the skb receiving and transmitting function This patch refactors the skb receiving and transmitting functions and export them in order to support the ethtool's mac loopback selftest. Signed-off-by: Yunsheng Lin Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 17 ++++++++++++----- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h | 4 ++++ 2 files changed, 16 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 8fa4e658b273..8383d6726ae4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -900,8 +900,7 @@ static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig) } } -static netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, - struct net_device *netdev) +netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hns3_nic_ring_data *ring_data = @@ -1943,6 +1942,11 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, } } +static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) +{ + napi_gro_receive(&ring->tqp_vector->napi, skb); +} + static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, struct sk_buff **out_skb, int *out_bnum) { @@ -2077,7 +2081,9 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, return 0; } -static int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget) +int hns3_clean_rx_ring( + struct hns3_enet_ring *ring, int budget, + void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) { #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 struct net_device *netdev = ring->tqp->handle->kinfo.netdev; @@ -2115,7 +2121,7 @@ static int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget) /* Do update ip stack process */ skb->protocol = eth_type_trans(skb, netdev); - (void)napi_gro_receive(&ring->tqp_vector->napi, skb); + rx_fn(ring, skb); recv_pkts++; } @@ -2258,7 +2264,8 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) rx_budget = max(budget / tqp_vector->num_tqps, 1); hns3_for_each_ring(ring, tqp_vector->rx_group) { - int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget); + int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, + hns3_rx_skb); if (rx_cleaned >= rx_budget) clean_complete = false; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h index 66599890b4d4..6228b2603d93 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h @@ -597,6 +597,10 @@ void hns3_ethtool_set_ops(struct net_device *netdev); int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); int hns3_init_all_ring(struct hns3_nic_priv *priv); int hns3_uninit_all_ring(struct hns3_nic_priv *priv); +netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev); +int hns3_clean_rx_ring( + struct hns3_enet_ring *ring, int budget, + void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)); #ifdef CONFIG_HNS3_DCB void hns3_dcbnl_setup(struct hnae3_handle *handle); -- cgit v1.2.3 From c39c4d98dc658f5d44b96982333f3611d9cc2be7 Mon Sep 17 00:00:00 2001 From: Yunsheng Lin Date: Fri, 20 Oct 2017 10:19:22 +0800 Subject: net: hns3: Add mac loopback selftest support in hns3 driver This patch adds mac loopback selftest support for ethtool cmd by checking if a transmitted packet can be received correctly when mac loopback is enabled. Signed-off-by: Yunsheng Lin Signed-off-by: David S. 
Miller --- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 54 ++++ .../ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c | 273 +++++++++++++++++++++ 2 files changed, 327 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 6e93943c489a..8508521c26e8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3149,6 +3149,59 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) "mac enable fail, ret =%d.\n", ret); } +static int hclge_set_loopback(struct hnae3_handle *handle, + enum hnae3_loop loop_mode, bool en) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_config_mac_mode_cmd *req; + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + u32 loop_en; + int ret; + + switch (loop_mode) { + case HNAE3_MAC_INTER_LOOP_MAC: + req = (struct hclge_config_mac_mode_cmd *)&desc.data[0]; + /* 1 Read out the MAC mode config at first */ + hclge_cmd_setup_basic_desc(&desc, + HCLGE_OPC_CONFIG_MAC_MODE, + true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac loopback get fail, ret =%d.\n", + ret); + return ret; + } + + /* 2 Then setup the loopback flag */ + loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en); + if (en) + hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 1); + else + hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0); + + req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); + + /* 3 Config mac work mode with loopback flag + * and its original configure parameters + */ + hclge_cmd_reuse_desc(&desc, false); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "mac loopback set fail, ret =%d.\n", ret); + break; + default: + ret = -ENOTSUPP; + dev_err(&hdev->pdev->dev, + "loop_mode %d is not supported\n", loop_mode); + break; + } + + return ret; +} + static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, int stream_id, bool enable) { @@ -4485,6 +4538,7 @@ static const struct hnae3_ae_ops hclge_ops = { .unmap_ring_from_vector = hclge_unmap_ring_from_vector, .get_vector = hclge_get_vector, .set_promisc_mode = hclge_set_promisc_mode, + .set_loopback = hclge_set_loopback, .start = hclge_ae_start, .stop = hclge_ae_stop, .get_status = hclge_get_status, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c index ddbd7f30c6a4..6c469e49a04f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c @@ -59,6 +59,16 @@ static const struct hns3_stats hns3_rxq_stats[] = { #define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT) +#define HNS3_SELF_TEST_TPYE_NUM 1 +#define HNS3_NIC_LB_TEST_PKT_NUM 1 +#define HNS3_NIC_LB_TEST_RING_ID 0 +#define HNS3_NIC_LB_TEST_PACKET_SIZE 128 + +/* Nic loopback test err */ +#define HNS3_NIC_LB_TEST_NO_MEM_ERR 1 +#define HNS3_NIC_LB_TEST_TX_CNT_ERR 2 +#define HNS3_NIC_LB_TEST_RX_CNT_ERR 3 + struct hns3_link_mode_mapping { u32 hns3_link_mode; u32 ethtool_link_mode; @@ -77,6 +87,268 @@ static const struct hns3_link_mode_mapping hns3_lm_map[] = { {HNS3_LM_1000BASET_FULL_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT}, }; +static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + int ret; + + if (!h->ae_algo->ops->set_loopback || 
+ !h->ae_algo->ops->set_promisc_mode) + return -EOPNOTSUPP; + + switch (loop) { + case HNAE3_MAC_INTER_LOOP_MAC: + ret = h->ae_algo->ops->set_loopback(h, loop, true); + break; + case HNAE3_MAC_LOOP_NONE: + ret = h->ae_algo->ops->set_loopback(h, + HNAE3_MAC_INTER_LOOP_MAC, false); + break; + default: + ret = -ENOTSUPP; + break; + } + + if (ret) + return ret; + + if (loop == HNAE3_MAC_LOOP_NONE) + h->ae_algo->ops->set_promisc_mode(h, ndev->flags & IFF_PROMISC); + else + h->ae_algo->ops->set_promisc_mode(h, 1); + + return ret; +} + +static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + int ret; + + if (!h->ae_algo->ops->start) + return -EOPNOTSUPP; + + ret = h->ae_algo->ops->start(h); + if (ret) { + netdev_err(ndev, + "hns3_lb_up ae start return error: %d\n", ret); + return ret; + } + + ret = hns3_lp_setup(ndev, loop_mode); + usleep_range(10000, 20000); + + return ret; +} + +static int hns3_lp_down(struct net_device *ndev) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + int ret; + + if (!h->ae_algo->ops->stop) + return -EOPNOTSUPP; + + ret = hns3_lp_setup(ndev, HNAE3_MAC_LOOP_NONE); + if (ret) { + netdev_err(ndev, "lb_setup return error: %d\n", ret); + return ret; + } + + h->ae_algo->ops->stop(h); + usleep_range(10000, 20000); + + return 0; +} + +static void hns3_lp_setup_skb(struct sk_buff *skb) +{ + struct net_device *ndev = skb->dev; + unsigned char *packet; + struct ethhdr *ethh; + unsigned int i; + + skb_reserve(skb, NET_IP_ALIGN); + ethh = skb_put(skb, sizeof(struct ethhdr)); + packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE); + + memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN); + eth_zero_addr(ethh->h_source); + ethh->h_proto = htons(ETH_P_ARP); + skb_reset_mac_header(skb); + + for (i = 0; i < HNS3_NIC_LB_TEST_PACKET_SIZE; i++) + packet[i] = (unsigned char)(i & 0xff); +} + +static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring, + struct sk_buff *skb) +{ + struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector; + unsigned char *packet = skb->data; + u32 i; + + for (i = 0; i < skb->len; i++) + if (packet[i] != (unsigned char)(i & 0xff)) + break; + + /* The packet is correctly received */ + if (i == skb->len) + tqp_vector->rx_group.total_packets++; + else + print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1, + skb->data, skb->len, true); + + dev_kfree_skb_any(skb); +} + +static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget) +{ + struct hnae3_handle *h = priv->ae_handle; + struct hnae3_knic_private_info *kinfo; + u32 i, rcv_good_pkt_total = 0; + + kinfo = &h->kinfo; + for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) { + struct hns3_enet_ring *ring = priv->ring_data[i].ring; + struct hns3_enet_ring_group *rx_group; + u64 pre_rx_pkt; + + rx_group = &ring->tqp_vector->rx_group; + pre_rx_pkt = rx_group->total_packets; + + hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data); + + rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt); + rx_group->total_packets = pre_rx_pkt; + } + return rcv_good_pkt_total; +} + +static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid, + u32 end_ringid, u32 budget) +{ + u32 i; + + for (i = start_ringid; i <= end_ringid; i++) { + struct hns3_enet_ring *ring = priv->ring_data[i].ring; + + hns3_clean_tx_ring(ring, budget); + } +} + +/** + * hns3_lp_run_test - run loopback test + * @ndev: net device + * @mode: loopback type + */ +static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop 
mode) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct sk_buff *skb; + u32 i, good_cnt; + int ret_val = 0; + + skb = alloc_skb(HNS3_NIC_LB_TEST_PACKET_SIZE + ETH_HLEN + NET_IP_ALIGN, + GFP_KERNEL); + if (!skb) + return HNS3_NIC_LB_TEST_NO_MEM_ERR; + + skb->dev = ndev; + hns3_lp_setup_skb(skb); + skb->queue_mapping = HNS3_NIC_LB_TEST_RING_ID; + + good_cnt = 0; + for (i = 0; i < HNS3_NIC_LB_TEST_PKT_NUM; i++) { + netdev_tx_t tx_ret; + + skb_get(skb); + tx_ret = hns3_nic_net_xmit(skb, ndev); + if (tx_ret == NETDEV_TX_OK) + good_cnt++; + else + netdev_err(ndev, "hns3_lb_run_test xmit failed: %d\n", + tx_ret); + } + if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) { + ret_val = HNS3_NIC_LB_TEST_TX_CNT_ERR; + netdev_err(ndev, "mode %d sent fail, cnt=0x%x, budget=0x%x\n", + mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM); + goto out; + } + + /* Allow 200 milliseconds for packets to go from Tx to Rx */ + msleep(200); + + good_cnt = hns3_lb_check_rx_ring(priv, HNS3_NIC_LB_TEST_PKT_NUM); + if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) { + ret_val = HNS3_NIC_LB_TEST_RX_CNT_ERR; + netdev_err(ndev, "mode %d recv fail, cnt=0x%x, budget=0x%x\n", + mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM); + } + +out: + hns3_lb_clear_tx_ring(priv, HNS3_NIC_LB_TEST_RING_ID, + HNS3_NIC_LB_TEST_RING_ID, + HNS3_NIC_LB_TEST_PKT_NUM); + + kfree_skb(skb); + return ret_val; +} + +/** + * hns3_nic_self_test - self test + * @ndev: net device + * @eth_test: test cmd + * @data: test result + */ +static void hns3_self_test(struct net_device *ndev, + struct ethtool_test *eth_test, u64 *data) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + int st_param[HNS3_SELF_TEST_TPYE_NUM][2]; + bool if_running = netif_running(ndev); + int test_index = 0; + u32 i; + + /* Only do offline selftest, or pass by default */ + if (eth_test->flags != ETH_TEST_FL_OFFLINE) + return; + + st_param[HNAE3_MAC_INTER_LOOP_MAC][0] = HNAE3_MAC_INTER_LOOP_MAC; + st_param[HNAE3_MAC_INTER_LOOP_MAC][1] = + h->flags & HNAE3_SUPPORT_MAC_LOOPBACK; + + if (if_running) + dev_close(ndev); + + set_bit(HNS3_NIC_STATE_TESTING, &priv->state); + + for (i = 0; i < HNS3_SELF_TEST_TPYE_NUM; i++) { + enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0]; + + if (!st_param[i][1]) + continue; + + data[test_index] = hns3_lp_up(ndev, loop_type); + if (!data[test_index]) { + data[test_index] = hns3_lp_run_test(ndev, loop_type); + hns3_lp_down(ndev); + } + + if (data[test_index]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + test_index++; + } + + clear_bit(HNS3_NIC_STATE_TESTING, &priv->state); + + if (if_running) + dev_open(ndev); +} + static void hns3_driv_to_eth_caps(u32 caps, struct ethtool_link_ksettings *cmd, bool is_advertised) { @@ -553,6 +825,7 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) } static const struct ethtool_ops hns3_ethtool_ops = { + .self_test = hns3_self_test, .get_drvinfo = hns3_get_drvinfo, .get_link = hns3_get_link, .get_ringparam = hns3_get_ringparam, -- cgit v1.2.3 From eaf6ab76430881c30695a9195ce37d6b11b04997 Mon Sep 17 00:00:00 2001 From: Elena Reshetova Date: Fri, 20 Oct 2017 10:23:35 +0300 Subject: drivers, net, ethernet: convert clip_entry.refcnt from atomic_t to refcount_t atomic_t variables are currently used to implement reference counters with the following properties: - counter is initialized to 1 using atomic_set() - a resource is freed upon counter reaching zero - once counter reaches zero, its further increments aren't allowed - counter schema uses basic atomic 
operations (set, inc, inc_not_zero, dec_and_test, etc.) Such atomic variables should be converted to a newly provided refcount_t type and API that prevents accidental counter overflows and underflows. This is important since overflows and underflows can lead to use-after-free situation and be exploitable. The variable clip_entry.refcnt is used as pure reference counter. Convert it to refcount_t and fix up the operations. Suggested-by: Kees Cook Reviewed-by: David Windsor Reviewed-by: Hans Liljestrand Signed-off-by: Elena Reshetova Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c | 13 ++++++------- drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h | 4 +++- 2 files changed, 9 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c index 3103ef9b561d..290039026ece 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c +++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c @@ -96,7 +96,8 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) if (!ret) { ce = cte; read_unlock_bh(&ctbl->lock); - goto found; + refcount_inc(&ce->refcnt); + return 0; } } read_unlock_bh(&ctbl->lock); @@ -108,7 +109,7 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) list_del(&ce->list); INIT_LIST_HEAD(&ce->list); spin_lock_init(&ce->lock); - atomic_set(&ce->refcnt, 0); + refcount_set(&ce->refcnt, 0); atomic_dec(&ctbl->nfree); list_add_tail(&ce->list, &ctbl->hash_list[hash]); if (v6) { @@ -138,9 +139,7 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) return -ENOMEM; } write_unlock_bh(&ctbl->lock); -found: - atomic_inc(&ce->refcnt); - + refcount_set(&ce->refcnt, 1); return 0; } EXPORT_SYMBOL(cxgb4_clip_get); @@ -179,7 +178,7 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6) found: write_lock_bh(&ctbl->lock); spin_lock_bh(&ce->lock); - if (atomic_dec_and_test(&ce->refcnt)) { + if (refcount_dec_and_test(&ce->refcnt)) { list_del(&ce->list); INIT_LIST_HEAD(&ce->list); list_add_tail(&ce->list, &ctbl->ce_free_head); @@ -266,7 +265,7 @@ int clip_tbl_show(struct seq_file *seq, void *v) ip[0] = '\0'; sprintf(ip, "%pISc", &ce->addr); seq_printf(seq, "%-25s %u\n", ip, - atomic_read(&ce->refcnt)); + refcount_read(&ce->refcnt)); } } seq_printf(seq, "Free clip entries : %d\n", atomic_read(&ctbl->nfree)); diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h index 35eb43c6bcbb..a0e0ae19649f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h @@ -10,9 +10,11 @@ * release for licensing terms and conditions. */ +#include + struct clip_entry { spinlock_t lock; /* Hold while modifying clip reference */ - atomic_t refcnt; + refcount_t refcnt; struct list_head list; union { struct sockaddr_in addr; -- cgit v1.2.3 From c6d4e63e065e796d2f2734c1e4e13f85f1c1c5e4 Mon Sep 17 00:00:00 2001 From: Elena Reshetova Date: Fri, 20 Oct 2017 10:23:36 +0300 Subject: drivers, net, ethernet: convert mtk_eth.dma_refcnt from atomic_t to refcount_t atomic_t variables are currently used to implement reference counters with the following properties: - counter is initialized to 1 using atomic_set() - a resource is freed upon counter reaching zero - once counter reaches zero, its further increments aren't allowed - counter schema uses basic atomic operations (set, inc, inc_not_zero, dec_and_test, etc.) 
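As a concrete illustration of the lifecycle those properties describe, here is a minimal sketch in the refcount_t API this series converts to; struct foo_object and the foo_* helpers are hypothetical, not code from any of these drivers:

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo_object {
	refcount_t refcnt;
	/* ... payload ... */
};

static struct foo_object *foo_alloc(void)
{
	struct foo_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		refcount_set(&obj->refcnt, 1);	/* counter starts at 1 */
	return obj;
}

static void foo_get(struct foo_object *obj)
{
	/* refcount_inc() WARNs and saturates instead of wrapping past zero */
	refcount_inc(&obj->refcnt);
}

static void foo_put(struct foo_object *obj)
{
	/* the object is freed exactly once, when the last reference drops */
	if (refcount_dec_and_test(&obj->refcnt))
		kfree(obj);
}
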
Such atomic variables should be converted to a newly provided refcount_t type and API that prevents accidental counter overflows and underflows. This is important since overflows and underflows can lead to use-after-free situation and be exploitable. The variable mtk_eth.dma_refcnt is used as pure reference counter. Convert it to refcount_t and fix up the operations. Suggested-by: Kees Cook Reviewed-by: David Windsor Reviewed-by: Hans Liljestrand Signed-off-by: Elena Reshetova Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 8 +++++--- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 4 +++- 2 files changed, 8 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 5e81a7263654..54adfd967858 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1817,7 +1817,7 @@ static int mtk_open(struct net_device *dev) struct mtk_eth *eth = mac->hw; /* we run 2 netdevs on the same dma ring so we only bring it up once */ - if (!atomic_read(ð->dma_refcnt)) { + if (!refcount_read(ð->dma_refcnt)) { int err = mtk_start_dma(eth); if (err) @@ -1827,8 +1827,10 @@ static int mtk_open(struct net_device *dev) napi_enable(ð->rx_napi); mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); + refcount_set(ð->dma_refcnt, 1); } - atomic_inc(ð->dma_refcnt); + else + refcount_inc(ð->dma_refcnt); phy_start(dev->phydev); netif_start_queue(dev); @@ -1868,7 +1870,7 @@ static int mtk_stop(struct net_device *dev) phy_stop(dev->phydev); /* only shutdown DMA if this is the last user */ - if (!atomic_dec_and_test(ð->dma_refcnt)) + if (!refcount_dec_and_test(ð->dma_refcnt)) return 0; mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 3d3c24a28112..a3af4660de81 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -15,6 +15,8 @@ #ifndef MTK_ETH_H #define MTK_ETH_H +#include + #define MTK_QDMA_PAGE_SIZE 2048 #define MTK_MAX_RX_LENGTH 1536 #define MTK_TX_DMA_BUF_LEN 0x3fff @@ -632,7 +634,7 @@ struct mtk_eth { struct regmap *pctl; u32 chip_id; bool hwlro; - atomic_t dma_refcnt; + refcount_t dma_refcnt; struct mtk_tx_ring tx_ring; struct mtk_rx_ring rx_ring[MTK_MAX_RX_RING_NUM]; struct mtk_rx_ring rx_ring_qdma; -- cgit v1.2.3 From ff61b5e3f041c2f1aa8d7c700af3007889973889 Mon Sep 17 00:00:00 2001 From: Elena Reshetova Date: Fri, 20 Oct 2017 10:23:37 +0300 Subject: drivers, net, mlx4: convert mlx4_cq.refcount from atomic_t to refcount_t atomic_t variables are currently used to implement reference counters with the following properties: - counter is initialized to 1 using atomic_set() - a resource is freed upon counter reaching zero - once counter reaches zero, its further increments aren't allowed - counter schema uses basic atomic operations (set, inc, inc_not_zero, dec_and_test, etc.) Such atomic variables should be converted to a newly provided refcount_t type and API that prevents accidental counter overflows and underflows. This is important since overflows and underflows can lead to use-after-free situation and be exploitable. The variable mlx4_cq.refcount is used as pure reference counter. Convert it to refcount_t and fix up the operations. 
Suggested-by: Kees Cook Reviewed-by: David Windsor Reviewed-by: Hans Liljestrand Signed-off-by: Elena Reshetova Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/cq.c | 8 ++++---- include/linux/mlx4/device.h | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index 72eb50cd5ecd..d8e9a323122e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -69,7 +69,7 @@ void mlx4_cq_tasklet_cb(unsigned long data) list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) { list_del_init(&mcq->tasklet_ctx.list); mcq->tasklet_ctx.comp(mcq); - if (atomic_dec_and_test(&mcq->refcount)) + if (refcount_dec_and_test(&mcq->refcount)) complete(&mcq->free); if (time_after(jiffies, end)) break; @@ -92,7 +92,7 @@ static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq) * still arrive. */ if (list_empty_careful(&cq->tasklet_ctx.list)) { - atomic_inc(&cq->refcount); + refcount_inc(&cq->refcount); kick = list_empty(&tasklet_ctx->list); list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list); if (kick) @@ -344,7 +344,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, cq->cons_index = 0; cq->arm_sn = 1; cq->uar = uar; - atomic_set(&cq->refcount, 1); + refcount_set(&cq->refcount, 1); init_completion(&cq->free); cq->comp = mlx4_add_cq_to_tasklet; cq->tasklet_ctx.priv = @@ -386,7 +386,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq) priv->eq_table.eq[MLX4_EQ_ASYNC].irq) synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq); - if (atomic_dec_and_test(&cq->refcount)) + if (refcount_dec_and_test(&cq->refcount)) complete(&cq->free); wait_for_completion(&cq->free); diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index b0a57e043fa3..daac2e3a1a58 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -40,7 +40,7 @@ #include #include -#include +#include #include @@ -751,7 +751,7 @@ struct mlx4_cq { int cqn; unsigned vector; - atomic_t refcount; + refcount_t refcount; struct completion free; struct { struct list_head list; -- cgit v1.2.3 From 0068895ff845c38e9e2b65c002c53c623379e436 Mon Sep 17 00:00:00 2001 From: Elena Reshetova Date: Fri, 20 Oct 2017 10:23:38 +0300 Subject: drivers, net, mlx4: convert mlx4_qp.refcount from atomic_t to refcount_t atomic_t variables are currently used to implement reference counters with the following properties: - counter is initialized to 1 using atomic_set() - a resource is freed upon counter reaching zero - once counter reaches zero, its further increments aren't allowed - counter schema uses basic atomic operations (set, inc, inc_not_zero, dec_and_test, etc.) Such atomic variables should be converted to a newly provided refcount_t type and API that prevents accidental counter overflows and underflows. This is important since overflows and underflows can lead to use-after-free situation and be exploitable. The variable mlx4_qp.refcount is used as pure reference counter. Convert it to refcount_t and fix up the operations. Suggested-by: Kees Cook Reviewed-by: David Windsor Reviewed-by: Hans Liljestrand Signed-off-by: Elena Reshetova Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/qp.c | 8 ++++---- include/linux/mlx4/device.h | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 203320923340..769598f7b6c8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -55,7 +55,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) qp = __mlx4_qp_lookup(dev, qpn); if (qp) - atomic_inc(&qp->refcount); + refcount_inc(&qp->refcount); spin_unlock(&qp_table->lock); @@ -66,7 +66,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) qp->event(qp, event_type); - if (atomic_dec_and_test(&qp->refcount)) + if (refcount_dec_and_test(&qp->refcount)) complete(&qp->free); } @@ -420,7 +420,7 @@ int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp) if (err) goto err_icm; - atomic_set(&qp->refcount, 1); + refcount_set(&qp->refcount, 1); init_completion(&qp->free); return 0; @@ -520,7 +520,7 @@ EXPORT_SYMBOL_GPL(mlx4_qp_remove); void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp) { - if (atomic_dec_and_test(&qp->refcount)) + if (refcount_dec_and_test(&qp->refcount)) complete(&qp->free); wait_for_completion(&qp->free); diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index daac2e3a1a58..b8e19c4d6caa 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -768,7 +768,7 @@ struct mlx4_qp { int qpn; - atomic_t refcount; + refcount_t refcount; struct completion free; u8 usage; }; -- cgit v1.2.3 From 17ac99b2b8d08ed40f4525491d6eff330329a6d2 Mon Sep 17 00:00:00 2001 From: Elena Reshetova Date: Fri, 20 Oct 2017 10:23:39 +0300 Subject: drivers, net, mlx4: convert mlx4_srq.refcount from atomic_t to refcount_t atomic_t variables are currently used to implement reference counters with the following properties: - counter is initialized to 1 using atomic_set() - a resource is freed upon counter reaching zero - once counter reaches zero, its further increments aren't allowed - counter schema uses basic atomic operations (set, inc, inc_not_zero, dec_and_test, etc.) Such atomic variables should be converted to a newly provided refcount_t type and API that prevents accidental counter overflows and underflows. This is important since overflows and underflows can lead to use-after-free situation and be exploitable. The variable mlx4_srq.refcount is used as pure reference counter. Convert it to refcount_t and fix up the operations. Suggested-by: Kees Cook Reviewed-by: David Windsor Reviewed-by: Hans Liljestrand Signed-off-by: Elena Reshetova Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/srq.c | 8 ++++---- include/linux/mlx4/device.h | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c index bedf52126824..cbe4d9746ddf 100644 --- a/drivers/net/ethernet/mellanox/mlx4/srq.c +++ b/drivers/net/ethernet/mellanox/mlx4/srq.c @@ -49,7 +49,7 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type) srq = radix_tree_lookup(&srq_table->tree, srqn & (dev->caps.num_srqs - 1)); rcu_read_unlock(); if (srq) - atomic_inc(&srq->refcount); + refcount_inc(&srq->refcount); else { mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn); return; @@ -57,7 +57,7 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type) srq->event(srq, event_type); - if (atomic_dec_and_test(&srq->refcount)) + if (refcount_dec_and_test(&srq->refcount)) complete(&srq->free); } @@ -203,7 +203,7 @@ int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd, if (err) goto err_radix; - atomic_set(&srq->refcount, 1); + refcount_set(&srq->refcount, 1); init_completion(&srq->free); return 0; @@ -232,7 +232,7 @@ void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq) radix_tree_delete(&srq_table->tree, srq->srqn); spin_unlock_irq(&srq_table->lock); - if (atomic_dec_and_test(&srq->refcount)) + if (refcount_dec_and_test(&srq->refcount)) complete(&srq->free); wait_for_completion(&srq->free); diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index b8e19c4d6caa..a9b5fed8f7c6 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -781,7 +781,7 @@ struct mlx4_srq { int max_gs; int wqe_shift; - atomic_t refcount; + refcount_t refcount; struct completion free; }; -- cgit v1.2.3 From a4b51a9f83c6d359ff8fc0c66009283b6fdeeaf8 Mon Sep 17 00:00:00 2001 From: Elena Reshetova Date: Fri, 20 Oct 2017 10:23:40 +0300 Subject: drivers, net, mlx5: convert mlx5_cq.refcount from atomic_t to refcount_t atomic_t variables are currently used to implement reference counters with the following properties: - counter is initialized to 1 using atomic_set() - a resource is freed upon counter reaching zero - once counter reaches zero, its further increments aren't allowed - counter schema uses basic atomic operations (set, inc, inc_not_zero, dec_and_test, etc.) Such atomic variables should be converted to a newly provided refcount_t type and API that prevents accidental counter overflows and underflows. This is important since overflows and underflows can lead to use-after-free situation and be exploitable. The variable mlx5_cq.refcount is used as pure reference counter. Convert it to refcount_t and fix up the operations. Suggested-by: Kees Cook Reviewed-by: David Windsor Reviewed-by: Hans Liljestrand Signed-off-by: Elena Reshetova Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/cq.c | 16 ++++++++-------- include/linux/mlx5/cq.h | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index 336d4738b807..1016e05c7ec7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -58,7 +58,7 @@ void mlx5_cq_tasklet_cb(unsigned long data) tasklet_ctx.list) { list_del_init(&mcq->tasklet_ctx.list); mcq->tasklet_ctx.comp(mcq); - if (atomic_dec_and_test(&mcq->refcount)) + if (refcount_dec_and_test(&mcq->refcount)) complete(&mcq->free); if (time_after(jiffies, end)) break; @@ -80,7 +80,7 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq) * still arrive. */ if (list_empty_careful(&cq->tasklet_ctx.list)) { - atomic_inc(&cq->refcount); + refcount_inc(&cq->refcount); list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list); } spin_unlock_irqrestore(&tasklet_ctx->lock, flags); @@ -94,7 +94,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn) spin_lock(&table->lock); cq = radix_tree_lookup(&table->tree, cqn); if (likely(cq)) - atomic_inc(&cq->refcount); + refcount_inc(&cq->refcount); spin_unlock(&table->lock); if (!cq) { @@ -106,7 +106,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn) cq->comp(cq); - if (atomic_dec_and_test(&cq->refcount)) + if (refcount_dec_and_test(&cq->refcount)) complete(&cq->free); } @@ -119,7 +119,7 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type) cq = radix_tree_lookup(&table->tree, cqn); if (cq) - atomic_inc(&cq->refcount); + refcount_inc(&cq->refcount); spin_unlock(&table->lock); @@ -130,7 +130,7 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type) cq->event(cq, event_type); - if (atomic_dec_and_test(&cq->refcount)) + if (refcount_dec_and_test(&cq->refcount)) complete(&cq->free); } @@ -159,7 +159,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, cq->cqn = MLX5_GET(create_cq_out, out, cqn); cq->cons_index = 0; cq->arm_sn = 0; - atomic_set(&cq->refcount, 1); + refcount_set(&cq->refcount, 1); init_completion(&cq->free); if (!cq->comp) cq->comp = mlx5_add_cq_to_tasklet; @@ -222,7 +222,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) synchronize_irq(cq->irqn); mlx5_debug_cq_remove(dev, cq); - if (atomic_dec_and_test(&cq->refcount)) + if (refcount_dec_and_test(&cq->refcount)) complete(&cq->free); wait_for_completion(&cq->free); diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index 95898847c7d4..6a57ec2f1ef7 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -35,7 +35,7 @@ #include #include - +#include struct mlx5_core_cq { u32 cqn; @@ -43,7 +43,7 @@ struct mlx5_core_cq { __be32 *set_ci_db; __be32 *arm_db; struct mlx5_uars_page *uar; - atomic_t refcount; + refcount_t refcount; struct completion free; unsigned vector; unsigned int irqn; -- cgit v1.2.3 From dd8e19456d60a519de1852ae4b1be7d62690d2e0 Mon Sep 17 00:00:00 2001 From: Elena Reshetova Date: Fri, 20 Oct 2017 10:23:41 +0300 Subject: drivers, net, mlx5: convert fs_node.refcount from atomic_t to refcount_t atomic_t variables are currently used to implement reference counters with the following properties: - counter is initialized to 1 using atomic_set() - a resource is freed upon counter reaching zero - once counter reaches zero, its further increments aren't allowed - counter schema 
uses basic atomic operations (set, inc, inc_not_zero, dec_and_test, etc.) Such atomic variables should be converted to a newly provided refcount_t type and API that prevents accidental counter overflows and underflows. This is important since overflows and underflows can lead to use-after-free situation and be exploitable. The variable fs_node.refcount is used as pure reference counter. Convert it to refcount_t and fix up the operations. Suggested-by: Kees Cook Reviewed-by: David Windsor Reviewed-by: Hans Liljestrand Signed-off-by: Elena Reshetova Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 28 +++++++++++------------ drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 3 ++- 2 files changed, 16 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index f77e496f7053..c7fa1389bace 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -188,7 +188,7 @@ static void tree_init_node(struct fs_node *node, void (*del_hw_func)(struct fs_node *), void (*del_sw_func)(struct fs_node *)) { - atomic_set(&node->refcount, 1); + refcount_set(&node->refcount, 1); INIT_LIST_HEAD(&node->list); INIT_LIST_HEAD(&node->children); init_rwsem(&node->lock); @@ -200,7 +200,7 @@ static void tree_init_node(struct fs_node *node, static void tree_add_node(struct fs_node *node, struct fs_node *parent) { if (parent) - atomic_inc(&parent->refcount); + refcount_inc(&parent->refcount); node->parent = parent; /* Parent is the root */ @@ -212,7 +212,7 @@ static void tree_add_node(struct fs_node *node, struct fs_node *parent) static int tree_get_node(struct fs_node *node) { - return atomic_add_unless(&node->refcount, 1, 0); + return refcount_inc_not_zero(&node->refcount); } static void nested_down_read_ref_node(struct fs_node *node, @@ -220,7 +220,7 @@ static void nested_down_read_ref_node(struct fs_node *node, { if (node) { down_read_nested(&node->lock, class); - atomic_inc(&node->refcount); + refcount_inc(&node->refcount); } } @@ -229,7 +229,7 @@ static void nested_down_write_ref_node(struct fs_node *node, { if (node) { down_write_nested(&node->lock, class); - atomic_inc(&node->refcount); + refcount_inc(&node->refcount); } } @@ -237,19 +237,19 @@ static void down_write_ref_node(struct fs_node *node) { if (node) { down_write(&node->lock); - atomic_inc(&node->refcount); + refcount_inc(&node->refcount); } } static void up_read_ref_node(struct fs_node *node) { - atomic_dec(&node->refcount); + refcount_dec(&node->refcount); up_read(&node->lock); } static void up_write_ref_node(struct fs_node *node) { - atomic_dec(&node->refcount); + refcount_dec(&node->refcount); up_write(&node->lock); } @@ -257,7 +257,7 @@ static void tree_put_node(struct fs_node *node) { struct fs_node *parent_node = node->parent; - if (atomic_dec_and_test(&node->refcount)) { + if (refcount_dec_and_test(&node->refcount)) { if (node->del_hw_func) node->del_hw_func(node); if (parent_node) { @@ -280,8 +280,8 @@ static void tree_put_node(struct fs_node *node) static int tree_remove_node(struct fs_node *node) { - if (atomic_read(&node->refcount) > 1) { - atomic_dec(&node->refcount); + if (refcount_read(&node->refcount) > 1) { + refcount_dec(&node->refcount); return -EEXIST; } tree_put_node(node); @@ -1184,7 +1184,7 @@ static void destroy_flow_handle(struct fs_fte *fte, int i) { for (; --i >= 0;) { - if 
(atomic_dec_and_test(&handle->rule[i]->node.refcount)) { + if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) { fte->dests_size--; list_del(&handle->rule[i]->node.list); kfree(handle->rule[i]); @@ -1215,7 +1215,7 @@ create_flow_handle(struct fs_fte *fte, if (dest) { rule = find_flow_rule(fte, dest + i); if (rule) { - atomic_inc(&rule->node.refcount); + refcount_inc(&rule->node.refcount); goto rule_found; } } @@ -1466,7 +1466,7 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg, trace_mlx5_fs_set_fte(fte, false); for (i = 0; i < handle->num_rules; i++) { - if (atomic_read(&handle->rule[i]->node.refcount) == 1) { + if (refcount_read(&handle->rule[i]->node.refcount) == 1) { tree_add_node(&handle->rule[i]->node, &fte->node); trace_mlx5_fs_add_rule(handle->rule[i]); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 80f6f3c714c8..397d24a621a4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -33,6 +33,7 @@ #ifndef _MLX5_FS_CORE_ #define _MLX5_FS_CORE_ +#include #include #include @@ -84,7 +85,7 @@ struct fs_node { struct fs_node *root; /* lock the node for writing and traversing */ struct rw_semaphore lock; - atomic_t refcount; + refcount_t refcount; bool active; void (*del_hw_func)(struct fs_node *); void (*del_sw_func)(struct fs_node *); -- cgit v1.2.3 From 62d3f60b4d065c09a3ccb9e862e71ae870c2d27b Mon Sep 17 00:00:00 2001 From: Pieter Jansen van Vuuren Date: Fri, 20 Oct 2017 19:49:52 +0200 Subject: nfp: use struct fields for 8 bit-wide access Use direct access struct fields rather than PREP_FIELD() macros to manipulate the jump ID and length, both of which are exactly 8-bits wide. This simplifies the code somewhat. Signed-off-by: Simon Horman Signed-off-by: Pieter Jansen van Vuuren Acked-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/action.c | 87 +++++++--------------- drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 26 ++++--- 2 files changed, 39 insertions(+), 74 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 1194c47ef827..0a5fc9f8545f 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -47,13 +47,9 @@ static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan) { size_t act_size = sizeof(struct nfp_fl_pop_vlan); - u16 tmp_pop_vlan_op; - tmp_pop_vlan_op = - FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) | - FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_POP_VLAN); - - pop_vlan->a_op = cpu_to_be16(tmp_pop_vlan_op); + pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN; + pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ; pop_vlan->reserved = 0; } @@ -64,14 +60,9 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan, size_t act_size = sizeof(struct nfp_fl_push_vlan); struct tcf_vlan *vlan = to_vlan(action); u16 tmp_push_vlan_tci; - u16 tmp_push_vlan_op; - - tmp_push_vlan_op = - FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) | - FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_PUSH_VLAN); - push_vlan->a_op = cpu_to_be16(tmp_push_vlan_op); - /* Set action push vlan parameters. 
*/ + push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN; + push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ; push_vlan->reserved = 0; push_vlan->vlan_tpid = tcf_vlan_push_proto(action); @@ -101,16 +92,12 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action, int *tun_out_cnt) { size_t act_size = sizeof(struct nfp_fl_output); - u16 tmp_output_op, tmp_flags; struct net_device *out_dev; + u16 tmp_flags; int ifindex; - /* Set action opcode to output action. */ - tmp_output_op = - FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) | - FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_OUTPUT); - - output->a_op = cpu_to_be16(tmp_output_op); + output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT; + output->head.len_lw = act_size >> NFP_FL_LW_SIZ; ifindex = tcf_mirred_ifindex(action); out_dev = __dev_get_by_index(dev_net(in_dev), ifindex); @@ -161,7 +148,6 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len) { size_t act_size = sizeof(struct nfp_fl_pre_tunnel); struct nfp_fl_pre_tunnel *pre_tun_act; - u16 tmp_pre_tun_op; /* Pre_tunnel action must be first on action list. * If other actions already exist they need pushed forward. @@ -173,11 +159,8 @@ static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len) memset(pre_tun_act, 0, act_size); - tmp_pre_tun_op = - FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) | - FIELD_PREP(NFP_FL_ACT_JMP_ID, NFP_FL_ACTION_OPCODE_PRE_TUNNEL); - - pre_tun_act->a_op = cpu_to_be16(tmp_pre_tun_op); + pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL; + pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ; return pre_tun_act; } @@ -190,7 +173,6 @@ nfp_fl_set_vxlan(struct nfp_fl_set_vxlan *set_vxlan, struct ip_tunnel_info *vxlan = tcf_tunnel_info(action); size_t act_size = sizeof(struct nfp_fl_set_vxlan); u32 tmp_set_vxlan_type_index = 0; - u16 tmp_set_vxlan_op; /* Currently support one pre-tunnel so index is always 0. */ int pretun_idx = 0; @@ -199,12 +181,8 @@ nfp_fl_set_vxlan(struct nfp_fl_set_vxlan *set_vxlan, return -EOPNOTSUPP; } - tmp_set_vxlan_op = - FIELD_PREP(NFP_FL_ACT_LEN_LW, act_size >> NFP_FL_LW_SIZ) | - FIELD_PREP(NFP_FL_ACT_JMP_ID, - NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL); - - set_vxlan->a_op = cpu_to_be16(tmp_set_vxlan_op); + set_vxlan->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL; + set_vxlan->head.len_lw = act_size >> NFP_FL_LW_SIZ; /* Set tunnel type and pre-tunnel index. 
*/ tmp_set_vxlan_type_index |= @@ -240,7 +218,6 @@ static int nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off, struct nfp_fl_set_eth *set_eth) { - u16 tmp_set_eth_op; u32 exact, mask; if (off + 4 > ETH_ALEN * 2) @@ -256,11 +233,8 @@ nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off, &set_eth->eth_addr_mask[off]); set_eth->reserved = cpu_to_be16(0); - tmp_set_eth_op = FIELD_PREP(NFP_FL_ACT_LEN_LW, - sizeof(*set_eth) >> NFP_FL_LW_SIZ) | - FIELD_PREP(NFP_FL_ACT_JMP_ID, - NFP_FL_ACTION_OPCODE_SET_ETHERNET); - set_eth->a_op = cpu_to_be16(tmp_set_eth_op); + set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET; + set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ; return 0; } @@ -269,7 +243,6 @@ static int nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off, struct nfp_fl_set_ip4_addrs *set_ip_addr) { - u16 tmp_set_ipv4_op; __be32 exact, mask; /* We are expecting tcf_pedit to return a big endian value */ @@ -293,11 +266,8 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off, } set_ip_addr->reserved = cpu_to_be16(0); - tmp_set_ipv4_op = FIELD_PREP(NFP_FL_ACT_LEN_LW, - sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ) | - FIELD_PREP(NFP_FL_ACT_JMP_ID, - NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS); - set_ip_addr->a_op = cpu_to_be16(tmp_set_ipv4_op); + set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS; + set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ; return 0; } @@ -306,16 +276,12 @@ static void nfp_fl_set_ip6_helper(int opcode_tag, int idx, __be32 exact, __be32 mask, struct nfp_fl_set_ipv6_addr *ip6) { - u16 tmp_set_op; - ip6->ipv6[idx % 4].mask = mask; ip6->ipv6[idx % 4].exact = exact; ip6->reserved = cpu_to_be16(0); - tmp_set_op = FIELD_PREP(NFP_FL_ACT_LEN_LW, sizeof(*ip6) >> - NFP_FL_LW_SIZ) | - FIELD_PREP(NFP_FL_ACT_JMP_ID, opcode_tag); - ip6->a_op = cpu_to_be16(tmp_set_op); + ip6->head.jump_id = opcode_tag; + ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ; } static int @@ -352,7 +318,6 @@ nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off, struct nfp_fl_set_tport *set_tport, int opcode) { u32 exact, mask; - u16 tmp_set_op; if (off) return -EOPNOTSUPP; @@ -367,10 +332,8 @@ nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off, set_tport->tp_port_mask); set_tport->reserved = cpu_to_be16(0); - tmp_set_op = FIELD_PREP(NFP_FL_ACT_LEN_LW, - sizeof(*set_tport) >> NFP_FL_LW_SIZ); - tmp_set_op |= FIELD_PREP(NFP_FL_ACT_JMP_ID, opcode); - set_tport->a_op = cpu_to_be16(tmp_set_op); + set_tport->head.jump_id = opcode; + set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ; return 0; } @@ -428,15 +391,15 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) return err; } - if (set_eth.a_op) { + if (set_eth.head.len_lw) { act_size = sizeof(set_eth); memcpy(nfp_action, &set_eth, act_size); *a_len += act_size; - } else if (set_ip_addr.a_op) { + } else if (set_ip_addr.head.len_lw) { act_size = sizeof(set_ip_addr); memcpy(nfp_action, &set_ip_addr, act_size); *a_len += act_size; - } else if (set_ip6_dst.a_op && set_ip6_src.a_op) { + } else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) { /* TC compiles set src and dst IPv6 address as a single action, * the hardware requires this to be 2 separate actions. 
*/ @@ -448,15 +411,15 @@ nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len) memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst, act_size); *a_len += act_size; - } else if (set_ip6_dst.a_op) { + } else if (set_ip6_dst.head.len_lw) { act_size = sizeof(set_ip6_dst); memcpy(nfp_action, &set_ip6_dst, act_size); *a_len += act_size; - } else if (set_ip6_src.a_op) { + } else if (set_ip6_src.head.len_lw) { act_size = sizeof(set_ip6_src); memcpy(nfp_action, &set_ip6_src, act_size); *a_len += act_size; - } else if (set_tport.a_op) { + } else if (set_tport.head.len_lw) { act_size = sizeof(set_tport); memcpy(nfp_action, &set_tport, act_size); *a_len += act_size; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index f7b7242a22bc..64e87f8e7089 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -86,9 +86,6 @@ #define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17 #define NFP_FL_ACTION_OPCODE_NUM 32 -#define NFP_FL_ACT_JMP_ID GENMASK(15, 8) -#define NFP_FL_ACT_LEN_LW GENMASK(7, 0) - #define NFP_FL_OUT_FLAGS_LAST BIT(15) #define NFP_FL_OUT_FLAGS_USE_TUN BIT(4) #define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0) @@ -113,15 +110,20 @@ enum nfp_flower_tun_type { NFP_FL_TUNNEL_VXLAN = 2, }; +struct nfp_fl_act_head { + u8 jump_id; + u8 len_lw; +}; + struct nfp_fl_set_eth { - __be16 a_op; + struct nfp_fl_act_head head; __be16 reserved; u8 eth_addr_mask[ETH_ALEN * 2]; u8 eth_addr_val[ETH_ALEN * 2]; }; struct nfp_fl_set_ip4_addrs { - __be16 a_op; + struct nfp_fl_act_head head; __be16 reserved; __be32 ipv4_src_mask; __be32 ipv4_src; @@ -130,7 +132,7 @@ struct nfp_fl_set_ip4_addrs { }; struct nfp_fl_set_ipv6_addr { - __be16 a_op; + struct nfp_fl_act_head head; __be16 reserved; struct { __be32 mask; @@ -139,27 +141,27 @@ struct nfp_fl_set_ipv6_addr { }; struct nfp_fl_set_tport { - __be16 a_op; + struct nfp_fl_act_head head; __be16 reserved; u8 tp_port_mask[4]; u8 tp_port_val[4]; }; struct nfp_fl_output { - __be16 a_op; + struct nfp_fl_act_head head; __be16 flags; __be32 port; }; struct nfp_fl_push_vlan { - __be16 a_op; + struct nfp_fl_act_head head; __be16 reserved; __be16 vlan_tpid; __be16 vlan_tci; }; struct nfp_fl_pop_vlan { - __be16 a_op; + struct nfp_fl_act_head head; __be16 reserved; }; @@ -178,7 +180,7 @@ struct nfp_flower_meta_one { }; struct nfp_fl_pre_tunnel { - __be16 a_op; + struct nfp_fl_act_head head; __be16 reserved; __be32 ipv4_dst; /* reserved for use with IPv6 addresses */ @@ -186,7 +188,7 @@ struct nfp_fl_pre_tunnel { }; struct nfp_fl_set_vxlan { - __be16 a_op; + struct nfp_fl_act_head head; __be16 reserved; __be64 tun_id; __be32 tun_type_index; -- cgit v1.2.3 From e69cd9d75ee797a46e1d2703226f0478d05bca10 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sun, 22 Oct 2017 23:11:43 +0200 Subject: mlxsw: spectrum_dpipe: Add adjacency group size The adjacency group size is part of the match on the adjacency group and should therefore be exposed using dpipe. When non-equal-cost multi-path support will be introduced, the group's size will help users understand the exact number of adjacency entries each nexthop occupies, as a nexthop will no longer correspond to a single entry. 
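As a sanity check on the dump shown next, the way a 512-entry group is split between weights of 255 and 1 can be reproduced with a short standalone sketch. This is illustrative only: it mirrors the hash-threshold rebalancing added later in this series, and div_round_closest() is just a local stand-in for the kernel's DIV_ROUND_CLOSEST().

#include <stdio.h>

/* Stand-in for the kernel's DIV_ROUND_CLOSEST() for non-negative values. */
static int div_round_closest(int n, int d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	int weights[] = { 255, 1 };	/* normalized nexthop weights (gcd is already 1) */
	int ecmp_size = 512;		/* adjacency group size */
	int total = 255 + 1;		/* sum of normalized weights */
	int lower_bound = 0, weight = 0;

	for (int i = 0; i < 2; i++) {
		int upper_bound;

		weight += weights[i];
		upper_bound = div_round_closest(ecmp_size * weight, total);
		printf("nexthop %d: %d entries\n", i, upper_bound - lower_bound);
		lower_bound = upper_bound;
	}
	return 0;	/* prints 510 and 2, matching the dump below */
}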
The output for a multi-path route with two nexthops, one with weight 255 and the second 1 will be: Example: $ devlink dpipe table dump pci/0000:01:00.0 name mlxsw_adj pci/0000:01:00.0: index 0 match_value: type field_exact header mlxsw_meta field adj_index value 65536 type field_exact header mlxsw_meta field adj_size value 512 type field_exact header mlxsw_meta field adj_hash_index value 0 action_value: type field_modify header ethernet field destination mac value e4:1d:2d:a5:f3:64 type field_modify header mlxsw_meta field erif_port mapping ifindex mapping_value 3 value 1 index 1 match_value: type field_exact header mlxsw_meta field adj_index value 65536 type field_exact header mlxsw_meta field adj_size value 512 type field_exact header mlxsw_meta field adj_hash_index value 510 action_value: type field_modify header ethernet field destination mac value e4:1d:2d:a5:f3:65 type field_modify header mlxsw_meta field erif_port mapping ifindex mapping_value 4 value 2 Thus, the first nexthop occupies 510 adjacency entries and the second 2, which leads to a ratio of 255 to 1. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 46 +++++++++++++++++++--- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 3 +- .../net/ethernet/mellanox/mlxsw/spectrum_router.h | 2 +- 3 files changed, 44 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c index a056f23d3a0e..6ea6435279c0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c @@ -44,6 +44,7 @@ enum mlxsw_sp_field_metadata_id { MLXSW_SP_DPIPE_FIELD_METADATA_L3_FORWARD, MLXSW_SP_DPIPE_FIELD_METADATA_L3_DROP, MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_INDEX, + MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_SIZE, MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_HASH_INDEX, }; @@ -69,6 +70,11 @@ static struct devlink_dpipe_field mlxsw_sp_dpipe_fields_metadata[] = { .id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_INDEX, .bitwidth = 32, }, + { + .name = "adj_size", + .id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_SIZE, + .bitwidth = 32, + }, { .name = "adj_hash_index", .id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_HASH_INDEX, @@ -851,6 +857,14 @@ static int mlxsw_sp_dpipe_table_adj_matches_dump(void *priv, match.header = &mlxsw_sp_dpipe_header_metadata; match.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_INDEX; + err = devlink_dpipe_match_put(skb, &match); + if (err) + return err; + + match.type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT; + match.header = &mlxsw_sp_dpipe_header_metadata; + match.field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_SIZE; + err = devlink_dpipe_match_put(skb, &match); if (err) return err; @@ -897,6 +911,7 @@ static u64 mlxsw_sp_dpipe_table_adj_size(struct mlxsw_sp *mlxsw_sp) enum mlxsw_sp_dpipe_table_adj_match { MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_INDEX, + MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_SIZE, MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_HASH_INDEX, MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_COUNT, }; @@ -919,6 +934,11 @@ mlxsw_sp_dpipe_table_adj_match_action_prepare(struct devlink_dpipe_match *matche match->header = &mlxsw_sp_dpipe_header_metadata; match->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_INDEX; + match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_SIZE]; + match->type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT; + match->header = &mlxsw_sp_dpipe_header_metadata; + match->field_id = MLXSW_SP_DPIPE_FIELD_METADATA_ADJ_SIZE; + match = 
&matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_HASH_INDEX]; match->type = DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT; match->header = &mlxsw_sp_dpipe_header_metadata; @@ -955,6 +975,15 @@ mlxsw_sp_dpipe_table_adj_entry_prepare(struct devlink_dpipe_entry *entry, match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_INDEX]; match_value = &match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_INDEX]; + match_value->match = match; + match_value->value_size = sizeof(u32); + match_value->value = kmalloc(match_value->value_size, GFP_KERNEL); + if (!match_value->value) + return -ENOMEM; + + match = &matches[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_SIZE]; + match_value = &match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_SIZE]; + match_value->match = match; match_value->value_size = sizeof(u32); match_value->value = kmalloc(match_value->value_size, GFP_KERNEL); @@ -993,8 +1022,8 @@ mlxsw_sp_dpipe_table_adj_entry_prepare(struct devlink_dpipe_entry *entry, static void __mlxsw_sp_dpipe_table_adj_entry_fill(struct devlink_dpipe_entry *entry, - u32 adj_index, u32 adj_hash_index, - unsigned char *ha, + u32 adj_index, u32 adj_size, + u32 adj_hash_index, unsigned char *ha, struct mlxsw_sp_rif *rif) { struct devlink_dpipe_value *value; @@ -1005,6 +1034,10 @@ __mlxsw_sp_dpipe_table_adj_entry_fill(struct devlink_dpipe_entry *entry, p_index = value->value; *p_index = adj_index; + value = &entry->match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_SIZE]; + p_index = value->value; + *p_index = adj_size; + value = &entry->match_values[MLXSW_SP_DPIPE_TABLE_ADJ_MATCH_HASH_INDEX]; p_index = value->value; *p_index = adj_hash_index; @@ -1027,10 +1060,11 @@ static void mlxsw_sp_dpipe_table_adj_entry_fill(struct mlxsw_sp *mlxsw_sp, unsigned char *ha = mlxsw_sp_nexthop_ha(nh); u32 adj_hash_index = 0; u32 adj_index = 0; + u32 adj_size = 0; int err; - mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_hash_index); - __mlxsw_sp_dpipe_table_adj_entry_fill(entry, adj_index, + mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_size, &adj_hash_index); + __mlxsw_sp_dpipe_table_adj_entry_fill(entry, adj_index, adj_size, adj_hash_index, ha, rif); err = mlxsw_sp_nexthop_counter_get(mlxsw_sp, nh, &entry->counter); if (!err) @@ -1138,13 +1172,15 @@ static int mlxsw_sp_dpipe_table_adj_counters_update(void *priv, bool enable) struct mlxsw_sp_nexthop *nh; u32 adj_hash_index = 0; u32 adj_index = 0; + u32 adj_size = 0; mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) { if (!mlxsw_sp_nexthop_offload(nh) || mlxsw_sp_nexthop_group_has_ipip(nh)) continue; - mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_hash_index); + mlxsw_sp_nexthop_indexes(nh, &adj_index, &adj_size, + &adj_hash_index); if (enable) mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh); else diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 5f2d100e3718..cb0d25ede9c6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2299,7 +2299,7 @@ unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh) } int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index, - u32 *p_adj_hash_index) + u32 *p_adj_size, u32 *p_adj_hash_index) { struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp; u32 adj_hash_index = 0; @@ -2309,6 +2309,7 @@ int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index, return -EINVAL; *p_adj_index = nh_grp->adj_index; + *p_adj_size = nh_grp->ecmp_size; for (i = 0; i < nh_grp->count; i++) { struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i]; 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 3f2d840cb285..39e5811ed263 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -115,7 +115,7 @@ struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router, bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh); unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh); int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index, - u32 *p_adj_hash_index); + u32 *p_adj_size, u32 *p_adj_hash_index); struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh); bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh); #define mlxsw_sp_nexthop_for_each(nh, router) \ -- cgit v1.2.3 From a875a2ee2db8970dd93b8d287e35b8eba72f0a89 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sun, 22 Oct 2017 23:11:44 +0200 Subject: mlxsw: spectrum: Better represent KVDL partitions The KVD linear (KVDL) allocator currently consists of a very large bitmap that reflects the KVDL's usage. The boundaries of each partition as well as their allocation size are represented using defines. This representation requires us to patch all the functions that act on a partition whenever the partitioning scheme is changed. In addition, it does not enable the dynamic configuration of the KVDL using the up-coming resource manager. Add objects to represent these partitions as well as the accompanying code that acts on them to perform allocations and de-allocations. In the following patches, this will allow us to easily add another partition as well as new operations to act on these partitions. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 11 +- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 7 +- .../net/ethernet/mellanox/mlxsw/spectrum_kvdl.c | 257 ++++++++++++++++++--- 3 files changed, 238 insertions(+), 37 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 4d73a6f7759e..1bc3fc356084 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3726,10 +3726,16 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, return err; } + err = mlxsw_sp_kvdl_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n"); + return err; + } + err = mlxsw_sp_fids_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n"); - return err; + goto err_fids_init; } err = mlxsw_sp_traps_init(mlxsw_sp); @@ -3834,6 +3840,8 @@ err_buffers_init: mlxsw_sp_traps_fini(mlxsw_sp); err_traps_init: mlxsw_sp_fids_fini(mlxsw_sp); +err_fids_init: + mlxsw_sp_kvdl_fini(mlxsw_sp); return err; } @@ -3854,6 +3862,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_buffers_fini(mlxsw_sp); mlxsw_sp_traps_fini(mlxsw_sp); mlxsw_sp_fids_fini(mlxsw_sp); + mlxsw_sp_kvdl_fini(mlxsw_sp); } static const struct mlxsw_config_profile mlxsw_sp_config_profile = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 2a2472a09d8c..035c753585a0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -143,6 +143,7 @@ struct mlxsw_sp_mr; struct mlxsw_sp_acl; struct mlxsw_sp_counter_pool; struct mlxsw_sp_fid_core; +struct mlxsw_sp_kvdl; struct mlxsw_sp { struct mlxsw_sp_port **ports; @@ -158,9 +159,7 @@ struct mlxsw_sp { struct mlxsw_afa *afa; struct mlxsw_sp_acl *acl; struct mlxsw_sp_fid_core *fid_core; - struct { - DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE); - } kvdl; + struct mlxsw_sp_kvdl *kvdl; struct notifier_block netdevice_nb; struct mlxsw_sp_counter_pool *counter_pool; @@ -411,6 +410,8 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); /* spectrum_kvdl.c */ +int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count, u32 *p_entry_index); void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c index 26c26cd30c3d..512537561483 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c @@ -39,55 +39,246 @@ #define MLXSW_SP_KVDL_SINGLE_BASE 0 #define MLXSW_SP_KVDL_SINGLE_SIZE 16384 +#define MLXSW_SP_KVDL_SINGLE_END \ + (MLXSW_SP_KVDL_SINGLE_SIZE + MLXSW_SP_KVDL_SINGLE_BASE - 1) #define MLXSW_SP_KVDL_CHUNKS_BASE \ (MLXSW_SP_KVDL_SINGLE_BASE + MLXSW_SP_KVDL_SINGLE_SIZE) #define MLXSW_SP_KVDL_CHUNKS_SIZE \ (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_CHUNKS_BASE) +#define MLXSW_SP_KVDL_CHUNKS_END \ + (MLXSW_SP_KVDL_CHUNKS_SIZE + MLXSW_SP_KVDL_CHUNKS_BASE - 1) #define MLXSW_SP_CHUNK_MAX 32 +struct mlxsw_sp_kvdl_part_info { + unsigned int part_index; + unsigned int start_index; + unsigned int end_index; + unsigned int alloc_size; +}; + +struct 
mlxsw_sp_kvdl_part { + struct list_head list; + const struct mlxsw_sp_kvdl_part_info *info; + unsigned long usage[0]; /* Entries */ +}; + +struct mlxsw_sp_kvdl { + struct list_head parts_list; +}; + +static struct mlxsw_sp_kvdl_part * +mlxsw_sp_kvdl_alloc_size_part(struct mlxsw_sp_kvdl *kvdl, + unsigned int alloc_size) +{ + struct mlxsw_sp_kvdl_part *part, *min_part = NULL; + + list_for_each_entry(part, &kvdl->parts_list, list) { + if (alloc_size <= part->info->alloc_size && + (!min_part || + part->info->alloc_size <= min_part->info->alloc_size)) + min_part = part; + } + + return min_part ?: ERR_PTR(-ENOBUFS); +} + +static struct mlxsw_sp_kvdl_part * +mlxsw_sp_kvdl_index_part(struct mlxsw_sp_kvdl *kvdl, u32 kvdl_index) +{ + struct mlxsw_sp_kvdl_part *part; + + list_for_each_entry(part, &kvdl->parts_list, list) { + if (kvdl_index >= part->info->start_index && + kvdl_index <= part->info->end_index) + return part; + } + + return ERR_PTR(-EINVAL); +} + +static u32 +mlxsw_sp_entry_index_kvdl_index(const struct mlxsw_sp_kvdl_part_info *info, + unsigned int entry_index) +{ + return info->start_index + entry_index * info->alloc_size; +} + +static unsigned int +mlxsw_sp_kvdl_index_entry_index(const struct mlxsw_sp_kvdl_part_info *info, + u32 kvdl_index) +{ + return (kvdl_index - info->start_index) / info->alloc_size; +} + +static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part, + u32 *p_kvdl_index) +{ + const struct mlxsw_sp_kvdl_part_info *info = part->info; + unsigned int entry_index, nr_entries; + + nr_entries = (info->end_index - info->start_index + 1) / + info->alloc_size; + entry_index = find_first_zero_bit(part->usage, nr_entries); + if (entry_index == nr_entries) + return -ENOBUFS; + __set_bit(entry_index, part->usage); + + *p_kvdl_index = mlxsw_sp_entry_index_kvdl_index(part->info, + entry_index); + + return 0; +} + +static void mlxsw_sp_kvdl_part_free(struct mlxsw_sp_kvdl_part *part, + u32 kvdl_index) +{ + unsigned int entry_index; + + entry_index = mlxsw_sp_kvdl_index_entry_index(part->info, + kvdl_index); + __clear_bit(entry_index, part->usage); +} + int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count, u32 *p_entry_index) { - int entry_index; - int size; - int type_base; - int type_size; - int type_entries; - - if (entry_count == 0 || entry_count > MLXSW_SP_CHUNK_MAX) { - return -EINVAL; - } else if (entry_count == 1) { - type_base = MLXSW_SP_KVDL_SINGLE_BASE; - type_size = MLXSW_SP_KVDL_SINGLE_SIZE; - type_entries = 1; - } else { - type_base = MLXSW_SP_KVDL_CHUNKS_BASE; - type_size = MLXSW_SP_KVDL_CHUNKS_SIZE; - type_entries = MLXSW_SP_CHUNK_MAX; + struct mlxsw_sp_kvdl_part *part; + + /* Find partition with smallest allocation size satisfying the + * requested size. 
+ */ + part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count); + if (IS_ERR(part)) + return PTR_ERR(part); + + return mlxsw_sp_kvdl_part_alloc(part, p_entry_index); +} + +void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index) +{ + struct mlxsw_sp_kvdl_part *part; + + part = mlxsw_sp_kvdl_index_part(mlxsw_sp->kvdl, entry_index); + if (IS_ERR(part)) + return; + mlxsw_sp_kvdl_part_free(part, entry_index); +} + +static const struct mlxsw_sp_kvdl_part_info kvdl_parts_info[] = { + { + .part_index = 0, + .start_index = MLXSW_SP_KVDL_SINGLE_BASE, + .end_index = MLXSW_SP_KVDL_SINGLE_END, + .alloc_size = 1, + }, + { + .part_index = 1, + .start_index = MLXSW_SP_KVDL_CHUNKS_BASE, + .end_index = MLXSW_SP_KVDL_CHUNKS_END, + .alloc_size = MLXSW_SP_CHUNK_MAX, + }, +}; + +static struct mlxsw_sp_kvdl_part * +mlxsw_sp_kvdl_part_find(struct mlxsw_sp *mlxsw_sp, unsigned int part_index) +{ + struct mlxsw_sp_kvdl_part *part; + + list_for_each_entry(part, &mlxsw_sp->kvdl->parts_list, list) { + if (part->info->part_index == part_index) + return part; } - entry_index = type_base; - size = type_base + type_size; - for_each_clear_bit_from(entry_index, mlxsw_sp->kvdl.usage, size) { - int i; + return NULL; +} + +static int mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp, + unsigned int part_index) +{ + const struct mlxsw_sp_kvdl_part_info *info; + struct mlxsw_sp_kvdl_part *part; + unsigned int nr_entries; + size_t usage_size; + + info = &kvdl_parts_info[part_index]; + + nr_entries = (info->end_index - info->start_index + 1) / + info->alloc_size; + usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long); + part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL); + if (!part) + return -ENOMEM; + + part->info = info; + list_add(&part->list, &mlxsw_sp->kvdl->parts_list); + + return 0; +} + +static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp *mlxsw_sp, + unsigned int part_index) +{ + struct mlxsw_sp_kvdl_part *part; + + part = mlxsw_sp_kvdl_part_find(mlxsw_sp, part_index); + if (!part) + return; - for (i = 0; i < type_entries; i++) - set_bit(entry_index + i, mlxsw_sp->kvdl.usage); - *p_entry_index = entry_index; - return 0; + list_del(&part->list); + kfree(part); +} + +static int mlxsw_sp_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp) +{ + int err, i; + + INIT_LIST_HEAD(&mlxsw_sp->kvdl->parts_list); + + for (i = 0; i < ARRAY_SIZE(kvdl_parts_info); i++) { + err = mlxsw_sp_kvdl_part_init(mlxsw_sp, i); + if (err) + goto err_kvdl_part_init; } - return -ENOBUFS; + + return 0; + +err_kvdl_part_init: + for (i--; i >= 0; i--) + mlxsw_sp_kvdl_part_fini(mlxsw_sp, i); + return err; } -void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index) +static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp) { - int type_entries; int i; - if (entry_index < MLXSW_SP_KVDL_CHUNKS_BASE) - type_entries = 1; - else - type_entries = MLXSW_SP_CHUNK_MAX; - for (i = 0; i < type_entries; i++) - clear_bit(entry_index + i, mlxsw_sp->kvdl.usage); + for (i = ARRAY_SIZE(kvdl_parts_info) - 1; i >= 0; i--) + mlxsw_sp_kvdl_part_fini(mlxsw_sp, i); +} + +int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp) +{ + struct mlxsw_sp_kvdl *kvdl; + int err; + + kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl), GFP_KERNEL); + if (!kvdl) + return -ENOMEM; + mlxsw_sp->kvdl = kvdl; + + err = mlxsw_sp_kvdl_parts_init(mlxsw_sp); + if (err) + goto err_kvdl_parts_init; + + return 0; + +err_kvdl_parts_init: + kfree(mlxsw_sp->kvdl); + return err; +} + +void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp_kvdl_parts_fini(mlxsw_sp); + 
kfree(mlxsw_sp->kvdl); } -- cgit v1.2.3 From d672aec45fd4a1e060109fbce6739ef91c3bd135 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sun, 22 Oct 2017 23:11:45 +0200 Subject: mlxsw: spectrum: Add ability to query KVDL allocation size The current KVDL allocation API allows the user to specify the requested number of entries, but the user has no way of knowing how many entries were actually allocated. This works because existing users (e.g., router) request the exact number they end up using. With the introduction of large adjacency groups, this will change, as the router will have the ability to choose from several allocation sizes, where larger allocations provide higher accuracy with respect to requested weights and better resilience against nexthop failures. One option is to have the router try several allocations of descending size until one succeeds, but a better way is to simply allow it to query the actual allocation size and then size its request accordingly. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 3 +++ drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c | 15 +++++++++++++++ 2 files changed, 18 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 035c753585a0..78ff20d86db1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -415,6 +415,9 @@ void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp); int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count, u32 *p_entry_index); void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index); +int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp, + unsigned int entry_count, + unsigned int *p_alloc_size); struct mlxsw_sp_acl_rule_info { unsigned int priority; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c index 512537561483..266b3af6513c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c @@ -164,6 +164,21 @@ void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index) mlxsw_sp_kvdl_part_free(part, entry_index); } +int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp, + unsigned int entry_count, + unsigned int *p_alloc_size) +{ + struct mlxsw_sp_kvdl_part *part; + + part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count); + if (IS_ERR(part)) + return PTR_ERR(part); + + *p_alloc_size = part->info->alloc_size; + + return 0; +} + static const struct mlxsw_sp_kvdl_part_info kvdl_parts_info[] = { { .part_index = 0, -- cgit v1.2.3 From 408bd946bfee69ec99937bd0f9ed9dcd2d19705b Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sun, 22 Oct 2017 23:11:46 +0200 Subject: mlxsw: spectrum_router: Store weight in nexthop struct As the first step towards non-equal-cost multi-path support, store each nexthop's weight. For IPv6 nexthops always set the weight to 1, as it only supports ECMP. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index cb0d25ede9c6..8cd422d7640c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2203,6 +2203,7 @@ struct mlxsw_sp_nexthop { struct mlxsw_sp_nexthop_key key; unsigned char gw_addr[sizeof(struct in6_addr)]; int ifindex; + int nh_weight; struct mlxsw_sp_rif *rif; u8 should_offload:1, /* set indicates this neigh is connected and * should be put to KVD linear area of this group. @@ -3045,6 +3046,11 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp, nh->nh_grp = nh_grp; nh->key.fib_nh = fib_nh; +#ifdef CONFIG_IP_ROUTE_MULTIPATH + nh->nh_weight = fib_nh->nh_weight; +#else + nh->nh_weight = 1; +#endif memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw)); err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh); if (err) @@ -4304,6 +4310,7 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp, struct net_device *dev = rt->dst.dev; nh->nh_grp = nh_grp; + nh->nh_weight = 1; memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr)); mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh); -- cgit v1.2.3 From 425a08c67317acee103b3ad58f57c762e8834faf Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sun, 22 Oct 2017 23:11:47 +0200 Subject: mlxsw: spectrum_router: Prepare for large adjacency groups The device has certain restrictions regarding the size of an adjacency group. Have the router determine the size of the adjacency group according to available KVDL allocation sizes and these restrictions. This was not needed until now since only allocations of up 32 entries were supported and these are all valid sizes for an adjacency group. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 58 ++++++++++++++++++++++ 1 file changed, 58 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 8cd422d7640c..68ce5492aa0f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2708,6 +2708,59 @@ mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp) } } +static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size) +{ + /* Valid sizes for an adjacency group are: + * 1-64, 512, 1024, 2048 and 4096. + */ + if (*p_adj_grp_size <= 64) + return; + else if (*p_adj_grp_size <= 512) + *p_adj_grp_size = 512; + else if (*p_adj_grp_size <= 1024) + *p_adj_grp_size = 1024; + else if (*p_adj_grp_size <= 2048) + *p_adj_grp_size = 2048; + else + *p_adj_grp_size = 4096; +} + +static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size, + unsigned int alloc_size) +{ + if (alloc_size >= 4096) + *p_adj_grp_size = 4096; + else if (alloc_size >= 2048) + *p_adj_grp_size = 2048; + else if (alloc_size >= 1024) + *p_adj_grp_size = 1024; + else if (alloc_size >= 512) + *p_adj_grp_size = 512; +} + +static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp, + u16 *p_adj_grp_size) +{ + unsigned int alloc_size; + int err; + + /* Round up the requested group size to the next size supported + * by the device and make sure the request can be satisfied. 
+ */ + mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size); + err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, *p_adj_grp_size, + &alloc_size); + if (err) + return err; + /* It is possible the allocation results in more allocated + * entries than requested. Try to use as much of them as + * possible. + */ + mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size); + + return 0; +} + static void mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp) @@ -2755,6 +2808,11 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, */ goto set_trap; + err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size); + if (err) + /* No valid allocation size available. */ + goto set_trap; + err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index); if (err) { /* We ran out of KVD linear space, just set the -- cgit v1.2.3 From eb789980d0aa6cd2ebee3eb07792800bbe134bc0 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sun, 22 Oct 2017 23:11:48 +0200 Subject: mlxsw: spectrum_router: Populate adjacency entries according to weights Up until now the driver assumed all the nexthops have an equal weight and wrote each to a single adjacency entry. This patch takes the `weight` parameter into account and populates the adjacency group according to the relative weight of each nexthop. Specifically, the weights of all the nexthops that should be offloaded are first normalized and then used to calculate the upper adjacency index of each nexthop. This is done according to the hash-threshold algorithm used by the kernel for IPv4 multi-path routing. Adjacency groups are currently limited to 32 entries which limits the weights that can be used, but follow-up patches will introduce groups of 512 entries. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 111 ++++++++++++++++++--- 1 file changed, 99 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 68ce5492aa0f..27b632cac991 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include #include @@ -2204,6 +2205,8 @@ struct mlxsw_sp_nexthop { unsigned char gw_addr[sizeof(struct in6_addr)]; int ifindex; int nh_weight; + int norm_nh_weight; + int num_adj_entries; struct mlxsw_sp_rif *rif; u8 should_offload:1, /* set indicates this neigh is connected and * should be put to KVD linear area of this group. 
@@ -2233,6 +2236,7 @@ struct mlxsw_sp_nexthop_group { u32 adj_index; u16 ecmp_size; u16 count; + int sum_norm_weight; struct mlxsw_sp_nexthop nexthops[0]; #define nh_rif nexthops[0].rif }; @@ -2318,7 +2322,7 @@ int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index, if (nh_iter == nh) break; if (nh_iter->offloaded) - adj_hash_index++; + adj_hash_index += nh_iter->num_adj_entries; } *p_adj_hash_index = adj_hash_index; @@ -2601,8 +2605,8 @@ static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp, return 0; } -int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, - struct mlxsw_sp_nexthop *nh) +static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, + struct mlxsw_sp_nexthop *nh) { struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry; char ratr_pl[MLXSW_REG_RATR_LEN]; @@ -2619,9 +2623,25 @@ int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl); } -static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp, - u32 adj_index, - struct mlxsw_sp_nexthop *nh) +int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index, + struct mlxsw_sp_nexthop *nh) +{ + int i; + + for (i = 0; i < nh->num_adj_entries; i++) { + int err; + + err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh); + if (err) + return err; + } + + return 0; +} + +static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp, + u32 adj_index, + struct mlxsw_sp_nexthop *nh) { const struct mlxsw_sp_ipip_ops *ipip_ops; @@ -2629,6 +2649,24 @@ static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp, return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry); } +static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp, + u32 adj_index, + struct mlxsw_sp_nexthop *nh) +{ + int i; + + for (i = 0; i < nh->num_adj_entries; i++) { + int err; + + err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i, + nh); + if (err) + return err; + } + + return 0; +} + static int mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp, @@ -2663,7 +2701,7 @@ mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp, nh->update = 0; nh->offloaded = 1; } - adj_index++; + adj_index += nh->num_adj_entries; } return 0; } @@ -2761,17 +2799,65 @@ static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp, return 0; } +static void +mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp) +{ + int i, g = 0, sum_norm_weight = 0; + struct mlxsw_sp_nexthop *nh; + + for (i = 0; i < nh_grp->count; i++) { + nh = &nh_grp->nexthops[i]; + + if (!nh->should_offload) + continue; + if (g > 0) + g = gcd(nh->nh_weight, g); + else + g = nh->nh_weight; + } + + for (i = 0; i < nh_grp->count; i++) { + nh = &nh_grp->nexthops[i]; + + if (!nh->should_offload) + continue; + nh->norm_nh_weight = nh->nh_weight / g; + sum_norm_weight += nh->norm_nh_weight; + } + + nh_grp->sum_norm_weight = sum_norm_weight; +} + +static void +mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp) +{ + int total = nh_grp->sum_norm_weight; + u16 ecmp_size = nh_grp->ecmp_size; + int i, weight = 0, lower_bound = 0; + + for (i = 0; i < nh_grp->count; i++) { + struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; + int upper_bound; + + if (!nh->should_offload) + continue; + weight += nh->norm_nh_weight; + upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total); + nh->num_adj_entries = upper_bound - lower_bound; + lower_bound = 
upper_bound; + } +} + static void mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp) { + u16 ecmp_size, old_ecmp_size; struct mlxsw_sp_nexthop *nh; bool offload_change = false; u32 adj_index; - u16 ecmp_size = 0; bool old_adj_index_valid; u32 old_adj_index; - u16 old_ecmp_size; int i; int err; @@ -2788,8 +2874,6 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, if (nh->should_offload) nh->update = 1; } - if (nh->should_offload) - ecmp_size++; } if (!offload_change) { /* Nothing was added or removed, so no need to reallocate. Just @@ -2802,12 +2886,14 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, } return; } - if (!ecmp_size) + mlxsw_sp_nexthop_group_normalize(nh_grp); + if (!nh_grp->sum_norm_weight) /* No neigh of this group is connected so we just set * the trap and let everthing flow through kernel. */ goto set_trap; + ecmp_size = nh_grp->sum_norm_weight; err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size); if (err) /* No valid allocation size available. */ @@ -2827,6 +2913,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, nh_grp->adj_index_valid = 1; nh_grp->adj_index = adj_index; nh_grp->ecmp_size = ecmp_size; + mlxsw_sp_nexthop_group_rebalance(nh_grp); err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true); if (err) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n"); -- cgit v1.2.3 From f11fbaf8b5a83608523b88cf62682914cf521546 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sun, 22 Oct 2017 23:11:49 +0200 Subject: mlxsw: spectrum: Increase number of linear entries The memory region where adjacency entries (nexthops) are stored is called the KVD linear and is configured during initialization with a size of 64K. Extend this area with 32K more entries, that will be partitioned into 64 groups of 0.5K entries, thereby allowing us to support weighted nexthops with high accuracy. Change the ratio between both types of hash entries, so as to prevent reduction in the number of double hash entries, which are used for IPv6 neighbours and routes with a prefix length greater than 64. Note that the user will be able to control all these sizes once the devlink resource manager is introduced. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 4 ++-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 1bc3fc356084..12b6ac487d8d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3885,8 +3885,8 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = { .max_pkey = 0, .used_kvd_split_data = 1, .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY, - .kvd_hash_single_parts = 2, - .kvd_hash_double_parts = 1, + .kvd_hash_single_parts = 59, + .kvd_hash_double_parts = 41, .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE, .swid_config = { { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 78ff20d86db1..dc1b739c3ae1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -62,7 +62,7 @@ #define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */ -#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */ +#define MLXSW_SP_KVD_LINEAR_SIZE 98304 /* entries */ #define MLXSW_SP_KVD_GRANULARITY 128 struct mlxsw_sp_port; -- cgit v1.2.3 From 330e2cc65d5f2c0545230e00f8f50b35a3e5995b Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sun, 22 Oct 2017 23:11:50 +0200 Subject: mlxsw: spectrum: Add another partition to KVD linear The KVD linear is currently partitioned into two partitions. One for single entries and another for groups of 32 entries. Add another partition consisting of groups of 512 entries which will allow us to more accurately represent the nexthop weights in non-equal cost multi-path routing. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c index 266b3af6513c..310c38247b5c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c @@ -41,13 +41,22 @@ #define MLXSW_SP_KVDL_SINGLE_SIZE 16384 #define MLXSW_SP_KVDL_SINGLE_END \ (MLXSW_SP_KVDL_SINGLE_SIZE + MLXSW_SP_KVDL_SINGLE_BASE - 1) + #define MLXSW_SP_KVDL_CHUNKS_BASE \ (MLXSW_SP_KVDL_SINGLE_BASE + MLXSW_SP_KVDL_SINGLE_SIZE) -#define MLXSW_SP_KVDL_CHUNKS_SIZE \ - (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_CHUNKS_BASE) +#define MLXSW_SP_KVDL_CHUNKS_SIZE 49152 #define MLXSW_SP_KVDL_CHUNKS_END \ (MLXSW_SP_KVDL_CHUNKS_SIZE + MLXSW_SP_KVDL_CHUNKS_BASE - 1) + +#define MLXSW_SP_KVDL_LARGE_CHUNKS_BASE \ + (MLXSW_SP_KVDL_CHUNKS_BASE + MLXSW_SP_KVDL_CHUNKS_SIZE) +#define MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE \ + (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_LARGE_CHUNKS_BASE) +#define MLXSW_SP_KVDL_LARGE_CHUNKS_END \ + (MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP_KVDL_LARGE_CHUNKS_BASE - 1) + #define MLXSW_SP_CHUNK_MAX 32 +#define MLXSW_SP_LARGE_CHUNK_MAX 512 struct mlxsw_sp_kvdl_part_info { unsigned int part_index; @@ -192,6 +201,12 @@ static const struct mlxsw_sp_kvdl_part_info kvdl_parts_info[] = { .end_index = MLXSW_SP_KVDL_CHUNKS_END, .alloc_size = MLXSW_SP_CHUNK_MAX, }, + { + .part_index = 2, + .start_index = MLXSW_SP_KVDL_LARGE_CHUNKS_BASE, + .end_index = MLXSW_SP_KVDL_LARGE_CHUNKS_END, + .alloc_size = MLXSW_SP_LARGE_CHUNK_MAX, + }, }; static struct mlxsw_sp_kvdl_part * -- cgit v1.2.3 From e83b171568e6a69cff5eb592907b71e480b535ac Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 20 Oct 2017 15:59:30 -0700 Subject: net: systemport: Guard against unmapped TX ring Because SYSTEMPORT is a (semi) normal network device, the stack may attempt to queue packets on it outside of the DSA slave transmit path. When that happens, the DSA layer has not had a chance to tag packets with the appropriate per-port and per-queue information, and if we don't have a port 0 queue 0 available (e.g: on boards where this does not exist), we will hit a NULL pointer de-reference in bcm_sysport_select_queue(). Guard against such cases by testing for the TX ring validity. Fixes: 84ff33eeb23d ("net: systemport: Establish DSA network device queue mapping") Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bcmsysport.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index dafc26690555..1d9d5f986e14 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2040,6 +2040,9 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb, port = BRCM_TAG_GET_PORT(queue); tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues]; + if (unlikely(!tx_ring)) + return fallback(dev, skb); + return tx_ring->index; } -- cgit v1.2.3 From b9077428ec5569aacb2952d8a2ffb51c8988d3c2 Mon Sep 17 00:00:00 2001 From: Lipeng Date: Mon, 23 Oct 2017 19:51:01 +0800 Subject: net: hns3: fix a bug when alloc new buffer When allocating a new buffer for the HW, the old buffer should be unmapped first.
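For illustration, a minimal sketch of the intended replace sequence, written against the generic DMA API rather than the hns3 helpers (hypothetical standalone code, not part of the patch; struct rx_cb and replace_rx_buffer stand in for the driver's descriptor control block and helper):

	#include <linux/dma-mapping.h>

	struct rx_cb { dma_addr_t dma; void *buf; unsigned int len; };

	/* Sketch: swap in a freshly mapped RX buffer. The old mapping must
	 * be released first, otherwise it is leaked; the new buffer is
	 * assumed to have been dma_map_single()'d at allocation time.
	 */
	static void replace_rx_buffer(struct device *dev, struct rx_cb *old,
				      const struct rx_cb *res, __le64 *desc_addr)
	{
		dma_unmap_single(dev, old->dma, old->len, DMA_FROM_DEVICE);
		*old = *res;
		*desc_addr = cpu_to_le64(old->dma);
	}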
The old code mapped the old buffer again instead of unmapping it; this patch fixes that. Fixes: 76ad4f0 (net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC) Signed-off-by: Lipeng Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 8383d6726ae4..3ddcd47fa61c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -1595,7 +1595,7 @@ out_buffer_fail: static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, struct hns3_desc_cb *res_cb) { - hns3_map_buffer(ring, &ring->desc_cb[i]); + hns3_unmap_buffer(ring, &ring->desc_cb[i]); ring->desc_cb[i] = *res_cb; ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); } -- cgit v1.2.3 From 564883bb4dc1a4f3cba6344e77743175694b0761 Mon Sep 17 00:00:00 2001 From: Lipeng Date: Mon, 23 Oct 2017 19:51:02 +0800 Subject: net: hns3: fix the bug when map buffer fail If a buffer has been received into the stack, the driver will allocate a new buffer, map it to the device and replace the old buffer. When the mapping fails, only the newly allocated buffer should be freed, not all the buffers in the ring. Fixes: 76ad4f0 (net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC) Signed-off-by: Lipeng Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 3ddcd47fa61c..58aa2dd6ace0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -1555,7 +1555,7 @@ static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring, return 0; out_with_buf: - hns3_free_buffers(ring); + hns3_free_buffer(ring, cb); out: return ret; } -- cgit v1.2.3 From 7410343eab04088225267949477d1c7b5f9598fc Mon Sep 17 00:00:00 2001 From: Lipeng Date: Mon, 23 Oct 2017 19:51:03 +0800 Subject: net: hns3: fix the ops check in hns3_get_rxnfc Patch 1 (07d2995 "net: hns3: add support for ETHTOOL_GRXFH") added ae_algo->ops->get_rss_tuple to hns3_get_rxnfc, and patch 2 (5668abd "net: hns3: add support for set_ringparam") removed ae_algo->ops->get_tc_size from it. This patch fixes the ops check in hns3_get_rxnfc accordingly. Signed-off-by: Lipeng Signed-off-by: David S.
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c index 6c469e49a04f..5cd163bdbf14 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c @@ -717,7 +717,7 @@ static int hns3_get_rxnfc(struct net_device *netdev, { struct hnae3_handle *h = hns3_get_handle(netdev); - if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_tc_size) + if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss_tuple) return -EOPNOTSUPP; switch (cmd->cmd) { -- cgit v1.2.3 From 709eb41ad8cd56ee68f9ca5140cfd46839d35837 Mon Sep 17 00:00:00 2001 From: Lipeng Date: Mon, 23 Oct 2017 19:51:04 +0800 Subject: net: hns3: get vf count by pci_sriov_get_totalvfs This patch gets the VF count via the standard function pci_sriov_get_totalvfs() instead of reading it from the NIC HW. Signed-off-by: Lipeng Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 8508521c26e8..443124177f05 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -837,7 +837,6 @@ static int hclge_parse_func_status(struct hclge_dev *hdev, else hdev->flag &= ~HCLGE_FLAG_MAIN; - hdev->num_req_vfs = status->vf_num / status->pf_num; return 0; } @@ -4361,6 +4360,8 @@ static int hclge_pci_init(struct hclge_dev *hdev) goto err_clr_master; } + hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev); + return 0; err_clr_master: pci_clear_master(pdev); -- cgit v1.2.3 From 66b447301ac710ee237dba8b653244018fbb6168 Mon Sep 17 00:00:00 2001 From: Lipeng Date: Mon, 23 Oct 2017 19:51:05 +0800 Subject: net: hns3: fix the TX/RX ring.queue_index in hns3_ring_get_cfg The interface hns3_ring_get_cfg only updates the TX ring queue_index and does not update the RX ring queue_index. This patch fixes it. Fixes: 76ad4f0 (net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC) Signed-off-by: Lipeng Signed-off-by: David S.
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 58aa2dd6ace0..14de0f7581c8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -2506,16 +2506,16 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, if (ring_type == HNAE3_RING_TYPE_TX) { ring_data[q->tqp_index].ring = ring; + ring_data[q->tqp_index].queue_index = q->tqp_index; ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; } else { ring_data[q->tqp_index + queue_num].ring = ring; + ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index; ring->io_base = q->io_base; } hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); - ring_data[q->tqp_index].queue_index = q->tqp_index; - ring->tqp = q; ring->desc = NULL; ring->desc_cb = NULL; -- cgit v1.2.3 From 51145dae2748233315a7a411cd97f4bedf8cc22f Mon Sep 17 00:00:00 2001 From: Lipeng Date: Mon, 23 Oct 2017 19:51:06 +0800 Subject: net: hns3: remove redundant memset when alloc buffer The HW uses the packet length to write packets to the buffer and to read them back. The memset done when allocating a buffer is therefore redundant; it serves no purpose and only costs time. This patch removes it. Fixes: 76ad4f0 (net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC) Signed-off-by: Lipeng Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 14de0f7581c8..06af3c86b60c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -1444,8 +1444,6 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring, cb->length = hnae_page_size(ring); cb->type = DESC_TYPE_PAGE; - memset(cb->buf, 0, cb->length); - return 0; } -- cgit v1.2.3 From 24e750c410ae046d1236af50014cbc697bb375d7 Mon Sep 17 00:00:00 2001 From: Lipeng Date: Mon, 23 Oct 2017 19:51:07 +0800 Subject: net: hns3: fix a bug about hns3_clean_tx_ring The return value of hns3_clean_tx_ring indicates the TX ring clean result. Returning true means the clean is complete and no more packets need cleaning; returning false means packets still need cleaning and napi needs to poll again. The last return of hns3_clean_tx_ring is "return !!budget", as budget decreases with every cleaned buffer. If there is no valid BD in the TX ring, returning 0 from hns3_clean_tx_ring causes napi to poll again and the poll never completes. This patch fixes the bug. Fixes: 76ad4f0 (net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC) Signed-off-by: Lipeng Signed-off-by: David S.
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 6 +++--- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 06af3c86b60c..537f6c3babb7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -1629,7 +1629,7 @@ static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) return u > c ? (h > c && h <= u) : (h > c || h <= u); } -int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) +bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) { struct net_device *netdev = ring->tqp->handle->kinfo.netdev; struct netdev_queue *dev_queue; @@ -1640,7 +1640,7 @@ int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) rmb(); /* Make sure head is ready before touch any data */ if (is_ring_empty(ring) || head == ring->next_to_clean) - return 0; /* no data to poll */ + return true; /* no data to poll */ if (!is_valid_clean_head(ring, head)) { netdev_err(netdev, "wrong head (%d, %d-%d)\n", head, @@ -1649,7 +1649,7 @@ int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) u64_stats_update_begin(&ring->syncp); ring->stats.io_err_cnt++; u64_stats_update_end(&ring->syncp); - return -EIO; + return true; } bytes = 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h index 6228b2603d93..58dc30bf893c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h @@ -594,7 +594,7 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) void hns3_ethtool_set_ops(struct net_device *netdev); -int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); +bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); int hns3_init_all_ring(struct hns3_nic_priv *priv); int hns3_uninit_all_ring(struct hns3_nic_priv *priv); netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev); -- cgit v1.2.3 From ff42bb9fe3091d996c763848afa3e57c2a780217 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 23 Oct 2017 11:58:06 -0700 Subject: nfp: bpf: add helper for emitting nops The need to emit a few nops will become more common soon as we add stack and map support, so add a helper. This makes the code shorter and may also be handy for marking the nops with a "reason" to ease applying optimizations. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: David S.
Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 23fb11a41cc4..eb8c905936ac 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -494,6 +494,12 @@ static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg) return tmp_reg; } +static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count) +{ + while (count--) + emit_nop(nfp_prog); +} + static void wrp_br_special(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_special special) @@ -1799,7 +1805,7 @@ static void nfp_outro(struct nfp_prog *nfp_prog) static int nfp_translate(struct nfp_prog *nfp_prog) { struct nfp_insn_meta *meta; - int i, err; + int err; nfp_intro(nfp_prog); if (nfp_prog->error) @@ -1831,8 +1837,7 @@ static int nfp_translate(struct nfp_prog *nfp_prog) if (nfp_prog->error) return nfp_prog->error; - for (i = 0; i < NFP_USTORE_PREFETCH_WINDOW; i++) - emit_nop(nfp_prog); + wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW); if (nfp_prog->error) return nfp_prog->error; -- cgit v1.2.3 From 70c78fc138b6d0ef76d9920034e25082dd3a36ac Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 23 Oct 2017 11:58:07 -0700 Subject: nfp: bpf: refactor nfp_bpf_check_ptr() nfp_bpf_check_ptr() mostly looks at the pointer register. Add a temporary variable to shorten the code. While at it make sure we print error messages if translation fails to help users identify the problem (to be carried in ext_ack in due course). Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index e361c0e3b788..4d2ed84a82e0 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -113,17 +113,23 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog, static int nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, - const struct bpf_verifier_env *env, u8 reg) + const struct bpf_verifier_env *env, u8 reg_no) { - if (env->cur_state.regs[reg].type != PTR_TO_CTX && - env->cur_state.regs[reg].type != PTR_TO_PACKET) + const struct bpf_reg_state *reg = &env->cur_state.regs[reg_no]; + + if (reg->type != PTR_TO_CTX && + reg->type != PTR_TO_PACKET) { + pr_info("unsupported ptr type: %d\n", reg->type); return -EINVAL; + } - if (meta->ptr.type != NOT_INIT && - meta->ptr.type != env->cur_state.regs[reg].type) + if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) { + pr_info("ptr type changed for instruction %d -> %d\n", + meta->ptr.type, reg->type); return -EINVAL; + } - meta->ptr = env->cur_state.regs[reg]; + meta->ptr = *reg; return 0; } -- cgit v1.2.3 From ee9133a845fe8ad15f989e29bf8e2c8abe7986b8 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 23 Oct 2017 11:58:08 -0700 Subject: nfp: bpf: add stack write support Stack is implemented by the LMEM register file. Unaligned accesses to LMEM are not allowed. Accesses also have to be 4B wide. To support stack we need to make sure offsets of pointers are known at translation time (for now) and perform correct load/mask/shift operations. 
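To make the load/mask/shift requirement concrete, here is a standalone C sketch of the merge step for a sub-word store into one 4B LMEM word (lmem_merge is a hypothetical helper, not the JIT code itself; the byte offsets are assumed to have been validated to fit within the word):

	#include <stdint.h>

	/* Merge 'size' bytes taken from byte src_byte of 'src' into 'word'
	 * at byte dst_byte, mirroring the byte mask and shift the JIT
	 * computes for a single ld_field instruction.
	 */
	static uint32_t lmem_merge(uint32_t word, uint32_t src,
				   unsigned int src_byte, unsigned int dst_byte,
				   unsigned int size)
	{
		uint32_t mask = size >= 4 ? 0xffffffffu :
				((1u << (size * 8)) - 1) << (dst_byte * 8);
		int shf = ((int)dst_byte - (int)src_byte) * 8;
		uint32_t moved = shf >= 0 ? src << shf : src >> -shf;

		return (word & ~mask) | (moved & mask);
	}

The hardware performs such a merge in one ld_field instruction; the JIT's job is only to derive the byte mask and the shift direction shown above.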
Since we can access first 64B of LMEM without much effort support only stacks not bigger than 64B. Following commits will extend the possible sizes beyond that. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 105 ++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/bpf/main.h | 3 + drivers/net/ethernet/netronome/nfp/bpf/offload.c | 14 +++ drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 30 +++++-- 4 files changed, 147 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index eb8c905936ac..d2a3e9065dbe 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -642,6 +642,100 @@ data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset, return 0; } +typedef int +(*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off, + unsigned int size); + +static int +wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off, + unsigned int size) +{ + u32 idx, dst_byte; + enum shf_sc sc; + swreg reg; + int shf; + u8 mask; + + if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4)) + return -EOPNOTSUPP; + + idx = off / 4; + + /* Move the entire word */ + if (size == 4) { + wrp_mov(nfp_prog, reg_lm(0, idx), reg_b(src)); + return 0; + } + + dst_byte = off % 4; + + mask = (1 << size) - 1; + mask <<= dst_byte; + + if (WARN_ON_ONCE(mask > 0xf)) + return -EOPNOTSUPP; + + shf = abs(src_byte - dst_byte) * 8; + if (src_byte == dst_byte) { + sc = SHF_SC_NONE; + } else if (src_byte < dst_byte) { + shf = 32 - shf; + sc = SHF_SC_L_SHF; + } else { + sc = SHF_SC_R_SHF; + } + + /* ld_field can address fewer indexes, if offset too large do RMW. + * Because we RMV twice we waste 2 cycles on unaligned 8 byte writes. 
+ */ + if (idx <= RE_REG_LM_IDX_MAX) { + reg = reg_lm(0, idx); + } else { + reg = imm_a(nfp_prog); + wrp_mov(nfp_prog, reg, reg_lm(0, idx)); + } + + emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf); + + if (idx > RE_REG_LM_IDX_MAX) + wrp_mov(nfp_prog, reg_lm(0, idx), reg); + + return 0; +} + +static int +mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + unsigned int size, u8 gpr, lmem_step step) +{ + s32 off = nfp_prog->stack_depth + meta->insn.off; + u32 gpr_byte = 0; + int ret; + + while (size) { + u32 slice_end; + u8 slice_size; + + slice_size = min(size, 4 - gpr_byte); + slice_end = min(off + slice_size, round_up(off + 1, 4)); + slice_size = slice_end - off; + + ret = step(nfp_prog, gpr, gpr_byte, off, slice_size); + if (ret) + return ret; + + gpr_byte += slice_size; + if (gpr_byte >= 4) { + gpr_byte -= 4; + gpr++; + } + + size -= slice_size; + off += slice_size; + } + + return 0; +} + static void wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm) { @@ -1298,6 +1392,14 @@ mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, meta->insn.src_reg * 2, size); } +static int +mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + unsigned int size) +{ + return mem_op_stack(nfp_prog, meta, size, meta->insn.src_reg * 2, + wrp_lmem_store); +} + static int mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, unsigned int size) @@ -1305,6 +1407,9 @@ mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, if (meta->ptr.type == PTR_TO_PACKET) return mem_stx_data(nfp_prog, meta, size); + if (meta->ptr.type == PTR_TO_STACK) + return mem_stx_stack(nfp_prog, meta, size); + return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index d77e88a45409..a31632681e79 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -151,6 +151,7 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta) * @tgt_done: jump target to get the next packet * @n_translated: number of successfully translated instructions (for errors) * @error: error code if something went wrong + * @stack_depth: max stack depth from the verifier * @insns: list of BPF instruction wrappers (struct nfp_insn_meta) */ struct nfp_prog { @@ -171,6 +172,8 @@ struct nfp_prog { unsigned int n_translated; int error; + unsigned int stack_depth; + struct list_head insns; }; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index a88bb5bc0082..f215abcbc18e 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -146,6 +146,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn, { unsigned int code_sz = max_instr * sizeof(u64); enum nfp_bpf_action_type act; + unsigned int stack_size; u16 start_off, done_off; unsigned int max_mtu; int ret; @@ -167,6 +168,19 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn, start_off = nn_readw(nn, NFP_NET_CFG_BPF_START); done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE); + if (cls_bpf->prog->aux->stack_depth > 64) { + nn_info(nn, "large stack not supported: program %dB > 64B\n", + cls_bpf->prog->aux->stack_depth); + return -EOPNOTSUPP; + } + + stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64; + if (cls_bpf->prog->aux->stack_depth > stack_size) { + nn_info(nn, "stack too large: program %dB > FW stack %dB\n", + cls_bpf->prog->aux->stack_depth, stack_size); + return 
-EOPNOTSUPP; + } + *code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL); if (!*code) return -ENOMEM; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 4d2ed84a82e0..376d9938b823 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -111,18 +111,41 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog, return 0; } +static int nfp_bpf_check_stack_access(const struct bpf_reg_state *reg) +{ + if (!tnum_is_const(reg->var_off)) { + pr_info("variable ptr stack access\n"); + return -EINVAL; + } + + if (reg->var_off.value || reg->off) { + pr_info("stack access via modified register\n"); + return -EINVAL; + } + + return 0; +} + static int nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, const struct bpf_verifier_env *env, u8 reg_no) { const struct bpf_reg_state *reg = &env->cur_state.regs[reg_no]; + int err; if (reg->type != PTR_TO_CTX && + reg->type != PTR_TO_STACK && reg->type != PTR_TO_PACKET) { pr_info("unsupported ptr type: %d\n", reg->type); return -EINVAL; } + if (reg->type == PTR_TO_STACK) { + err = nfp_bpf_check_stack_access(reg); + if (err) + return err; + } + if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) { pr_info("ptr type changed for instruction %d -> %d\n", meta->ptr.type, reg->type); @@ -143,11 +166,6 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) meta = nfp_bpf_goto_meta(priv->prog, meta, insn_idx, env->prog->len); priv->meta = meta; - if (meta->insn.src_reg == BPF_REG_10 || - meta->insn.dst_reg == BPF_REG_10) { - pr_err("stack not yet supported\n"); - return -EINVAL; - } if (meta->insn.src_reg >= MAX_BPF_REG || meta->insn.dst_reg >= MAX_BPF_REG) { pr_err("program uses extended registers - jit hardening?\n"); @@ -176,6 +194,8 @@ int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog) struct nfp_bpf_analyzer_priv *priv; int ret; + nfp_prog->stack_depth = prog->aux->stack_depth; + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; -- cgit v1.2.3 From a82b23fb38eaaaad89332b90029fc4cd7c3f2545 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 23 Oct 2017 11:58:09 -0700 Subject: nfp: bpf: add stack read support Add simple stack read support, similar to write in every aspect, but data flowing the other way. Note that unlike write which can be done in smaller than word quantities, if registers are loaded with less-than-word of stack contents - the values have to be zero extended. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 81 ++++++++++++++++++++++++++-- 1 file changed, 76 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index d2a3e9065dbe..094acea35326 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -644,11 +644,65 @@ data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset, typedef int (*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off, - unsigned int size); + unsigned int size, bool new_gpr); + +static int +wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off, + unsigned int size, bool new_gpr) +{ + u32 idx, src_byte; + enum shf_sc sc; + swreg reg; + int shf; + u8 mask; + + if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4)) + return -EOPNOTSUPP; + + idx = off / 4; + + /* Move the entire word */ + if (size == 4) { + wrp_mov(nfp_prog, reg_both(dst), reg_lm(0, idx)); + return 0; + } + + src_byte = off % 4; + + mask = (1 << size) - 1; + mask <<= dst_byte; + + if (WARN_ON_ONCE(mask > 0xf)) + return -EOPNOTSUPP; + + shf = abs(src_byte - dst_byte) * 8; + if (src_byte == dst_byte) { + sc = SHF_SC_NONE; + } else if (src_byte < dst_byte) { + shf = 32 - shf; + sc = SHF_SC_L_SHF; + } else { + sc = SHF_SC_R_SHF; + } + + /* ld_field can address fewer indexes, if offset too large do RMW. + * Because we RMV twice we waste 2 cycles on unaligned 8 byte writes. + */ + if (idx <= RE_REG_LM_IDX_MAX) { + reg = reg_lm(0, idx); + } else { + reg = imm_a(nfp_prog); + wrp_mov(nfp_prog, reg, reg_lm(0, idx)); + } + + emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr); + + return 0; +} static int wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off, - unsigned int size) + unsigned int size, bool new_gpr) { u32 idx, dst_byte; enum shf_sc sc; @@ -705,12 +759,16 @@ wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off, static int mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, - unsigned int size, u8 gpr, lmem_step step) + unsigned int size, u8 gpr, bool clr_gpr, lmem_step step) { s32 off = nfp_prog->stack_depth + meta->insn.off; + u8 prev_gpr = 255; u32 gpr_byte = 0; int ret; + if (clr_gpr && size < 8) + wrp_immed(nfp_prog, reg_both(gpr + 1), 0); + while (size) { u32 slice_end; u8 slice_size; @@ -719,10 +777,12 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, slice_end = min(off + slice_size, round_up(off + 1, 4)); slice_size = slice_end - off; - ret = step(nfp_prog, gpr, gpr_byte, off, slice_size); + ret = step(nfp_prog, gpr, gpr_byte, off, slice_size, + gpr != prev_gpr); if (ret) return ret; + prev_gpr = gpr; gpr_byte += slice_size; if (gpr_byte >= 4) { gpr_byte -= 4; @@ -1232,6 +1292,14 @@ static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) meta->insn.src_reg * 2, 4); } +static int +mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, + unsigned int size) +{ + return mem_op_stack(nfp_prog, meta, size, meta->insn.dst_reg * 2, true, + wrp_lmem_load); +} + static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, u8 size) { @@ -1315,6 +1383,9 @@ mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, if (meta->ptr.type == PTR_TO_PACKET) return mem_ldx_data(nfp_prog, meta, size); + if (meta->ptr.type == PTR_TO_STACK) + return mem_ldx_stack(nfp_prog, meta, size); + return 
-EOPNOTSUPP; } @@ -1396,7 +1467,7 @@ static int mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, unsigned int size) { - return mem_op_stack(nfp_prog, meta, size, meta->insn.src_reg * 2, + return mem_op_stack(nfp_prog, meta, size, meta->insn.src_reg * 2, false, wrp_lmem_store); } -- cgit v1.2.3 From 9a90c83c09874a2fd03905ef0f73512c9de18799 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 23 Oct 2017 11:58:10 -0700 Subject: nfp: bpf: optimize the RMW for stack accesses When we are performing unaligned stack accesses in the 32-64B window we have to do a read-modify-write cycle. E.g. for reading 8 bytes from address 17: 0: tmp = stack[16] 1: gprLo = tmp >> 8 2: tmp = stack[20] 3: gprLo |= tmp << 24 4: tmp = stack[20] 5: gprHi = tmp >> 8 6: tmp = stack[24] 7: gprHi |= tmp << 24 The load on line 4 is unnecessary, because tmp already contains data from stack[20]. For write we can optimize both loads and writebacks away. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 33 +++++++++++++++++++++------- 1 file changed, 25 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 094acea35326..6730690cf9d8 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -644,11 +644,11 @@ data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset, typedef int (*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off, - unsigned int size, bool new_gpr); + unsigned int size, bool first, bool new_gpr, bool last); static int wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off, - unsigned int size, bool new_gpr) + unsigned int size, bool first, bool new_gpr, bool last) { u32 idx, src_byte; enum shf_sc sc; @@ -692,7 +692,13 @@ wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off, reg = reg_lm(0, idx); } else { reg = imm_a(nfp_prog); - wrp_mov(nfp_prog, reg, reg_lm(0, idx)); + /* If it's not the first part of the load and we start a new GPR + * that means we are loading a second part of the LMEM word into + * a new GPR. IOW we've already looked that LMEM word and + * therefore it has been loaded into imm_a(). + */ + if (first || !new_gpr) + wrp_mov(nfp_prog, reg, reg_lm(0, idx)); } emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr); @@ -702,7 +708,7 @@ wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off, static int wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off, - unsigned int size, bool new_gpr) + unsigned int size, bool first, bool new_gpr, bool last) { u32 idx, dst_byte; enum shf_sc sc; @@ -746,13 +752,19 @@ wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off, reg = reg_lm(0, idx); } else { reg = imm_a(nfp_prog); - wrp_mov(nfp_prog, reg, reg_lm(0, idx)); + /* Only first and last LMEM locations are going to need RMW, + * the middle location will be overwritten fully. 
+ */ + if (first || last) + wrp_mov(nfp_prog, reg, reg_lm(0, idx)); } emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf); - if (idx > RE_REG_LM_IDX_MAX) - wrp_mov(nfp_prog, reg_lm(0, idx), reg); + if (new_gpr || last) { + if (idx > RE_REG_LM_IDX_MAX) + wrp_mov(nfp_prog, reg_lm(0, idx), reg); + } return 0; } @@ -762,6 +774,7 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, unsigned int size, u8 gpr, bool clr_gpr, lmem_step step) { s32 off = nfp_prog->stack_depth + meta->insn.off; + bool first = true, last; u8 prev_gpr = 255; u32 gpr_byte = 0; int ret; @@ -777,12 +790,16 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, slice_end = min(off + slice_size, round_up(off + 1, 4)); slice_size = slice_end - off; + last = slice_size == size; + ret = step(nfp_prog, gpr, gpr_byte, off, slice_size, - gpr != prev_gpr); + first, gpr != prev_gpr, last); if (ret) return ret; prev_gpr = gpr; + first = false; + gpr_byte += slice_size; if (gpr_byte >= 4) { gpr_byte -= 4; -- cgit v1.2.3 From d3488480635f453410fd27cea3fc370cedc7e28a Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 23 Oct 2017 11:58:11 -0700 Subject: nfp: bpf: allow stack accesses via modified stack registers As long as the verifier tells us the stack offset exactly we can render the LMEM reads quite easily. Simply make sure that the offset is constant for a given instruction and add it to the instruction's offset. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 23 ++++++++++++---------- drivers/net/ethernet/netronome/nfp/bpf/main.h | 3 +++ drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 24 ++++++++++++++++------- 3 files changed, 33 insertions(+), 17 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 6730690cf9d8..073e382cba04 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -771,9 +771,10 @@ wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off, static int mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, - unsigned int size, u8 gpr, bool clr_gpr, lmem_step step) + unsigned int size, unsigned int ptr_off, u8 gpr, bool clr_gpr, + lmem_step step) { - s32 off = nfp_prog->stack_depth + meta->insn.off; + s32 off = nfp_prog->stack_depth + meta->insn.off + ptr_off; bool first = true, last; u8 prev_gpr = 255; u32 gpr_byte = 0; @@ -1311,10 +1312,10 @@ static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, - unsigned int size) + unsigned int size, unsigned int ptr_off) { - return mem_op_stack(nfp_prog, meta, size, meta->insn.dst_reg * 2, true, - wrp_lmem_load); + return mem_op_stack(nfp_prog, meta, size, ptr_off, + meta->insn.dst_reg * 2, true, wrp_lmem_load); } static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, @@ -1401,7 +1402,8 @@ mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, return mem_ldx_data(nfp_prog, meta, size); if (meta->ptr.type == PTR_TO_STACK) - return mem_ldx_stack(nfp_prog, meta, size); + return mem_ldx_stack(nfp_prog, meta, size, + meta->ptr.off + meta->ptr.var_off.value); return -EOPNOTSUPP; } @@ -1482,10 +1484,10 @@ mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, static int mem_stx_stack(struct nfp_prog *nfp_prog, 
struct nfp_insn_meta *meta, - unsigned int size) + unsigned int size, unsigned int ptr_off) { - return mem_op_stack(nfp_prog, meta, size, meta->insn.src_reg * 2, false, - wrp_lmem_store); + return mem_op_stack(nfp_prog, meta, size, ptr_off, + meta->insn.src_reg * 2, false, wrp_lmem_store); } static int @@ -1496,7 +1498,8 @@ mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, return mem_stx_data(nfp_prog, meta, size); if (meta->ptr.type == PTR_TO_STACK) - return mem_stx_stack(nfp_prog, meta, size); + return mem_stx_stack(nfp_prog, meta, size, + meta->ptr.off + meta->ptr.var_off.value); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index a31632681e79..d4f144a62f0f 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -56,6 +56,7 @@ enum br_special { enum static_regs { STATIC_REG_IMM = 21, /* Bank AB */ + STATIC_REG_STACK = 22, /* Bank A */ STATIC_REG_PKT_LEN = 22, /* Bank B */ }; @@ -74,6 +75,8 @@ enum nfp_bpf_action_type { #define pv_len(np) reg_lm(1, PKT_VEC_PKT_LEN) #define pv_ctm_ptr(np) reg_lm(1, PKT_VEC_PKT_PTR) +#define stack_reg(np) reg_a(STATIC_REG_STACK) +#define stack_imm(np) imm_b(np) #define plen_reg(np) reg_b(STATIC_REG_PKT_LEN) #define pptr_reg(np) pv_ctm_ptr(np) #define imm_a(np) reg_a(STATIC_REG_IMM) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 376d9938b823..633db3e1a11e 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -111,19 +111,29 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog, return 0; } -static int nfp_bpf_check_stack_access(const struct bpf_reg_state *reg) +static int +nfp_bpf_check_stack_access(struct nfp_insn_meta *meta, + const struct bpf_reg_state *reg) { + s32 old_off, new_off; + if (!tnum_is_const(reg->var_off)) { pr_info("variable ptr stack access\n"); return -EINVAL; } - if (reg->var_off.value || reg->off) { - pr_info("stack access via modified register\n"); - return -EINVAL; - } + if (meta->ptr.type == NOT_INIT) + return 0; - return 0; + old_off = meta->ptr.off + meta->ptr.var_off.value; + new_off = reg->off + reg->var_off.value; + + if (old_off == new_off) + return 0; + + pr_info("stack access changed location was:%d is:%d\n", + old_off, new_off); + return -EINVAL; } static int @@ -141,7 +151,7 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, } if (reg->type == PTR_TO_STACK) { - err = nfp_bpf_check_stack_access(reg); + err = nfp_bpf_check_stack_access(meta, reg); if (err) return err; } -- cgit v1.2.3 From 2df03a50f14ab6d888c212aa332536933ded040a Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 23 Oct 2017 11:58:12 -0700 Subject: nfp: bpf: support accessing the stack beyond 64 bytes To access beyond 64th byte of the stack we need to set a new stack pointer register (LMEM is accessed indirectly through those pointers). Add a function for encoding local CSR access instruction. Use stack pointer number 3. Note that stack pointer registers allow us to index into 32 bytes of LMEM (with shift operations i.e. when operands are restricted). This means if access is crossing 32 byte boundary we must not use offsetting, we have to set the pointer to the exact address and move it with post-increments. We depend on the datapath placing the stack base address in GPR A22 for our use. 
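As a compact restatement of the pointer-selection rules above, a standalone sketch (hypothetical names; 'off' and 'size' are byte quantities that already include the stack base adjustment):

	/* Decide how to reach stack bytes [off, off + size) in LMEM. */
	enum lm_strategy {
		LM_DIRECT,	/* bottom 64B, reachable through LMaddr0 */
		LM_PTR_OFFSET,	/* one 32B window: set pointer, OR offset in */
		LM_PTR_INC,	/* set pointer exactly, post-increment per word */
	};

	static enum lm_strategy pick_strategy(unsigned int off, unsigned int size)
	{
		if (off + size <= 64)
			return LM_DIRECT;
		if (off / 32 == (off + size - 1) / 32)
			return LM_PTR_OFFSET;
		return LM_PTR_INC;
	}

The third case is why the translation moves the pointer with post-increments instead of ORing an offset in.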
Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 114 +++++++++++++++++++++-- drivers/net/ethernet/netronome/nfp/bpf/offload.c | 6 -- drivers/net/ethernet/netronome/nfp/nfp_asm.h | 5 + 3 files changed, 111 insertions(+), 14 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 073e382cba04..5105b9247839 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -427,6 +427,48 @@ emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src, emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false); } +static void +__emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr, + bool dst_lmextn, bool src_lmextn) +{ + u64 insn; + + insn = OP_LCSR_BASE | + FIELD_PREP(OP_LCSR_A_SRC, areg) | + FIELD_PREP(OP_LCSR_B_SRC, breg) | + FIELD_PREP(OP_LCSR_WRITE, wr) | + FIELD_PREP(OP_LCSR_ADDR, addr) | + FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) | + FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn); + + nfp_prog_push(nfp_prog, insn); +} + +static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr) +{ + struct nfp_insn_ur_regs reg; + int err; + + /* This instruction takes immeds instead of reg_none() for the ignored + * operand, but we can't encode 2 immeds in one instr with our normal + * swreg infra so if param is an immed, we encode as reg_none() and + * copy the immed to both operands. + */ + if (swreg_type(src) == NN_REG_IMM) { + err = swreg_to_unrestricted(reg_none(), src, reg_none(), ®); + reg.breg = reg.areg; + } else { + err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), ®); + } + if (err) { + nfp_prog->error = err; + return; + } + + __emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr / 4, + false, reg.src_lmextn); +} + static void emit_nop(struct nfp_prog *nfp_prog) { __emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0); @@ -644,12 +686,15 @@ data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset, typedef int (*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off, - unsigned int size, bool first, bool new_gpr, bool last); + unsigned int size, bool first, bool new_gpr, bool last, bool lm3, + bool needs_inc); static int wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off, - unsigned int size, bool first, bool new_gpr, bool last) + unsigned int size, bool first, bool new_gpr, bool last, bool lm3, + bool needs_inc) { + bool should_inc = needs_inc && new_gpr && !last; u32 idx, src_byte; enum shf_sc sc; swreg reg; @@ -663,10 +708,14 @@ wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off, /* Move the entire word */ if (size == 4) { - wrp_mov(nfp_prog, reg_both(dst), reg_lm(0, idx)); + wrp_mov(nfp_prog, reg_both(dst), + should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx)); return 0; } + if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX)) + return -EOPNOTSUPP; + src_byte = off % 4; mask = (1 << size) - 1; @@ -689,7 +738,7 @@ wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off, * Because we RMV twice we waste 2 cycles on unaligned 8 byte writes. */ if (idx <= RE_REG_LM_IDX_MAX) { - reg = reg_lm(0, idx); + reg = reg_lm(lm3 ? 
3 : 0, idx); } else { reg = imm_a(nfp_prog); /* If it's not the first part of the load and we start a new GPR @@ -703,13 +752,18 @@ wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off, emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr); + if (should_inc) + wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3)); + return 0; } static int wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off, - unsigned int size, bool first, bool new_gpr, bool last) + unsigned int size, bool first, bool new_gpr, bool last, bool lm3, + bool needs_inc) { + bool should_inc = needs_inc && new_gpr && !last; u32 idx, dst_byte; enum shf_sc sc; swreg reg; @@ -723,10 +777,15 @@ wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off, /* Move the entire word */ if (size == 4) { - wrp_mov(nfp_prog, reg_lm(0, idx), reg_b(src)); + wrp_mov(nfp_prog, + should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx), + reg_b(src)); return 0; } + if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX)) + return -EOPNOTSUPP; + dst_byte = off % 4; mask = (1 << size) - 1; @@ -749,7 +808,7 @@ wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off, * Because we RMV twice we waste 2 cycles on unaligned 8 byte writes. */ if (idx <= RE_REG_LM_IDX_MAX) { - reg = reg_lm(0, idx); + reg = reg_lm(lm3 ? 3 : 0, idx); } else { reg = imm_a(nfp_prog); /* Only first and last LMEM locations are going to need RMW, @@ -764,6 +823,8 @@ wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off, if (new_gpr || last) { if (idx > RE_REG_LM_IDX_MAX) wrp_mov(nfp_prog, reg_lm(0, idx), reg); + if (should_inc) + wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3)); } return 0; @@ -776,10 +837,44 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, { s32 off = nfp_prog->stack_depth + meta->insn.off + ptr_off; bool first = true, last; + bool needs_inc = false; + swreg stack_off_reg; u8 prev_gpr = 255; u32 gpr_byte = 0; + bool lm3 = true; int ret; + if (off + size <= 64) { + /* We can reach bottom 64B with LMaddr0 */ + lm3 = false; + } else if (round_down(off, 32) == round_down(off + size - 1, 32)) { + /* We have to set up a new pointer. If we know the offset + * and the entire access falls into a single 32 byte aligned + * window we won't have to increment the LM pointer. + * The 32 byte alignment is imporant because offset is ORed in + * not added when doing *l$indexN[off]. + */ + stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32), + stack_imm(nfp_prog)); + emit_alu(nfp_prog, imm_b(nfp_prog), + stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg); + + off %= 32; + } else { + stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4), + stack_imm(nfp_prog)); + + emit_alu(nfp_prog, imm_b(nfp_prog), + stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg); + + needs_inc = true; + } + if (lm3) { + emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3); + /* For size < 4 one slot will be filled by zeroing of upper. */ + wrp_nops(nfp_prog, clr_gpr && size < 8 ? 
2 : 3); + } + if (clr_gpr && size < 8) wrp_immed(nfp_prog, reg_both(gpr + 1), 0); @@ -793,8 +888,11 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, last = slice_size == size; + if (needs_inc) + off %= 4; + ret = step(nfp_prog, gpr, gpr_byte, off, slice_size, - first, gpr != prev_gpr, last); + first, gpr != prev_gpr, last, lm3, needs_inc); if (ret) return ret; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index f215abcbc18e..fbca1ca1f39b 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -168,12 +168,6 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn, start_off = nn_readw(nn, NFP_NET_CFG_BPF_START); done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE); - if (cls_bpf->prog->aux->stack_depth > 64) { - nn_info(nn, "large stack not supported: program %dB > 64B\n", - cls_bpf->prog->aux->stack_depth); - return -EOPNOTSUPP; - } - stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64; if (cls_bpf->prog->aux->stack_depth > stack_size) { nn_info(nn, "stack too large: program %dB > FW stack %dB\n", diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index 86e7daee6099..f4d1df3a1925 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -257,6 +257,11 @@ enum lcsr_wr_src { #define OP_CARB_BASE 0x0e000000000ULL #define OP_CARB_OR 0x00000010000ULL +#define NFP_CSR_ACT_LM_ADDR0 0x64 +#define NFP_CSR_ACT_LM_ADDR1 0x6c +#define NFP_CSR_ACT_LM_ADDR2 0x94 +#define NFP_CSR_ACT_LM_ADDR3 0x9c + /* Software register representation, independent of operand type */ #define NN_REG_TYPE GENMASK(31, 24) #define NN_REG_LM_IDX GENMASK(23, 22) -- cgit v1.2.3 From b14157eeed4eff2b293e0ca7738f6a3dbfff51cc Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 23 Oct 2017 11:58:13 -0700 Subject: nfp: bpf: support stack accesses via non-constant pointers If stack pointer has a different value on different paths but the alignment to words (4B) remains the same, we can set a new LMEM access pointer to the calculated value and access whichever word it's pointing to. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 41 +++++++++++++++++++---- drivers/net/ethernet/netronome/nfp/bpf/main.h | 2 ++ drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 12 +++++-- 3 files changed, 45 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 5105b9247839..d84f00b80aac 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -832,8 +832,8 @@ wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off, static int mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, - unsigned int size, unsigned int ptr_off, u8 gpr, bool clr_gpr, - lmem_step step) + unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr, + bool clr_gpr, lmem_step step) { s32 off = nfp_prog->stack_depth + meta->insn.off + ptr_off; bool first = true, last; @@ -844,7 +844,19 @@ mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool lm3 = true; int ret; - if (off + size <= 64) { + if (meta->ptr_not_const) { + /* Use of the last encountered ptr_off is OK, they all have + * the same alignment. 
Depend on low bits of value being + * discarded when written to LMaddr register. + */ + stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off, + stack_imm(nfp_prog)); + + emit_alu(nfp_prog, imm_b(nfp_prog), + reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg); + + needs_inc = true; + } else if (off + size <= 64) { /* We can reach bottom 64B with LMaddr0 */ lm3 = false; } else if (round_down(off, 32) == round_down(off + size - 1, 32)) { @@ -1096,9 +1108,22 @@ static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out) static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; + u8 dst = insn->dst_reg * 2; + u8 src = insn->src_reg * 2; + + if (insn->src_reg == BPF_REG_10) { + swreg stack_depth_reg; - wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2); - wrp_reg_mov(nfp_prog, insn->dst_reg * 2 + 1, insn->src_reg * 2 + 1); + stack_depth_reg = ur_load_imm_any(nfp_prog, + nfp_prog->stack_depth, + stack_imm(nfp_prog)); + emit_alu(nfp_prog, reg_both(dst), + stack_reg(nfp_prog), ALU_OP_ADD, stack_depth_reg); + wrp_immed(nfp_prog, reg_both(dst + 1), 0); + } else { + wrp_reg_mov(nfp_prog, dst, src); + wrp_reg_mov(nfp_prog, dst + 1, src + 1); + } return 0; } @@ -1413,7 +1438,8 @@ mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, unsigned int size, unsigned int ptr_off) { return mem_op_stack(nfp_prog, meta, size, ptr_off, - meta->insn.dst_reg * 2, true, wrp_lmem_load); + meta->insn.dst_reg * 2, meta->insn.src_reg * 2, + true, wrp_lmem_load); } static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, @@ -1585,7 +1611,8 @@ mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, unsigned int size, unsigned int ptr_off) { return mem_op_stack(nfp_prog, meta, size, ptr_off, - meta->insn.src_reg * 2, false, wrp_lmem_store); + meta->insn.src_reg * 2, meta->insn.dst_reg * 2, + false, wrp_lmem_store); } static int diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index d4f144a62f0f..86edc0691a5f 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -101,6 +101,7 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *); * struct nfp_insn_meta - BPF instruction wrapper * @insn: BPF instruction * @ptr: pointer type for memory operations + * @ptr_not_const: pointer is not always constant * @off: index of first generated machine instruction (in nfp_prog.prog) * @n: eBPF instruction number * @skip: skip this instruction (optimized out) @@ -110,6 +111,7 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *); struct nfp_insn_meta { struct bpf_insn insn; struct bpf_reg_state ptr; + bool ptr_not_const; unsigned int off; unsigned short n; bool skip; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 633db3e1a11e..3d3dcac1c942 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -112,7 +112,8 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog, } static int -nfp_bpf_check_stack_access(struct nfp_insn_meta *meta, +nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog, + struct nfp_insn_meta *meta, const struct bpf_reg_state *reg) { s32 old_off, new_off; @@ -128,7 +129,12 @@ nfp_bpf_check_stack_access(struct nfp_insn_meta *meta, old_off = meta->ptr.off + meta->ptr.var_off.value; new_off = reg->off + 
reg->var_off.value; - if (old_off == new_off) + meta->ptr_not_const |= old_off != new_off; + + if (!meta->ptr_not_const) + return 0; + + if (old_off % 4 == new_off % 4) return 0; pr_info("stack access changed location was:%d is:%d\n", @@ -151,7 +157,7 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, } if (reg->type == PTR_TO_STACK) { - err = nfp_bpf_check_stack_access(meta, reg); + err = nfp_bpf_check_stack_access(nfp_prog, meta, reg); if (err) return err; } -- cgit v1.2.3 From 9f16c8abcd79fc31a74d3af64f085a009c9d4b5a Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 23 Oct 2017 11:58:14 -0700 Subject: nfp: bpf: optimize mov64 a little Loading 64bit constants requires up to 4 load immediates, since we can only load 16 bits at a time. If the 32bit halves of the 64bit constant are the same, however, we can save a cycle by doing a register move instead of two loads of 16 bits. Note that we don't optimize the normal ALU64 load because even though it's a 64 bit load the upper half of the register is coming from sign extension so we can load it in one cycle anyway. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index d84f00b80aac..e7eeb7a07f81 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1384,19 +1384,28 @@ static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - wrp_immed(nfp_prog, reg_both(nfp_meta_prev(meta)->insn.dst_reg * 2 + 1), - meta->insn.imm); + struct nfp_insn_meta *prev = nfp_meta_prev(meta); + u32 imm_lo, imm_hi; + u8 dst; + + dst = prev->insn.dst_reg * 2; + imm_lo = prev->insn.imm; + imm_hi = meta->insn.imm; + + wrp_immed(nfp_prog, reg_both(dst), imm_lo); + + /* mov is always 1 insn, load imm may be two, so try to use mov */ + if (imm_hi == imm_lo) + wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst)); + else + wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi); return 0; } static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { - const struct bpf_insn *insn = &meta->insn; - meta->double_cb = imm_ld8_part2; - wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm); - return 0; } -- cgit v1.2.3 From 907aaa6babe1a606f3da4eb76e76e3ce6286f97f Mon Sep 17 00:00:00 2001 From: Veerasenareddy Burru Date: Mon, 23 Oct 2017 20:33:25 -0700 Subject: liquidio: pass date and time info to NIC firmware Pass date and time information to the NIC at the time of loading the firmware, and periodically update the host time to the NIC firmware. This makes the NIC firmware use the same time reference as the host, so that it is easy to correlate logs from firmware and host for debugging. Signed-off-by: Veerasenareddy Burru Signed-off-by: Felix Manlunas Signed-off-by: David S.
Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 126 +++++++++++++++++++++ .../net/ethernet/cavium/liquidio/liquidio_common.h | 8 ++ .../net/ethernet/cavium/liquidio/octeon_console.c | 32 +++++- .../net/ethernet/cavium/liquidio/octeon_network.h | 3 + 4 files changed, 166 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index eafae3eb4fed..b4f753c56308 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -83,6 +83,11 @@ static int octeon_console_debug_enabled(u32 console) /* runtime link query interval */ #define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000 +/* update localtime to octeon firmware every 60 seconds. + * make firmware to use same time reference, so that it will be easy to + * correlate firmware logged events/errors with host events, for debugging. + */ +#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000 struct liquidio_if_cfg_context { int octeon_id; @@ -901,6 +906,121 @@ static inline void update_link_status(struct net_device *netdev, } } +/** + * lio_sync_octeon_time_cb - callback that is invoked when soft command + * sent by lio_sync_octeon_time() has completed successfully or failed + * + * @oct - octeon device structure + * @status - indicates success or failure + * @buf - pointer to the command that was sent to firmware + **/ +static void lio_sync_octeon_time_cb(struct octeon_device *oct, + u32 status, void *buf) +{ + struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; + + if (status) + dev_err(&oct->pci_dev->dev, + "Failed to sync time to octeon; error=%d\n", status); + + octeon_free_soft_command(oct, sc); +} + +/** + * lio_sync_octeon_time - send latest localtime to octeon firmware so that + * firmware will correct it's time, in case there is a time skew + * + * @work: work scheduled to send time update to octeon firmware + **/ +static void lio_sync_octeon_time(struct work_struct *work) +{ + struct cavium_wk *wk = (struct cavium_wk *)work; + struct lio *lio = (struct lio *)wk->ctxptr; + struct octeon_device *oct = lio->oct_dev; + struct octeon_soft_command *sc; + struct timespec64 ts; + struct lio_time *lt; + int ret; + + sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 0, 0); + if (!sc) { + dev_err(&oct->pci_dev->dev, + "Failed to sync time to octeon: soft command allocation failed\n"); + return; + } + + lt = (struct lio_time *)sc->virtdptr; + + /* Get time of the day */ + getnstimeofday64(&ts); + lt->sec = ts.tv_sec; + lt->nsec = ts.tv_nsec; + octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0); + + sc->callback = lio_sync_octeon_time_cb; + sc->callback_arg = sc; + sc->wait_time = 1000; + + ret = octeon_send_soft_command(oct, sc); + if (ret == IQ_SEND_FAILED) { + dev_err(&oct->pci_dev->dev, + "Failed to sync time to octeon: failed to send soft command\n"); + octeon_free_soft_command(oct, sc); + } + + queue_delayed_work(lio->sync_octeon_time_wq.wq, + &lio->sync_octeon_time_wq.wk.work, + msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS)); +} + +/** + * setup_sync_octeon_time_wq - Sets up the work to periodically update + * local time to octeon firmware + * + * @netdev - network device which should send time update to firmware + **/ +static inline int setup_sync_octeon_time_wq(struct net_device *netdev) +{ + 
struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + lio->sync_octeon_time_wq.wq = + alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0); + if (!lio->sync_octeon_time_wq.wq) { + dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n"); + return -1; + } + INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work, + lio_sync_octeon_time); + lio->sync_octeon_time_wq.wk.ctxptr = lio; + queue_delayed_work(lio->sync_octeon_time_wq.wq, + &lio->sync_octeon_time_wq.wk.work, + msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS)); + + return 0; +} + +/** + * cleanup_sync_octeon_time_wq - stop scheduling and destroy the work created + * to periodically update local time to octeon firmware + * + * @netdev - network device which should send time update to firmware + **/ +static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct cavium_wq *time_wq = &lio->sync_octeon_time_wq; + + if (time_wq->wq) { + cancel_delayed_work_sync(&time_wq->wk.work); + destroy_workqueue(time_wq->wq); + } +} + static struct octeon_device *get_other_octeon_device(struct octeon_device *oct) { struct octeon_device *other_oct; @@ -1455,6 +1575,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) unregister_netdev(netdev); + cleanup_sync_octeon_time_wq(netdev); cleanup_link_status_change_wq(netdev); cleanup_rx_oom_poll_fn(netdev); @@ -3611,6 +3732,11 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) if (setup_link_status_change_wq(netdev)) goto setup_nic_dev_fail; + if ((octeon_dev->fw_info.app_cap_flags & + LIQUIDIO_TIME_SYNC_CAP) && + setup_sync_octeon_time_wq(netdev)) + goto setup_nic_dev_fail; + if (setup_rx_oom_poll_fn(netdev)) goto setup_nic_dev_fail; diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 3788c8cd082a..2033a65cd97a 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -84,6 +84,7 @@ enum octeon_tag_type { #define OPCODE_NIC_IF_CFG 0x09 #define OPCODE_NIC_VF_DRV_NOTICE 0x0A #define OPCODE_NIC_INTRMOD_PARAMS 0x0B +#define OPCODE_NIC_SYNC_OCTEON_TIME 0x14 #define VF_DRV_LOADED 1 #define VF_DRV_REMOVED -1 #define VF_DRV_MACADDR_CHANGED 2 @@ -108,6 +109,9 @@ enum octeon_tag_type { #define SCR2_BIT_FW_LOADED 63 +/* App specific capabilities from firmware to pf driver */ +#define LIQUIDIO_TIME_SYNC_CAP 0x1 + static inline u32 incr_index(u32 index, u32 count, u32 max) { if ((index + count) >= max) @@ -901,4 +905,8 @@ union oct_nic_if_cfg { } s; }; +struct lio_time { + s64 sec; /* seconds */ + s64 nsec; /* nanoseconds */ +}; #endif diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c index ec3dd69cd6b2..7f97ae48efed 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c @@ -803,15 +803,18 @@ static int octeon_console_read(struct octeon_device *oct, u32 console_num, } #define FBUF_SIZE (4 * 1024 * 1024) +#define MAX_BOOTTIME_SIZE 80 int octeon_download_firmware(struct octeon_device *oct, const u8 *data, size_t size) { - int ret = 0; + struct octeon_firmware_file_header *h; + char boottime[MAX_BOOTTIME_SIZE]; + struct timespec64 ts; u32 crc32_result; u64 load_addr; u32 image_len; - struct octeon_firmware_file_header *h; 
+ int ret = 0; u32 i, rem; if (size < sizeof(struct octeon_firmware_file_header)) { @@ -890,11 +893,34 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data, load_addr += size; } } + + /* Pass date and time information to NIC at the time of loading + * firmware and periodically update the host time to NIC firmware. + * This is to make NIC firmware use the same time reference as Host, + * so that it is easy to correlate logs from firmware and host for + * debugging. + * + * Octeon always uses UTC time. so timezone information is not sent. + */ + getnstimeofday64(&ts); + ret = snprintf(boottime, MAX_BOOTTIME_SIZE, + " time_sec=%lld time_nsec=%ld", + (s64)ts.tv_sec, ts.tv_nsec); + if ((sizeof(h->bootcmd) - strnlen(h->bootcmd, sizeof(h->bootcmd))) < + ret) { + dev_err(&oct->pci_dev->dev, "Boot command buffer too small\n"); + return -EINVAL; + } + strncat(h->bootcmd, boottime, + sizeof(h->bootcmd) - strnlen(h->bootcmd, sizeof(h->bootcmd))); + dev_info(&oct->pci_dev->dev, "Writing boot command: %s\n", h->bootcmd); /* Invoke the bootcmd */ ret = octeon_console_send_cmd(oct, h->bootcmd, 50); + if (ret) + dev_info(&oct->pci_dev->dev, "Boot command send failed\n"); - return 0; + return ret; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index 9e36319cead6..433f3619de8f 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -136,6 +136,9 @@ struct lio { /* work queue for link status */ struct cavium_wq link_status_wq; + /* work queue to regularly send local time to octeon firmware */ + struct cavium_wq sync_octeon_time_wq; + int netdev_uc_count; }; -- cgit v1.2.3 From 69715dd50d28deb52f84932794a2fce0b18c8b56 Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Tue, 24 Oct 2017 10:11:42 +0200 Subject: mlxsw: spectrum_dpipe: Fix entries dump of the adjacency table During the dump the per netlink packet entry counter should be zeroed out when new packet is created. Fixes: 190d38a52a73 ("mlxsw: spectrum_dpipe: Add support for adjacency table dump") Signed-off-by: Arkadi Sharshevsky Reported-by: David Ahern Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c index 6ea6435279c0..96fdba78acab 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c @@ -1093,6 +1093,7 @@ start_again: goto err_ctx_prepare; j = 0; nh_skip = nh_count; + nh_count = 0; mlxsw_sp_nexthop_for_each(nh, mlxsw_sp->router) { if (!mlxsw_sp_nexthop_offload(nh) || mlxsw_sp_nexthop_group_has_ipip(nh)) -- cgit v1.2.3 From de3872cd1863dcd077e966dd467f8e50876302aa Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Tue, 24 Oct 2017 11:17:15 +0200 Subject: mlxsw: spectrum: mr: Fix various endianness issues Fix various endianness issues in comparisons and assignments. The fix is entirely cosmetic as all the values fixed are endianness-agnostic. 
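For readers unfamiliar with sparse's endianness checking, a minimal standalone sketch of the pattern being fixed follows (illustrative function names and assumptions, not code from the patch):

#include <linux/types.h>
#include <linux/in.h>
#include <asm/byteorder.h>

/* Multicast route addresses are big-endian on the wire, hence __be32.
 * Comparing one against the host-order constant INADDR_ANY is what
 * makes sparse warn that a "restricted __be32 degrades to integer".
 */
static bool is_starg_warns(__be32 origin)
{
	return origin == INADDR_ANY;		/* sparse warning */
}

/* Converting the constant once with htonl() keeps both sides __be32.
 * Because INADDR_ANY (0) and 0xffffffff read the same in either byte
 * order, the generated code is identical - hence "entirely cosmetic".
 */
static bool is_starg_clean(__be32 origin)
{
	return origin == htonl(INADDR_ANY);	/* type-clean */
}

The specific warnings addressed are listed next.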
Cleans up sparse warnings: spectrum_mr.c:156:49: warning: restricted __be32 degrades to integer spectrum_mr.c:206:26: warning: restricted __be32 degrades to integer spectrum_mr.c:212:31: warning: incorrect type in assignment (different base types) spectrum_mr.c:212:31: expected restricted __be32 [usertype] addr4 spectrum_mr.c:212:31: got unsigned int spectrum_mr.c:214:32: warning: incorrect type in assignment (different base types) spectrum_mr.c:214:32: expected restricted __be32 [usertype] addr4 spectrum_mr.c:214:32: got unsigned int spectrum_mr.c:461:16: warning: restricted __be32 degrades to integer spectrum_mr.c:461:49: warning: restricted __be32 degrades to integer Fixes: c011ec1bbfd6 ("mlxsw: spectrum: Add the multicast routing offloading logic") Signed-off-by: Yotam Gigi Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c index 1f84bb8e9135..3f7d2d1282b2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c @@ -153,7 +153,7 @@ static bool mlxsw_sp_mr_route_starg(const struct mlxsw_sp_mr_route *mr_route) { switch (mr_route->mr_table->proto) { case MLXSW_SP_L3_PROTO_IPV4: - return mr_route->key.source_mask.addr4 == INADDR_ANY; + return mr_route->key.source_mask.addr4 == htonl(INADDR_ANY); case MLXSW_SP_L3_PROTO_IPV6: /* fall through */ default: @@ -203,15 +203,15 @@ static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table, struct mlxsw_sp_mr_route_key *key, const struct mfc_cache *mfc) { - bool starg = (mfc->mfc_origin == INADDR_ANY); + bool starg = (mfc->mfc_origin == htonl(INADDR_ANY)); memset(key, 0, sizeof(*key)); key->vrid = mr_table->vr_id; key->proto = mr_table->proto; key->group.addr4 = mfc->mfc_mcastgrp; - key->group_mask.addr4 = 0xffffffff; + key->group_mask.addr4 = htonl(0xffffffff); key->source.addr4 = mfc->mfc_origin; - key->source_mask.addr4 = starg ? 0 : 0xffffffff; + key->source_mask.addr4 = htonl(starg ? 0 : 0xffffffff); } static int mlxsw_sp_mr_route_evif_link(struct mlxsw_sp_mr_route *mr_route, @@ -458,7 +458,8 @@ int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table, /* If the route is a (*,*) route, abort, as these kind of routes are * used for proxy routes. */ - if (mfc->mfc_origin == INADDR_ANY && mfc->mfc_mcastgrp == INADDR_ANY) { + if (mfc->mfc_origin == htonl(INADDR_ANY) && + mfc->mfc_mcastgrp == htonl(INADDR_ANY)) { dev_warn(mr_table->mlxsw_sp->bus_info->dev, "Offloading proxy routes is not supported.\n"); return -EINVAL; -- cgit v1.2.3 From 6a30dc29a450566e9c8e07dd16c05b11cb41be20 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Tue, 24 Oct 2017 11:17:16 +0200 Subject: mlxsw: spectrum: mr: Make the function mlxsw_sp_mr_dev_vif_lookup static The function is only used internally in spectrum_mr.c and is not declared in the header file, thus make it static. Cleans up sparse warning: symbol 'mlxsw_sp_mr_dev_vif_lookup' was not declared. Should it be static? Fixes: c011ec1bbfd6 ("mlxsw: spectrum: Add the multicast routing offloading logic") Signed-off-by: Yotam Gigi Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c index 3f7d2d1282b2..d20b143de3b4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c @@ -768,7 +768,7 @@ void mlxsw_sp_mr_vif_del(struct mlxsw_sp_mr_table *mr_table, vifi_t vif_index) mlxsw_sp_mr_vif_unresolve(mr_table, NULL, mr_vif); } -struct mlxsw_sp_mr_vif * +static struct mlxsw_sp_mr_vif * mlxsw_sp_mr_dev_vif_lookup(struct mlxsw_sp_mr_table *mr_table, const struct net_device *dev) { -- cgit v1.2.3 From ea00aa3a27c3a52c5d2a1444a90e690d55b6c221 Mon Sep 17 00:00:00 2001 From: Yotam Gigi Date: Tue, 24 Oct 2017 11:17:17 +0200 Subject: mlxsw: spectrum: mr_tcam: Include the mr_tcam header file Make the spectrum_mr_tcam.c include the spectrum_mr_tcam.h header file. Cleans up sparse warning: symbol 'mlxsw_sp_mr_tcam_ops' was not declared. Should it be static? Fixes: 0e14c7777acb6 ("mlxsw: spectrum: Add the multicast routing hardware logic") Signed-off-by: Yotam Gigi Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c index 39c21c70ac32..34a0b632e5dd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c @@ -37,6 +37,7 @@ #include #include +#include "spectrum_mr_tcam.h" #include "reg.h" #include "spectrum.h" #include "core_acl_flex_actions.h" -- cgit v1.2.3 From 7aa1402e2eb4988b09bf1671e9f968e6e5689b1d Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 24 Oct 2017 01:45:59 -0700 Subject: net: ethernet/sfc: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: Solarflare linux maintainers Cc: Edward Cree Cc: Bert Kenward Cc: "David S. Miller" Cc: Eric Dumazet Cc: Jiri Pirko Cc: Jamal Hadi Salim Cc: Ingo Molnar Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Acked-by: Bert Kenward Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/efx.c | 6 ++---- drivers/net/ethernet/sfc/efx.h | 2 +- drivers/net/ethernet/sfc/falcon/efx.c | 6 ++---- drivers/net/ethernet/sfc/falcon/efx.h | 2 +- drivers/net/ethernet/sfc/falcon/falcon.c | 11 ++++++----- drivers/net/ethernet/sfc/falcon/nic.h | 2 ++ drivers/net/ethernet/sfc/falcon/rx.c | 4 ++-- drivers/net/ethernet/sfc/mcdi.c | 9 ++++----- drivers/net/ethernet/sfc/rx.c | 4 ++-- 9 files changed, 22 insertions(+), 24 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index b9cb697b2818..8fdcf7aaf997 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -471,8 +471,7 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel) rx_queue = &channel->rx_queue; rx_queue->efx = efx; - setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill, - (unsigned long)rx_queue); + timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0); return channel; } @@ -511,8 +510,7 @@ efx_copy_channel(const struct efx_channel *old_channel) rx_queue = &channel->rx_queue; rx_queue->buffer = NULL; memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd)); - setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill, - (unsigned long)rx_queue); + timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0); return channel; } diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index d407adf59610..52c84b782901 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -46,7 +46,7 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); void efx_init_rx_queue(struct efx_rx_queue *rx_queue); void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic); -void efx_rx_slow_fill(unsigned long context); +void efx_rx_slow_fill(struct timer_list *t); void __efx_rx_packet(struct efx_channel *channel); void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, unsigned int n_frags, unsigned int len, u16 flags); diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 29614da91cbf..6685a66ee1a3 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -449,8 +449,7 @@ ef4_alloc_channel(struct ef4_nic *efx, int i, struct ef4_channel *old_channel) rx_queue = &channel->rx_queue; rx_queue->efx = efx; - setup_timer(&rx_queue->slow_fill, ef4_rx_slow_fill, - (unsigned long)rx_queue); + timer_setup(&rx_queue->slow_fill, ef4_rx_slow_fill, 0); return channel; } @@ -489,8 +488,7 @@ ef4_copy_channel(const struct ef4_channel *old_channel) rx_queue = &channel->rx_queue; rx_queue->buffer = NULL; memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd)); - setup_timer(&rx_queue->slow_fill, ef4_rx_slow_fill, - (unsigned long)rx_queue); + timer_setup(&rx_queue->slow_fill, ef4_rx_slow_fill, 0); return channel; } diff --git a/drivers/net/ethernet/sfc/falcon/efx.h b/drivers/net/ethernet/sfc/falcon/efx.h index 4f3bb30661ea..a4e4d8ea4078 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.h +++ b/drivers/net/ethernet/sfc/falcon/efx.h @@ -45,7 +45,7 @@ void ef4_remove_rx_queue(struct ef4_rx_queue *rx_queue); void ef4_init_rx_queue(struct ef4_rx_queue *rx_queue); void ef4_fini_rx_queue(struct ef4_rx_queue *rx_queue); void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic); -void ef4_rx_slow_fill(unsigned long context); +void ef4_rx_slow_fill(struct timer_list *t); void __ef4_rx_packet(struct ef4_channel 
*channel); void ef4_rx_packet(struct ef4_rx_queue *rx_queue, unsigned int index, unsigned int n_frags, unsigned int len, u16 flags); diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c index 93c713c1f627..ccda017b6794 100644 --- a/drivers/net/ethernet/sfc/falcon/falcon.c +++ b/drivers/net/ethernet/sfc/falcon/falcon.c @@ -1454,10 +1454,11 @@ static void falcon_stats_complete(struct ef4_nic *efx) } } -static void falcon_stats_timer_func(unsigned long context) +static void falcon_stats_timer_func(struct timer_list *t) { - struct ef4_nic *efx = (struct ef4_nic *)context; - struct falcon_nic_data *nic_data = efx->nic_data; + struct falcon_nic_data *nic_data = from_timer(nic_data, t, + stats_timer); + struct ef4_nic *efx = nic_data->efx; spin_lock(&efx->stats_lock); @@ -2295,6 +2296,7 @@ static int falcon_probe_nic(struct ef4_nic *efx) if (!nic_data) return -ENOMEM; efx->nic_data = nic_data; + nic_data->efx = efx; rc = -ENODEV; @@ -2402,8 +2404,7 @@ static int falcon_probe_nic(struct ef4_nic *efx) } nic_data->stats_disable_count = 1; - setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func, - (unsigned long)efx); + timer_setup(&nic_data->stats_timer, falcon_stats_timer_func, 0); return 0; diff --git a/drivers/net/ethernet/sfc/falcon/nic.h b/drivers/net/ethernet/sfc/falcon/nic.h index a4c4592f6023..e2e3c008d073 100644 --- a/drivers/net/ethernet/sfc/falcon/nic.h +++ b/drivers/net/ethernet/sfc/falcon/nic.h @@ -267,6 +267,7 @@ enum { /** * struct falcon_nic_data - Falcon NIC state * @pci_dev2: Secondary function of Falcon A + * @efx: ef4_nic pointer * @board: Board state and functions * @stats: Hardware statistics * @stats_disable_count: Nest count for disabling statistics fetches @@ -280,6 +281,7 @@ enum { */ struct falcon_nic_data { struct pci_dev *pci_dev2; + struct ef4_nic *efx; struct falcon_board board; u64 stats[FALCON_STAT_COUNT]; unsigned int stats_disable_count; diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c index 6a8406dc0c2b..382019b302db 100644 --- a/drivers/net/ethernet/sfc/falcon/rx.c +++ b/drivers/net/ethernet/sfc/falcon/rx.c @@ -376,9 +376,9 @@ void ef4_fast_push_rx_descriptors(struct ef4_rx_queue *rx_queue, bool atomic) ef4_nic_notify_rx_desc(rx_queue); } -void ef4_rx_slow_fill(unsigned long context) +void ef4_rx_slow_fill(struct timer_list *t) { - struct ef4_rx_queue *rx_queue = (struct ef4_rx_queue *)context; + struct ef4_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill); /* Post an event to cause NAPI to run and refill the queue */ ef4_nic_generate_fill_event(rx_queue); diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c index 3df872f56289..9c2567b0d93e 100644 --- a/drivers/net/ethernet/sfc/mcdi.c +++ b/drivers/net/ethernet/sfc/mcdi.c @@ -48,7 +48,7 @@ struct efx_mcdi_async_param { /* followed by request/response buffer */ }; -static void efx_mcdi_timeout_async(unsigned long context); +static void efx_mcdi_timeout_async(struct timer_list *t); static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, bool *was_attached_out); static bool efx_mcdi_poll_once(struct efx_nic *efx); @@ -87,8 +87,7 @@ int efx_mcdi_init(struct efx_nic *efx) mcdi->mode = MCDI_MODE_POLL; spin_lock_init(&mcdi->async_lock); INIT_LIST_HEAD(&mcdi->async_list); - setup_timer(&mcdi->async_timer, efx_mcdi_timeout_async, - (unsigned long)mcdi); + timer_setup(&mcdi->async_timer, efx_mcdi_timeout_async, 0); (void) efx_mcdi_poll_reboot(efx); mcdi->new_epoch = true; @@ -608,9 
+607,9 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, } } -static void efx_mcdi_timeout_async(unsigned long context) +static void efx_mcdi_timeout_async(struct timer_list *t) { - struct efx_mcdi_iface *mcdi = (struct efx_mcdi_iface *)context; + struct efx_mcdi_iface *mcdi = from_timer(mcdi, t, async_timer); efx_mcdi_complete_async(mcdi, true); } diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 42443f434569..8cb60513dca2 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -376,9 +376,9 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic) efx_nic_notify_rx_desc(rx_queue); } -void efx_rx_slow_fill(unsigned long context) +void efx_rx_slow_fill(struct timer_list *t) { - struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context; + struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill); /* Post an event to cause NAPI to run and refill the queue */ efx_nic_generate_fill_event(rx_queue); -- cgit v1.2.3 From fd71e13bc7c6ac2d34f08380707662cf07f8234c Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 24 Oct 2017 01:46:52 -0700 Subject: drivers/net: sis: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: Francois Romieu Cc: Daniele Venzano Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: Daniele Venzano Signed-off-by: David S. Miller --- drivers/net/ethernet/sis/sis190.c | 10 ++++------ drivers/net/ethernet/sis/sis900.c | 10 +++++----- 2 files changed, 9 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c index 445109bd6910..c2c50522b96d 100644 --- a/drivers/net/ethernet/sis/sis190.c +++ b/drivers/net/ethernet/sis/sis190.c @@ -1018,10 +1018,10 @@ out_unlock: rtnl_unlock(); } -static void sis190_phy_timer(unsigned long __opaque) +static void sis190_phy_timer(struct timer_list *t) { - struct net_device *dev = (struct net_device *)__opaque; - struct sis190_private *tp = netdev_priv(dev); + struct sis190_private *tp = from_timer(tp, t, timer); + struct net_device *dev = tp->dev; if (likely(netif_running(dev))) schedule_work(&tp->phy_task); @@ -1039,10 +1039,8 @@ static inline void sis190_request_timer(struct net_device *dev) struct sis190_private *tp = netdev_priv(dev); struct timer_list *timer = &tp->timer; - init_timer(timer); + timer_setup(timer, sis190_phy_timer, 0); timer->expires = jiffies + SIS190_PHY_TIMEOUT; - timer->data = (unsigned long)dev; - timer->function = sis190_phy_timer; add_timer(timer); } diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index cb61247b0526..4bb89f74742c 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -218,7 +218,7 @@ static void sis900_init_rxfilter (struct net_device * net_dev); static u16 read_eeprom(void __iomem *ioaddr, int location); static int mdio_read(struct net_device *net_dev, int phy_id, int location); static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val); -static void sis900_timer(unsigned long data); +static void sis900_timer(struct timer_list *t); static void sis900_check_mode (struct net_device *net_dev, struct mii_phy *mii_phy); static void sis900_tx_timeout(struct net_device *net_dev); static void sis900_init_tx_ring(struct 
net_device *net_dev); @@ -1065,7 +1065,7 @@ sis900_open(struct net_device *net_dev) /* Set the timer to switch to check for link beat and perhaps switch to an alternate media type. */ - setup_timer(&sis_priv->timer, sis900_timer, (unsigned long)net_dev); + timer_setup(&sis_priv->timer, sis900_timer, 0); sis_priv->timer.expires = jiffies + HZ; add_timer(&sis_priv->timer); @@ -1300,10 +1300,10 @@ static void sis630_set_eq(struct net_device *net_dev, u8 revision) * link status (ON/OFF) and link mode (10/100/Full/Half) */ -static void sis900_timer(unsigned long data) +static void sis900_timer(struct timer_list *t) { - struct net_device *net_dev = (struct net_device *)data; - struct sis900_private *sis_priv = netdev_priv(net_dev); + struct sis900_private *sis_priv = from_timer(sis_priv, t, timer); + struct net_device *net_dev = sis_priv->mii_info.dev; struct mii_phy *mii_phy = sis_priv->mii; static const int next_tick = 5*HZ; int speed = 0, duplex = 0; -- cgit v1.2.3 From 1769af432a9451774edb4c1221c52437936a01af Mon Sep 17 00:00:00 2001 From: "Steven J. Hill" Date: Wed, 25 Oct 2017 11:44:32 -0500 Subject: ethernet: cavium: octeon: Switch to using netdev_info(). Signed-off-by: Steven J. Hill Signed-off-by: David Daney Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 2887bcaf6af5..3f6afb54a5eb 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -705,14 +705,15 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev, u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate(); if (!ptp.s.ptp_en) cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp); - pr_info("PTP Clock: Using sclk reference at %lld Hz\n", - (NSEC_PER_SEC << 32) / clock_comp); + netdev_info(netdev, + "PTP Clock using sclk reference @ %lldHz\n", + (NSEC_PER_SEC << 32) / clock_comp); } else { /* The clock is already programmed to use a GPIO */ u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP); - pr_info("PTP Clock: Using GPIO %d at %lld Hz\n", - ptp.s.ext_clk_in, - (NSEC_PER_SEC << 32) / clock_comp); + netdev_info(netdev, + "PTP Clock using GPIO%d @ %lld Hz\n", + ptp.s.ext_clk_in, (NSEC_PER_SEC << 32) / clock_comp); } /* Enable the clock if it wasn't done already */ @@ -926,14 +927,11 @@ static void octeon_mgmt_adjust_link(struct net_device *netdev) spin_unlock_irqrestore(&p->lock, flags); if (link_changed != 0) { - if (link_changed > 0) { - pr_info("%s: Link is up - %d/%s\n", netdev->name, - phydev->speed, - phydev->duplex == DUPLEX_FULL ? - "Full" : "Half"); - } else { - pr_info("%s: Link is down\n", netdev->name); - } + if (link_changed > 0) + netdev_info(netdev, "Link is up - %d/%s\n", + phydev->speed, phydev->duplex == DUPLEX_FULL ? "Full" : "Half"); + else + netdev_info(netdev, "Link is down\n"); } } -- cgit v1.2.3 From 0d314502bbfbef7560e5a3e817722128d5c5fc5d Mon Sep 17 00:00:00 2001 From: Doug Berger Date: Wed, 25 Oct 2017 15:04:11 -0700 Subject: net: bcmgenet: correct bad merge As noted in the net-next submission for GENETv5 support [1], there were merge conflicts with an earlier net submission [2] that had not yet found its way to the net-next repository. Unfortunately, when the branches were merged the conflicts were not correctly resolved. 
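As an aside on the sfc and sis patches above: every timer_setup() conversion in this series follows the same recipe, sketched here with an illustrative structure (assumed names, not code from either driver):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_priv {
	struct timer_list timer;	/* must be embedded for from_timer() */
	int ticks;
};

/* New-style callback: it receives the timer itself and recovers the
 * enclosing structure via from_timer(), a container_of() wrapper.
 */
static void my_timer_fn(struct timer_list *t)
{
	struct my_priv *priv = from_timer(priv, t, timer);

	priv->ticks++;
}

static void my_start(struct my_priv *priv)
{
	/* Old idiom: setup_timer(&priv->timer, fn, (unsigned long)priv).
	 * timer_setup() drops the opaque data argument, which is why a
	 * driver that used to stash an unrelated pointer there (as falcon
	 * did with its ef4_nic) must grow a back-pointer field instead.
	 */
	timer_setup(&priv->timer, my_timer_fn, 0);
	mod_timer(&priv->timer, jiffies + HZ);
}
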
This commit attempts to correct that. [1] https://lkml.org/lkml/2017/3/13/1145 [2] https://lkml.org/lkml/2017/3/9/890 Fixes: 101c431492d2 ("Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net") Signed-off-by: Doug Berger Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 9cebca896913..f6e8e01be1c8 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2602,12 +2602,6 @@ static void bcmgenet_irq_task(struct work_struct *work) priv->irq0_stat = 0; spin_unlock_irqrestore(&priv->lock, flags); - if (status & UMAC_IRQ_MPD_R) { - netif_dbg(priv, wol, priv->dev, - "magic packet detected, waking up\n"); - bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); - } - /* Link UP/DOWN event */ if (status & UMAC_IRQ_LINK_EVENT) phy_mac_interrupt(priv->phydev, @@ -2698,23 +2692,13 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) } } - if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | - UMAC_IRQ_PHY_DET_F | - UMAC_IRQ_LINK_EVENT | - UMAC_IRQ_HFB_SM | - UMAC_IRQ_HFB_MM)) { - /* all other interested interrupts handled in bottom half */ - schedule_work(&priv->bcmgenet_irq_work); - } - if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) { wake_up(&priv->wq); } /* all other interested interrupts handled in bottom half */ - status &= (UMAC_IRQ_LINK_EVENT | - UMAC_IRQ_MPD_R); + status &= UMAC_IRQ_LINK_EVENT; if (status) { /* Save irq status for bottom-half processing. */ spin_lock_irqsave(&priv->lock, flags); -- cgit v1.2.3 From 4fd6dc98c19369d24c4a4819b27c114948720d16 Mon Sep 17 00:00:00 2001 From: Doug Berger Date: Wed, 25 Oct 2017 15:04:12 -0700 Subject: net: bcmgenet: prevent duplicate calls of bcmgenet_dma_teardown When bcmgenet_dma_teardown is called from bcmgenet_fini_dma it ends up getting called twice from the bcmgenet_close and bcmgenet_suspend functions (once directly and once inside the bcmgenet_fini_dma call). This commit removes the call from bcmgenet_fini_dma and ensures that bcmgenet_dma_teardown is called before bcmgenet_fini_dma in all paths of execution. Fixes: 4a0c081eff43 ("net: bcmgenet: call bcmgenet_dma_teardown in bcmgenet_fini_dma") Signed-off-by: Doug Berger Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index f6e8e01be1c8..78368466eb70 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2505,9 +2505,6 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) bcmgenet_fini_rx_napi(priv); bcmgenet_fini_tx_napi(priv); - /* disable DMA */ - bcmgenet_dma_teardown(priv); - for (i = 0; i < priv->num_tx_bds; i++) { cb = priv->tx_cbs + i; skb = bcmgenet_free_tx_cb(&priv->pdev->dev, cb); @@ -2930,6 +2927,7 @@ err_irq1: err_irq0: free_irq(priv->irq0, priv); err_fini_dma: + bcmgenet_dma_teardown(priv); bcmgenet_fini_dma(priv); err_clk_disable: if (priv->internal_phy) -- cgit v1.2.3 From 28c2d1a7a0bfdf3617800d2beae1c67983c03d15 Mon Sep 17 00:00:00 2001 From: Doug Berger Date: Wed, 25 Oct 2017 15:04:13 -0700 Subject: net: bcmgenet: enable loopback during UniMAC sw_reset It is necessary for the UniMAC to be clocked at least 5 cycles while the sw_reset is asserted to ensure a clean reset. It was discovered that this condition was not being met when connected to an external RGMII PHY that disabled the Rx clock in the Power Save state. This commit modifies the reset_umac function to place the (RG)MII interface into a local loopback mode where the Rx clock comes from the GENET sourced Tx clk during the sw_reset to ensure the presence and stability of the clock. In addition, it turns out that the sw_reset of the UniMAC is not self clearing, but this was masked by a bug in the timeout code. The sw_reset is now explicitly cleared by zeroing the UMAC_CMD register before returning from reset_umac which makes it no longer necessary to do so in init_umac and makes the clearing of CMD_TX_EN and CMD_RX_EN by umac_enable_set redundant. The timeout code (and its associated bug) are removed so reset_umac no longer needs to return a result, and that means init_umac that calls reset_umac does not need to as well. Fixes: 1c1008c793fa ("net: bcmgenet: add main driver file") Signed-off-by: Doug Berger Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 55 +++++--------------------- 1 file changed, 10 insertions(+), 45 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 78368466eb70..3da177fa2659 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1935,12 +1935,8 @@ static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable) usleep_range(1000, 2000); } -static int reset_umac(struct bcmgenet_priv *priv) +static void reset_umac(struct bcmgenet_priv *priv) { - struct device *kdev = &priv->pdev->dev; - unsigned int timeout = 0; - u32 reg; - /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */ bcmgenet_rbuf_ctrl_set(priv, 0); udelay(10); @@ -1948,23 +1944,10 @@ static int reset_umac(struct bcmgenet_priv *priv) /* disable MAC while updating its registers */ bcmgenet_umac_writel(priv, 0, UMAC_CMD); - /* issue soft reset, wait for it to complete */ - bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD); - while (timeout++ < 1000) { - reg = bcmgenet_umac_readl(priv, UMAC_CMD); - if (!(reg & CMD_SW_RESET)) - return 0; - - udelay(1); - } - - if (timeout == 1000) { - dev_err(kdev, - "timeout waiting for MAC to come out of reset\n"); - return -ETIMEDOUT; - } - - return 0; + /* issue soft reset with (rg)mii loopback to ensure a stable rxclk */ + bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD); + udelay(2); + bcmgenet_umac_writel(priv, 0, UMAC_CMD); } static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) @@ -1994,20 +1977,16 @@ static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv) bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); } -static int init_umac(struct bcmgenet_priv *priv) +static void init_umac(struct bcmgenet_priv *priv) { struct device *kdev = &priv->pdev->dev; - int ret; u32 reg; u32 int0_enable = 0; dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); - ret = reset_umac(priv); - if (ret) - return ret; + reset_umac(priv); - bcmgenet_umac_writel(priv, 0, UMAC_CMD); /* clear tx/rx counter */ bcmgenet_umac_writel(priv, MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, @@ -2046,8 +2025,6 @@ static int init_umac(struct bcmgenet_priv *priv) bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); dev_dbg(kdev, "done init umac\n"); - - return 0; } /* Initialize a Tx ring along with corresponding hardware registers */ @@ -2863,12 +2840,7 @@ static int bcmgenet_open(struct net_device *dev) /* take MAC out of reset */ bcmgenet_umac_reset(priv); - ret = init_umac(priv); - if (ret) - goto err_clk_disable; - - /* disable ethernet MAC while updating its registers */ - umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); + init_umac(priv); /* Make sure we reflect the value of CRC_CMD_FWD */ reg = bcmgenet_umac_readl(priv, UMAC_CMD); @@ -3546,9 +3518,7 @@ static int bcmgenet_probe(struct platform_device *pdev) !strcasecmp(phy_mode_str, "internal")) bcmgenet_power_up(priv, GENET_POWER_PASSIVE); - err = reset_umac(priv); - if (err) - goto err_clk_disable; + reset_umac(priv); err = bcmgenet_mii_init(dev); if (err) @@ -3660,9 +3630,7 @@ static int bcmgenet_resume(struct device *d) bcmgenet_umac_reset(priv); - ret = init_umac(priv); - if (ret) - goto out_clk_disable; + init_umac(priv); /* From WOL-enabled suspend, switch to regular clock */ if (priv->wolopts) @@ -3672,9 +3640,6 @@ static int bcmgenet_resume(struct device *d) /* Speed 
settings must be restored */ bcmgenet_mii_config(priv->dev, false); - /* disable ethernet MAC while updating its registers */ - umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); - bcmgenet_set_hw_addr(priv, dev->dev_addr); if (priv->internal_phy) { -- cgit v1.2.3 From 7587935cfa119e122215e37e002a481d7374198b Mon Sep 17 00:00:00 2001 From: Doug Berger Date: Wed, 25 Oct 2017 15:04:14 -0700 Subject: net: bcmgenet: move NAPI initialization to ring initialization Since each ring has its own NAPI instance it might as well be initialized along with the other ring context. Signed-off-by: Doug Berger Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 42 +++++--------------------- 1 file changed, 8 insertions(+), 34 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 3da177fa2659..9ce6671e8916 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2081,6 +2081,10 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, TDMA_WRITE_PTR); bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, DMA_END_ADDR); + + /* Initialize Tx NAPI */ + netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, + NAPI_POLL_WEIGHT); } /* Initialize a RDMA ring */ @@ -2112,6 +2116,10 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, if (ret) return ret; + /* Initialize Rx NAPI */ + netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, + NAPI_POLL_WEIGHT); + bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX); bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX); bcmgenet_rdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH); @@ -2136,20 +2144,6 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, return ret; } -static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv) -{ - unsigned int i; - struct bcmgenet_tx_ring *ring; - - for (i = 0; i < priv->hw_params->tx_queues; ++i) { - ring = &priv->tx_rings[i]; - netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); - } - - ring = &priv->tx_rings[DESC_INDEX]; - netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); -} - static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv) { unsigned int i; @@ -2263,9 +2257,6 @@ static void bcmgenet_init_tx_queues(struct net_device *dev) bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1); bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2); - /* Initialize Tx NAPI */ - bcmgenet_init_tx_napi(priv); - /* Enable Tx queues */ bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG); @@ -2275,20 +2266,6 @@ static void bcmgenet_init_tx_queues(struct net_device *dev) bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL); } -static void bcmgenet_init_rx_napi(struct bcmgenet_priv *priv) -{ - unsigned int i; - struct bcmgenet_rx_ring *ring; - - for (i = 0; i < priv->hw_params->rx_queues; ++i) { - ring = &priv->rx_rings[i]; - netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64); - } - - ring = &priv->rx_rings[DESC_INDEX]; - netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll, 64); -} - static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv) { unsigned int i; @@ -2391,9 +2368,6 @@ static int bcmgenet_init_rx_queues(struct net_device *dev) ring_cfg |= (1 << DESC_INDEX); dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)); - /* Initialize Rx NAPI */ - bcmgenet_init_rx_napi(priv); - /* Enable rings */ bcmgenet_rdma_writel(priv, 
ring_cfg, DMA_RING_CFG); -- cgit v1.2.3 From fbf557d9d1bf93892db70121061c81aaded41607 Mon Sep 17 00:00:00 2001 From: Doug Berger Date: Wed, 25 Oct 2017 15:04:15 -0700 Subject: net: bcmgenet: cleanup ring interrupt masking and unmasking Since the NAPI interrupts are basically ignored when NAPI is disabled we don't need to mask them within the functions bcmgenet_disable_tx_napi() and bcmgenet_disable_rx_napi(). So wait until all NAPI instances are disabled and mask all of the bcmgenet driver interrupts together in bcmgenet_netif_stop(). The interrupts can still be enabled in the functions bcmgenet_enable_tx_napi() and bcmgenet_enable_rx_napi(), but use the ring context int_enable() method to keep the functionality consistent and the code cleaner. Signed-off-by: Doug Berger Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 28 +++++--------------------- 1 file changed, 5 insertions(+), 23 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 9ce6671e8916..88aacf3bf44f 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2147,33 +2147,24 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv, static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv) { unsigned int i; - u32 int0_enable = UMAC_IRQ_TXDMA_DONE; - u32 int1_enable = 0; struct bcmgenet_tx_ring *ring; for (i = 0; i < priv->hw_params->tx_queues; ++i) { ring = &priv->tx_rings[i]; napi_enable(&ring->napi); - int1_enable |= (1 << i); + ring->int_enable(ring); } ring = &priv->tx_rings[DESC_INDEX]; napi_enable(&ring->napi); - - bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); - bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); + ring->int_enable(ring); } static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv) { unsigned int i; - u32 int0_disable = UMAC_IRQ_TXDMA_DONE; - u32 int1_disable = 0xffff; struct bcmgenet_tx_ring *ring; - bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET); - bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET); - for (i = 0; i < priv->hw_params->tx_queues; ++i) { ring = &priv->tx_rings[i]; napi_disable(&ring->napi); @@ -2269,33 +2260,24 @@ static void bcmgenet_init_tx_queues(struct net_device *dev) static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv) { unsigned int i; - u32 int0_enable = UMAC_IRQ_RXDMA_DONE; - u32 int1_enable = 0; struct bcmgenet_rx_ring *ring; for (i = 0; i < priv->hw_params->rx_queues; ++i) { ring = &priv->rx_rings[i]; napi_enable(&ring->napi); - int1_enable |= (1 << (UMAC_IRQ1_RX_INTR_SHIFT + i)); + ring->int_enable(ring); } ring = &priv->rx_rings[DESC_INDEX]; napi_enable(&ring->napi); - - bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR); - bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR); + ring->int_enable(ring); } static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv) { unsigned int i; - u32 int0_disable = UMAC_IRQ_RXDMA_DONE; - u32 int1_disable = 0xffff << UMAC_IRQ1_RX_INTR_SHIFT; struct bcmgenet_rx_ring *ring; - bcmgenet_intrl2_0_writel(priv, int0_disable, INTRL2_CPU_MASK_SET); - bcmgenet_intrl2_1_writel(priv, int1_disable, INTRL2_CPU_MASK_SET); - for (i = 0; i < priv->hw_params->rx_queues; ++i) { ring = &priv->rx_rings[i]; napi_disable(&ring->napi); @@ -2888,9 +2870,9 @@ static void bcmgenet_netif_stop(struct net_device *dev) 
netif_tx_stop_all_queues(dev); phy_stop(priv->phydev); - bcmgenet_intr_disable(priv); bcmgenet_disable_rx_napi(priv); bcmgenet_disable_tx_napi(priv); + bcmgenet_intr_disable(priv); /* Wait for pending work items to complete. Since interrupts are * disabled no new work will be scheduled. -- cgit v1.2.3 From d215dbac48ab9e77c680fcd28863ccc227a5657e Mon Sep 17 00:00:00 2001 From: Doug Berger Date: Wed, 25 Oct 2017 15:04:16 -0700 Subject: net: bcmgenet: rework bcmgenet_netif_start and bcmgenet_netif_stop This commit consolidates more common functionality from bcmgenet_close and bcmgenet_suspend into bcmgenet_netif_stop and modifies the start and stop sequences to better suit the design of the GENET hardware. Signed-off-by: Doug Berger Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 49 +++++++++----------------- 1 file changed, 17 insertions(+), 32 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 88aacf3bf44f..747224714394 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2763,11 +2763,11 @@ static void bcmgenet_netif_start(struct net_device *dev) /* Start the network engine */ bcmgenet_enable_rx_napi(priv); - bcmgenet_enable_tx_napi(priv); umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true); netif_tx_start_all_queues(dev); + bcmgenet_enable_tx_napi(priv); /* Monitor link interrupts now */ bcmgenet_link_intr_enable(priv); @@ -2868,10 +2868,19 @@ static void bcmgenet_netif_stop(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); + bcmgenet_disable_tx_napi(priv); netif_tx_stop_all_queues(dev); + + /* Disable MAC receive */ + umac_enable_set(priv, CMD_RX_EN, false); + + bcmgenet_dma_teardown(priv); + + /* Disable MAC transmit. TX DMA disabled must be done before this */ + umac_enable_set(priv, CMD_TX_EN, false); + phy_stop(priv->phydev); bcmgenet_disable_rx_napi(priv); - bcmgenet_disable_tx_napi(priv); bcmgenet_intr_disable(priv); /* Wait for pending work items to complete. Since interrupts are @@ -2883,12 +2892,16 @@ static void bcmgenet_netif_stop(struct net_device *dev) priv->old_speed = -1; priv->old_duplex = -1; priv->old_pause = -1; + + /* tx reclaim */ + bcmgenet_tx_reclaim_all(dev); + bcmgenet_fini_dma(priv); } static int bcmgenet_close(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); - int ret; + int ret = 0; netif_dbg(priv, ifdown, dev, "bcmgenet_close\n"); @@ -2897,20 +2910,6 @@ static int bcmgenet_close(struct net_device *dev) /* Really kill the PHY state machine and disconnect from it */ phy_disconnect(priv->phydev); - /* Disable MAC receive */ - umac_enable_set(priv, CMD_RX_EN, false); - - ret = bcmgenet_dma_teardown(priv); - if (ret) - return ret; - - /* Disable MAC transmit. 
TX DMA disabled must be done before this */ - umac_enable_set(priv, CMD_TX_EN, false); - - /* tx reclaim */ - bcmgenet_tx_reclaim_all(dev); - bcmgenet_fini_dma(priv); - free_irq(priv->irq0, priv); free_irq(priv->irq1, priv); @@ -3522,7 +3521,7 @@ static int bcmgenet_suspend(struct device *d) { struct net_device *dev = dev_get_drvdata(d); struct bcmgenet_priv *priv = netdev_priv(dev); - int ret; + int ret = 0; if (!netif_running(dev)) return 0; @@ -3534,20 +3533,6 @@ static int bcmgenet_suspend(struct device *d) netif_device_detach(dev); - /* Disable MAC receive */ - umac_enable_set(priv, CMD_RX_EN, false); - - ret = bcmgenet_dma_teardown(priv); - if (ret) - return ret; - - /* Disable MAC transmit. TX DMA disabled must be done before this */ - umac_enable_set(priv, CMD_TX_EN, false); - - /* tx reclaim */ - bcmgenet_tx_reclaim_all(dev); - bcmgenet_fini_dma(priv); - /* Prepare the device for Wake-on-LAN and switch to the slow clock */ if (device_may_wakeup(d) && priv->wolopts) { ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); -- cgit v1.2.3 From b0447ecb533270cf857ebee1133cb8ff67115423 Mon Sep 17 00:00:00 2001 From: Doug Berger Date: Wed, 25 Oct 2017 15:04:17 -0700 Subject: net: bcmgenet: relax lock constraints to reduce IRQ latency Since the ring locks are not used in a hard IRQ context it is often not necessary to disable global IRQs while waiting on a lock. Using less restrictive lock and unlock calls improves the real-time responsiveness of the system. Signed-off-by: Doug Berger Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 747224714394..91f52c1b5108 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1405,11 +1405,10 @@ static unsigned int bcmgenet_tx_reclaim(struct net_device *dev, struct bcmgenet_tx_ring *ring) { unsigned int released; - unsigned long flags; - spin_lock_irqsave(&ring->lock, flags); + spin_lock_bh(&ring->lock); released = __bcmgenet_tx_reclaim(dev, ring); - spin_unlock_irqrestore(&ring->lock, flags); + spin_unlock_bh(&ring->lock); return released; } @@ -1420,15 +1419,14 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget) container_of(napi, struct bcmgenet_tx_ring, napi); unsigned int work_done = 0; struct netdev_queue *txq; - unsigned long flags; - spin_lock_irqsave(&ring->lock, flags); + spin_lock(&ring->lock); work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring); if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { txq = netdev_get_tx_queue(ring->priv->dev, ring->queue); netif_tx_wake_queue(txq); } - spin_unlock_irqrestore(&ring->lock, flags); + spin_unlock(&ring->lock); if (work_done == 0) { napi_complete(napi); @@ -1523,7 +1521,6 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) struct bcmgenet_tx_ring *ring = NULL; struct enet_cb *tx_cb_ptr; struct netdev_queue *txq; - unsigned long flags = 0; int nr_frags, index; dma_addr_t mapping; unsigned int size; @@ -1550,7 +1547,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) nr_frags = skb_shinfo(skb)->nr_frags; - spin_lock_irqsave(&ring->lock, flags); + spin_lock(&ring->lock); if (ring->free_bds <= (nr_frags + 1)) { if (!netif_tx_queue_stopped(txq)) { netif_tx_stop_queue(txq); @@ -1645,7 +1642,7 @@ static 
netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) bcmgenet_tdma_ring_writel(priv, ring->index, ring->prod_index, TDMA_PROD_INDEX); out: - spin_unlock_irqrestore(&ring->lock, flags); + spin_unlock(&ring->lock); return ret; @@ -2520,17 +2517,16 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv) /* Interrupt bottom half */ static void bcmgenet_irq_task(struct work_struct *work) { - unsigned long flags; unsigned int status; struct bcmgenet_priv *priv = container_of( work, struct bcmgenet_priv, bcmgenet_irq_work); netif_dbg(priv, intr, priv->dev, "%s\n", __func__); - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irq(&priv->lock); status = priv->irq0_stat; priv->irq0_stat = 0; - spin_unlock_irqrestore(&priv->lock, flags); + spin_unlock_irq(&priv->lock); /* Link UP/DOWN event */ if (status & UMAC_IRQ_LINK_EVENT) @@ -2927,7 +2923,6 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring) u32 p_index, c_index, intsts, intmsk; struct netdev_queue *txq; unsigned int free_bds; - unsigned long flags; bool txq_stopped; if (!netif_msg_tx_err(priv)) @@ -2935,7 +2930,7 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring) txq = netdev_get_tx_queue(priv->dev, ring->queue); - spin_lock_irqsave(&ring->lock, flags); + spin_lock(&ring->lock); if (ring->index == DESC_INDEX) { intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS); intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE; @@ -2947,7 +2942,7 @@ static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring) p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX); txq_stopped = netif_tx_queue_stopped(txq); free_bds = ring->free_bds; - spin_unlock_irqrestore(&ring->lock, flags); + spin_unlock(&ring->lock); netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n" "TX queue status: %s, interrupts: %s\n" -- cgit v1.2.3 From 484bfa1507bf71cecc7833ae7f7272d8af49badc Mon Sep 17 00:00:00 2001 From: Doug Berger Date: Wed, 25 Oct 2017 15:04:18 -0700 Subject: Revert "net: bcmgenet: Software reset EPHY after power on" With commit f7d72996e222 ("net: bcmgenet: enable loopback during UniMAC sw_reset") it is no longer necessary to force the software reset of the internal EPHY before resetting the UniMAC to ensure a clean reset. Therefore this commit reverts commit 5dbebbb44a6a ("net: bcmgenet: Software reset EPHY after power on"). Signed-off-by: Doug Berger Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 1 - drivers/net/ethernet/broadcom/genet/bcmgenet.h | 1 - drivers/net/ethernet/broadcom/genet/bcmmii.c | 16 ---------------- 3 files changed, 18 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 91f52c1b5108..54b09a01cb2c 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1172,7 +1172,6 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv, } bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); bcmgenet_phy_power_set(priv->dev, true); - bcmgenet_mii_reset(priv->dev); break; case GENET_POWER_CABLE_SENSE: diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 4c49d0b97748..35f18a8d1ce6 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -711,7 +711,6 @@ int bcmgenet_mii_init(struct net_device *dev); int bcmgenet_mii_config(struct net_device *dev, bool init); int bcmgenet_mii_probe(struct net_device *dev); void bcmgenet_mii_exit(struct net_device *dev); -void bcmgenet_mii_reset(struct net_device *dev); void bcmgenet_phy_power_set(struct net_device *dev, bool enable); void bcmgenet_mii_setup(struct net_device *dev); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 18f5723be2c9..a5ae9b78389c 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -121,22 +121,6 @@ static int bcmgenet_fixed_phy_link_update(struct net_device *dev, return 0; } -/* Perform a voluntary PHY software reset, since the EPHY is very finicky about - * not doing it and will start corrupting packets - */ -void bcmgenet_mii_reset(struct net_device *dev) -{ - struct bcmgenet_priv *priv = netdev_priv(dev); - - if (GENET_IS_V4(priv)) - return; - - if (priv->phydev) { - phy_init_hw(priv->phydev); - phy_start_aneg(priv->phydev); - } -} - void bcmgenet_phy_power_set(struct net_device *dev, bool enable) { struct bcmgenet_priv *priv = netdev_priv(dev); -- cgit v1.2.3 From 6c97f010cee28e3f262c547215fb0e8702bdb654 Mon Sep 17 00:00:00 2001 From: Doug Berger Date: Wed, 25 Oct 2017 15:04:19 -0700 Subject: net: bcmgenet: use dev->phydev instead of priv->phydev Now that the software reset of the PHY has been removed it is no longer necessary to retain a private pointer to the phydev for use when the PHY is detached (which isn't generally safe anyway). The driver now uses the phydev member attached to the net_device. For ethtool commands that have a PHY component, an explicit check is made to prevent accessing an invalid phydev pointer when one is not attached (e.g. interface is down). Signed-off-by: Doug Berger Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 47 +++++++++++++------------- drivers/net/ethernet/broadcom/genet/bcmgenet.h | 1 - drivers/net/ethernet/broadcom/genet/bcmmii.c | 17 ++++------ 3 files changed, 31 insertions(+), 34 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 54b09a01cb2c..9713374ebf14 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -488,15 +488,13 @@ static void bcmgenet_complete(struct net_device *dev) static int bcmgenet_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { - struct bcmgenet_priv *priv = netdev_priv(dev); - if (!netif_running(dev)) return -EINVAL; - if (!priv->phydev) + if (!dev->phydev) return -ENODEV; - phy_ethtool_ksettings_get(priv->phydev, cmd); + phy_ethtool_ksettings_get(dev->phydev, cmd); return 0; } @@ -504,15 +502,13 @@ static int bcmgenet_get_link_ksettings(struct net_device *dev, static int bcmgenet_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { - struct bcmgenet_priv *priv = netdev_priv(dev); - if (!netif_running(dev)) return -EINVAL; - if (!priv->phydev) + if (!dev->phydev) return -ENODEV; - return phy_ethtool_ksettings_set(priv->phydev, cmd); + return phy_ethtool_ksettings_set(dev->phydev, cmd); } static int bcmgenet_set_rx_csum(struct net_device *dev, @@ -1042,11 +1038,14 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e) if (GENET_IS_V1(priv)) return -EOPNOTSUPP; + if (!dev->phydev) + return -ENODEV; + e->eee_enabled = p->eee_enabled; e->eee_active = p->eee_active; e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER); - return phy_ethtool_get_eee(priv->phydev, e); + return phy_ethtool_get_eee(dev->phydev, e); } static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e) @@ -1058,12 +1057,15 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e) if (GENET_IS_V1(priv)) return -EOPNOTSUPP; + if (!dev->phydev) + return -ENODEV; + p->eee_enabled = e->eee_enabled; if (!p->eee_enabled) { bcmgenet_eee_enable_set(dev, false); } else { - ret = phy_init_eee(priv->phydev, 0); + ret = phy_init_eee(dev->phydev, 0); if (ret) { netif_err(priv, hw, dev, "EEE initialization failed\n"); return ret; @@ -1073,7 +1075,7 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e) bcmgenet_eee_enable_set(dev, true); } - return phy_ethtool_set_eee(priv->phydev, e); + return phy_ethtool_set_eee(dev->phydev, e); } /* standard ethtool support functions. */ @@ -1107,7 +1109,7 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv, switch (mode) { case GENET_POWER_CABLE_SENSE: - phy_detach(priv->phydev); + phy_detach(priv->dev->phydev); break; case GENET_POWER_WOL_MAGIC: @@ -1192,15 +1194,13 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv, /* ioctl handle special commands that are not present in ethtool. 
*/ static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - struct bcmgenet_priv *priv = netdev_priv(dev); - if (!netif_running(dev)) return -EINVAL; - if (!priv->phydev) + if (!dev->phydev) return -ENODEV; - return phy_mii_ioctl(priv->phydev, rq, cmd); + return phy_mii_ioctl(dev->phydev, rq, cmd); } static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, @@ -2529,7 +2529,7 @@ static void bcmgenet_irq_task(struct work_struct *work) /* Link UP/DOWN event */ if (status & UMAC_IRQ_LINK_EVENT) - phy_mac_interrupt(priv->phydev, + phy_mac_interrupt(priv->dev->phydev, !!(status & UMAC_IRQ_LINK_UP)); } @@ -2767,7 +2767,7 @@ static void bcmgenet_netif_start(struct net_device *dev) /* Monitor link interrupts now */ bcmgenet_link_intr_enable(priv); - phy_start(priv->phydev); + phy_start(dev->phydev); } static int bcmgenet_open(struct net_device *dev) @@ -2874,7 +2874,7 @@ static void bcmgenet_netif_stop(struct net_device *dev) /* Disable MAC transmit. TX DMA disabled must be done before this */ umac_enable_set(priv, CMD_TX_EN, false); - phy_stop(priv->phydev); + phy_stop(dev->phydev); bcmgenet_disable_rx_napi(priv); bcmgenet_intr_disable(priv); @@ -2903,7 +2903,7 @@ static int bcmgenet_close(struct net_device *dev) bcmgenet_netif_stop(dev); /* Really kill the PHY state machine and disconnect from it */ - phy_disconnect(priv->phydev); + phy_disconnect(dev->phydev); free_irq(priv->irq0, priv); free_irq(priv->irq1, priv); @@ -3523,7 +3523,7 @@ static int bcmgenet_suspend(struct device *d) bcmgenet_netif_stop(dev); if (!device_may_wakeup(d)) - phy_suspend(priv->phydev); + phy_suspend(dev->phydev); netif_device_detach(dev); @@ -3571,7 +3571,8 @@ static int bcmgenet_resume(struct device *d) if (priv->wolopts) clk_disable_unprepare(priv->clk_wol); - phy_init_hw(priv->phydev); + phy_init_hw(dev->phydev); + /* Speed settings must be restored */ bcmgenet_mii_config(priv->dev, false); @@ -3602,7 +3603,7 @@ static int bcmgenet_resume(struct device *d) netif_device_attach(dev); if (!device_may_wakeup(d)) - phy_resume(priv->phydev); + phy_resume(dev->phydev); if (priv->eee.eee_enabled) bcmgenet_eee_enable_set(dev, true); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 35f18a8d1ce6..3c50431ccd2a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -617,7 +617,6 @@ struct bcmgenet_priv { /* MDIO bus variables */ wait_queue_head_t wq; - struct phy_device *phydev; bool internal_phy; struct device_node *phy_dn; struct device_node *mdio_dn; diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index a5ae9b78389c..ba3fcfdaa0bc 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -34,7 +34,7 @@ void bcmgenet_mii_setup(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct phy_device *phydev = dev->phydev; u32 reg, cmd_bits = 0; bool status_changed = false; @@ -166,14 +166,14 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) } if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET) - fixed_phy_set_link_update(priv->phydev, + fixed_phy_set_link_update(priv->dev->phydev, bcmgenet_fixed_phy_link_update); } int bcmgenet_mii_config(struct net_device *dev, bool init) { struct bcmgenet_priv *priv = netdev_priv(dev); - struct phy_device *phydev = priv->phydev; + struct 
phy_device *phydev = dev->phydev; struct device *kdev = &priv->pdev->dev; const char *phy_name = NULL; u32 id_mode_dis = 0; @@ -220,7 +220,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) * capabilities, use that knowledge to also configure the * Reverse MII interface correctly. */ - if ((priv->phydev->supported & PHY_BASIC_FEATURES) == + if ((dev->phydev->supported & PHY_BASIC_FEATURES) == PHY_BASIC_FEATURES) port_ctrl = PORT_MODE_EXT_RVMII_25; else @@ -290,7 +290,7 @@ int bcmgenet_mii_probe(struct net_device *dev) return -ENODEV; } } else { - phydev = priv->phydev; + phydev = dev->phydev; phydev->dev_flags = phy_flags; ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup, @@ -301,8 +301,6 @@ int bcmgenet_mii_probe(struct net_device *dev) } } - priv->phydev = phydev; - /* Configure port multiplexer based on what the probed PHY device since * reading the 'max-speed' property determines the maximum supported * PHY speed which is needed for bcmgenet_mii_config() to configure @@ -310,7 +308,7 @@ int bcmgenet_mii_probe(struct net_device *dev) */ ret = bcmgenet_mii_config(dev, true); if (ret) { - phy_disconnect(priv->phydev); + phy_disconnect(dev->phydev); return ret; } @@ -320,7 +318,7 @@ int bcmgenet_mii_probe(struct net_device *dev) * Ethernet MAC ISRs */ if (priv->internal_phy) - priv->phydev->irq = PHY_IGNORE_INTERRUPT; + dev->phydev->irq = PHY_IGNORE_INTERRUPT; return 0; } @@ -529,7 +527,6 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) } - priv->phydev = phydev; priv->phy_interface = pd->phy_interface; return 0; -- cgit v1.2.3 From 7036d26f328f12a323069eb16d965055b4cb3795 Mon Sep 17 00:00:00 2001 From: Lipeng Date: Tue, 24 Oct 2017 21:02:09 +0800 Subject: net: hns3: fix the bug of hns3_set_txbd_baseinfo The SC bits of a TX BD mean switch control. For this area, value 0 indicates no switch control: the packet is routed according to the forwarding table. Value 1 indicates that the packet is transmitted to the network bypassing the forwarding table. As the HNS3 driver needs to support VFs later, and a VF communicating with its own PF requires the forwarding table, this patch sets the SC bits of the TX BD to 0 so that the forwarding table is used. Fixes: 76ad4f0 (net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC) Signed-off-by: Lipeng Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 537f6c3babb7..c6c5b2a96aaa 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -716,7 +716,7 @@ static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) HNS3_TXD_BDTYPE_M, 0); hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); - hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 1); + hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); } static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, -- cgit v1.2.3 From 3a46f34d20d453f09defb76b11a567647939c0aa Mon Sep 17 00:00:00 2001 From: Lipeng Date: Tue, 24 Oct 2017 21:02:10 +0800 Subject: net: hns3: add nic_client check when initialize roce base information The RoCE driver works on top of the HNS3 driver. If the RoCE driver is insmod'ed before the NIC driver, there is an error because nic_client is not checked. This patch adds a nic_client check when initializing the RoCE base information.
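For illustration, a minimal sketch of the check this patch introduces (the helper name hclge_roce_init_allowed() is invented for this sketch; the fields are the ones used in the hunk below):

/* Sketch (hypothetical helper): the RoCE base information is derived
 * from NIC-client state, so it must not be initialized until both
 * clients have registered, whatever order the modules were loaded in.
 */
static bool hclge_roce_init_allowed(struct hclge_dev *hdev)
{
	return hdev->roce_client && hdev->nic_client;
}

/* Usage, mirroring the hunk below:
 *	if (hclge_roce_init_allowed(hdev))
 *		ret = hclge_init_roce_base_info(vport);
 */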
Fixes: 46a3df9 (net: hns3: Add HNS3 Acceleration Engine & Compatibility Layer Support) Signed-off-by: Lipeng Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 443124177f05..2c22d3cf6d1e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -4285,7 +4285,7 @@ static int hclge_init_client_instance(struct hnae3_client *client, vport->roce.client = client; } - if (hdev->roce_client) { + if (hdev->roce_client && hdev->nic_client) { ret = hclge_init_roce_base_info(vport); if (ret) goto err; -- cgit v1.2.3 From a17dcf3f0124698d1120da71574bf4c339e5a368 Mon Sep 17 00:00:00 2001 From: Lipeng Date: Tue, 24 Oct 2017 21:02:11 +0800 Subject: net: hns3: fix a bug in hclge_uninit_client_instance The HNS3 driver initializes hdev->roce_client and vport->roce.client in hclge_init_client_instance, and needs to set hdev->roce_client and vport->roce.client back to NULL on uninit. If they are not set to NULL on uninit, the driver fails in this scenario: insmod hns3.ko, hns-roce.ko and hns-roce-hw-v2.ko successfully, then rmmod hns3.ko after rmmod'ing hns-roce-hw-v2.ko and hns-roce.ko. This patch fixes the issue. Fixes: 46a3df9 (net: hns3: Add HNS3 Acceleration Engine & Compatibility Layer Support) Signed-off-by: Lipeng Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 2c22d3cf6d1e..d11a9a56c7d8 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -4311,13 +4311,19 @@ static void hclge_uninit_client_instance(struct hnae3_client *client, for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { vport = &hdev->vport[i]; - if (hdev->roce_client) + if (hdev->roce_client) { hdev->roce_client->ops->uninit_instance(&vport->roce, 0); + hdev->roce_client = NULL; + vport->roce.client = NULL; + } if (client->type == HNAE3_CLIENT_ROCE) return; - if (client->ops->uninit_instance) + if (client->ops->uninit_instance) { client->ops->uninit_instance(&vport->nic, 0); + hdev->nic_client = NULL; + vport->nic.client = NULL; + } } } -- cgit v1.2.3 From c3b6f755fdcd2c0d8342c01e630741928a7c62ab Mon Sep 17 00:00:00 2001 From: Lipeng Date: Tue, 24 Oct 2017 21:02:12 +0800 Subject: net: hns3: fix the bug when reuse command description in hclge_add_mac_vlan_tbl When reusing a command descriptor read from HW, the driver should set the IN_VLD bit, the WR bit and the NO_INTR bit. If the IN_VLD bit and the NO_INTR bit are not set, the command fails and the driver prints an error message: [ 135.261284] hns3 0000:7d:00.0: cmdq execute failed for get_mac_vlan_cmd_status,status=2. [ 135.270983] hns3 0000:7d:00.0: add mac addr failed for cmd_send, ret =-5. This patch fixes the bug. Fixes: 46a3df9 (net: hns3: Add HNS3 Acceleration Engine & Compatibility Layer Support) Signed-off-by: Lipeng Signed-off-by: David S.
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index d11a9a56c7d8..0b95fbe63ac1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3600,11 +3600,11 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, resp_code, HCLGE_MAC_VLAN_ADD); } else { - mc_desc[0].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); + hclge_cmd_reuse_desc(&mc_desc[0], false); mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); - mc_desc[1].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); + hclge_cmd_reuse_desc(&mc_desc[1], false); mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); - mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); + hclge_cmd_reuse_desc(&mc_desc[2], false); mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); memcpy(mc_desc[0].data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd)); -- cgit v1.2.3 From acfdf7eabea4186a386ba5e656f0c739563cb1a5 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Tue, 24 Oct 2017 19:28:26 +0530 Subject: cxgb4: fix overflow in collecting IBQ and OBQ dump Destination buffer already has offset added. So, don't add offset again. Fetch actual size of configured OBQ from hardware, instead of using hardcoded value. Fixes: 7c075ce221cf ("cxgb4: collect IBQ and OBQ dumps") Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 20 ++++++++++++++------ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h | 1 + drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 16 +++++++++++++++- 3 files changed, 30 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index c451b2e42a6c..19da54f83e52 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -146,8 +146,7 @@ static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init, /* t4_read_cim_ibq will return no. of read words or error */ no_of_read_words = t4_read_cim_ibq(padap, qid, - (u32 *)((u32 *)temp_buff.data + - temp_buff.offset), qsize); + (u32 *)temp_buff.data, qsize); /* no_of_read_words is less than or equal to 0 means error */ if (no_of_read_words <= 0) { if (!no_of_read_words) @@ -204,6 +203,17 @@ int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init, return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5); } +u32 cudbg_cim_obq_size(struct adapter *padap, int qid) +{ + u32 value; + + t4_write_reg(padap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F | + QUENUMSELECT_V(qid)); + value = t4_read_reg(padap, CIM_QUEUE_CONFIG_CTRL_A); + value = CIMQSIZE_G(value) * 64; /* size in number of words */ + return value * sizeof(u32); +} + static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err, int qid) @@ -214,15 +224,14 @@ static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init, u32 qsize; /* collect CIM OBQ */ - qsize = 6 * CIM_OBQ_SIZE * 4 * sizeof(u32); + qsize = cudbg_cim_obq_size(padap, qid); rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff); if (rc) return rc; /* t4_read_cim_obq will return no. 
of read words or error */ no_of_read_words = t4_read_cim_obq(padap, qid, - (u32 *)((u32 *)temp_buff.data + - temp_buff.offset), qsize); + (u32 *)temp_buff.data, qsize); /* no_of_read_words is less than or equal to 0 means error */ if (no_of_read_words <= 0) { if (!no_of_read_words) @@ -233,7 +242,6 @@ static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init, cudbg_put_buff(&temp_buff, dbg_buff); return rc; } - temp_buff.size = no_of_read_words * 4; cudbg_write_and_release_buff(&temp_buff, dbg_buff); return rc; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index c4440c1d0142..df24c409c82f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -100,4 +100,5 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init, struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i); void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff, struct cudbg_entity_hdr *entity_hdr); +u32 cudbg_cim_obq_size(struct adapter *padap, int qid); #endif /* __CUDBG_LIB_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 9d97080a9d17..59740ac7e46e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -82,14 +82,28 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) len = CIM_IBQ_SIZE * 4 * sizeof(u32); break; case CUDBG_CIM_OBQ_ULP0: + len = cudbg_cim_obq_size(adap, 0); + break; case CUDBG_CIM_OBQ_ULP1: + len = cudbg_cim_obq_size(adap, 1); + break; case CUDBG_CIM_OBQ_ULP2: + len = cudbg_cim_obq_size(adap, 2); + break; case CUDBG_CIM_OBQ_ULP3: + len = cudbg_cim_obq_size(adap, 3); + break; case CUDBG_CIM_OBQ_SGE: + len = cudbg_cim_obq_size(adap, 4); + break; case CUDBG_CIM_OBQ_NCSI: + len = cudbg_cim_obq_size(adap, 5); + break; case CUDBG_CIM_OBQ_RXQ0: + len = cudbg_cim_obq_size(adap, 6); + break; case CUDBG_CIM_OBQ_RXQ1: - len = 6 * CIM_OBQ_SIZE * 4 * sizeof(u32); + len = cudbg_cim_obq_size(adap, 7); break; case CUDBG_EDC0: value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A); -- cgit v1.2.3 From b5beecb580376cd8d959eb990abece6a748a3ce3 Mon Sep 17 00:00:00 2001 From: Corentin Labbe Date: Tue, 24 Oct 2017 19:57:12 +0200 Subject: net: stmmac: snps, dwmac-mdio MDIOs are automatically registered The stmmac binding documentation says that an stmmac mdio node must have compatible = "snps,dwmac-mdio". Since dwmac-sun8i has no good reason not to do so, all of its MDIO nodes must have it. Because this compatible is registered automatically, the dwmac-sun8i compatibles no longer need to be listed in need_mdio_ids. Signed-off-by: Corentin Labbe Signed-off-by: David S.
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 8a280b48e3a9..9e616da0745d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -311,10 +311,6 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat, bool mdio = true; static const struct of_device_id need_mdio_ids[] = { { .compatible = "snps,dwc-qos-ethernet-4.10" }, - { .compatible = "allwinner,sun8i-a83t-emac" }, - { .compatible = "allwinner,sun8i-h3-emac" }, - { .compatible = "allwinner,sun8i-v3s-emac" }, - { .compatible = "allwinner,sun50i-a64-emac" }, {}, }; -- cgit v1.2.3 From 634db83b82658f4641d8026e340c6027cf74a6bb Mon Sep 17 00:00:00 2001 From: Corentin Labbe Date: Tue, 24 Oct 2017 19:57:13 +0200 Subject: net: stmmac: dwmac-sun8i: Handle integrated/external MDIOs The Allwinner H3 SoC has two distinct MDIO buses, of which only one can be active at a time. The active MDIO bus is selected via some bits in the EMAC register of the system controller. This patch implements this MDIO switch via a custom MDIO-mux. Signed-off-by: Corentin Labbe Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/Kconfig | 1 + drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 353 ++++++++++++++-------- 2 files changed, 224 insertions(+), 130 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 97035766c291..e28c0d2c58e9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -159,6 +159,7 @@ config DWMAC_SUN8I tristate "Allwinner sun8i GMAC support" default ARCH_SUNXI depends on OF && (ARCH_SUNXI || COMPILE_TEST) + select MDIO_BUS_MUX ---help--- Support for Allwinner H3 A83T A64 EMAC ethernet controllers. diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 39c2122a4f26..b3eb344bb158 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -41,14 +42,14 @@ * This value is used for disabling properly EMAC * and used as a good starting value in case of the * boot process(uboot) leave some stuff.
- * @internal_phy: Does the MAC embed an internal PHY + * @soc_has_internal_phy: Does the MAC embed an internal PHY * @support_mii: Does the MAC handle MII * @support_rmii: Does the MAC handle RMII * @support_rgmii: Does the MAC handle RGMII */ struct emac_variant { u32 default_syscon_value; - int internal_phy; + bool soc_has_internal_phy; bool support_mii; bool support_rmii; bool support_rgmii; @@ -61,7 +62,8 @@ struct emac_variant { * @rst_ephy: reference to the optional EPHY reset for the internal PHY * @variant: reference to the current board variant * @regmap: regmap for using the syscon - * @use_internal_phy: Does the current PHY choice imply using the internal PHY + * @internal_phy_powered: Does the internal PHY is enabled + * @mux_handle: Internal pointer used by mdio-mux lib */ struct sunxi_priv_data { struct clk *tx_clk; @@ -70,12 +72,13 @@ struct sunxi_priv_data { struct reset_control *rst_ephy; const struct emac_variant *variant; struct regmap *regmap; - bool use_internal_phy; + bool internal_phy_powered; + void *mux_handle; }; static const struct emac_variant emac_variant_h3 = { .default_syscon_value = 0x58000, - .internal_phy = PHY_INTERFACE_MODE_MII, + .soc_has_internal_phy = true, .support_mii = true, .support_rmii = true, .support_rgmii = true @@ -83,20 +86,20 @@ static const struct emac_variant emac_variant_h3 = { static const struct emac_variant emac_variant_v3s = { .default_syscon_value = 0x38000, - .internal_phy = PHY_INTERFACE_MODE_MII, + .soc_has_internal_phy = true, .support_mii = true }; static const struct emac_variant emac_variant_a83t = { .default_syscon_value = 0, - .internal_phy = 0, + .soc_has_internal_phy = false, .support_mii = true, .support_rgmii = true }; static const struct emac_variant emac_variant_a64 = { .default_syscon_value = 0, - .internal_phy = 0, + .soc_has_internal_phy = false, .support_mii = true, .support_rmii = true, .support_rgmii = true @@ -195,6 +198,9 @@ static const struct emac_variant emac_variant_a64 = { #define H3_EPHY_LED_POL BIT(17) /* 1: active low, 0: active high */ #define H3_EPHY_SHUTDOWN BIT(16) /* 1: shutdown, 0: power up */ #define H3_EPHY_SELECT BIT(15) /* 1: internal PHY, 0: external PHY */ +#define H3_EPHY_MUX_MASK (H3_EPHY_SHUTDOWN | H3_EPHY_SELECT) +#define DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID 1 +#define DWMAC_SUN8I_MDIO_MUX_EXTERNAL_ID 2 /* H3/A64 specific bits */ #define SYSCON_RMII_EN BIT(13) /* 1: enable RMII (overrides EPIT) */ @@ -634,6 +640,159 @@ static int sun8i_dwmac_reset(struct stmmac_priv *priv) return 0; } +/* Search in mdio-mux node for internal PHY node and get its clk/reset */ +static int get_ephy_nodes(struct stmmac_priv *priv) +{ + struct sunxi_priv_data *gmac = priv->plat->bsp_priv; + struct device_node *mdio_mux, *iphynode; + struct device_node *mdio_internal; + int ret; + + mdio_mux = of_get_child_by_name(priv->device->of_node, "mdio-mux"); + if (!mdio_mux) { + dev_err(priv->device, "Cannot get mdio-mux node\n"); + return -ENODEV; + } + + mdio_internal = of_find_compatible_node(mdio_mux, NULL, + "allwinner,sun8i-h3-mdio-internal"); + if (!mdio_internal) { + dev_err(priv->device, "Cannot get internal_mdio node\n"); + return -ENODEV; + } + + /* Seek for internal PHY */ + for_each_child_of_node(mdio_internal, iphynode) { + gmac->ephy_clk = of_clk_get(iphynode, 0); + if (IS_ERR(gmac->ephy_clk)) + continue; + gmac->rst_ephy = of_reset_control_get_exclusive(iphynode, NULL); + if (IS_ERR(gmac->rst_ephy)) { + ret = PTR_ERR(gmac->rst_ephy); + if (ret == -EPROBE_DEFER) + return ret; + continue; + } + 
dev_info(priv->device, "Found internal PHY node\n"); + return 0; + } + return -ENODEV; +} + +static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv) +{ + struct sunxi_priv_data *gmac = priv->plat->bsp_priv; + int ret; + + if (gmac->internal_phy_powered) { + dev_warn(priv->device, "Internal PHY already powered\n"); + return 0; + } + + dev_info(priv->device, "Powering internal PHY\n"); + ret = clk_prepare_enable(gmac->ephy_clk); + if (ret) { + dev_err(priv->device, "Cannot enable internal PHY\n"); + return ret; + } + + /* Make sure the EPHY is properly reseted, as U-Boot may leave + * it at deasserted state, and thus it may fail to reset EMAC. + */ + reset_control_assert(gmac->rst_ephy); + + ret = reset_control_deassert(gmac->rst_ephy); + if (ret) { + dev_err(priv->device, "Cannot deassert internal phy\n"); + clk_disable_unprepare(gmac->ephy_clk); + return ret; + } + + gmac->internal_phy_powered = true; + + return 0; +} + +static int sun8i_dwmac_unpower_internal_phy(struct sunxi_priv_data *gmac) +{ + if (!gmac->internal_phy_powered) + return 0; + + clk_disable_unprepare(gmac->ephy_clk); + reset_control_assert(gmac->rst_ephy); + gmac->internal_phy_powered = false; + return 0; +} + +/* MDIO multiplexing switch function + * This function is called by the mdio-mux layer when it thinks the mdio bus + * multiplexer needs to switch. + * 'current_child' is the current value of the mux register + * 'desired_child' is the value of the 'reg' property of the target child MDIO + * node. + * The first time this function is called, current_child == -1. + * If current_child == desired_child, then the mux is already set to the + * correct bus. + */ +static int mdio_mux_syscon_switch_fn(int current_child, int desired_child, + void *data) +{ + struct stmmac_priv *priv = data; + struct sunxi_priv_data *gmac = priv->plat->bsp_priv; + u32 reg, val; + int ret = 0; + bool need_power_ephy = false; + + if (current_child ^ desired_child) { + regmap_read(gmac->regmap, SYSCON_EMAC_REG, ®); + switch (desired_child) { + case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID: + dev_info(priv->device, "Switch mux to internal PHY"); + val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SELECT; + + need_power_ephy = true; + break; + case DWMAC_SUN8I_MDIO_MUX_EXTERNAL_ID: + dev_info(priv->device, "Switch mux to external PHY"); + val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SHUTDOWN; + need_power_ephy = false; + break; + default: + dev_err(priv->device, "Invalid child ID %x\n", + desired_child); + return -EINVAL; + } + regmap_write(gmac->regmap, SYSCON_EMAC_REG, val); + if (need_power_ephy) { + ret = sun8i_dwmac_power_internal_phy(priv); + if (ret) + return ret; + } else { + sun8i_dwmac_unpower_internal_phy(gmac); + } + /* After changing syscon value, the MAC need reset or it will + * use the last value (and so the last PHY set). 
+ */ + ret = sun8i_dwmac_reset(priv); + } + return ret; +} + +static int sun8i_dwmac_register_mdio_mux(struct stmmac_priv *priv) +{ + int ret; + struct device_node *mdio_mux; + struct sunxi_priv_data *gmac = priv->plat->bsp_priv; + + mdio_mux = of_get_child_by_name(priv->device->of_node, "mdio-mux"); + if (!mdio_mux) + return -ENODEV; + + ret = mdio_mux_init(priv->device, mdio_mux, mdio_mux_syscon_switch_fn, + &gmac->mux_handle, priv, priv->mii); + return ret; +} + static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) { struct sunxi_priv_data *gmac = priv->plat->bsp_priv; @@ -648,35 +807,25 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) "Current syscon value is not the default %x (expect %x)\n", val, reg); - if (gmac->variant->internal_phy) { - if (!gmac->use_internal_phy) { - /* switch to external PHY interface */ - reg &= ~H3_EPHY_SELECT; - } else { - reg |= H3_EPHY_SELECT; - reg &= ~H3_EPHY_SHUTDOWN; - dev_dbg(priv->device, "Select internal_phy %x\n", reg); - - if (of_property_read_bool(priv->plat->phy_node, - "allwinner,leds-active-low")) - reg |= H3_EPHY_LED_POL; - else - reg &= ~H3_EPHY_LED_POL; - - /* Force EPHY xtal frequency to 24MHz. */ - reg |= H3_EPHY_CLK_SEL; - - ret = of_mdio_parse_addr(priv->device, - priv->plat->phy_node); - if (ret < 0) { - dev_err(priv->device, "Could not parse MDIO addr\n"); - return ret; - } - /* of_mdio_parse_addr returns a valid (0 ~ 31) PHY - * address. No need to mask it again. - */ - reg |= ret << H3_EPHY_ADDR_SHIFT; + if (gmac->variant->soc_has_internal_phy) { + if (of_property_read_bool(priv->plat->phy_node, + "allwinner,leds-active-low")) + reg |= H3_EPHY_LED_POL; + else + reg &= ~H3_EPHY_LED_POL; + + /* Force EPHY xtal frequency to 24MHz. */ + reg |= H3_EPHY_CLK_SEL; + + ret = of_mdio_parse_addr(priv->device, priv->plat->phy_node); + if (ret < 0) { + dev_err(priv->device, "Could not parse MDIO addr\n"); + return ret; } + /* of_mdio_parse_addr returns a valid (0 ~ 31) PHY + * address. No need to mask it again. + */ + reg |= ret << H3_EPHY_ADDR_SHIFT; } if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) { @@ -746,81 +895,21 @@ static void sun8i_dwmac_unset_syscon(struct sunxi_priv_data *gmac) regmap_write(gmac->regmap, SYSCON_EMAC_REG, reg); } -static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv) +static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv) { - struct sunxi_priv_data *gmac = priv->plat->bsp_priv; - int ret; - - if (!gmac->use_internal_phy) - return 0; - - ret = clk_prepare_enable(gmac->ephy_clk); - if (ret) { - dev_err(priv->device, "Cannot enable ephy\n"); - return ret; - } - - /* Make sure the EPHY is properly reseted, as U-Boot may leave - * it at deasserted state, and thus it may fail to reset EMAC.
- */ - reset_control_assert(gmac->rst_ephy); + struct sunxi_priv_data *gmac = priv; - ret = reset_control_deassert(gmac->rst_ephy); - if (ret) { - dev_err(priv->device, "Cannot deassert ephy\n"); - clk_disable_unprepare(gmac->ephy_clk); - return ret; + if (gmac->variant->soc_has_internal_phy) { + /* sun8i_dwmac_exit could be called with mdiomux uninit */ + if (gmac->mux_handle) + mdio_mux_uninit(gmac->mux_handle); + if (gmac->internal_phy_powered) + sun8i_dwmac_unpower_internal_phy(gmac); } - return 0; -} - -static int sun8i_dwmac_unpower_internal_phy(struct sunxi_priv_data *gmac) -{ - if (!gmac->use_internal_phy) - return 0; - - clk_disable_unprepare(gmac->ephy_clk); - reset_control_assert(gmac->rst_ephy); - return 0; -} - -/* sun8i_power_phy() - Activate the PHY: - * In case of error, no need to call sun8i_unpower_phy(), - * it will be called anyway by sun8i_dwmac_exit() - */ -static int sun8i_power_phy(struct stmmac_priv *priv) -{ - int ret; - - ret = sun8i_dwmac_power_internal_phy(priv); - if (ret) - return ret; - - ret = sun8i_dwmac_set_syscon(priv); - if (ret) - return ret; - - /* After changing syscon value, the MAC need reset or it will use - * the last value (and so the last PHY set. - */ - ret = sun8i_dwmac_reset(priv); - if (ret) - return ret; - return 0; -} - -static void sun8i_unpower_phy(struct sunxi_priv_data *gmac) -{ sun8i_dwmac_unset_syscon(gmac); - sun8i_dwmac_unpower_internal_phy(gmac); -} - -static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv) -{ - struct sunxi_priv_data *gmac = priv; - sun8i_unpower_phy(gmac); + reset_control_put(gmac->rst_ephy); clk_disable_unprepare(gmac->tx_clk); @@ -849,7 +938,7 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv) if (!mac) return NULL; - ret = sun8i_power_phy(priv); + ret = sun8i_dwmac_set_syscon(priv); if (ret) return NULL; @@ -889,6 +978,8 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) struct sunxi_priv_data *gmac; struct device *dev = &pdev->dev; int ret; + struct stmmac_priv *priv; + struct net_device *ndev; ret = stmmac_get_platform_resources(pdev, &stmmac_res); if (ret) @@ -932,29 +1023,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) } plat_dat->interface = of_get_phy_mode(dev->of_node); - if (plat_dat->interface == gmac->variant->internal_phy) { - dev_info(&pdev->dev, "Will use internal PHY\n"); - gmac->use_internal_phy = true; - gmac->ephy_clk = of_clk_get(plat_dat->phy_node, 0); - if (IS_ERR(gmac->ephy_clk)) { - ret = PTR_ERR(gmac->ephy_clk); - dev_err(&pdev->dev, "Cannot get EPHY clock: %d\n", ret); - return -EINVAL; - } - - gmac->rst_ephy = of_reset_control_get(plat_dat->phy_node, NULL); - if (IS_ERR(gmac->rst_ephy)) { - ret = PTR_ERR(gmac->rst_ephy); - if (ret == -EPROBE_DEFER) - return ret; - dev_err(&pdev->dev, "No EPHY reset control found %d\n", - ret); - return -EINVAL; - } - } else { - dev_info(&pdev->dev, "Will use external PHY\n"); - gmac->use_internal_phy = false; - } /* platform data specifying hardware features and callbacks. * hardware features were copied from Allwinner drivers. 
@@ -973,9 +1041,34 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) - sun8i_dwmac_exit(pdev, plat_dat->bsp_priv); + goto dwmac_exit; + + ndev = dev_get_drvdata(&pdev->dev); + priv = netdev_priv(ndev); + /* The mux must be registered after parent MDIO + * so after stmmac_dvr_probe() + */ + if (gmac->variant->soc_has_internal_phy) { + ret = get_ephy_nodes(priv); + if (ret) + goto dwmac_exit; + ret = sun8i_dwmac_register_mdio_mux(priv); + if (ret) { + dev_err(&pdev->dev, "Failed to register mux\n"); + goto dwmac_mux; + } + } else { + ret = sun8i_dwmac_reset(priv); + if (ret) + goto dwmac_exit; + } return ret; +dwmac_mux: + sun8i_dwmac_unset_syscon(gmac); +dwmac_exit: + sun8i_dwmac_exit(pdev, plat_dat->bsp_priv); + return ret; } static const struct of_device_id sun8i_dwmac_match[] = { -- cgit v1.2.3 From a8ff8ccb45d37efa64476958fc5e9a8d9716b23b Mon Sep 17 00:00:00 2001 From: Corentin Labbe Date: Tue, 24 Oct 2017 19:57:14 +0200 Subject: net: stmmac: sun8i: Restore the compatibles The original dwmac-sun8i DT bindings had an issue with how the integrated PHY is handled and were reverted in the last RC of 4.13. Now that we have a solution, we need to bring back what was reverted. This patch restores the compatibles for dwmac-sun8i. This reverts commit ad4540cc5aa3 ("net: stmmac: sun8i: Remove the compatibles") Signed-off-by: Corentin Labbe Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index b3eb344bb158..e5ff734d4f9b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -1072,6 +1072,14 @@ return ret; } static const struct of_device_id sun8i_dwmac_match[] = { + { .compatible = "allwinner,sun8i-h3-emac", + .data = &emac_variant_h3 }, + { .compatible = "allwinner,sun8i-v3s-emac", + .data = &emac_variant_v3s }, + { .compatible = "allwinner,sun8i-a83t-emac", + .data = &emac_variant_a83t }, + { .compatible = "allwinner,sun50i-a64-emac", + .data = &emac_variant_a64 }, { } }; MODULE_DEVICE_TABLE(of, sun8i_dwmac_match); -- cgit v1.2.3 From 0ff624fbfefbb96db62d100bda84e4fbdabaf628 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 25 Oct 2017 03:51:03 -0700 Subject: drivers/net: 3com/3c515: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: "David S. Miller" Cc: Thomas Gleixner Cc: Stephen Hemminger Cc: Johannes Berg Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller
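Before the individual driver diffs in this series of timer conversions, the pattern in one place: a minimal, driver-agnostic sketch (my_priv, my_link_timer and the dev back-pointer are invented names for illustration, not taken from any of the drivers below):

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/netdevice.h>

struct my_priv {
	struct timer_list timer;	/* embedded, so from_timer() can find it */
	struct net_device *dev;		/* back-pointer replacing the old cast of 'data' */
};

/* New-style callback: receives the timer itself and recovers the
 * enclosing structure with from_timer(), instead of casting an
 * 'unsigned long data' argument.
 */
static void my_link_timer(struct timer_list *t)
{
	struct my_priv *priv = from_timer(priv, t, timer);

	/* ... poll the hardware through priv->dev ... */
	mod_timer(&priv->timer, jiffies + msecs_to_jiffies(1000));
}

static void my_timer_init(struct my_priv *priv)
{
	/* Before: setup_timer(&priv->timer, my_link_timer, (unsigned long)priv); */
	timer_setup(&priv->timer, my_link_timer, 0);
}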
--- drivers/net/ethernet/3com/3c515.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c index c5987f518cb2..b648e3f95c01 100644 --- a/drivers/net/ethernet/3com/3c515.c +++ b/drivers/net/ethernet/3com/3c515.c @@ -367,7 +367,7 @@ static struct net_device *corkscrew_scan(int unit); static int corkscrew_setup(struct net_device *dev, int ioaddr, struct pnp_dev *idev, int card_number); static int corkscrew_open(struct net_device *dev); -static void corkscrew_timer(unsigned long arg); +static void corkscrew_timer(struct timer_list *t); static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb, struct net_device *dev); static int corkscrew_rx(struct net_device *dev); @@ -627,7 +627,7 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr, spin_lock_init(&vp->lock); - setup_timer(&vp->timer, corkscrew_timer, (unsigned long) dev); + timer_setup(&vp->timer, corkscrew_timer, 0); /* Read the station address from the EEPROM. */ EL3WINDOW(0); @@ -869,11 +869,11 @@ static int corkscrew_open(struct net_device *dev) return 0; } -static void corkscrew_timer(unsigned long data) +static void corkscrew_timer(struct timer_list *t) { #ifdef AUTOMEDIA - struct net_device *dev = (struct net_device *) data; - struct corkscrew_private *vp = netdev_priv(dev); + struct corkscrew_private *vp = from_timer(vp, t, timer); + struct net_device *dev = vp->our_dev; int ioaddr = dev->base_addr; unsigned long flags; int ok = 0; -- cgit v1.2.3 From 3248f77fa3eec6014653166a9cd0d429e8d30890 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 25 Oct 2017 03:51:38 -0700 Subject: drivers/net: netronome: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: Jakub Kicinski Cc: "David S. Miller" Cc: Jiri Pirko Cc: Jamal Hadi Salim Cc: Simon Horman Cc: oss-drivers@netronome.com Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Acked-by: Jakub Kicinski Signed-off-by: David S.
Miller --- drivers/net/ethernet/netronome/nfp/bpf/main.c | 5 +++-- drivers/net/ethernet/netronome/nfp/bpf/main.h | 3 ++- drivers/net/ethernet/netronome/nfp/bpf/offload.c | 9 ++++----- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 7 +++---- 4 files changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index fa0ac90ed956..f15a186f6c87 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -97,8 +97,9 @@ nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) nn->app_priv = priv; spin_lock_init(&priv->rx_filter_lock); - setup_timer(&priv->rx_filter_stats_timer, - nfp_net_filter_stats_timer, (unsigned long)nn); + priv->nn = nn; + timer_setup(&priv->rx_filter_stats_timer, + nfp_net_filter_stats_timer, 0); ret = nfp_app_nic_vnic_alloc(app, nn, id); if (ret) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 86edc0691a5f..bc604030ff6c 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -209,10 +209,11 @@ struct nfp_net_bpf_priv { struct nfp_stat_pair rx_filter, rx_filter_prev; unsigned long rx_filter_change; struct timer_list rx_filter_stats_timer; + struct nfp_net *nn; spinlock_t rx_filter_lock; }; int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf); -void nfp_net_filter_stats_timer(unsigned long data); +void nfp_net_filter_stats_timer(struct timer_list *t); #endif diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index fbca1ca1f39b..63c8f7847054 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -51,14 +51,13 @@ #include "../nfp_net_ctrl.h" #include "../nfp_net.h" -void nfp_net_filter_stats_timer(unsigned long data) +void nfp_net_filter_stats_timer(struct timer_list *t) { - struct nfp_net *nn = (void *)data; - struct nfp_net_bpf_priv *priv; + struct nfp_net_bpf_priv *priv = from_timer(priv, t, + rx_filter_stats_timer); + struct nfp_net *nn = priv->nn; struct nfp_stat_pair latest; - priv = nn->app_priv; - spin_lock_bh(&priv->rx_filter_lock); if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 2c9109b09faf..eddf850a6a7f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -177,9 +177,9 @@ static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline) return timed_out ? 
-EIO : 0; } -static void nfp_net_reconfig_timer(unsigned long data) +static void nfp_net_reconfig_timer(struct timer_list *t) { - struct nfp_net *nn = (void *)data; + struct nfp_net *nn = from_timer(nn, t, reconfig_timer); spin_lock_bh(&nn->reconfig_lock); @@ -3537,8 +3537,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev, spin_lock_init(&nn->reconfig_lock); spin_lock_init(&nn->link_status_lock); - setup_timer(&nn->reconfig_timer, - nfp_net_reconfig_timer, (unsigned long)nn); + timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0); return nn; } -- cgit v1.2.3 From 97815186d4f1ea0a14b0683ddc63f288809a94e9 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 25 Oct 2017 03:51:58 -0700 Subject: drivers/net: nuvoton: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: Wan ZongShun Cc: linux-arm-kernel@lists.infradead.org Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/nuvoton/w90p910_ether.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index 4a67c55aa9f1..052b3d2c07a1 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -253,10 +253,10 @@ static void update_linkspeed(struct net_device *dev) netif_carrier_on(dev); } -static void w90p910_check_link(unsigned long dev_id) +static void w90p910_check_link(struct timer_list *t) { - struct net_device *dev = (struct net_device *) dev_id; - struct w90p910_ether *ether = netdev_priv(dev); + struct w90p910_ether *ether = from_timer(ether, t, check_timer); + struct net_device *dev = ether->mii.dev; update_linkspeed(dev); mod_timer(ðer->check_timer, jiffies + msecs_to_jiffies(1000)); @@ -957,8 +957,7 @@ static int w90p910_ether_setup(struct net_device *dev) ether->mii.mdio_read = w90p910_mdio_read; ether->mii.mdio_write = w90p910_mdio_write; - setup_timer(ðer->check_timer, w90p910_check_link, - (unsigned long)dev); + timer_setup(ðer->check_timer, w90p910_check_link, 0); return 0; } -- cgit v1.2.3 From 9de36ccf0891fbdfcc347a34bda009977d8dc2a8 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 25 Oct 2017 03:53:12 -0700 Subject: drivers/net: realtek: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: Realtek linux nic maintainers Cc: "David S. Miller" Cc: David Howells Cc: Jay Vosburgh Cc: Allen Pais Cc: Eric Dumazet Cc: Tobias Klauser Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/atp.c | 12 +++++++----- drivers/net/ethernet/realtek/r8169.c | 7 +++---- 2 files changed, 10 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c index bdc3833fab7e..7e011c1c1e6e 100644 --- a/drivers/net/ethernet/realtek/atp.c +++ b/drivers/net/ethernet/realtek/atp.c @@ -170,6 +170,7 @@ struct net_local { spinlock_t lock; struct net_device *next_module; struct timer_list timer; /* Media selection timer. */ + struct net_device *dev; /* Timer dev. 
*/ unsigned long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */ int saved_tx_size; unsigned int tx_unit_busy:1; @@ -184,7 +185,7 @@ struct net_local { #define TIMED_CHECKER (HZ/4) #ifdef TIMED_CHECKER #include -static void atp_timed_checker(unsigned long ignored); +static void atp_timed_checker(struct timer_list *t); #endif /* Index to functions, as function prototypes. */ @@ -438,7 +439,8 @@ static int net_open(struct net_device *dev) hardware_init(dev); - setup_timer(&lp->timer, atp_timed_checker, (unsigned long)dev); + lp->dev = dev; + timer_setup(&lp->timer, atp_timed_checker, 0); lp->timer.expires = jiffies + TIMED_CHECKER; add_timer(&lp->timer); @@ -708,11 +710,11 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance) #ifdef TIMED_CHECKER /* This following code fixes a rare (and very difficult to track down) problem where the adapter forgets its ethernet address. */ -static void atp_timed_checker(unsigned long data) +static void atp_timed_checker(struct timer_list *t) { - struct net_device *dev = (struct net_device *)data; + struct net_local *lp = from_timer(lp, t, timer); + struct net_device *dev = lp->dev; long ioaddr = dev->base_addr; - struct net_local *lp = netdev_priv(dev); int tickssofar = jiffies - lp->last_rx_time; int i; diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index a3c949ea7d1a..7dc4b6de31e6 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -4401,10 +4401,9 @@ static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) schedule_work(&tp->wk.work); } -static void rtl8169_phy_timer(unsigned long __opaque) +static void rtl8169_phy_timer(struct timer_list *t) { - struct net_device *dev = (struct net_device *)__opaque; - struct rtl8169_private *tp = netdev_priv(dev); + struct rtl8169_private *tp = from_timer(tp, t, timer); rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING); } @@ -8454,7 +8453,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ? ~(RxBOVF | RxFOVF) : ~0; - setup_timer(&tp->timer, rtl8169_phy_timer, (unsigned long)dev); + timer_setup(&tp->timer, rtl8169_phy_timer, 0); tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; -- cgit v1.2.3 From c37631c7f686518157de4aa5fb456a54f27607b6 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 25 Oct 2017 03:53:20 -0700 Subject: drivers/net: sxgbe: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: Byungho An Cc: Girish K S Cc: Vipul Pandya Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index 89831adb8eb7..fd35d8004a78 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -105,9 +105,9 @@ void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv) * If there is no data transfer and if we are not in LPI state, * then MAC Transmitter can be moved to LPI state. 
*/ -static void sxgbe_eee_ctrl_timer(unsigned long arg) +static void sxgbe_eee_ctrl_timer(struct timer_list *t) { - struct sxgbe_priv_data *priv = (struct sxgbe_priv_data *)arg; + struct sxgbe_priv_data *priv = from_timer(priv, t, eee_ctrl_timer); sxgbe_enable_eee_mode(priv); mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer)); @@ -134,8 +134,7 @@ bool sxgbe_eee_init(struct sxgbe_priv_data * const priv) return false; priv->eee_active = 1; - setup_timer(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer, - (unsigned long)priv); + timer_setup(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer, 0); priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer); add_timer(&priv->eee_ctrl_timer); @@ -1002,13 +1001,13 @@ static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv) /** * sxgbe_tx_timer: mitigation sw timer for tx. - * @data: data pointer + * @t: timer pointer * Description: * This is the timer handler to directly invoke the sxgbe_tx_clean. */ -static void sxgbe_tx_timer(unsigned long data) +static void sxgbe_tx_timer(struct timer_list *t) { - struct sxgbe_tx_queue *p = (struct sxgbe_tx_queue *)data; + struct sxgbe_tx_queue *p = from_timer(p, t, txtimer); sxgbe_tx_queue_clean(p); } @@ -1028,8 +1027,7 @@ static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv) struct sxgbe_tx_queue *p = priv->txq[queue_num]; p->tx_coal_frames = SXGBE_TX_FRAMES; p->tx_coal_timer = SXGBE_COAL_TX_TIMER; - setup_timer(&p->txtimer, sxgbe_tx_timer, - (unsigned long)&priv->txq[queue_num]); + timer_setup(&p->txtimer, sxgbe_tx_timer, 0); p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer); add_timer(&p->txtimer); } -- cgit v1.2.3 From c0c21458d7d242adf9c3a3e2f3f95392164325ae Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 25 Oct 2017 18:01:05 -0700 Subject: net: systemport: Check DSA notifier master against ourself Check that the master network device that is signaled through the DSA notifier is actually going to be ourself, otherwise, we could just be de-referencing garbage from other drivers. Fixes: 84ff33eeb23d ("net: systemport: Establish DSA network device queue mapping") Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bcmsysport.c | 33 ++++++++++++++++-------------- 1 file changed, 18 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 1d9d5f986e14..dcee843d05d7 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2046,6 +2046,21 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb, return tx_ring->index; } +static const struct net_device_ops bcm_sysport_netdev_ops = { + .ndo_start_xmit = bcm_sysport_xmit, + .ndo_tx_timeout = bcm_sysport_tx_timeout, + .ndo_open = bcm_sysport_open, + .ndo_stop = bcm_sysport_stop, + .ndo_set_features = bcm_sysport_set_features, + .ndo_set_rx_mode = bcm_sysport_set_rx_mode, + .ndo_set_mac_address = bcm_sysport_change_mac, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = bcm_sysport_poll_controller, +#endif + .ndo_get_stats64 = bcm_sysport_get_stats64, + .ndo_select_queue = bcm_sysport_select_queue, +}; + static int bcm_sysport_map_queues(struct net_device *dev, struct dsa_notifier_register_info *info) { @@ -2061,6 +2076,9 @@ static int bcm_sysport_map_queues(struct net_device *dev, if (info->switch_number) return 0; + if (dev->netdev_ops != &bcm_sysport_netdev_ops) + return 0; + port = info->port_number; slave_dev = info->info.dev; @@ -2112,21 +2130,6 @@ static int bcm_sysport_dsa_notifier(struct notifier_block *unused, return notifier_from_errno(bcm_sysport_map_queues(info->master, info)); } -static const struct net_device_ops bcm_sysport_netdev_ops = { - .ndo_start_xmit = bcm_sysport_xmit, - .ndo_tx_timeout = bcm_sysport_tx_timeout, - .ndo_open = bcm_sysport_open, - .ndo_stop = bcm_sysport_stop, - .ndo_set_features = bcm_sysport_set_features, - .ndo_set_rx_mode = bcm_sysport_set_rx_mode, - .ndo_set_mac_address = bcm_sysport_change_mac, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = bcm_sysport_poll_controller, -#endif - .ndo_get_stats64 = bcm_sysport_get_stats64, - .ndo_select_queue = bcm_sysport_select_queue, -}; - #define REV_FMT "v%2x.%02x" static const struct bcm_sysport_hw_params bcm_sysport_params[] = { -- cgit v1.2.3 From 392209fa833287a1c5532ffbb098bba584a69dbc Mon Sep 17 00:00:00 2001 From: Felix Manlunas Date: Wed, 25 Oct 2017 18:04:56 -0700 Subject: liquidio: deprecate 1-bit flag indicating watchdog kernel thread is running Deprecate the 1-bit flag (bit 2 in the SLI_SCRATCH_1 Octeon register) that indicates that the liquidio watchdog kernel thread is running for this NIC. Reason is: it is incompatible with the firmware's use for SLI_SCRATCH_1. In lieu of checking that now-deprecated flag, check the value of oct_dev->adapter_refcount to determine whether or not to create the watchdog kernel thread. Signed-off-by: Felix Manlunas Signed-off-by: Raghu Vatsavayi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index b4f753c56308..accd038f3f34 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1196,19 +1196,13 @@ liquidio_probe(struct pci_dev *pdev, } if (OCTEON_CN23XX_PF(oct_dev)) { - u64 scratch1; u8 bus, device, function; - scratch1 = octeon_read_csr64(oct_dev, CN23XX_SLI_SCRATCH1); - if (!(scratch1 & 4ULL)) { - /* Bit 2 of SLI_SCRATCH_1 is a flag that indicates that - * the lio watchdog kernel thread is running for this - * NIC. Each NIC gets one watchdog kernel thread. + if (atomic_read(oct_dev->adapter_refcount) == 1) { + /* Each NIC gets one watchdog kernel thread. The first + * PF (of each NIC) that gets pci_driver->probe()'d + * creates that thread. */ - scratch1 |= 4ULL; - octeon_write_csr64(oct_dev, CN23XX_SLI_SCRATCH1, - scratch1); - bus = pdev->bus->number; device = PCI_SLOT(pdev->devfn); function = PCI_FUNC(pdev->devfn); -- cgit v1.2.3 From 9deef43ddfb1dfa5f42e4066610cfe606212b0a1 Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Thu, 26 Oct 2017 10:55:32 +0200 Subject: mlxsw: spectrum: Change stats cache to be local Change the HW stats cache to be local. Rename it for better clarity. It holds the results of the last result of HW stats that are being read periodically, in order to have answer for stats request immediately. Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 24 +++++++----------------- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 4 ++-- 2 files changed, 9 insertions(+), 19 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 12b6ac487d8d..021926974da6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1328,16 +1328,16 @@ static void update_stats_cache(struct work_struct *work) { struct mlxsw_sp_port *mlxsw_sp_port = container_of(work, struct mlxsw_sp_port, - hw_stats.update_dw.work); + periodic_hw_stats.update_dw.work); if (!netif_carrier_ok(mlxsw_sp_port->dev)) goto out; mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev, - mlxsw_sp_port->hw_stats.cache); + &mlxsw_sp_port->periodic_hw_stats.stats); out: - mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, + mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, MLXSW_HW_STATS_UPDATE_TIME); } @@ -1350,7 +1350,7 @@ mlxsw_sp_port_get_stats64(struct net_device *dev, { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); - memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats)); + memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats)); } static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, @@ -2905,14 +2905,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_alloc_sample; } - mlxsw_sp_port->hw_stats.cache = - kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL); - - if (!mlxsw_sp_port->hw_stats.cache) { - err = -ENOMEM; - goto err_alloc_hw_stats; - } - INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw, + INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw, &update_stats_cache); dev->netdev_ops = 
&mlxsw_sp_port_netdev_ops; @@ -3026,7 +3019,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port, mlxsw_sp_port, dev, mlxsw_sp_port->split, module); - mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0); + mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0); return 0; err_register_netdev: @@ -3049,8 +3042,6 @@ err_dev_addr_init: err_port_swid_set: mlxsw_sp_port_module_unmap(mlxsw_sp_port); err_port_module_map: - kfree(mlxsw_sp_port->hw_stats.cache); -err_alloc_hw_stats: kfree(mlxsw_sp_port->sample); err_alloc_sample: free_percpu(mlxsw_sp_port->pcpu_stats); @@ -3065,7 +3056,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; - cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw); + cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw); mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp); unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ mlxsw_sp->ports[local_port] = NULL; @@ -3075,7 +3066,6 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) mlxsw_sp_port_dcb_fini(mlxsw_sp_port); mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); mlxsw_sp_port_module_unmap(mlxsw_sp_port); - kfree(mlxsw_sp_port->hw_stats.cache); kfree(mlxsw_sp_port->sample); free_percpu(mlxsw_sp_port->pcpu_stats); WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list)); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index dc1b739c3ae1..aa0cefb25e18 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -231,9 +231,9 @@ struct mlxsw_sp_port { struct list_head mall_tc_list; struct { #define MLXSW_HW_STATS_UPDATE_TIME HZ - struct rtnl_link_stats64 *cache; + struct rtnl_link_stats64 stats; struct delayed_work update_dw; - } hw_stats; + } periodic_hw_stats; struct mlxsw_sp_port_sample *sample; struct list_head vlans_list; }; -- cgit v1.2.3 From 3e8c1fd31840bd84c31f08e69aa9c338b2856047 Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Thu, 26 Oct 2017 10:55:33 +0200 Subject: mlxsw: reg: Avoid magic number in PPCNT Replace recurring magic number in PPCNT register with a define. Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 72 ++++++++++++++++++------------- 1 file changed, 42 insertions(+), 30 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index a3f31f425550..5a26702267bd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3123,6 +3123,7 @@ static inline void mlxsw_reg_pfcc_pack(char *payload, u8 local_port) */ #define MLXSW_REG_PPCNT_ID 0x5008 #define MLXSW_REG_PPCNT_LEN 0x100 +#define MLXSW_REG_PPCNT_COUNTERS_OFFSET 0x08 MLXSW_REG_DEFINE(ppcnt, MLXSW_REG_PPCNT_ID, MLXSW_REG_PPCNT_LEN); @@ -3200,162 +3201,171 @@ MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5); * Access: RO */ MLXSW_ITEM64(reg, ppcnt, a_frames_transmitted_ok, - 0x08 + 0x00, 0, 64); + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64); /* reg_ppcnt_a_frames_received_ok * Access: RO */ MLXSW_ITEM64(reg, ppcnt, a_frames_received_ok, - 0x08 + 0x08, 0, 64); + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64); /* reg_ppcnt_a_frame_check_sequence_errors * Access: RO */ MLXSW_ITEM64(reg, ppcnt, a_frame_check_sequence_errors, - 0x08 + 0x10, 0, 64); + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x10, 0, 64); /* reg_ppcnt_a_alignment_errors * Access: RO */ MLXSW_ITEM64(reg, ppcnt, a_alignment_errors, - 0x08 + 0x18, 0, 64); + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x18, 0, 64); /* reg_ppcnt_a_octets_transmitted_ok * Access: RO */ MLXSW_ITEM64(reg, ppcnt, a_octets_transmitted_ok, - 0x08 + 0x20, 0, 64); + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x20, 0, 64); /* reg_ppcnt_a_octets_received_ok * Access: RO */ MLXSW_ITEM64(reg, ppcnt, a_octets_received_ok, - 0x08 + 0x28, 0, 64); + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x28, 0, 64); /* reg_ppcnt_a_multicast_frames_xmitted_ok * Access: RO */ MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_xmitted_ok, - 0x08 + 0x30, 0, 64); + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x30, 0, 64); /* reg_ppcnt_a_broadcast_frames_xmitted_ok * Access: RO */ MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_xmitted_ok, - 0x08 + 0x38, 0, 64); + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x38, 0, 64); /* reg_ppcnt_a_multicast_frames_received_ok * Access: RO */ MLXSW_ITEM64(reg, ppcnt, a_multicast_frames_received_ok, - 0x08 + 0x40, 0, 64); + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64); /* reg_ppcnt_a_broadcast_frames_received_ok * Access: RO */ MLXSW_ITEM64(reg, ppcnt, a_broadcast_frames_received_ok, - 0x08 + 0x48, 0, 64); + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x48, 0, 64); /* reg_ppcnt_a_in_range_length_errors * Access: RO */ MLXSW_ITEM64(reg, ppcnt, a_in_range_length_errors, - 0x08 + 0x50, 0, 64); + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x50, 0, 64); /* reg_ppcnt_a_out_of_range_length_field * Access: RO */ MLXSW_ITEM64(reg, ppcnt, a_out_of_range_length_field, - 0x08 + 0x58, 0, 64); + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64); /* reg_ppcnt_a_frame_too_long_errors * Access: RO */ MLXSW_ITEM64(reg, ppcnt, a_frame_too_long_errors, - 0x08 + 0x60, 0, 64); + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64); /* reg_ppcnt_a_symbol_error_during_carrier * Access: RO */ MLXSW_ITEM64(reg, ppcnt, a_symbol_error_during_carrier, - 0x08 + 0x68, 0, 64); + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64); /* reg_ppcnt_a_mac_control_frames_transmitted * Access: RO */ MLXSW_ITEM64(reg, ppcnt, a_mac_control_frames_transmitted, - 0x08 + 0x70, 0, 64); + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64); /* reg_ppcnt_a_mac_control_frames_received * Access: RO */ MLXSW_ITEM64(reg, ppcnt, 
a_mac_control_frames_received, - 0x08 + 0x78, 0, 64); + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x78, 0, 64); /* reg_ppcnt_a_unsupported_opcodes_received * Access: RO */ MLXSW_ITEM64(reg, ppcnt, a_unsupported_opcodes_received, - 0x08 + 0x80, 0, 64); + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x80, 0, 64); /* reg_ppcnt_a_pause_mac_ctrl_frames_received * Access: RO */ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received, - 0x08 + 0x88, 0, 64); + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x88, 0, 64); /* reg_ppcnt_a_pause_mac_ctrl_frames_transmitted * Access: RO */ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted, - 0x08 + 0x90, 0, 64); + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64); /* Ethernet Per Priority Group Counters */ /* reg_ppcnt_rx_octets * Access: RO */ -MLXSW_ITEM64(reg, ppcnt, rx_octets, 0x08 + 0x00, 0, 64); +MLXSW_ITEM64(reg, ppcnt, rx_octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64); /* reg_ppcnt_rx_frames * Access: RO */ -MLXSW_ITEM64(reg, ppcnt, rx_frames, 0x08 + 0x20, 0, 64); +MLXSW_ITEM64(reg, ppcnt, rx_frames, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x20, 0, 64); /* reg_ppcnt_tx_octets * Access: RO */ -MLXSW_ITEM64(reg, ppcnt, tx_octets, 0x08 + 0x28, 0, 64); +MLXSW_ITEM64(reg, ppcnt, tx_octets, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x28, 0, 64); /* reg_ppcnt_tx_frames * Access: RO */ -MLXSW_ITEM64(reg, ppcnt, tx_frames, 0x08 + 0x48, 0, 64); +MLXSW_ITEM64(reg, ppcnt, tx_frames, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x48, 0, 64); /* reg_ppcnt_rx_pause * Access: RO */ -MLXSW_ITEM64(reg, ppcnt, rx_pause, 0x08 + 0x50, 0, 64); +MLXSW_ITEM64(reg, ppcnt, rx_pause, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x50, 0, 64); /* reg_ppcnt_rx_pause_duration * Access: RO */ -MLXSW_ITEM64(reg, ppcnt, rx_pause_duration, 0x08 + 0x58, 0, 64); +MLXSW_ITEM64(reg, ppcnt, rx_pause_duration, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64); /* reg_ppcnt_tx_pause * Access: RO */ -MLXSW_ITEM64(reg, ppcnt, tx_pause, 0x08 + 0x60, 0, 64); +MLXSW_ITEM64(reg, ppcnt, tx_pause, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64); /* reg_ppcnt_tx_pause_duration * Access: RO */ -MLXSW_ITEM64(reg, ppcnt, tx_pause_duration, 0x08 + 0x68, 0, 64); +MLXSW_ITEM64(reg, ppcnt, tx_pause_duration, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64); /* reg_ppcnt_rx_pause_transition * Access: RO */ -MLXSW_ITEM64(reg, ppcnt, tx_pause_transition, 0x08 + 0x70, 0, 64); +MLXSW_ITEM64(reg, ppcnt, tx_pause_transition, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64); /* Ethernet Per Traffic Group Counters */ @@ -3365,14 +3375,16 @@ MLXSW_ITEM64(reg, ppcnt, tx_pause_transition, 0x08 + 0x70, 0, 64); * The field cannot be cleared. * Access: RO */ -MLXSW_ITEM64(reg, ppcnt, tc_transmit_queue, 0x08 + 0x00, 0, 64); +MLXSW_ITEM64(reg, ppcnt, tc_transmit_queue, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64); /* reg_ppcnt_tc_no_buffer_discard_uc * The number of unicast packets dropped due to lack of shared * buffer resources. * Access: RO */ -MLXSW_ITEM64(reg, ppcnt, tc_no_buffer_discard_uc, 0x08 + 0x08, 0, 64); +MLXSW_ITEM64(reg, ppcnt, tc_no_buffer_discard_uc, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64); static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port, enum mlxsw_reg_ppcnt_grp grp, -- cgit v1.2.3 From 27887bc7cb7fc5a0a3b8f4b0f27b332c8121515b Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Thu, 26 Oct 2017 17:18:33 +0530 Subject: cxgb4: collect hardware LA dumps Collect CIM, CIM_MA, ULP_RX, TP, CIM_PIF, and ULP_TX logic analyzer dumps. 
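All of these collectors follow the same buffer protocol: reserve space in the destination dump buffer with cudbg_get_buff(), fill it from the hardware, then either commit it with cudbg_write_and_release_buff() or hand it back with cudbg_put_buff() on error. A minimal sketch of that skeleton (size and read_hw_block() are placeholders standing in for the per-entity specifics in the diff below):

	static int cudbg_collect_example(struct cudbg_init *pdbg_init,
					 struct cudbg_buffer *dbg_buff,
					 struct cudbg_error *cudbg_err)
	{
		struct adapter *padap = pdbg_init->adap;
		struct cudbg_buffer temp_buff = { 0 };
		u32 size = 0;	/* placeholder: entity-specific payload size */
		int rc;

		/* reserve 'size' bytes in the dump buffer */
		rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
		if (rc)
			return rc;

		/* read the HW block into temp_buff.data; on failure, record
		 * the error and give the reservation back
		 */
		rc = read_hw_block(padap, temp_buff.data);	/* placeholder */
		if (rc) {
			cudbg_err->sys_err = rc;
			cudbg_put_buff(&temp_buff, dbg_buff);
			return rc;
		}

		/* commit the filled region to the dump */
		cudbg_write_and_release_buff(&temp_buff, dbg_buff);
		return rc;
	}
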
Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 26 ++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 6 + drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 164 ++++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h | 18 +++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 32 +++++ drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 4 + 6 files changed, 250 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index d7f3392f618f..50540a6379a4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -45,6 +45,32 @@ struct ireg_buf { u32 outbuf[32]; }; +struct cudbg_ulprx_la { + u32 data[ULPRX_LA_SIZE * 8]; + u32 size; +}; + +struct cudbg_tp_la { + u32 size; + u32 mode; + u8 data[0]; +}; + +struct cudbg_cim_pif_la { + int size; + u8 data[0]; +}; + +#define CUDBG_NUM_ULPTX 11 +#define CUDBG_NUM_ULPTX_READ 512 + +struct cudbg_ulptx_la { + u32 rdptr[CUDBG_NUM_ULPTX]; + u32 wrptr[CUDBG_NUM_ULPTX]; + u32 rddata[CUDBG_NUM_ULPTX]; + u32 rd_data[CUDBG_NUM_ULPTX][CUDBG_NUM_ULPTX_READ]; +}; + #define IREG_NUM_ELEM 4 static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index 9b8005e67811..f65db1b89fdc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -29,6 +29,8 @@ enum cudbg_dbg_entity_type { CUDBG_REG_DUMP = 1, CUDBG_DEV_LOG = 2, + CUDBG_CIM_LA = 3, + CUDBG_CIM_MA_LA = 4, CUDBG_CIM_IBQ_TP0 = 6, CUDBG_CIM_IBQ_TP1 = 7, CUDBG_CIM_IBQ_ULP = 8, @@ -45,11 +47,15 @@ enum cudbg_dbg_entity_type { CUDBG_EDC1 = 19, CUDBG_TP_INDIRECT = 36, CUDBG_SGE_INDIRECT = 37, + CUDBG_ULPRX_LA = 41, + CUDBG_TP_LA = 43, + CUDBG_CIM_PIF_LA = 45, CUDBG_CIM_OBQ_RXQ0 = 47, CUDBG_CIM_OBQ_RXQ1 = 48, CUDBG_PCIE_INDIRECT = 50, CUDBG_PM_INDIRECT = 51, CUDBG_MA_INDIRECT = 61, + CUDBG_ULPTX_LA = 62, CUDBG_UP_CIM_INDIRECT = 64, CUDBG_MBOX_LOG = 66, CUDBG_HMA_INDIRECT = 67, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 19da54f83e52..8b5a12b19844 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -129,6 +129,69 @@ int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init, return rc; } +int cudbg_collect_cim_la(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + int size, rc; + u32 cfg = 0; + + if (is_t6(padap->params.chip)) { + size = padap->params.cim_la_size / 10 + 1; + size *= 11 * sizeof(u32); + } else { + size = padap->params.cim_la_size / 8; + size *= 8 * sizeof(u32); + } + + size += sizeof(cfg); + rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + if (rc) + return rc; + + rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg); + if (rc) { + cudbg_err->sys_err = rc; + cudbg_put_buff(&temp_buff, dbg_buff); + return rc; + } + + memcpy((char *)temp_buff.data, &cfg, sizeof(cfg)); + rc = t4_cim_read_la(padap, + (u32 *)((char *)temp_buff.data + sizeof(cfg)), + NULL); + if (rc < 0) { + cudbg_err->sys_err = rc; + cudbg_put_buff(&temp_buff, dbg_buff); + return rc; + } + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return 
rc; +} + +int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + int size, rc; + + size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32); + rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + if (rc) + return rc; + + t4_cim_read_ma_la(padap, + (u32 *)temp_buff.data, + (u32 *)((char *)temp_buff.data + + 5 * CIM_MALA_SIZE)); + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err, int qid) @@ -574,6 +637,72 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init, return rc; } +int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct cudbg_ulprx_la *ulprx_la_buff; + int rc; + + rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulprx_la), + &temp_buff); + if (rc) + return rc; + + ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data; + t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data); + ulprx_la_buff->size = ULPRX_LA_SIZE; + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + +int cudbg_collect_tp_la(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct cudbg_tp_la *tp_la_buff; + int size, rc; + + size = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64); + rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + if (rc) + return rc; + + tp_la_buff = (struct cudbg_tp_la *)temp_buff.data; + tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A)); + t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL); + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + +int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct cudbg_cim_pif_la *cim_pif_la_buff; + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + int size, rc; + + size = sizeof(struct cudbg_cim_pif_la) + + 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32); + rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + if (rc) + return rc; + + cim_pif_la_buff = (struct cudbg_cim_pif_la *)temp_buff.data; + cim_pif_la_buff->size = CIM_PIFLA_SIZE; + t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data, + (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE, + NULL, NULL); + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) @@ -743,6 +872,41 @@ int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init, return rc; } +int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct cudbg_ulptx_la *ulptx_la_buff; + u32 i, j; + int rc; + + rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulptx_la), + &temp_buff); + if (rc) + return rc; + + ulptx_la_buff = (struct cudbg_ulptx_la *)temp_buff.data; + for (i = 0; i < CUDBG_NUM_ULPTX; i++) { + ulptx_la_buff->rdptr[i] = t4_read_reg(padap, + ULP_TX_LA_RDPTR_0_A + + 0x10 * i); + 
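/* each ULP_TX LA instance is a 0x10-byte register block (RDPTR, RDDATA, WRPTR; see the t4_regs.h additions below), hence the 0x10 * i stride */ +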
ulptx_la_buff->wrptr[i] = t4_read_reg(padap, + ULP_TX_LA_WRPTR_0_A + + 0x10 * i); + ulptx_la_buff->rddata[i] = t4_read_reg(padap, + ULP_TX_LA_RDDATA_0_A + + 0x10 * i); + for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++) + ulptx_la_buff->rd_data[i][j] = + t4_read_reg(padap, + ULP_TX_LA_RDDATA_0_A + 0x10 * i); + } + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index df24c409c82f..ad6eff3c33c3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -24,6 +24,12 @@ int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init, int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_cim_la(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); @@ -72,6 +78,15 @@ int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init, int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_tp_la(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); @@ -87,6 +102,9 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init, int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 59740ac7e46e..8bc1b1decf30 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -29,6 +29,8 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { { CUDBG_MBOX_LOG, cudbg_collect_mbox_log }, { CUDBG_DEV_LOG, cudbg_collect_fw_devlog }, { CUDBG_REG_DUMP, cudbg_collect_reg_dump }, + { CUDBG_CIM_LA, cudbg_collect_cim_la }, + { CUDBG_CIM_MA_LA, cudbg_collect_cim_ma_la }, { CUDBG_CIM_IBQ_TP0, cudbg_collect_cim_ibq_tp0 }, { CUDBG_CIM_IBQ_TP1, cudbg_collect_cim_ibq_tp1 }, { CUDBG_CIM_IBQ_ULP, cudbg_collect_cim_ibq_ulp }, @@ -43,11 +45,15 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { { CUDBG_CIM_OBQ_NCSI, cudbg_collect_cim_obq_ncsi }, { CUDBG_TP_INDIRECT, cudbg_collect_tp_indirect }, { CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect }, + { CUDBG_ULPRX_LA, cudbg_collect_ulprx_la }, + { CUDBG_TP_LA, cudbg_collect_tp_la }, + { 
CUDBG_CIM_PIF_LA, cudbg_collect_cim_pif_la }, { CUDBG_CIM_OBQ_RXQ0, cudbg_collect_obq_sge_rx_q0 }, { CUDBG_CIM_OBQ_RXQ1, cudbg_collect_obq_sge_rx_q1 }, { CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect }, { CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect }, { CUDBG_MA_INDIRECT, cudbg_collect_ma_indirect }, + { CUDBG_ULPTX_LA, cudbg_collect_ulptx_la }, { CUDBG_UP_CIM_INDIRECT, cudbg_collect_up_cim_indirect }, { CUDBG_HMA_INDIRECT, cudbg_collect_hma_indirect }, }; @@ -73,6 +79,19 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) case CUDBG_DEV_LOG: len = adap->params.devlog.size; break; + case CUDBG_CIM_LA: + if (is_t6(adap->params.chip)) { + len = adap->params.cim_la_size / 10 + 1; + len *= 11 * sizeof(u32); + } else { + len = adap->params.cim_la_size / 8; + len *= 8 * sizeof(u32); + } + len += sizeof(u32); /* for reading CIM LA configuration */ + break; + case CUDBG_CIM_MA_LA: + len = 2 * CIM_MALA_SIZE * 5 * sizeof(u32); + break; case CUDBG_CIM_IBQ_TP0: case CUDBG_CIM_IBQ_TP1: case CUDBG_CIM_IBQ_ULP: @@ -142,6 +161,16 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) case CUDBG_SGE_INDIRECT: len = sizeof(struct ireg_buf) * 2; break; + case CUDBG_ULPRX_LA: + len = sizeof(struct cudbg_ulprx_la); + break; + case CUDBG_TP_LA: + len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64); + break; + case CUDBG_CIM_PIF_LA: + len = sizeof(struct cudbg_cim_pif_la); + len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32); + break; case CUDBG_PCIE_INDIRECT: n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32)); len = sizeof(struct ireg_buf) * n * 2; @@ -157,6 +186,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) len = sizeof(struct ireg_buf) * n * 2; } break; + case CUDBG_ULPTX_LA: + len = sizeof(struct cudbg_ulptx_la); + break; case CUDBG_UP_CIM_INDIRECT: n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32)); len = sizeof(struct ireg_buf) * n; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 82614e078f50..b5cd9a5ad808 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1629,6 +1629,10 @@ #define IESPI_PAR_ERROR_V(x) ((x) << IESPI_PAR_ERROR_S) #define IESPI_PAR_ERROR_F IESPI_PAR_ERROR_V(1U) +#define ULP_TX_LA_RDPTR_0_A 0x8ec0 +#define ULP_TX_LA_RDDATA_0_A 0x8ec4 +#define ULP_TX_LA_WRPTR_0_A 0x8ec8 + #define PMRX_E_PCMD_PAR_ERROR_S 0 #define PMRX_E_PCMD_PAR_ERROR_V(x) ((x) << PMRX_E_PCMD_PAR_ERROR_S) #define PMRX_E_PCMD_PAR_ERROR_F PMRX_E_PCMD_PAR_ERROR_V(1U) -- cgit v1.2.3 From 3044d0fb016ecd953724c966bede8c8626f32bd5 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Thu, 26 Oct 2017 17:18:34 +0530 Subject: cxgb4: collect CIM queue configuration dump Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 9 ++++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 1 + drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 39 +++++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h | 3 ++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 4 +++ 5 files changed, 56 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index 50540a6379a4..ab15c3dfa04e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -33,6 +33,15 @@ struct cudbg_mbox_log { u32 lo[MBOX_LEN / 8]; }; +struct cudbg_cim_qcfg { + u8 chip; + u16 base[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; + u16 size[CIM_NUM_IBQ + CIM_NUM_OBQ_T5]; + u16 thres[CIM_NUM_IBQ]; + u32 obq_wr[2 * CIM_NUM_OBQ_T5]; + u32 stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)]; +}; + struct ireg_field { u32 ireg_addr; u32 ireg_data; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index f65db1b89fdc..be031aba2706 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -31,6 +31,7 @@ enum cudbg_dbg_entity_type { CUDBG_DEV_LOG = 2, CUDBG_CIM_LA = 3, CUDBG_CIM_MA_LA = 4, + CUDBG_CIM_QCFG = 5, CUDBG_CIM_IBQ_TP0 = 6, CUDBG_CIM_IBQ_TP1 = 7, CUDBG_CIM_IBQ_ULP = 8, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 8b5a12b19844..596f2b8e41cf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -192,6 +192,45 @@ int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init, return rc; } +int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct cudbg_cim_qcfg *cim_qcfg_data; + int rc; + + rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_cim_qcfg), + &temp_buff); + if (rc) + return rc; + + cim_qcfg_data = (struct cudbg_cim_qcfg *)temp_buff.data; + cim_qcfg_data->chip = padap->params.chip; + rc = t4_cim_read(padap, UP_IBQ_0_RDADDR_A, + ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat); + if (rc) { + cudbg_err->sys_err = rc; + cudbg_put_buff(&temp_buff, dbg_buff); + return rc; + } + + rc = t4_cim_read(padap, UP_OBQ_0_REALADDR_A, + ARRAY_SIZE(cim_qcfg_data->obq_wr), + cim_qcfg_data->obq_wr); + if (rc) { + cudbg_err->sys_err = rc; + cudbg_put_buff(&temp_buff, dbg_buff); + return rc; + } + + t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size, + cim_qcfg_data->thres); + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err, int qid) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index ad6eff3c33c3..f42b7420ff09 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -30,6 +30,9 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init, int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int 
cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 8bc1b1decf30..611ece7b7e5a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -31,6 +31,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { { CUDBG_REG_DUMP, cudbg_collect_reg_dump }, { CUDBG_CIM_LA, cudbg_collect_cim_la }, { CUDBG_CIM_MA_LA, cudbg_collect_cim_ma_la }, + { CUDBG_CIM_QCFG, cudbg_collect_cim_qcfg }, { CUDBG_CIM_IBQ_TP0, cudbg_collect_cim_ibq_tp0 }, { CUDBG_CIM_IBQ_TP1, cudbg_collect_cim_ibq_tp1 }, { CUDBG_CIM_IBQ_ULP, cudbg_collect_cim_ibq_ulp }, @@ -92,6 +93,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) case CUDBG_CIM_MA_LA: len = 2 * CIM_MALA_SIZE * 5 * sizeof(u32); break; + case CUDBG_CIM_QCFG: + len = sizeof(struct cudbg_cim_qcfg); + break; case CUDBG_CIM_IBQ_TP0: case CUDBG_CIM_IBQ_TP1: case CUDBG_CIM_IBQ_ULP: -- cgit v1.2.3 From 28b445561fbac2e3c9886231b0a414336878e20f Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Thu, 26 Oct 2017 17:18:35 +0530 Subject: cxgb4: collect RSS dumps Collect RSS table and RSS VF configuration dumps. Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 5 +++ drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 2 + drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 46 +++++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h | 6 +++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 9 +++++ 5 files changed, 68 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index ab15c3dfa04e..b24175b2bb3c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -42,6 +42,11 @@ struct cudbg_cim_qcfg { u32 stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)]; }; +struct cudbg_rss_vf_conf { + u32 rss_vf_vfl; + u32 rss_vf_vfh; +}; + struct ireg_field { u32 ireg_addr; u32 ireg_data; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index be031aba2706..34f241892fca 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -46,6 +46,8 @@ enum cudbg_dbg_entity_type { CUDBG_CIM_OBQ_NCSI = 17, CUDBG_EDC0 = 18, CUDBG_EDC1 = 19, + CUDBG_RSS = 22, + CUDBG_RSS_VF_CONF = 25, CUDBG_TP_INDIRECT = 36, CUDBG_SGE_INDIRECT = 37, CUDBG_ULPRX_LA = 41, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 596f2b8e41cf..efab94465337 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -528,6 +528,52 @@ int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init, MEM_EDC1); } +int cudbg_collect_rss(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + int rc; + + rc = cudbg_get_buff(dbg_buff, RSS_NENTRIES * sizeof(u16), &temp_buff); + if (rc) + return rc; + + rc = t4_read_rss(padap, (u16 *)temp_buff.data); + if (rc) { + cudbg_err->sys_err = rc; + cudbg_put_buff(&temp_buff, dbg_buff); + return rc; + 
} + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + +int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct cudbg_rss_vf_conf *vfconf; + int vf, rc, vf_count; + + vf_count = padap->params.arch.vfcount; + rc = cudbg_get_buff(dbg_buff, + vf_count * sizeof(struct cudbg_rss_vf_conf), + &temp_buff); + if (rc) + return rc; + + vfconf = (struct cudbg_rss_vf_conf *)temp_buff.data; + for (vf = 0; vf < vf_count; vf++) + t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl, + &vfconf[vf].rss_vf_vfh, true); + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index f42b7420ff09..634c643fdd39 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -75,6 +75,12 @@ int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init, int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_rss(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 611ece7b7e5a..7996220db485 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -44,6 +44,8 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { { CUDBG_CIM_OBQ_ULP3, cudbg_collect_cim_obq_ulp3 }, { CUDBG_CIM_OBQ_SGE, cudbg_collect_cim_obq_sge }, { CUDBG_CIM_OBQ_NCSI, cudbg_collect_cim_obq_ncsi }, + { CUDBG_RSS, cudbg_collect_rss }, + { CUDBG_RSS_VF_CONF, cudbg_collect_rss_vf_config }, { CUDBG_TP_INDIRECT, cudbg_collect_tp_indirect }, { CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect }, { CUDBG_ULPRX_LA, cudbg_collect_ulprx_la }, @@ -144,6 +146,13 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) } len = cudbg_mbytes_to_bytes(len); break; + case CUDBG_RSS: + len = RSS_NENTRIES * sizeof(u16); + break; + case CUDBG_RSS_VF_CONF: + len = adap->params.arch.vfcount * + sizeof(struct cudbg_rss_vf_conf); + break; case CUDBG_TP_INDIRECT: switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { case CHELSIO_T5: -- cgit v1.2.3 From 9030e49897f57dea3126e35d97a33588c5307aa1 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Thu, 26 Oct 2017 17:18:36 +0530 Subject: cxgb4: collect TID info dump Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 39 ++++++++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 1 + drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 85 ++++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h | 3 + .../net/ethernet/chelsio/cxgb4/cudbg_lib_common.h | 6 ++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 4 + drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 1 + drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 5 +- 8 files changed, 143 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index b24175b2bb3c..d023e3c5a029 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -21,6 +21,8 @@ #define EDC0_FLAG 3 #define EDC1_FLAG 4 +#define CUDBG_ENTITY_SIGNATURE 0xCCEDB001 + struct card_mem { u16 size_edc0; u16 size_edc1; @@ -75,6 +77,43 @@ struct cudbg_cim_pif_la { u8 data[0]; }; +struct cudbg_tid_info_region { + u32 ntids; + u32 nstids; + u32 stid_base; + u32 hash_base; + + u32 natids; + u32 nftids; + u32 ftid_base; + u32 aftid_base; + u32 aftid_end; + + u32 sftid_base; + u32 nsftids; + + u32 uotid_base; + u32 nuotids; + + u32 sb; + u32 flags; + u32 le_db_conf; + u32 ip_users; + u32 ipv6_users; + + u32 hpftid_base; + u32 nhpftids; +}; + +#define CUDBG_TID_INFO_REV 1 + +struct cudbg_tid_info_region_rev1 { + struct cudbg_ver_hdr ver_hdr; + struct cudbg_tid_info_region tid; + u32 tid_start; + u32 reserved[16]; +}; + #define CUDBG_NUM_ULPTX 11 #define CUDBG_NUM_ULPTX_READ 512 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index 34f241892fca..655259f153f3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -57,6 +57,7 @@ enum cudbg_dbg_entity_type { CUDBG_CIM_OBQ_RXQ1 = 48, CUDBG_PCIE_INDIRECT = 50, CUDBG_PM_INDIRECT = 51, + CUDBG_TID_INFO = 54, CUDBG_MA_INDIRECT = 61, CUDBG_ULPTX_LA = 62, CUDBG_UP_CIM_INDIRECT = 64, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index efab94465337..4697f113f9c6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -902,6 +902,91 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init, return rc; } +int cudbg_collect_tid(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_tid_info_region_rev1 *tid1; + struct cudbg_buffer temp_buff = { 0 }; + struct cudbg_tid_info_region *tid; + u32 para[2], val[2]; + int rc; + + rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_tid_info_region_rev1), + &temp_buff); + if (rc) + return rc; + + tid1 = (struct cudbg_tid_info_region_rev1 *)temp_buff.data; + tid = &tid1->tid; + tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE; + tid1->ver_hdr.revision = CUDBG_TID_INFO_REV; + tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) - + sizeof(struct cudbg_ver_hdr); + +#define FW_PARAM_PFVF_A(param) \ + (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \ + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \ + FW_PARAMS_PARAM_Y_V(0) | \ + FW_PARAMS_PARAM_Z_V(0)) + + para[0] = FW_PARAM_PFVF_A(ETHOFLD_START); + para[1] = FW_PARAM_PFVF_A(ETHOFLD_END); + rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val); + if (rc < 0) { + 
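/* firmware query for the ETHOFLD TID range failed */ +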
cudbg_err->sys_err = rc; + cudbg_put_buff(&temp_buff, dbg_buff); + return rc; + } + tid->uotid_base = val[0]; + tid->nuotids = val[1] - val[0] + 1; + + if (is_t5(padap->params.chip)) { + tid->sb = t4_read_reg(padap, LE_DB_SERVER_INDEX_A) / 4; + } else if (is_t6(padap->params.chip)) { + tid1->tid_start = + t4_read_reg(padap, LE_DB_ACTIVE_TABLE_START_INDEX_A); + tid->sb = t4_read_reg(padap, LE_DB_SRVR_START_INDEX_A); + + para[0] = FW_PARAM_PFVF_A(HPFILTER_START); + para[1] = FW_PARAM_PFVF_A(HPFILTER_END); + rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, + para, val); + if (rc < 0) { + cudbg_err->sys_err = rc; + cudbg_put_buff(&temp_buff, dbg_buff); + return rc; + } + tid->hpftid_base = val[0]; + tid->nhpftids = val[1] - val[0] + 1; + } + + tid->ntids = padap->tids.ntids; + tid->nstids = padap->tids.nstids; + tid->stid_base = padap->tids.stid_base; + tid->hash_base = padap->tids.hash_base; + + tid->natids = padap->tids.natids; + tid->nftids = padap->tids.nftids; + tid->ftid_base = padap->tids.ftid_base; + tid->aftid_base = padap->tids.aftid_base; + tid->aftid_end = padap->tids.aftid_end; + + tid->sftid_base = padap->tids.sftid_base; + tid->nsftids = padap->tids.nsftids; + + tid->flags = padap->flags; + tid->le_db_conf = t4_read_reg(padap, LE_DB_CONFIG_A); + tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A); + tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A); + +#undef FW_PARAM_PFVF_A + + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index 634c643fdd39..f3bfd7cf4186 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -108,6 +108,9 @@ int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init, int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_tid(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h index b150c5d1f7c0..24b33f28e548 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h @@ -57,6 +57,12 @@ struct cudbg_entity_hdr { u32 reserved[5]; }; +struct cudbg_ver_hdr { + u32 signature; + u16 revision; + u16 size; +}; + struct cudbg_buffer { u32 size; u32 offset; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 7996220db485..c5371e2ecf3c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -55,6 +55,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { { CUDBG_CIM_OBQ_RXQ1, cudbg_collect_obq_sge_rx_q1 }, { CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect }, { CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect }, + { CUDBG_TID_INFO, cudbg_collect_tid }, { CUDBG_MA_INDIRECT, cudbg_collect_ma_indirect }, { CUDBG_ULPTX_LA, cudbg_collect_ulptx_la }, { CUDBG_UP_CIM_INDIRECT, cudbg_collect_up_cim_indirect }, @@ -192,6 +193,9 @@ static u32 
cxgb4_get_entity_length(struct adapter *adap, u32 entity) n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32)); len = sizeof(struct ireg_buf) * n * 2; break; + case CUDBG_TID_INFO: + len = sizeof(struct cudbg_tid_info_region_rev1); + break; case CUDBG_MA_INDIRECT: if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { n = sizeof(t6_ma_ireg_array) / diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index b5cd9a5ad808..57a36a048313 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -2856,6 +2856,7 @@ #define T6_LIPMISS_F T6_LIPMISS_V(1U) #define LE_DB_CONFIG_A 0x19c04 +#define LE_DB_ACTIVE_TABLE_START_INDEX_A 0x19c10 #define LE_DB_SERVER_INDEX_A 0x19c18 #define LE_DB_SRVR_START_INDEX_A 0x19c18 #define LE_DB_ACT_CNT_IPV4_A 0x19c20 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 875d4a72b3ef..2ba890926c73 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -1244,9 +1244,12 @@ enum fw_params_param_pfvf { FW_PARAMS_PARAM_PFVF_EQ_END = 0x2C, FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_START = 0x2D, FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E, + FW_PARAMS_PARAM_PFVF_ETHOFLD_START = 0x2F, FW_PARAMS_PARAM_PFVF_ETHOFLD_END = 0x30, FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31, - FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x32, + FW_PARAMS_PARAM_PFVF_HPFILTER_START = 0x32, + FW_PARAMS_PARAM_PFVF_HPFILTER_END = 0x33, + FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x39, FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A, }; -- cgit v1.2.3 From b289593e1398480f5ac1a1df2dae479516a21372 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Thu, 26 Oct 2017 17:18:37 +0530 Subject: cxgb4: collect MPS-TCAM dump Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 21 +++ drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 1 + drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 184 ++++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h | 3 + drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 5 + drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 16 ++ 6 files changed, 230 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index d023e3c5a029..1860cf2b5286 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -114,6 +114,27 @@ struct cudbg_tid_info_region_rev1 { u32 reserved[16]; }; +#define CUDBG_MAX_RPLC_SIZE 128 + +struct cudbg_mps_tcam { + u64 mask; + u32 rplc[8]; + u32 idx; + u32 cls_lo; + u32 cls_hi; + u32 rplc_size; + u32 vniy; + u32 vnix; + u32 dip_hit; + u32 vlan_vld; + u32 repli; + u16 ivlan; + u8 addr[ETH_ALEN]; + u8 lookup_type; + u8 port_num; + u8 reserved[2]; +}; + #define CUDBG_NUM_ULPTX 11 #define CUDBG_NUM_ULPTX_READ 512 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index 655259f153f3..ce7bb909b8fd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -58,6 +58,7 @@ enum cudbg_dbg_entity_type { CUDBG_PCIE_INDIRECT = 50, CUDBG_PM_INDIRECT = 51, CUDBG_TID_INFO = 54, + CUDBG_MPS_TCAM = 57, CUDBG_MA_INDIRECT = 61, CUDBG_ULPTX_LA = 62, CUDBG_UP_CIM_INDIRECT = 64, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 4697f113f9c6..03f1ec0c0160 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -987,6 +987,190 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init, return rc; } +static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask) +{ + *mask = x | y; + y = (__force u64)cpu_to_be64(y); + memcpy(addr, (char *)&y + 2, ETH_ALEN); +} + +static void cudbg_mps_rpl_backdoor(struct adapter *padap, + struct fw_ldst_mps_rplc *mps_rplc) +{ + if (is_t5(padap->params.chip)) { + mps_rplc->rplc255_224 = htonl(t4_read_reg(padap, + MPS_VF_RPLCT_MAP3_A)); + mps_rplc->rplc223_192 = htonl(t4_read_reg(padap, + MPS_VF_RPLCT_MAP2_A)); + mps_rplc->rplc191_160 = htonl(t4_read_reg(padap, + MPS_VF_RPLCT_MAP1_A)); + mps_rplc->rplc159_128 = htonl(t4_read_reg(padap, + MPS_VF_RPLCT_MAP0_A)); + } else { + mps_rplc->rplc255_224 = htonl(t4_read_reg(padap, + MPS_VF_RPLCT_MAP7_A)); + mps_rplc->rplc223_192 = htonl(t4_read_reg(padap, + MPS_VF_RPLCT_MAP6_A)); + mps_rplc->rplc191_160 = htonl(t4_read_reg(padap, + MPS_VF_RPLCT_MAP5_A)); + mps_rplc->rplc159_128 = htonl(t4_read_reg(padap, + MPS_VF_RPLCT_MAP4_A)); + } + mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP3_A)); + mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP2_A)); + mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP1_A)); + mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A)); +} + +static int cudbg_collect_tcam_index(struct adapter *padap, + struct cudbg_mps_tcam *tcam, u32 idx) +{ + u64 tcamy, tcamx, val; + u32 ctl, data2; + int rc = 0; + + if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) { + /* CtlReqID - 1: use Host Driver Requester ID + * CtlCmdType - 0: Read, 1: Write + * CtlTcamSel - 0: TCAM0, 1: TCAM1 + * CtlXYBitSel- 0: Y bit, 1: X bit + 
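* These encodings are applied below via the CTLREQID_V/CTLCMDTYPE_V/CTLTCAMSEL_V/CTLTCAMINDEX_V/CTLXYBITSEL_V field macros. +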
*/ + + /* Read tcamy */ + ctl = CTLREQID_V(1) | CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0); + if (idx < 256) + ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0); + else + ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1); + + t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl); + val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A); + tcamy = DMACH_G(val) << 32; + tcamy |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A); + data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A); + tcam->lookup_type = DATALKPTYPE_G(data2); + + /* 0 - Outer header, 1 - Inner header + * [71:48] bit locations are overloaded for + * outer vs. inner lookup types. + */ + if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) { + /* Inner header VNI */ + tcam->vniy = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2); + tcam->vniy = (tcam->vniy << 16) | VIDL_G(val); + tcam->dip_hit = data2 & DATADIPHIT_F; + } else { + tcam->vlan_vld = data2 & DATAVIDH2_F; + tcam->ivlan = VIDL_G(val); + } + + tcam->port_num = DATAPORTNUM_G(data2); + + /* Read tcamx. Change the control param */ + ctl |= CTLXYBITSEL_V(1); + t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl); + val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A); + tcamx = DMACH_G(val) << 32; + tcamx |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A); + data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A); + if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) { + /* Inner header VNI mask */ + tcam->vnix = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2); + tcam->vnix = (tcam->vnix << 16) | VIDL_G(val); + } + } else { + tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(idx)); + tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(idx)); + } + + /* If no entry, return */ + if (tcamx & tcamy) + return rc; + + tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(idx)); + tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(idx)); + + if (is_t5(padap->params.chip)) + tcam->repli = (tcam->cls_lo & REPLICATE_F); + else if (is_t6(padap->params.chip)) + tcam->repli = (tcam->cls_lo & T6_REPLICATE_F); + + if (tcam->repli) { + struct fw_ldst_cmd ldst_cmd; + struct fw_ldst_mps_rplc mps_rplc; + + memset(&ldst_cmd, 0, sizeof(ldst_cmd)); + ldst_cmd.op_to_addrspace = + htonl(FW_CMD_OP_V(FW_LDST_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F | + FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS)); + ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd)); + ldst_cmd.u.mps.rplc.fid_idx = + htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) | + FW_LDST_CMD_IDX_V(idx)); + + rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd, sizeof(ldst_cmd), + &ldst_cmd); + if (rc) + cudbg_mps_rpl_backdoor(padap, &mps_rplc); + else + mps_rplc = ldst_cmd.u.mps.rplc; + + tcam->rplc[0] = ntohl(mps_rplc.rplc31_0); + tcam->rplc[1] = ntohl(mps_rplc.rplc63_32); + tcam->rplc[2] = ntohl(mps_rplc.rplc95_64); + tcam->rplc[3] = ntohl(mps_rplc.rplc127_96); + if (padap->params.arch.mps_rplc_size > CUDBG_MAX_RPLC_SIZE) { + tcam->rplc[4] = ntohl(mps_rplc.rplc159_128); + tcam->rplc[5] = ntohl(mps_rplc.rplc191_160); + tcam->rplc[6] = ntohl(mps_rplc.rplc223_192); + tcam->rplc[7] = ntohl(mps_rplc.rplc255_224); + } + } + cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask); + tcam->idx = idx; + tcam->rplc_size = padap->params.arch.mps_rplc_size; + return rc; +} + +int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + u32 size = 0, i, n, total_size = 0; + struct cudbg_mps_tcam *tcam; + int rc; + + n = 
padap->params.arch.mps_tcam_size; + size = sizeof(struct cudbg_mps_tcam) * n; + rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + if (rc) + return rc; + + tcam = (struct cudbg_mps_tcam *)temp_buff.data; + for (i = 0; i < n; i++) { + rc = cudbg_collect_tcam_index(padap, tcam, i); + if (rc) { + cudbg_err->sys_err = rc; + cudbg_put_buff(&temp_buff, dbg_buff); + return rc; + } + total_size += sizeof(struct cudbg_mps_tcam); + tcam++; + } + + if (!total_size) { + rc = CUDBG_SYSTEM_ERROR; + cudbg_err->sys_err = rc; + cudbg_put_buff(&temp_buff, dbg_buff); + return rc; + } + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index f3bfd7cf4186..280423f9869d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -111,6 +111,9 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init, int cudbg_collect_tid(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index c5371e2ecf3c..f1a1ece45b97 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -56,6 +56,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { { CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect }, { CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect }, { CUDBG_TID_INFO, cudbg_collect_tid }, + { CUDBG_MPS_TCAM, cudbg_collect_mps_tcam }, { CUDBG_MA_INDIRECT, cudbg_collect_ma_indirect }, { CUDBG_ULPTX_LA, cudbg_collect_ulptx_la }, { CUDBG_UP_CIM_INDIRECT, cudbg_collect_up_cim_indirect }, @@ -196,6 +197,10 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) case CUDBG_TID_INFO: len = sizeof(struct cudbg_tid_info_region_rev1); break; + case CUDBG_MPS_TCAM: + len = sizeof(struct cudbg_mps_tcam) * + adap->params.arch.mps_tcam_size; + break; case CUDBG_MA_INDIRECT: if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { n = sizeof(t6_ma_ireg_array) / diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 57a36a048313..775a591c0fba 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -2439,6 +2439,18 @@ #define MPS_CLS_TCAM_DATA0_A 0xf000 #define MPS_CLS_TCAM_DATA1_A 0xf004 +#define CTLREQID_S 30 +#define CTLREQID_V(x) ((x) << CTLREQID_S) + +#define MPS_VF_RPLCT_MAP0_A 0x1111c +#define MPS_VF_RPLCT_MAP1_A 0x11120 +#define MPS_VF_RPLCT_MAP2_A 0x11124 +#define MPS_VF_RPLCT_MAP3_A 0x11128 +#define MPS_VF_RPLCT_MAP4_A 0x11300 +#define MPS_VF_RPLCT_MAP5_A 0x11304 +#define MPS_VF_RPLCT_MAP6_A 0x11308 +#define MPS_VF_RPLCT_MAP7_A 0x1130c + #define VIDL_S 16 #define VIDL_M 0xffffU #define VIDL_G(x) (((x) >> VIDL_S) & VIDL_M) @@ -2463,6 +2475,10 @@ #define DATAVIDH1_M 0x7fU #define DATAVIDH1_G(x) (((x) >> DATAVIDH1_S) & DATAVIDH1_M) +#define MPS_CLS_TCAM_RDATA0_REQ_ID1_A 0xf020 +#define MPS_CLS_TCAM_RDATA1_REQ_ID1_A 0xf024 +#define 
MPS_CLS_TCAM_RDATA2_REQ_ID1_A 0xf028 + #define USED_S 16 #define USED_M 0x7ffU #define USED_G(x) (((x) >> USED_S) & USED_M) -- cgit v1.2.3 From db8cd7ce208a7e7d440856b5c3e4e96af6dd9917 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Thu, 26 Oct 2017 17:18:38 +0530 Subject: cxgb4: collect PBT tables dump Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 15 +++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 1 + drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 68 +++++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h | 3 + drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 4 ++ 5 files changed, 91 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index 1860cf2b5286..22853372abdf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -145,6 +145,21 @@ struct cudbg_ulptx_la { u32 rd_data[CUDBG_NUM_ULPTX][CUDBG_NUM_ULPTX_READ]; }; +#define CUDBG_CHAC_PBT_ADDR 0x2800 +#define CUDBG_CHAC_PBT_LRF 0x3000 +#define CUDBG_CHAC_PBT_DATA 0x3800 +#define CUDBG_PBT_DYNAMIC_ENTRIES 8 +#define CUDBG_PBT_STATIC_ENTRIES 16 +#define CUDBG_LRF_ENTRIES 8 +#define CUDBG_PBT_DATA_ENTRIES 512 + +struct cudbg_pbt_tables { + u32 pbt_dynamic[CUDBG_PBT_DYNAMIC_ENTRIES]; + u32 pbt_static[CUDBG_PBT_STATIC_ENTRIES]; + u32 lrf_table[CUDBG_LRF_ENTRIES]; + u32 pbt_data[CUDBG_PBT_DATA_ENTRIES]; +}; + #define IREG_NUM_ELEM 4 static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index ce7bb909b8fd..f672799cc8d3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -62,6 +62,7 @@ enum cudbg_dbg_entity_type { CUDBG_MA_INDIRECT = 61, CUDBG_ULPTX_LA = 62, CUDBG_UP_CIM_INDIRECT = 64, + CUDBG_PBT_TABLE = 65, CUDBG_MBOX_LOG = 66, CUDBG_HMA_INDIRECT = 67, CUDBG_MAX_ENTITY = 70, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 03f1ec0c0160..c4096967c434 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -1310,6 +1310,74 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init, return rc; } +int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct cudbg_pbt_tables *pbt; + int i, rc; + u32 addr; + + rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pbt_tables), + &temp_buff); + if (rc) + return rc; + + pbt = (struct cudbg_pbt_tables *)temp_buff.data; + /* PBT dynamic entries */ + addr = CUDBG_CHAC_PBT_ADDR; + for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) { + rc = t4_cim_read(padap, addr + (i * 4), 1, + &pbt->pbt_dynamic[i]); + if (rc) { + cudbg_err->sys_err = rc; + cudbg_put_buff(&temp_buff, dbg_buff); + return rc; + } + } + + /* PBT static entries */ + /* static entries start when bit 6 is set */ + addr = CUDBG_CHAC_PBT_ADDR + (1 << 6); + for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) { + rc = t4_cim_read(padap, addr + (i * 4), 1, + &pbt->pbt_static[i]); + if (rc) { + cudbg_err->sys_err = rc; + cudbg_put_buff(&temp_buff, dbg_buff); + return rc; + } + } + + /* LRF entries */ + 
addr = CUDBG_CHAC_PBT_LRF; + for (i = 0; i < CUDBG_LRF_ENTRIES; i++) { + rc = t4_cim_read(padap, addr + (i * 4), 1, + &pbt->lrf_table[i]); + if (rc) { + cudbg_err->sys_err = rc; + cudbg_put_buff(&temp_buff, dbg_buff); + return rc; + } + } + + /* PBT data entries */ + addr = CUDBG_CHAC_PBT_DATA; + for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) { + rc = t4_cim_read(padap, addr + (i * 4), 1, + &pbt->pbt_data[i]); + if (rc) { + cudbg_err->sys_err = rc; + cudbg_put_buff(&temp_buff, dbg_buff); + return rc; + } + } + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index 280423f9869d..311b330bc3b2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -123,6 +123,9 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init, int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index f1a1ece45b97..003deb345ff2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -60,6 +60,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { { CUDBG_MA_INDIRECT, cudbg_collect_ma_indirect }, { CUDBG_ULPTX_LA, cudbg_collect_ulptx_la }, { CUDBG_UP_CIM_INDIRECT, cudbg_collect_up_cim_indirect }, + { CUDBG_PBT_TABLE, cudbg_collect_pbt_tables }, { CUDBG_HMA_INDIRECT, cudbg_collect_hma_indirect }, }; @@ -215,6 +216,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32)); len = sizeof(struct ireg_buf) * n; break; + case CUDBG_PBT_TABLE: + len = sizeof(struct cudbg_pbt_tables); + break; case CUDBG_MBOX_LOG: len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size; break; -- cgit v1.2.3 From 08c4901bfe0b3beb12e7a5d7749e3522d7b1471e Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Thu, 26 Oct 2017 17:18:39 +0530 Subject: cxgb4: collect hardware scheduler dumps Collect hardware TX traffic scheduler and pace tables. Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 8 ++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 2 + drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 25 ++++++++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h | 3 ++ drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 9 ++++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 4 ++ drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 57 +++++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 1 + drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 10 ++++ 9 files changed, 119 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index 22853372abdf..9757609a86b9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -49,6 +49,14 @@ struct cudbg_rss_vf_conf { u32 rss_vf_vfh; }; +struct cudbg_hw_sched { + u32 kbps[NTX_SCHED]; + u32 ipg[NTX_SCHED]; + u32 pace_tab[NTX_SCHED]; + u32 mode; + u32 map; +}; + struct ireg_field { u32 ireg_addr; u32 ireg_data; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index f672799cc8d3..e5c44b96d0a7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -22,6 +22,7 @@ #define CUDBG_STATUS_NO_MEM -19 #define CUDBG_STATUS_ENTITY_NOT_FOUND -24 #define CUDBG_SYSTEM_ERROR -29 +#define CUDBG_STATUS_CCLK_NOT_DEFINED -32 #define CUDBG_MAJOR_VERSION 1 #define CUDBG_MINOR_VERSION 14 @@ -48,6 +49,7 @@ enum cudbg_dbg_entity_type { CUDBG_EDC1 = 19, CUDBG_RSS = 22, CUDBG_RSS_VF_CONF = 25, + CUDBG_HW_SCHED = 31, CUDBG_TP_INDIRECT = 36, CUDBG_SGE_INDIRECT = 37, CUDBG_ULPRX_LA = 41, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index c4096967c434..0e01a2916ab8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -574,6 +574,31 @@ int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init, return rc; } +int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct cudbg_hw_sched *hw_sched_buff; + int i, rc = 0; + + if (!padap->params.vpd.cclk) + return CUDBG_STATUS_CCLK_NOT_DEFINED; + + rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_hw_sched), + &temp_buff); + hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data; + hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A); + hw_sched_buff->mode = TIMERMODE_G(t4_read_reg(padap, TP_MOD_CONFIG_A)); + t4_read_pace_tbl(padap, hw_sched_buff->pace_tab); + for (i = 0; i < NTX_SCHED; ++i) + t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i], + &hw_sched_buff->ipg[i], true); + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index 311b330bc3b2..3f62c1900fe3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -84,6 +84,9 @@ int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init, int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct 
cudbg_error *cudbg_err); +int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 92a0b022687e..a57761b28edc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1335,6 +1335,12 @@ static inline unsigned int core_ticks_to_us(const struct adapter *adapter, adapter->params.vpd.cclk); } +static inline unsigned int dack_ticks_to_usec(const struct adapter *adap, + unsigned int ticks) +{ + return (ticks << adap->params.tp.dack_re) / core_ticks_per_usec(adap); +} + void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, u32 val); @@ -1636,6 +1642,9 @@ void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp, int filter_index, int *enabled); int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, u32 addr, u32 val); +void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]); +void t4_get_tx_sched(struct adapter *adap, unsigned int sched, + unsigned int *kbps, unsigned int *ipg, bool sleep_ok); int t4_sched_params(struct adapter *adapter, int type, int level, int mode, int rateunit, int ratemode, int channel, int class, int minrate, int maxrate, int weight, int pktsize); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 003deb345ff2..35575e4d020c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -46,6 +46,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { { CUDBG_CIM_OBQ_NCSI, cudbg_collect_cim_obq_ncsi }, { CUDBG_RSS, cudbg_collect_rss }, { CUDBG_RSS_VF_CONF, cudbg_collect_rss_vf_config }, + { CUDBG_HW_SCHED, cudbg_collect_hw_sched }, { CUDBG_TP_INDIRECT, cudbg_collect_tp_indirect }, { CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect }, { CUDBG_ULPRX_LA, cudbg_collect_ulprx_la }, @@ -156,6 +157,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) len = adap->params.arch.vfcount * sizeof(struct cudbg_rss_vf_conf); break; + case CUDBG_HW_SCHED: + len = sizeof(struct cudbg_hw_sched); + break; case CUDBG_TP_INDIRECT: switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { case CHELSIO_T5: diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 006414758f65..c289ca1efc1b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -9547,6 +9547,63 @@ int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf, return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd); } +/** + * t4_read_pace_tbl - read the pace table + * @adap: the adapter + * @pace_vals: holds the returned values + * + * Returns the values of TP's pace table in microseconds. 
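+ *
+ * As a rough sketch of the conversion (using the dack_ticks_to_usec()
+ * helper added above, and assuming core_ticks_per_usec() returns the
+ * core clock rate in ticks per microsecond), each raw value read back
+ * from TP_PACE_TABLE_A is scaled by the delayed-ACK timer resolution
+ * before being converted from core clock ticks:
+ *
+ *	usec = (v << adap->params.tp.dack_re) / core_ticks_per_usec(adap);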
+ */ +void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]) +{ + unsigned int i, v; + + for (i = 0; i < NTX_SCHED; i++) { + t4_write_reg(adap, TP_PACE_TABLE_A, 0xffff0000 + i); + v = t4_read_reg(adap, TP_PACE_TABLE_A); + pace_vals[i] = dack_ticks_to_usec(adap, v); + } +} + +/** + * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler + * @adap: the adapter + * @sched: the scheduler index + * @kbps: the byte rate in Kbps + * @ipg: the interpacket delay in tenths of nanoseconds + * @sleep_ok: if true we may sleep while awaiting command completion + * + * Return the current configuration of a HW Tx scheduler. + */ +void t4_get_tx_sched(struct adapter *adap, unsigned int sched, + unsigned int *kbps, unsigned int *ipg, bool sleep_ok) +{ + unsigned int v, addr, bpt, cpt; + + if (kbps) { + addr = TP_TX_MOD_Q1_Q0_RATE_LIMIT_A - sched / 2; + t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok); + if (sched & 1) + v >>= 16; + bpt = (v >> 8) & 0xff; + cpt = v & 0xff; + if (!cpt) { + *kbps = 0; /* scheduler disabled */ + } else { + v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */ + *kbps = (v * bpt) / 125; + } + } + if (ipg) { + addr = TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR_A - sched / 2; + t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok); + if (sched & 1) + v >>= 16; + v &= 0xffff; + *ipg = (10000 * v) / core_ticks_per_usec(adap); + } +} + int t4_sched_params(struct adapter *adapter, int type, int level, int mode, int rateunit, int ratemode, int channel, int class, int minrate, int maxrate, int weight, int pktsize) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h index 7f59ca458431..7c6af14905c2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h @@ -47,6 +47,7 @@ enum { TCB_SIZE = 128, /* TCB size */ NMTUS = 16, /* size of MTU table */ NCCTRL_WIN = 32, /* # of congestion control windows */ + NTX_SCHED = 8, /* # of HW Tx scheduling queues */ PM_NSTATS = 5, /* # of PM stats */ T6_PM_NSTATS = 7, /* # of PM stats in T6 */ MBOX_LEN = 64, /* mailbox size in bytes */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 775a591c0fba..483fb7644355 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1415,6 +1415,7 @@ #define ROWINDEX_V(x) ((x) << ROWINDEX_S) #define TP_CCTRL_TABLE_A 0x7ddc +#define TP_PACE_TABLE_A 0x7dd8 #define TP_MTU_TABLE_A 0x7de4 #define MTUINDEX_S 24 @@ -1449,6 +1450,15 @@ #define TP_TM_PIO_ADDR_A 0x7e18 #define TP_TM_PIO_DATA_A 0x7e1c +#define TP_MOD_CONFIG_A 0x7e24 + +#define TIMERMODE_S 8 +#define TIMERMODE_M 0xffU +#define TIMERMODE_G(x) (((x) >> TIMERMODE_S) & TIMERMODE_M) + +#define TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR_A 0x3 +#define TP_TX_MOD_Q1_Q0_RATE_LIMIT_A 0x8 + #define TP_PIO_ADDR_A 0x7e40 #define TP_PIO_DATA_A 0x7e44 #define TP_MIB_INDEX_A 0x7e50 -- cgit v1.2.3 From 6f92a6544f1a4ed2d495a937283f01ee7d590fec Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Thu, 26 Oct 2017 17:18:40 +0530 Subject: cxgb4: collect hardware misc dumps Collect path mtu, PM stats, TP clock info, congestion control, and VPD data dumps. Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
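As a sanity check on the rate computation in t4_get_tx_sched() above, consider purely illustrative numbers (cclk = 250000 kHz, cpt = 100 core cycles per tick, bpt = 5 bytes per tick; none of these values come from the patch itself):

	v    = (250000 * 1000) / 100;	/* 2,500,000 scheduler ticks/s */
	kbps = (v * 5) / 125;		/* 12,500,000 bytes/s = 100,000 Kbps */

i.e. dividing the byte rate by 125 converts bytes per second to kilobits per second, since 125 bytes = 1000 bits.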
Miller --- drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 36 ++++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 5 + drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 135 ++++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h | 15 +++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 20 ++++ 5 files changed, 211 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index 9757609a86b9..239c43084e77 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -49,6 +49,13 @@ struct cudbg_rss_vf_conf { u32 rss_vf_vfh; }; +struct cudbg_pm_stats { + u32 tx_cnt[T6_PM_NSTATS]; + u32 rx_cnt[T6_PM_NSTATS]; + u64 tx_cyc[T6_PM_NSTATS]; + u64 rx_cyc[T6_PM_NSTATS]; +}; + struct cudbg_hw_sched { u32 kbps[NTX_SCHED]; u32 ipg[NTX_SCHED]; @@ -85,6 +92,22 @@ struct cudbg_cim_pif_la { u8 data[0]; }; +struct cudbg_clk_info { + u64 retransmit_min; + u64 retransmit_max; + u64 persist_timer_min; + u64 persist_timer_max; + u64 keepalive_idle_timer; + u64 keepalive_interval; + u64 initial_srtt; + u64 finwait2_timer; + u32 dack_timer; + u32 res; + u32 cclk_ps; + u32 tre; + u32 dack_re; +}; + struct cudbg_tid_info_region { u32 ntids; u32 nstids; @@ -143,6 +166,19 @@ struct cudbg_mps_tcam { u8 reserved[2]; }; +struct cudbg_vpd_data { + u8 sn[SERNUM_LEN + 1]; + u8 bn[PN_LEN + 1]; + u8 na[MACADDR_LEN + 1]; + u8 mn[ID_LEN + 1]; + u16 fw_major; + u16 fw_minor; + u16 fw_micro; + u16 fw_build; + u32 scfg_vers; + u32 vpd_vers; +}; + #define CUDBG_NUM_ULPTX 11 #define CUDBG_NUM_ULPTX_READ 512 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index e5c44b96d0a7..e484c514e9ae 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -49,18 +49,23 @@ enum cudbg_dbg_entity_type { CUDBG_EDC1 = 19, CUDBG_RSS = 22, CUDBG_RSS_VF_CONF = 25, + CUDBG_PATH_MTU = 27, + CUDBG_PM_STATS = 30, CUDBG_HW_SCHED = 31, CUDBG_TP_INDIRECT = 36, CUDBG_SGE_INDIRECT = 37, CUDBG_ULPRX_LA = 41, CUDBG_TP_LA = 43, CUDBG_CIM_PIF_LA = 45, + CUDBG_CLK = 46, CUDBG_CIM_OBQ_RXQ0 = 47, CUDBG_CIM_OBQ_RXQ1 = 48, CUDBG_PCIE_INDIRECT = 50, CUDBG_PM_INDIRECT = 51, CUDBG_TID_INFO = 54, CUDBG_MPS_TCAM = 57, + CUDBG_VPD_DATA = 58, + CUDBG_CCTRL = 60, CUDBG_MA_INDIRECT = 61, CUDBG_ULPTX_LA = 62, CUDBG_UP_CIM_INDIRECT = 64, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 0e01a2916ab8..fe3a9ef0ec3f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -574,6 +574,44 @@ int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init, return rc; } +int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + int rc; + + rc = cudbg_get_buff(dbg_buff, NMTUS * sizeof(u16), &temp_buff); + if (rc) + return rc; + + t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL); + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + +int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct cudbg_pm_stats *pm_stats_buff; + int rc; + + rc = 
cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pm_stats), + &temp_buff); + if (rc) + return rc; + + pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data; + t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc); + t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc); + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) @@ -813,6 +851,55 @@ int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init, return rc; } +int cudbg_collect_clk_info(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct cudbg_clk_info *clk_info_buff; + u64 tp_tick_us; + int rc; + + if (!padap->params.vpd.cclk) + return CUDBG_STATUS_CCLK_NOT_DEFINED; + + rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_clk_info), + &temp_buff); + if (rc) + return rc; + + clk_info_buff = (struct cudbg_clk_info *)temp_buff.data; + clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* psec */ + clk_info_buff->res = t4_read_reg(padap, TP_TIMER_RESOLUTION_A); + clk_info_buff->tre = TIMERRESOLUTION_G(clk_info_buff->res); + clk_info_buff->dack_re = DELAYEDACKRESOLUTION_G(clk_info_buff->res); + tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000; + + clk_info_buff->dack_timer = + (clk_info_buff->cclk_ps << clk_info_buff->dack_re) / 1000000 * + t4_read_reg(padap, TP_DACK_TIMER_A); + clk_info_buff->retransmit_min = + tp_tick_us * t4_read_reg(padap, TP_RXT_MIN_A); + clk_info_buff->retransmit_max = + tp_tick_us * t4_read_reg(padap, TP_RXT_MAX_A); + clk_info_buff->persist_timer_min = + tp_tick_us * t4_read_reg(padap, TP_PERS_MIN_A); + clk_info_buff->persist_timer_max = + tp_tick_us * t4_read_reg(padap, TP_PERS_MAX_A); + clk_info_buff->keepalive_idle_timer = + tp_tick_us * t4_read_reg(padap, TP_KEEP_IDLE_A); + clk_info_buff->keepalive_interval = + tp_tick_us * t4_read_reg(padap, TP_KEEP_INTVL_A); + clk_info_buff->initial_srtt = + tp_tick_us * INITSRTT_G(t4_read_reg(padap, TP_INIT_SRTT_A)); + clk_info_buff->finwait2_timer = + tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A); + + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) @@ -1196,6 +1283,54 @@ int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init, return rc; } +int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct cudbg_vpd_data *vpd_data; + int rc; + + rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_vpd_data), + &temp_buff); + if (rc) + return rc; + + vpd_data = (struct cudbg_vpd_data *)temp_buff.data; + memcpy(vpd_data->sn, padap->params.vpd.sn, SERNUM_LEN + 1); + memcpy(vpd_data->bn, padap->params.vpd.pn, PN_LEN + 1); + memcpy(vpd_data->na, padap->params.vpd.na, MACADDR_LEN + 1); + memcpy(vpd_data->mn, padap->params.vpd.id, ID_LEN + 1); + vpd_data->scfg_vers = padap->params.scfg_vers; + vpd_data->vpd_vers = padap->params.vpd_vers; + vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(padap->params.fw_vers); + vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(padap->params.fw_vers); + vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(padap->params.fw_vers); + 
vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(padap->params.fw_vers); + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + +int cudbg_collect_cctrl(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + u32 size; + int rc; + + size = sizeof(u16) * NMTUS * NCCTRL_WIN; + rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + if (rc) + return rc; + + t4_read_cong_tbl(padap, (void *)temp_buff.data); + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index 3f62c1900fe3..230ba88a6a81 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -84,6 +84,12 @@ int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init, int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); @@ -99,6 +105,9 @@ int cudbg_collect_tp_la(struct cudbg_init *pdbg_init, int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_clk_info(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); @@ -117,6 +126,12 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init, int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); +int cudbg_collect_cctrl(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 35575e4d020c..7373617da635 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -46,18 +46,23 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { { CUDBG_CIM_OBQ_NCSI, cudbg_collect_cim_obq_ncsi }, { CUDBG_RSS, cudbg_collect_rss }, { CUDBG_RSS_VF_CONF, cudbg_collect_rss_vf_config }, + { CUDBG_PATH_MTU, cudbg_collect_path_mtu }, + { CUDBG_PM_STATS, cudbg_collect_pm_stats }, { CUDBG_HW_SCHED, cudbg_collect_hw_sched }, { CUDBG_TP_INDIRECT, cudbg_collect_tp_indirect }, { CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect }, { CUDBG_ULPRX_LA, cudbg_collect_ulprx_la }, { CUDBG_TP_LA, cudbg_collect_tp_la }, { CUDBG_CIM_PIF_LA, cudbg_collect_cim_pif_la }, + { CUDBG_CLK, cudbg_collect_clk_info }, { CUDBG_CIM_OBQ_RXQ0, cudbg_collect_obq_sge_rx_q0 }, { CUDBG_CIM_OBQ_RXQ1, 
cudbg_collect_obq_sge_rx_q1 }, { CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect }, { CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect }, { CUDBG_TID_INFO, cudbg_collect_tid }, { CUDBG_MPS_TCAM, cudbg_collect_mps_tcam }, + { CUDBG_VPD_DATA, cudbg_collect_vpd_data }, + { CUDBG_CCTRL, cudbg_collect_cctrl }, { CUDBG_MA_INDIRECT, cudbg_collect_ma_indirect }, { CUDBG_ULPTX_LA, cudbg_collect_ulptx_la }, { CUDBG_UP_CIM_INDIRECT, cudbg_collect_up_cim_indirect }, @@ -157,6 +162,12 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) len = adap->params.arch.vfcount * sizeof(struct cudbg_rss_vf_conf); break; + case CUDBG_PATH_MTU: + len = NMTUS * sizeof(u16); + break; + case CUDBG_PM_STATS: + len = sizeof(struct cudbg_pm_stats); + break; case CUDBG_HW_SCHED: len = sizeof(struct cudbg_hw_sched); break; @@ -191,6 +202,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) len = sizeof(struct cudbg_cim_pif_la); len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32); break; + case CUDBG_CLK: + len = sizeof(struct cudbg_clk_info); + break; case CUDBG_PCIE_INDIRECT: n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32)); len = sizeof(struct ireg_buf) * n * 2; @@ -206,6 +220,12 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) len = sizeof(struct cudbg_mps_tcam) * adap->params.arch.mps_tcam_size; break; + case CUDBG_VPD_DATA: + len = sizeof(struct cudbg_vpd_data); + break; + case CUDBG_CCTRL: + len = sizeof(u16) * NMTUS * NCCTRL_WIN; + break; case CUDBG_MA_INDIRECT: if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) { n = sizeof(t6_ma_ireg_array) / -- cgit v1.2.3 From 4fa112f6b5fb055478e76f2b446673565d2ea35f Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 26 Oct 2017 07:16:01 -0500 Subject: net: bcmgenet: Use BUG_ON instead of if condition followed by BUG Use BUG_ON instead of if condition followed by BUG. Something to notice in this particular case is that unlikely() is already being called inside the BUG_ON macro. This issue was detected with the help of Coccinelle. Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 9713374ebf14..24b4f4ceceef 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1580,8 +1580,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) for (i = 0; i <= nr_frags; i++) { tx_cb_ptr = bcmgenet_get_txcb(priv, ring); - if (unlikely(!tx_cb_ptr)) - BUG(); + BUG_ON(!tx_cb_ptr); if (!i) { /* Transmit single SKB or head of fragment list */ -- cgit v1.2.3 From 5bca178eed601cd4584c38c5290f7abbcacf3fb3 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 26 Oct 2017 07:27:45 -0500 Subject: net: faraday: ftmac100: Use BUG_ON instead of if condition followed by BUG. Notice that in this particular case unlikely() is already being called inside the BUG_ON macro. This issue was detected with the help of Coccinelle. Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S.
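These two Coccinelle-driven cleanups apply the same mechanical transformation; as a minimal before/after sketch, with a hypothetical condition cond standing in for the driver-specific checks:

	/* before: open-coded test followed by BUG() */
	if (unlikely(!cond))
		BUG();

	/* after: equivalent, since BUG_ON() already wraps its argument in unlikely() */
	BUG_ON(!cond);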
Miller --- drivers/net/ethernet/faraday/ftmac100.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index 66928a922824..aecc76504b69 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -402,6 +402,7 @@ static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed) struct page *page; dma_addr_t map; int length; + bool ret; rxdes = ftmac100_rx_locate_first_segment(priv); if (!rxdes) @@ -416,8 +417,8 @@ static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed) * It is impossible to get multi-segment packets * because we always provide big enough receive buffers. */ - if (unlikely(!ftmac100_rxdes_last_segment(rxdes))) - BUG(); + ret = ftmac100_rxdes_last_segment(rxdes); + BUG_ON(!ret); /* start processing */ skb = netdev_alloc_skb_ip_align(netdev, 128); -- cgit v1.2.3 From 57922b0a2f7ef9effbcdbbf7d1f8dad95aa567f7 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 26 Oct 2017 11:51:19 -0400 Subject: bnxt_en: Update firmware interface to 1.8.3.1 Vxlan encap/decap filters are added to this firmware spec. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 420 ++++++++++++++++++++++++-- 1 file changed, 396 insertions(+), 24 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index cb04cc76e8ad..c99f4d0880e4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -11,21 +11,21 @@ #ifndef BNXT_HSI_H #define BNXT_HSI_H -/* HSI and HWRM Specification 1.8.1 */ +/* HSI and HWRM Specification 1.8.3 */ #define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 8 -#define HWRM_VERSION_UPDATE 1 +#define HWRM_VERSION_UPDATE 3 -#define HWRM_VERSION_RSVD 4 /* non-zero means beta version */ +#define HWRM_VERSION_RSVD 1 /* non-zero means beta version */ -#define HWRM_VERSION_STR "1.8.1.4" +#define HWRM_VERSION_STR "1.8.3.1" /* * Following is the signature for HWRM message field that indicates not * applicable (All F's). Need to cast it the size of the field if needed. */ #define HWRM_NA_SIGNATURE ((__le32)(-1)) #define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */ -#define HWRM_MAX_RESP_LEN (248) /* hwrm_selftest_qlist */ +#define HWRM_MAX_RESP_LEN (280) /* hwrm_selftest_qlist */ #define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. 
*/ #define HW_HASH_KEY_SIZE 40 #define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */ @@ -111,6 +111,7 @@ struct hwrm_async_event_cmpl { #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL + #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL __le32 event_data2; u8 opaque_v; @@ -835,8 +836,7 @@ struct hwrm_func_qcfg_output { u8 port_pf_cnt; #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL __le16 dflt_vnic_id; - u8 unused_0; - u8 unused_1; + __le16 max_mtu_configured; __le32 min_bw; #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0 @@ -873,12 +873,12 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL - u8 unused_2; + u8 unused_0; __le16 alloc_vfs; __le32 alloc_mcast_filters; __le32 alloc_hw_ring_grps; __le16 alloc_sp_tx_rings; - u8 unused_3; + u8 unused_1; u8 valid; }; @@ -3407,6 +3407,7 @@ struct hwrm_vnic_cfg_input { #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL + #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL __le32 enables; #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL @@ -3463,6 +3464,7 @@ struct hwrm_vnic_qcaps_output { #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL + #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRROING_CAPABLE_VNIC_CAP 0x40UL __le32 unused_2; u8 unused_3; u8 unused_4; @@ -3994,6 +3996,7 @@ struct hwrm_cfa_l2_filter_alloc_input { #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL u8 unused_7; __le16 dst_id; @@ -4122,6 +4125,14 @@ struct hwrm_cfa_l2_set_rx_mask_output { u8 valid; }; +/* Command specific Error Codes (8 bytes) */ +struct hwrm_cfa_l2_set_rx_mask_cmd_err { + u8 code; + #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL + #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL + u8 unused_0[7]; +}; + /* hwrm_cfa_tunnel_filter_alloc */ /* Input (88 bytes) */ struct hwrm_cfa_tunnel_filter_alloc_input { @@ -4161,6 +4172,7 @@ struct hwrm_cfa_tunnel_filter_alloc_input { #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL u8 unused_0; __le32 vni; @@ -4323,6 +4335,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input { #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL u8 pri_hint; #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL @@ -4355,6 +4368,14 @@ struct 
hwrm_cfa_ntuple_filter_alloc_output { u8 valid; }; +/* Command specific Error Codes (8 bytes) */ +struct hwrm_cfa_ntuple_filter_alloc_cmd_err { + u8 code; + #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL + #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR 0x1UL + u8 unused_0[7]; +}; + /* hwrm_cfa_ntuple_filter_free */ /* Input (24 bytes) */ struct hwrm_cfa_ntuple_filter_free_input { @@ -4413,6 +4434,116 @@ struct hwrm_cfa_ntuple_filter_cfg_output { u8 valid; }; +/* hwrm_cfa_decap_filter_alloc */ +/* Input (104 bytes) */ +struct hwrm_cfa_decap_filter_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL + __le32 enables; + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL + #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL + __be32 tunnel_id; + u8 tunnel_type; + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + u8 unused_0; + __le16 unused_1; + u8 src_macaddr[6]; + u8 unused_2; + u8 unused_3; + u8 dst_macaddr[6]; + __be16 ovlan_vid; + __be16 ivlan_vid; + __be16 t_ovlan_vid; + __be16 t_ivlan_vid; + __be16 ethertype; + u8 ip_addr_type; + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL + u8 ip_protocol; + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL + #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL + u8 unused_4; + u8 unused_5; + u8 unused_6[3]; + u8 unused_7; + __be32 src_ipaddr[4]; + __be32 dst_ipaddr[4]; + __be16 src_port; + __be16 dst_port; + __le16 dst_id; + __le16 l2_ctxt_ref_id; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_decap_filter_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + 
__le32 decap_filter_id; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_cfa_decap_filter_free */ +/* Input (24 bytes) */ +struct hwrm_cfa_decap_filter_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 decap_filter_id; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_decap_filter_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + /* hwrm_cfa_flow_alloc */ /* Input (128 bytes) */ struct hwrm_cfa_flow_alloc_input { @@ -4634,6 +4765,7 @@ struct hwrm_tunnel_dst_port_query_input { u8 tunnel_type; #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL u8 unused_0[7]; }; @@ -4662,9 +4794,10 @@ struct hwrm_tunnel_dst_port_alloc_input { u8 tunnel_type; #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL u8 unused_0; __be16 tunnel_dst_port_val; - __le32 unused_1; + __be32 unused_1; }; /* Output (16 bytes) */ @@ -4693,6 +4826,7 @@ struct hwrm_tunnel_dst_port_free_input { u8 tunnel_type; #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL u8 unused_0; __le16 tunnel_dst_port_id; __le32 unused_1; @@ -4848,6 +4982,8 @@ struct hwrm_fw_reset_input { #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL u8 selfrst_status; #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL @@ -4888,6 +5024,8 @@ struct hwrm_fw_qstatus_input { #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL u8 unused_0[7]; }; @@ -5324,6 +5462,32 @@ struct hwrm_wol_reason_qcfg_output { u8 valid; }; +/* hwrm_dbg_read_direct */ +/* Input (32 bytes) */ +struct hwrm_dbg_read_direct_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 host_dest_addr; + __le32 read_addr; + __le32 read_len32; +}; + +/* Output (16 bytes) */ +struct hwrm_dbg_read_direct_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + /* hwrm_nvm_read */ /* Input (40 bytes) */ struct hwrm_nvm_read_input { @@ -5676,6 +5840,105 @@ struct hwrm_nvm_install_update_cmd_err { u8 unused_0[7]; }; +/* hwrm_nvm_get_variable */ +/* Input (40 bytes) */ +struct hwrm_nvm_get_variable_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 dest_data_addr; + __le16 data_len; + __le16 option_num; + #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL + #define NVM_GET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL + __le16 dimensions; + __le16 index_0; + __le16 index_1; + __le16 index_2; + __le16 index_3; 
+ u8 flags; + #define NVM_GET_VARIABLE_REQ_FLAGS_FACTORY_DFLT 0x1UL + u8 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_nvm_get_variable_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 data_len; + __le16 option_num; + #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_0 0x0UL + #define NVM_GET_VARIABLE_RESP_OPTION_NUM_RSVD_FFFF 0xffffUL + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* Command specific Error Codes (8 bytes) */ +struct hwrm_nvm_get_variable_cmd_err { + u8 code; + #define NVM_GET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL + #define NVM_GET_VARIABLE_CMD_ERR_CODE_LEN_TOO_SHORT 0x3UL + u8 unused_0[7]; +}; + +/* hwrm_nvm_set_variable */ +/* Input (40 bytes) */ +struct hwrm_nvm_set_variable_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le16 data_len; + __le16 option_num; + #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_0 0x0UL + #define NVM_SET_VARIABLE_REQ_OPTION_NUM_RSVD_FFFF 0xffffUL + __le16 dimensions; + __le16 index_0; + __le16 index_1; + __le16 index_2; + __le16 index_3; + u8 flags; + #define NVM_SET_VARIABLE_REQ_FLAGS_FORCE_FLUSH 0x1UL + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_MASK 0xeUL + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_SFT 1 + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_NONE (0x0UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 (0x1UL << 1) + #define NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_LAST NVM_SET_VARIABLE_REQ_FLAGS_ENCRYPT_MODE_HMAC_SHA1 + u8 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_nvm_set_variable_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* Command specific Error Codes (8 bytes) */ +struct hwrm_nvm_set_variable_cmd_err { + u8 code; + #define NVM_SET_VARIABLE_CMD_ERR_CODE_UNKNOWN 0x0UL + #define NVM_SET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST 0x1UL + #define NVM_SET_VARIABLE_CMD_ERR_CODE_CORRUPT_VAR 0x2UL + u8 unused_0[7]; +}; + /* hwrm_selftest_qlist */ /* Input (16 bytes) */ struct hwrm_selftest_qlist_input { @@ -5686,7 +5949,7 @@ struct hwrm_selftest_qlist_input { __le64 resp_addr; }; -/* Output (248 bytes) */ +/* Output (280 bytes) */ struct hwrm_selftest_qlist_output { __le16 error_code; __le16 req_type; @@ -5698,15 +5961,15 @@ struct hwrm_selftest_qlist_output { #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_LINK_TEST 0x2UL #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_REGISTER_TEST 0x4UL #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_MEMORY_TEST 0x8UL - #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_EYE_TEST 0x10UL - #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_EYE_TEST 0x20UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_QLIST_RESP_AVAILABLE_TESTS_ETHERNET_SERDES_TEST 0x20UL u8 offline_tests; #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_NVM_TEST 0x1UL #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_LINK_TEST 0x2UL #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_REGISTER_TEST 0x4UL #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_MEMORY_TEST 0x8UL - #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_EYE_TEST 0x10UL - #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_EYE_TEST 0x20UL + #define SELFTEST_QLIST_RESP_OFFLINE_TESTS_PCIE_SERDES_TEST 0x10UL + #define 
SELFTEST_QLIST_RESP_OFFLINE_TESTS_ETHERNET_SERDES_TEST 0x20UL u8 unused_0; __le16 test_timeout; u8 unused_1; @@ -5719,6 +5982,11 @@ struct hwrm_selftest_qlist_output { char test5_name[32]; char test6_name[32]; char test7_name[32]; + __le32 unused_3; + u8 unused_4; + u8 unused_5; + u8 unused_6; + u8 valid; }; /* hwrm_selftest_exec */ @@ -5734,8 +6002,8 @@ struct hwrm_selftest_exec_input { #define SELFTEST_EXEC_REQ_FLAGS_LINK_TEST 0x2UL #define SELFTEST_EXEC_REQ_FLAGS_REGISTER_TEST 0x4UL #define SELFTEST_EXEC_REQ_FLAGS_MEMORY_TEST 0x8UL - #define SELFTEST_EXEC_REQ_FLAGS_PCIE_EYE_TEST 0x10UL - #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_EYE_TEST 0x20UL + #define SELFTEST_EXEC_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_EXEC_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL u8 unused_0[7]; }; @@ -5750,16 +6018,21 @@ struct hwrm_selftest_exec_output { #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_LINK_TEST 0x2UL #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_REGISTER_TEST 0x4UL #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_MEMORY_TEST 0x8UL - #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_EYE_TEST 0x10UL - #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_EYE_TEST 0x20UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_EXEC_RESP_REQUESTED_TESTS_ETHERNET_SERDES_TEST 0x20UL u8 test_success; #define SELFTEST_EXEC_RESP_TEST_SUCCESS_NVM_TEST 0x1UL #define SELFTEST_EXEC_RESP_TEST_SUCCESS_LINK_TEST 0x2UL #define SELFTEST_EXEC_RESP_TEST_SUCCESS_REGISTER_TEST 0x4UL #define SELFTEST_EXEC_RESP_TEST_SUCCESS_MEMORY_TEST 0x8UL - #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_EYE_TEST 0x10UL - #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_EYE_TEST 0x20UL - __le16 unused_0[3]; + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_EXEC_RESP_TEST_SUCCESS_ETHERNET_SERDES_TEST 0x20UL + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 unused_4; + u8 valid; }; /* hwrm_selftest_irq */ @@ -5772,12 +6045,50 @@ struct hwrm_selftest_irq_input { __le64 resp_addr; }; -/* Output (8 bytes) */ +/* Output (16 bytes) */ struct hwrm_selftest_irq_output { __le16 error_code; __le16 req_type; __le16 seq_id; __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_selftest_retrieve_serdes_data */ +/* Input (32 bytes) */ +struct hwrm_selftest_retrieve_serdes_data_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 resp_data_addr; + __le32 resp_data_offset; + __le16 data_len; + u8 flags; + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_MASK 0xfUL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_UNUSED_TEST_SFT 0 + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_PCIE_SERDES_TEST 0x10UL + #define SELFTEST_RETRIEVE_SERDES_DATA_REQ_FLAGS_ETHERNET_SERDES_TEST 0x20UL + u8 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_selftest_retrieve_serdes_data_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 total_data_len; + __le16 copied_data_len; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; }; /* Hardware Resource Manager Specification */ @@ -5938,10 +6249,16 @@ struct cmd_nums { #define HWRM_CFA_DECAP_FILTER_ALLOC (0x108UL) #define HWRM_CFA_DECAP_FILTER_FREE (0x109UL) #define HWRM_CFA_VLAN_ANTISPOOF_QCFG (0x10aUL) + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC (0x10bUL) + #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE (0x10cUL) + #define HWRM_CFA_PAIR_ALLOC (0x10dUL) + #define HWRM_CFA_PAIR_FREE 
(0x10eUL) + #define HWRM_CFA_PAIR_INFO (0x10fUL) + #define HWRM_FW_IPC_MSG (0x110UL) #define HWRM_SELFTEST_QLIST (0x200UL) #define HWRM_SELFTEST_EXEC (0x201UL) #define HWRM_SELFTEST_IRQ (0x202UL) - #define HWRM_SELFTEST_RETREIVE_EYE_DATA (0x203UL) + #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA (0x203UL) #define HWRM_DBG_READ_DIRECT (0xff10UL) #define HWRM_DBG_READ_INDIRECT (0xff11UL) #define HWRM_DBG_WRITE_DIRECT (0xff12UL) @@ -5949,6 +6266,9 @@ struct cmd_nums { #define HWRM_DBG_DUMP (0xff14UL) #define HWRM_DBG_ERASE_NVM (0xff15UL) #define HWRM_DBG_CFG (0xff16UL) + #define HWRM_DBG_COREDUMP_LIST (0xff17UL) + #define HWRM_DBG_COREDUMP_INITIATE (0xff18UL) + #define HWRM_DBG_COREDUMP_RETRIEVE (0xff19UL) #define HWRM_NVM_FACTORY_DEFAULTS (0xffeeUL) #define HWRM_NVM_VALIDATE_OPTION (0xffefUL) #define HWRM_NVM_FLUSH (0xfff0UL) @@ -6123,6 +6443,58 @@ struct rx_port_stats { __le64 rx_stat_err; }; +/* VXLAN IPv4 encapsulation structure (16 bytes) */ +struct hwrm_vxlan_ipv4_hdr { + u8 ver_hlen; + #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL + #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0 + #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL + #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4 + u8 tos; + __be16 ip_id; + __be16 flags_frag_offset; + u8 ttl; + u8 protocol; + __be32 src_ip_addr; + __be32 dest_ip_addr; +}; + +/* VXLAN IPv6 encapsulation structure (32 bytes) */ +struct hwrm_vxlan_ipv6_hdr { + __be32 ver_tc_flow_label; + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL + #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL + __be16 payload_len; + u8 next_hdr; + u8 ttl; + __be32 src_ip_addr[4]; + __be32 dest_ip_addr[4]; +}; + +/* VXLAN encapsulation structure (72 bytes) */ +struct hwrm_cfa_encap_data_vxlan { + u8 src_mac_addr[6]; + __le16 unused_0; + u8 dst_mac_addr[6]; + u8 num_vlan_tags; + u8 unused_1; + __be16 ovlan_tpid; + __be16 ovlan_tci; + __be16 ivlan_tpid; + __be16 ivlan_tci; + __le32 l3[10]; + #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL + #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL + #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL + __be16 src_port; + __be16 dst_port; + __be32 vni; +}; + /* Periodic Statistics Context DMA to host (160 bytes) */ struct ctx_hw_stats { __le64 rx_ucast_pkts; -- cgit v1.2.3 From 8ed693b7bbd179949f6947adaae5eff2e386a534 Mon Sep 17 00:00:00 2001 From: Ray Jui Date: Thu, 26 Oct 2017 11:51:20 -0400 Subject: bnxt_en: Add PCIe device ID for bcm58804 Add new PCIe device ID and chip number for bcm58804 Signed-off-by: Ray Jui Signed-off-by: Michael Chan Signed-off-by: David S. 
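For reference, PCI_VDEVICE() in the device table below fills in the vendor/device pair and wildcards the subsystem IDs, so the new bcm58804 entry is shorthand for roughly the following (assuming the standard Broadcom vendor ID 0x14e4):

	{ .vendor = PCI_VENDOR_ID_BROADCOM, .device = 0xd804,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID },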
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 +++ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2 ++ 2 files changed, 5 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 24d55724ceff..a9db4e677d24 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -108,6 +108,7 @@ enum board_idx { BCM57452, BCM57454, BCM58802, + BCM58804, BCM58808, NETXTREME_E_VF, NETXTREME_C_VF, @@ -146,6 +147,7 @@ static const struct { [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" }, [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" }, + [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" }, [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" }, @@ -186,6 +188,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = { { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 }, { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 }, { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 }, + { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 }, #ifdef CONFIG_BNXT_SRIOV { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF }, { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF }, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index c911e69ff25f..d193923d20a6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1013,6 +1013,7 @@ struct bnxt { #define CHIP_NUM_5745X 0xd730 #define CHIP_NUM_58802 0xd802 +#define CHIP_NUM_58804 0xd804 #define CHIP_NUM_58808 0xd808 #define BNXT_CHIP_NUM_5730X(chip_num) \ @@ -1048,6 +1049,7 @@ struct bnxt { #define BNXT_CHIP_NUM_588XX(chip_num) \ ((chip_num) == CHIP_NUM_58802 || \ + (chip_num) == CHIP_NUM_58804 || \ (chip_num) == CHIP_NUM_58808) struct net_device *dev; -- cgit v1.2.3 From 618784e3ee1870e43e50e1c7922cc123cc050566 Mon Sep 17 00:00:00 2001 From: Rob Miller Date: Thu, 26 Oct 2017 11:51:21 -0400 Subject: bnxt_en: adding PCI ID for SMARTNIC VF support Signed-off-by: Rob Miller Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index a9db4e677d24..d2403f47e102 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -112,6 +112,7 @@ enum board_idx { BCM58808, NETXTREME_E_VF, NETXTREME_C_VF, + NETXTREME_S_VF, }; /* indexed by enum above */ @@ -151,6 +152,7 @@ static const struct { [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" }, [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" }, [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" }, + [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" }, }; static const struct pci_device_id bnxt_pci_tbl[] = { @@ -198,6 +200,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = { { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF }, { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF }, { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF }, + { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF }, #endif { 0 } }; @@ -222,7 +225,8 @@ static struct workqueue_struct *bnxt_pf_wq; static bool bnxt_vf_pciid(enum board_idx idx) { - return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF); + return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF || + idx == NETXTREME_S_VF); } #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) -- cgit v1.2.3 From e0ad8fc5980b362028cfd63ec037f4b491e726c6 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 26 Oct 2017 11:51:22 -0400 Subject: bnxt_en: Check for zero length value in bnxt_get_nvram_item(). Return -EINVAL if the length is zero, rather than proceed to do essentially nothing. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 3cbe771b3352..85be1cb41f1a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1773,6 +1773,9 @@ static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset, dma_addr_t dma_handle; struct hwrm_nvm_read_input req = {0}; + if (!length) + return -EINVAL; + buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle, GFP_KERNEL); if (!buf) { -- cgit v1.2.3 From 431aa1eb20d8ae2674723292adb832b968da868e Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 26 Oct 2017 11:51:23 -0400 Subject: bnxt_en: Get firmware package version one time. The current code retrieves the firmware package version from firmware every time ethtool -i is run. There is no reason to do that as the firmware will not change while the driver is loaded. Get the version once at init time. Also, display the full 4-part firmware version string and remove the less useful interface spec version. Signed-off-by: Michael Chan Signed-off-by: David S.
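With the package version cached at init time, the combined string surfaces through the usual ethtool -i query. A hypothetical example follows (the device name and version numbers are illustrative only; the "/pkg" separator comes from the snprintf() format in the diff below):

	$ ethtool -i eth0
	driver: bnxt_en
	firmware-version: 20.6.151.0/pkg 20.06.04.01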
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4 +-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 30 ++++++++++++----------- 2 files changed, 18 insertions(+), 16 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index d2403f47e102..ffc6c494f6ea 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4892,9 +4892,9 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) resp->hwrm_intf_upd); netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); } - snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d", + snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d", resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld, - resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd); + resp->hwrm_fw_rsvd); bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); if (!bp->hwrm_cmd_timeout) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 85be1cb41f1a..fe93625e0539 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -26,8 +26,6 @@ #define FLASH_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200) #define INSTALL_PACKAGE_TIMEOUT ((HWRM_CMD_TIMEOUT) * 200) -static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen); - static u32 bnxt_get_msglevel(struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); @@ -822,20 +820,10 @@ static void bnxt_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct bnxt *bp = netdev_priv(dev); - char *pkglog; - char *pkgver = NULL; - pkglog = kmalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL); - if (pkglog) - pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH); strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); - if (pkgver && *pkgver != 0 && isdigit(*pkgver)) - snprintf(info->fw_version, sizeof(info->fw_version) - 1, - "%s pkg %s", bp->fw_ver_str, pkgver); - else - strlcpy(info->fw_version, bp->fw_ver_str, - sizeof(info->fw_version)); + strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version)); strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); info->n_stats = bnxt_get_num_stats(bp); info->testinfo_len = bp->num_tests; @@ -843,7 +831,6 @@ static void bnxt_get_drvinfo(struct net_device *dev, info->eedump_len = 0; /* TODO CHIMP FW: reg dump details */ info->regdump_len = 0; - kfree(pkglog); } static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) @@ -2503,8 +2490,23 @@ void bnxt_ethtool_init(struct bnxt *bp) struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_selftest_qlist_input req = {0}; struct bnxt_test_info *test_info; + struct net_device *dev = bp->dev; + char *pkglog; int i, rc; + pkglog = kzalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL); + if (pkglog) { + char *pkgver; + int len; + + pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH); + if (pkgver && *pkgver != 0 && isdigit(*pkgver)) { + len = strlen(bp->fw_ver_str); + snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1, + "/pkg %s", pkgver); + } + kfree(pkglog); + } if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp)) return; -- cgit v1.2.3 From c1a7bdff17247332ecff7f243e42d269b3f74c65 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 26 Oct 2017 11:51:24 -0400 
Subject: bnxt_en: Optimize .ndo_set_mac_address() for VFs. There is no need to call bnxt_approve_mac(), which sends a message to the PF, when the MAC address hasn't changed. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index ffc6c494f6ea..ef5a5e52eb04 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7249,13 +7249,13 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; + if (ether_addr_equal(addr->sa_data, dev->dev_addr)) + return 0; + rc = bnxt_approve_mac(bp, addr->sa_data); if (rc) return rc; - if (ether_addr_equal(addr->sa_data, dev->dev_addr)) - return 0; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); if (netif_running(dev)) { bnxt_close_nic(bp, false, false); -- cgit v1.2.3 From 7eb9bb3a0c7c29741df2249cc3b99f06a7978d61 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 26 Oct 2017 11:51:25 -0400 Subject: bnxt_en: Check maximum supported MTU from firmware. Some NICs have a maximum MTU enforced by management firmware. Set up netdev->max_mtu accordingly. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 17 +++++++++++------ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 + 2 files changed, 12 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index ef5a5e52eb04..285863ba51a3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2835,7 +2835,8 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) if (page_mode) { if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) return -EOPNOTSUPP; - bp->dev->max_mtu = BNXT_MAX_PAGE_MODE_MTU; + bp->dev->max_mtu = + min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU); bp->flags &= ~BNXT_FLAG_AGG_RINGS; bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE; bp->dev->hw_features &= ~NETIF_F_LRO; @@ -2843,7 +2844,7 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) bp->rx_dir = DMA_BIDIRECTIONAL; bp->rx_skb_func = bnxt_rx_page_skb; } else { - bp->dev->max_mtu = BNXT_MAX_MTU; + bp->dev->max_mtu = bp->max_mtu; bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE; bp->rx_dir = DMA_FROM_DEVICE; bp->rx_skb_func = bnxt_rx_skb; @@ -4732,6 +4733,10 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) else bp->br_mode = BRIDGE_MODE_UNDEF; + bp->max_mtu = le16_to_cpu(resp->max_mtu_configured); + if (!bp->max_mtu) + bp->max_mtu = BNXT_MAX_MTU; + func_qcfg_exit: mutex_unlock(&bp->hwrm_cmd_lock); return rc; @@ -8095,10 +8100,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->features |= dev->hw_features | NETIF_F_HIGHDMA; dev->priv_flags |= IFF_UNICAST_FLT; - /* MTU range: 60 - 9500 */ - dev->min_mtu = ETH_ZLEN; - dev->max_mtu = BNXT_MAX_MTU; - #ifdef CONFIG_BNXT_SRIOV init_waitqueue_head(&bp->sriov_cfg_wait); mutex_init(&bp->sriov_lock); @@ -8146,6 +8147,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bnxt_ethtool_init(bp); bnxt_dcb_init(bp); + /* MTU range: 60 - FW defined max */ + dev->min_mtu = ETH_ZLEN; + dev->max_mtu = bp->max_mtu; + rc = bnxt_probe_phy(bp); if (rc) goto
init_err_pci_clean; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index d193923d20a6..5f1fce4a724d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1172,6 +1172,7 @@ struct bnxt { int nr_vnics; u32 rss_hash_cfg; + u16 max_mtu; u8 max_tc; u8 max_lltc; /* lossless TCs */ struct bnxt_queue_info q_info[BNXT_MAX_QUEUE]; -- cgit v1.2.3 From 49f7972fd16407b3d1f03c2d447d2f1e1b95e9ba Mon Sep 17 00:00:00 2001 From: Vasundhara Volam Date: Thu, 26 Oct 2017 11:51:26 -0400 Subject: bnxt_en: Add ethtool reset method This is a firmware internal reset performed after the driver is unloaded. Signed-off-by: Vasundhara Volam Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 37 ++++++++++++++++++++++- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 2 ++ 2 files changed, 38 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index fe93625e0539..fc32df7f979f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1337,7 +1337,6 @@ static int bnxt_firmware_reset(struct net_device *dev, bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); - /* TODO: Support ASAP ChiMP self-reset (e.g. upon PF driver unload) */ /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */ /* (e.g. when firmware isn't already running) */ switch (dir_type) { @@ -1363,6 +1362,10 @@ static int bnxt_firmware_reset(struct net_device *dev, case BNX_DIR_TYPE_BONO_PATCH: req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE; break; + case BNXT_FW_RESET_CHIP: + req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; + req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; + break; default: return -EINVAL; } @@ -2485,6 +2488,37 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest, } } +static int bnxt_reset(struct net_device *dev, u32 *flags) +{ + struct bnxt *bp = netdev_priv(dev); + int rc = 0; + + if (!BNXT_PF(bp)) { + netdev_err(dev, "Reset is not supported from a VF\n"); + return -EOPNOTSUPP; + } + + if (pci_vfs_assigned(bp->pdev)) { + netdev_err(dev, + "Reset not allowed when VFs are assigned to VMs\n"); + return -EBUSY; + } + + if (*flags == ETH_RESET_ALL) { + /* This feature is not supported in older firmware versions */ + if (bp->hwrm_spec_code < 0x10803) + return -EOPNOTSUPP; + + rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP); + if (!rc) + netdev_info(dev, "Reset request successful.
Reload driver to complete reset\n"); + } else { + rc = -EINVAL; + } + + return rc; +} + void bnxt_ethtool_init(struct bnxt *bp) { struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr; @@ -2597,4 +2631,5 @@ const struct ethtool_ops bnxt_ethtool_ops = { .nway_reset = bnxt_nway_reset, .set_phys_id = bnxt_set_phys_id, .self_test = bnxt_self_test, + .reset = bnxt_reset, }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h index f1bc90b6fb5b..ff601b42fcc8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h @@ -34,6 +34,8 @@ struct bnxt_led_cfg { #define BNXT_LED_DFLT_ENABLES(x) \ cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x))) +#define BNXT_FW_RESET_CHIP 0xffff + extern const struct ethtool_ops bnxt_ethtool_ops; u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8); -- cgit v1.2.3 From 18775aa8a91fcd4cd07c722d575b4b852e3624c3 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 26 Oct 2017 11:51:27 -0400 Subject: bnxt_en: Reorganize the coalescing parameters. The current IRQ coalescing logic is a little messy. The ethtool parameters are mapped to hardware parameters in a way that is difficult to understand. The first step is to better organize the parameters by adding the new structure bnxt_coal. The structure is used by both the RX and TX sets of coalescing parameters. Adjust the default coal_ticks to 14 us and 28 us for RX and TX. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 60 ++++++++++++++--------- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 21 +++++--- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 50 +++++++++++-------- 3 files changed, 79 insertions(+), 52 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 285863ba51a3..52fff1605172 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4569,34 +4569,31 @@ int bnxt_hwrm_set_coal(struct bnxt *bp) /* Each rx completion (2 records) should be DMAed immediately. * DMA 1/4 of the completion buffers at a time. */ - max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2); + max_buf = min_t(u16, bp->rx_coal.coal_bufs / 4, 2); /* max_buf must not be zero */ max_buf = clamp_t(u16, max_buf, 1, 63); - max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63); - buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks); + max_buf_irq = clamp_t(u16, bp->rx_coal.coal_bufs_irq, 1, 63); + buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal.coal_ticks); /* buf timer set to 1/4 of interrupt timer */ buf_tmr = max_t(u16, buf_tmr / 4, 1); - buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq); + buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal.coal_ticks_irq); buf_tmr_irq = max_t(u16, buf_tmr_irq, 1); flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; - /* RING_IDLE generates more IRQs for lower latency. Enable it only - * if coal_ticks is less than 25 us. 
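 * (With this patch the 25 us cut-off is no longer hard-coded: the check below
 *  compares coal_ticks against the new bnxt_coal.idle_thresh field, which
 *  bnxt_init_dflt_coal() seeds with the same value, 25.)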
- */ - if (bp->rx_coal_ticks < 25) + if (bp->rx_coal.coal_ticks < bp->rx_coal.idle_thresh) flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf, buf_tmr_irq << 16 | buf_tmr, flags, &req_rx); /* max_buf must not be zero */ - max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63); - max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63); - buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks); + max_buf = clamp_t(u16, bp->tx_coal.coal_bufs, 1, 63); + max_buf_irq = clamp_t(u16, bp->tx_coal.coal_bufs_irq, 1, 63); + buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal.coal_ticks); /* buf timer set to 1/4 of interrupt timer */ buf_tmr = max_t(u16, buf_tmr / 4, 1); - buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq); + buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal.coal_ticks_irq); buf_tmr_irq = max_t(u16, buf_tmr_irq, 1); flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; @@ -7146,6 +7143,32 @@ static void bnxt_cleanup_pci(struct bnxt *bp) pci_disable_device(bp->pdev); } +static void bnxt_init_dflt_coal(struct bnxt *bp) +{ + struct bnxt_coal *coal; + + /* Tick values in micro seconds. + * 1 coal_buf x bufs_per_record = 1 completion record. + */ + coal = &bp->rx_coal; + coal->coal_ticks = 14; + coal->coal_bufs = 30; + coal->coal_ticks_irq = 1; + coal->coal_bufs_irq = 2; + coal->idle_thresh = 25; + coal->bufs_per_record = 2; + coal->budget = 64; /* NAPI budget */ + + coal = &bp->tx_coal; + coal->coal_ticks = 28; + coal->coal_bufs = 30; + coal->coal_ticks_irq = 2; + coal->coal_bufs_irq = 2; + coal->bufs_per_record = 1; + + bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; +} + static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) { int rc; @@ -7214,18 +7237,7 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; - /* tick values in micro seconds */ - bp->rx_coal_ticks = 12; - bp->rx_coal_bufs = 30; - bp->rx_coal_ticks_irq = 1; - bp->rx_coal_bufs_irq = 2; - - bp->tx_coal_ticks = 25; - bp->tx_coal_bufs = 30; - bp->tx_coal_ticks_irq = 2; - bp->tx_coal_bufs_irq = 2; - - bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS; + bnxt_init_dflt_coal(bp); setup_timer(&bp->timer, bnxt_timer, (unsigned long)bp); bp->current_interval = BNXT_TIMER_INTERVAL; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 5f1fce4a724d..2188f1606209 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -944,6 +944,17 @@ struct bnxt_test_info { #define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014 #define BNXT_CAG_REG_BASE 0x300000 +struct bnxt_coal { + u16 coal_ticks; + u16 coal_ticks_irq; + u16 coal_bufs; + u16 coal_bufs_irq; + /* RING_IDLE enabled when coal ticks < idle_thresh */ + u16 idle_thresh; + u8 bufs_per_record; + u8 budget; +}; + struct bnxt_tc_info { bool enabled; @@ -1235,14 +1246,8 @@ struct bnxt { u8 port_count; u16 br_mode; - u16 rx_coal_ticks; - u16 rx_coal_ticks_irq; - u16 rx_coal_bufs; - u16 rx_coal_bufs_irq; - u16 tx_coal_ticks; - u16 tx_coal_ticks_irq; - u16 tx_coal_bufs; - u16 tx_coal_bufs_irq; + struct bnxt_coal rx_coal; + struct bnxt_coal tx_coal; #define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index fc32df7f979f..5cd1a501c62b 100644 --- 
a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -44,19 +44,24 @@ static int bnxt_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) { struct bnxt *bp = netdev_priv(dev); + struct bnxt_coal *hw_coal; + u16 mult; memset(coal, 0, sizeof(*coal)); - coal->rx_coalesce_usecs = bp->rx_coal_ticks; - /* 2 completion records per rx packet */ - coal->rx_max_coalesced_frames = bp->rx_coal_bufs / 2; - coal->rx_coalesce_usecs_irq = bp->rx_coal_ticks_irq; - coal->rx_max_coalesced_frames_irq = bp->rx_coal_bufs_irq / 2; + hw_coal = &bp->rx_coal; + mult = hw_coal->bufs_per_record; + coal->rx_coalesce_usecs = hw_coal->coal_ticks; + coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult; + coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq; + coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult; - coal->tx_coalesce_usecs = bp->tx_coal_ticks; - coal->tx_max_coalesced_frames = bp->tx_coal_bufs; - coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq; - coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq; + hw_coal = &bp->tx_coal; + mult = hw_coal->bufs_per_record; + coal->tx_coalesce_usecs = hw_coal->coal_ticks; + coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult; + coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq; + coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult; coal->stats_block_coalesce_usecs = bp->stats_coal_ticks; @@ -68,18 +73,23 @@ static int bnxt_set_coalesce(struct net_device *dev, { struct bnxt *bp = netdev_priv(dev); bool update_stats = false; + struct bnxt_coal *hw_coal; int rc = 0; - - bp->rx_coal_ticks = coal->rx_coalesce_usecs; - /* 2 completion records per rx packet */ - bp->rx_coal_bufs = coal->rx_max_coalesced_frames * 2; - bp->rx_coal_ticks_irq = coal->rx_coalesce_usecs_irq; - bp->rx_coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2; - - bp->tx_coal_ticks = coal->tx_coalesce_usecs; - bp->tx_coal_bufs = coal->tx_max_coalesced_frames; - bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq; - bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq; + u16 mult; + + hw_coal = &bp->rx_coal; + mult = hw_coal->bufs_per_record; + hw_coal->coal_ticks = coal->rx_coalesce_usecs; + hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult; + hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq; + hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult; + + hw_coal = &bp->tx_coal; + mult = hw_coal->bufs_per_record; + hw_coal->coal_ticks = coal->tx_coalesce_usecs; + hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult; + hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq; + hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult; if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) { u32 stats_ticks = coal->stats_block_coalesce_usecs; -- cgit v1.2.3 From f8503969d27b2b26ff0adbce4b7d7cf4ba5e43c2 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Thu, 26 Oct 2017 11:51:28 -0400 Subject: bnxt_en: Refactor and simplify coalescing code. The mapping of the ethtool coalescing parameters to hardware parameters is now done in bnxt_hwrm_set_coal_params(). The same function can handle both RX and TX settings. The code is now clearer. Some adjustments have been made to get better hardware settings. The coal_frames setting is now accurately set in hardware. The max_timer is set to the coal_ticks value. Signed-off-by: Michael Chan Signed-off-by: David S.
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 81 +++++++++++++------------------ 1 file changed, 35 insertions(+), 46 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 52fff1605172..c76729122143 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4537,19 +4537,42 @@ static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings) return 0; } -static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs, - u32 buf_tmrs, u16 flags, +static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal, struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) { + u16 val, tmr, max, flags; + + max = hw_coal->bufs_per_record * 128; + if (hw_coal->budget) + max = hw_coal->bufs_per_record * hw_coal->budget; + + val = clamp_t(u16, hw_coal->coal_bufs, 1, max); + req->num_cmpl_aggr_int = cpu_to_le16(val); + req->num_cmpl_dma_aggr = cpu_to_le16(val); + + val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, max); + req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val); + + tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks); + tmr = max_t(u16, tmr, 1); + req->int_lat_tmr_max = cpu_to_le16(tmr); + + /* min timer set to 1/2 of interrupt timer */ + val = tmr / 2; + req->int_lat_tmr_min = cpu_to_le16(val); + + /* buf timer set to 1/4 of interrupt timer */ + val = max_t(u16, tmr / 4, 1); + req->cmpl_aggr_dma_tmr = cpu_to_le16(val); + + tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks_irq); + tmr = max_t(u16, tmr, 1); + req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr); + + flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; + if (hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh) + flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; req->flags = cpu_to_le16(flags); - req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs); - req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16); - req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs); - req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16); - /* Minimum time between 2 interrupts set to buf_tmr x 2 */ - req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2); - req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4); - req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4); } int bnxt_hwrm_set_coal(struct bnxt *bp) @@ -4557,48 +4580,14 @@ int bnxt_hwrm_set_coal(struct bnxt *bp) int i, rc = 0; struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}, req_tx = {0}, *req; - u16 max_buf, max_buf_irq; - u16 buf_tmr, buf_tmr_irq; - u32 flags; bnxt_hwrm_cmd_hdr_init(bp, &req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); bnxt_hwrm_cmd_hdr_init(bp, &req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); - /* Each rx completion (2 records) should be DMAed immediately. - * DMA 1/4 of the completion buffers at a time. 
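 * (In the refactored bnxt_hwrm_set_coal_params() above, this open-coded
 *  divide-by-4 and clamp-to-63 arithmetic is replaced by a limit derived
 *  from bufs_per_record and the NAPI budget.)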
- */ - max_buf = min_t(u16, bp->rx_coal.coal_bufs / 4, 2); - /* max_buf must not be zero */ - max_buf = clamp_t(u16, max_buf, 1, 63); - max_buf_irq = clamp_t(u16, bp->rx_coal.coal_bufs_irq, 1, 63); - buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal.coal_ticks); - /* buf timer set to 1/4 of interrupt timer */ - buf_tmr = max_t(u16, buf_tmr / 4, 1); - buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal.coal_ticks_irq); - buf_tmr_irq = max_t(u16, buf_tmr_irq, 1); - - flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; - - if (bp->rx_coal.coal_ticks < bp->rx_coal.idle_thresh) - flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; - - bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf, - buf_tmr_irq << 16 | buf_tmr, flags, &req_rx); - - /* max_buf must not be zero */ - max_buf = clamp_t(u16, bp->tx_coal.coal_bufs, 1, 63); - max_buf_irq = clamp_t(u16, bp->tx_coal.coal_bufs_irq, 1, 63); - buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal.coal_ticks); - /* buf timer set to 1/4 of interrupt timer */ - buf_tmr = max_t(u16, buf_tmr / 4, 1); - buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal.coal_ticks_irq); - buf_tmr_irq = max_t(u16, buf_tmr_irq, 1); - - flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; - bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf, - buf_tmr_irq << 16 | buf_tmr, flags, &req_tx); + bnxt_hwrm_set_coal_params(&bp->rx_coal, &req_rx); + bnxt_hwrm_set_coal_params(&bp->tx_coal, &req_tx); mutex_lock(&bp->hwrm_cmd_lock); for (i = 0; i < bp->cp_nr_rings; i++) { -- cgit v1.2.3 From 8c95f773b4a367f7b9bcca7ab5f85675cfc812e9 Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Thu, 26 Oct 2017 11:51:29 -0400 Subject: bnxt_en: add support for Flower based vxlan encap/decap offload This patch adds IPv4 vxlan encap/decap action support to TC-flower offload. For vxlan encap, the driver maintains a tunnel encap hash-table. When a new flow with a tunnel encap action arrives, this table is looked up; if an encap entry exists, it uses the already programmed encap_record_handle as the tunnel_handle in the hwrm_cfa_flow_alloc cmd. Else, a new encap node is added and the L2 header fields are queried via a route lookup. hwrm_cfa_encap_record_alloc cmd is used to create a new encap record and the encap_record_handle is used as the tunnel_handle while adding the flow. For vxlan decap, the driver maintains a tunnel decap hash-table. When a new flow with a tunnel decap action arrives, this table is looked up; if a decap entry exists, it uses the already programmed decap_filter_handle as the tunnel_handle in the hwrm_cfa_flow_alloc cmd. Else, a new decap node is added and a decap_filter_handle is alloc'd via the hwrm_cfa_decap_filter_alloc cmd. This handle is used as the tunnel_handle while adding the flow. The code to issue the HWRM FW cmds is introduced in a follow-up patch. Signed-off-by: Sathya Perla Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 9 + drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 566 +++++++++++++++++++++- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h | 66 ++- 4 files changed, 631 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 2188f1606209..d88d864db7d4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -965,6 +965,15 @@ struct bnxt_tc_info { /* hash table to store L2 keys of TC flows */ struct rhashtable l2_table; struct rhashtable_params l2_ht_params; + /* hash table to store L2 keys for TC tunnel decap */ + struct rhashtable decap_l2_table; + struct rhashtable_params decap_l2_ht_params; + /* hash table to store tunnel decap entries */ + struct rhashtable decap_table; + struct rhashtable_params decap_ht_params; + /* hash table to store tunnel encap entries */ + struct rhashtable encap_table; + struct rhashtable_params encap_ht_params; /* lock to atomically add/del an l2 node when a flow is * added or deleted. diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index f3f6aa868d6c..402fa32f7a88 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -29,7 +29,7 @@ int bnxt_dl_register(struct bnxt *bp) if (!pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) return 0; - if (bp->hwrm_spec_code < 0x10800) { + if (bp->hwrm_spec_code < 0x10803) { netdev_warn(bp->dev, "Firmware does not support SR-IOV E-Switch SWITCHDEV mode.\n"); return -ENOTSUPP; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index a9cb653b4d29..f14edc9c1412 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "bnxt_hsi.h" #include "bnxt.h" @@ -89,6 +90,23 @@ static void bnxt_tc_parse_vlan(struct bnxt *bp, } } +static int bnxt_tc_parse_tunnel_set(struct bnxt *bp, + struct bnxt_tc_actions *actions, + const struct tc_action *tc_act) +{ + struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act); + struct ip_tunnel_key *tun_key = &tun_info->key; + + if (ip_tunnel_info_af(tun_info) != AF_INET) { + netdev_info(bp->dev, "only IPv4 tunnel-encap is supported"); + return -EOPNOTSUPP; + } + + actions->tun_encap_key = *tun_key; + actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP; + return 0; +} + static int bnxt_tc_parse_actions(struct bnxt *bp, struct bnxt_tc_actions *actions, struct tcf_exts *tc_exts) @@ -123,9 +141,35 @@ static int bnxt_tc_parse_actions(struct bnxt *bp, bnxt_tc_parse_vlan(bp, actions, tc_act); continue; } + + /* Tunnel encap */ + if (is_tcf_tunnel_set(tc_act)) { + rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act); + if (rc) + return rc; + continue; + } + + /* Tunnel decap */ + if (is_tcf_tunnel_release(tc_act)) { + actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP; + continue; + } } - return 0; + if (rc) + return rc; + + /* Tunnel encap/decap action must be accompanied by a redirect action */ + if ((actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP || + actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) && + !(actions->flags & BNXT_TC_ACTION_FLAG_FWD)) { + netdev_info(bp->dev, + "error: no redir action along with encap/decap"); + return -EINVAL; + 
} + + return rc; } #define GET_KEY(flow_cmd, key_type) \ @@ -252,6 +296,54 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, flow->l4_mask.icmp.code = mask->code; } + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) { + struct flow_dissector_key_control *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL); + + addr_type = key->addr_type; + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { + struct flow_dissector_key_ipv4_addrs *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS); + struct flow_dissector_key_ipv4_addrs *mask = + GET_MASK(tc_flow_cmd, + FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS); + + flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS; + flow->tun_key.u.ipv4.dst = key->dst; + flow->tun_mask.u.ipv4.dst = mask->dst; + flow->tun_key.u.ipv4.src = key->src; + flow->tun_mask.u.ipv4.src = mask->src; + } else if (dissector_uses_key(dissector, + FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) { + return -EOPNOTSUPP; + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_dissector_key_keyid *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID); + struct flow_dissector_key_keyid *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID); + + flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID; + flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid); + flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid); + } + + if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) { + struct flow_dissector_key_ports *key = + GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS); + struct flow_dissector_key_ports *mask = + GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS); + + flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS; + flow->tun_key.tp_dst = key->dst; + flow->tun_mask.tp_dst = mask->dst; + flow->tun_key.tp_src = key->src; + flow->tun_mask.tp_src = mask->src; + } + return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts); } @@ -293,7 +385,8 @@ static bool is_wildcard(void *mask, int len) } static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, - __le16 ref_flow_handle, __le16 *flow_handle) + __le16 ref_flow_handle, + __le32 tunnel_handle, __le16 *flow_handle) { struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_tc_actions *actions = &flow->actions; @@ -307,6 +400,14 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, req.src_fid = cpu_to_le16(flow->src_fid); req.ref_flow_handle = ref_flow_handle; + + if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP || + actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) { + req.tunnel_handle = tunnel_handle; + flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL; + action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL; + } + req.ethertype = flow->l2_key.ether_type; req.ip_proto = flow->l4_key.ip_proto; @@ -478,6 +579,35 @@ static int bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, return rc; } +static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp, + struct bnxt_tc_flow *flow, + struct bnxt_tc_l2_key *l2_info, + __le32 ref_decap_handle, + __le32 *decap_filter_handle) +{ + return 0; +} + +static int hwrm_cfa_decap_filter_free(struct bnxt *bp, + __le32 decap_filter_handle) +{ + return 0; +} + +static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, + struct ip_tunnel_key *encap_key, + struct bnxt_tc_l2_key *l2_info, + __le32 *encap_record_handle) +{ + return 0; +} + +static int hwrm_cfa_encap_record_free(struct bnxt *bp, + __le32 encap_record_handle) +{ + return 0; +} + static int 
bnxt_tc_put_l2_node(struct bnxt *bp, struct bnxt_tc_flow_node *flow_node) { @@ -519,7 +649,7 @@ bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table, rc = rhashtable_insert_fast(l2_table, &l2_node->node, ht_params); if (rc) { - kfree(l2_node); + kfree_rcu(l2_node, rcu); netdev_err(bp->dev, "Error: %s: rhashtable_insert_fast: %d", __func__, rc); @@ -588,6 +718,376 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow) return true; } +/* Returns the final refcount of the node on success + * or a -ve error code on failure + */ +static int bnxt_tc_put_tunnel_node(struct bnxt *bp, + struct rhashtable *tunnel_table, + struct rhashtable_params *ht_params, + struct bnxt_tc_tunnel_node *tunnel_node) +{ + int rc; + + if (--tunnel_node->refcount == 0) { + rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node, + *ht_params); + if (rc) { + netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc); + rc = -1; + } + kfree_rcu(tunnel_node, rcu); + return rc; + } else { + return tunnel_node->refcount; + } +} + +/* Get (or add) either encap or decap tunnel node from/to the supplied + * hash table. + */ +static struct bnxt_tc_tunnel_node * +bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table, + struct rhashtable_params *ht_params, + struct ip_tunnel_key *tun_key) +{ + struct bnxt_tc_tunnel_node *tunnel_node; + int rc; + + tunnel_node = rhashtable_lookup_fast(tunnel_table, tun_key, *ht_params); + if (!tunnel_node) { + tunnel_node = kzalloc(sizeof(*tunnel_node), GFP_KERNEL); + if (!tunnel_node) { + rc = -ENOMEM; + goto err; + } + + tunnel_node->key = *tun_key; + tunnel_node->tunnel_handle = INVALID_TUNNEL_HANDLE; + rc = rhashtable_insert_fast(tunnel_table, &tunnel_node->node, + *ht_params); + if (rc) { + kfree_rcu(tunnel_node, rcu); + goto err; + } + } + tunnel_node->refcount++; + return tunnel_node; +err: + netdev_info(bp->dev, "error rc=%d", rc); + return NULL; +} + +static int bnxt_tc_get_ref_decap_handle(struct bnxt *bp, + struct bnxt_tc_flow *flow, + struct bnxt_tc_l2_key *l2_key, + struct bnxt_tc_flow_node *flow_node, + __le32 *ref_decap_handle) +{ + struct bnxt_tc_info *tc_info = &bp->tc_info; + struct bnxt_tc_flow_node *ref_flow_node; + struct bnxt_tc_l2_node *decap_l2_node; + + decap_l2_node = bnxt_tc_get_l2_node(bp, &tc_info->decap_l2_table, + tc_info->decap_l2_ht_params, + l2_key); + if (!decap_l2_node) + return -1; + + /* If any other flow is using this decap_l2_node, use it's decap_handle + * as the ref_decap_handle + */ + if (decap_l2_node->refcount > 0) { + ref_flow_node = + list_first_entry(&decap_l2_node->common_l2_flows, + struct bnxt_tc_flow_node, + decap_l2_list_node); + *ref_decap_handle = ref_flow_node->decap_node->tunnel_handle; + } else { + *ref_decap_handle = INVALID_TUNNEL_HANDLE; + } + + /* Insert the l2_node into the flow_node so that subsequent flows + * with a matching decap l2 key can use the decap_filter_handle of + * this flow as their ref_decap_handle + */ + flow_node->decap_l2_node = decap_l2_node; + list_add(&flow_node->decap_l2_list_node, + &decap_l2_node->common_l2_flows); + decap_l2_node->refcount++; + return 0; +} + +static void bnxt_tc_put_decap_l2_node(struct bnxt *bp, + struct bnxt_tc_flow_node *flow_node) +{ + struct bnxt_tc_l2_node *decap_l2_node = flow_node->decap_l2_node; + struct bnxt_tc_info *tc_info = &bp->tc_info; + int rc; + + /* remove flow_node from the decap L2 sharing flow list */ + list_del(&flow_node->decap_l2_list_node); + if (--decap_l2_node->refcount == 0) { + rc = 
rhashtable_remove_fast(&tc_info->decap_l2_table, + &decap_l2_node->node, + tc_info->decap_l2_ht_params); + if (rc) + netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc); + kfree_rcu(decap_l2_node, rcu); + } +} + +static void bnxt_tc_put_decap_handle(struct bnxt *bp, + struct bnxt_tc_flow_node *flow_node) +{ + __le32 decap_handle = flow_node->decap_node->tunnel_handle; + struct bnxt_tc_info *tc_info = &bp->tc_info; + int rc; + + if (flow_node->decap_l2_node) + bnxt_tc_put_decap_l2_node(bp, flow_node); + + rc = bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table, + &tc_info->decap_ht_params, + flow_node->decap_node); + if (!rc && decap_handle != INVALID_TUNNEL_HANDLE) + hwrm_cfa_decap_filter_free(bp, decap_handle); +} + +static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp, + struct ip_tunnel_key *tun_key, + struct bnxt_tc_l2_key *l2_info, + struct net_device *real_dst_dev) +{ + struct flowi4 flow = { {0} }; + struct net_device *dst_dev; + struct neighbour *nbr; + struct rtable *rt; + int rc; + + flow.flowi4_proto = IPPROTO_UDP; + flow.fl4_dport = tun_key->tp_dst; + flow.daddr = tun_key->u.ipv4.dst; + + rt = ip_route_output_key(dev_net(real_dst_dev), &flow); + if (IS_ERR(rt)) { + netdev_info(bp->dev, "no route to %pI4b", &flow.daddr); + return -EOPNOTSUPP; + } + + /* The route must either point to the real_dst_dev or a dst_dev that + * uses the real_dst_dev. + */ + dst_dev = rt->dst.dev; + if (is_vlan_dev(dst_dev)) { + struct vlan_dev_priv *vlan = vlan_dev_priv(dst_dev); + + if (vlan->real_dev != real_dst_dev) { + netdev_info(bp->dev, + "dst_dev(%s) doesn't use PF-if(%s)", + netdev_name(dst_dev), + netdev_name(real_dst_dev)); + rc = -EOPNOTSUPP; + goto put_rt; + } + l2_info->inner_vlan_tci = htons(vlan->vlan_id); + l2_info->inner_vlan_tpid = vlan->vlan_proto; + l2_info->num_vlans = 1; + } else if (dst_dev != real_dst_dev) { + netdev_info(bp->dev, + "dst_dev(%s) for %pI4b is not PF-if(%s)", + netdev_name(dst_dev), &flow.daddr, + netdev_name(real_dst_dev)); + rc = -EOPNOTSUPP; + goto put_rt; + } + + nbr = dst_neigh_lookup(&rt->dst, &flow.daddr); + if (!nbr) { + netdev_info(bp->dev, "can't lookup neighbor for %pI4b", + &flow.daddr); + rc = -EOPNOTSUPP; + goto put_rt; + } + + tun_key->u.ipv4.src = flow.saddr; + tun_key->ttl = ip4_dst_hoplimit(&rt->dst); + neigh_ha_snapshot(l2_info->dmac, nbr, dst_dev); + ether_addr_copy(l2_info->smac, dst_dev->dev_addr); + neigh_release(nbr); + ip_rt_put(rt); + + return 0; +put_rt: + ip_rt_put(rt); + return rc; +} + +static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow, + struct bnxt_tc_flow_node *flow_node, + __le32 *decap_filter_handle) +{ + struct ip_tunnel_key *decap_key = &flow->tun_key; + struct bnxt_tc_info *tc_info = &bp->tc_info; + struct bnxt_tc_l2_key l2_info = { {0} }; + struct bnxt_tc_tunnel_node *decap_node; + struct ip_tunnel_key tun_key = { 0 }; + struct bnxt_tc_l2_key *decap_l2_info; + __le32 ref_decap_handle; + int rc; + + /* Check if there's another flow using the same tunnel decap. 
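 * (the lookup below is keyed by the full ip_tunnel_key, as set up in
 *  bnxt_tc_tunnel_ht_params)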
+ * If not, add this tunnel to the table and resolve the other + * tunnel header fileds + */ + decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table, + &tc_info->decap_ht_params, + decap_key); + if (!decap_node) + return -ENOMEM; + + flow_node->decap_node = decap_node; + + if (decap_node->tunnel_handle != INVALID_TUNNEL_HANDLE) + goto done; + + /* Resolve the L2 fields for tunnel decap + * Resolve the route for remote vtep (saddr) of the decap key + * Find it's next-hop mac addrs + */ + tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src; + tun_key.tp_dst = flow->tun_key.tp_dst; + rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info, bp->dev); + if (rc) + goto put_decap; + + decap_key->ttl = tun_key.ttl; + decap_l2_info = &decap_node->l2_info; + ether_addr_copy(decap_l2_info->dmac, l2_info.smac); + ether_addr_copy(decap_l2_info->smac, l2_info.dmac); + if (l2_info.num_vlans) { + decap_l2_info->num_vlans = l2_info.num_vlans; + decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid; + decap_l2_info->inner_vlan_tci = l2_info.inner_vlan_tci; + } + flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS; + + /* For getting a decap_filter_handle we first need to check if + * there are any other decap flows that share the same tunnel L2 + * key and if so, pass that flow's decap_filter_handle as the + * ref_decap_handle for this flow. + */ + rc = bnxt_tc_get_ref_decap_handle(bp, flow, decap_l2_info, flow_node, + &ref_decap_handle); + if (rc) + goto put_decap; + + /* Issue the hwrm cmd to allocate a decap filter handle */ + rc = hwrm_cfa_decap_filter_alloc(bp, flow, decap_l2_info, + ref_decap_handle, + &decap_node->tunnel_handle); + if (rc) + goto put_decap_l2; + +done: + *decap_filter_handle = decap_node->tunnel_handle; + return 0; + +put_decap_l2: + bnxt_tc_put_decap_l2_node(bp, flow_node); +put_decap: + bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table, + &tc_info->decap_ht_params, + flow_node->decap_node); + return rc; +} + +static void bnxt_tc_put_encap_handle(struct bnxt *bp, + struct bnxt_tc_tunnel_node *encap_node) +{ + __le32 encap_handle = encap_node->tunnel_handle; + struct bnxt_tc_info *tc_info = &bp->tc_info; + int rc; + + rc = bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table, + &tc_info->encap_ht_params, encap_node); + if (!rc && encap_handle != INVALID_TUNNEL_HANDLE) + hwrm_cfa_encap_record_free(bp, encap_handle); +} + +/* Lookup the tunnel encap table and check if there's an encap_handle + * alloc'd already. + * If not, query L2 info via a route lookup and issue an encap_record_alloc + * cmd to FW. + */ +static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow, + struct bnxt_tc_flow_node *flow_node, + __le32 *encap_handle) +{ + struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key; + struct bnxt_tc_info *tc_info = &bp->tc_info; + struct bnxt_tc_tunnel_node *encap_node; + int rc; + + /* Check if there's another flow using the same tunnel encap. 
+ * If not, add this tunnel to the table and resolve the other + * tunnel header fileds + */ + encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table, + &tc_info->encap_ht_params, + encap_key); + if (!encap_node) + return -ENOMEM; + + flow_node->encap_node = encap_node; + + if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE) + goto done; + + rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info, + flow->actions.dst_dev); + if (rc) + goto put_encap; + + /* Allocate a new tunnel encap record */ + rc = hwrm_cfa_encap_record_alloc(bp, encap_key, &encap_node->l2_info, + &encap_node->tunnel_handle); + if (rc) + goto put_encap; + +done: + *encap_handle = encap_node->tunnel_handle; + return 0; + +put_encap: + bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table, + &tc_info->encap_ht_params, encap_node); + return rc; +} + +static void bnxt_tc_put_tunnel_handle(struct bnxt *bp, + struct bnxt_tc_flow *flow, + struct bnxt_tc_flow_node *flow_node) +{ + if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) + bnxt_tc_put_decap_handle(bp, flow_node); + else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) + bnxt_tc_put_encap_handle(bp, flow_node->encap_node); +} + +static int bnxt_tc_get_tunnel_handle(struct bnxt *bp, + struct bnxt_tc_flow *flow, + struct bnxt_tc_flow_node *flow_node, + __le32 *tunnel_handle) +{ + if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) + return bnxt_tc_get_decap_handle(bp, flow, flow_node, + tunnel_handle); + else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) + return bnxt_tc_get_encap_handle(bp, flow, flow_node, + tunnel_handle); + else + return 0; +} static int __bnxt_tc_del_flow(struct bnxt *bp, struct bnxt_tc_flow_node *flow_node) { @@ -599,6 +1099,9 @@ static int __bnxt_tc_del_flow(struct bnxt *bp, mutex_lock(&tc_info->lock); + /* release references to any tunnel encap/decap nodes */ + bnxt_tc_put_tunnel_handle(bp, &flow_node->flow, flow_node); + /* release reference to l2 node */ bnxt_tc_put_l2_node(bp, flow_node); @@ -633,6 +1136,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, struct bnxt_tc_flow_node *new_node, *old_node; struct bnxt_tc_info *tc_info = &bp->tc_info; struct bnxt_tc_flow *flow; + __le32 tunnel_handle = 0; __le16 ref_flow_handle; int rc; @@ -670,11 +1174,16 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, if (rc) goto unlock; + /* If the flow involves tunnel encap/decap, get tunnel_handle */ + rc = bnxt_tc_get_tunnel_handle(bp, flow, new_node, &tunnel_handle); + if (rc) + goto put_l2; + /* send HWRM cmd to alloc the flow */ rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle, - &new_node->flow_handle); + tunnel_handle, &new_node->flow_handle); if (rc) - goto put_l2; + goto put_tunnel; /* add new flow to flow-table */ rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node, @@ -687,12 +1196,14 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, hwrm_flow_free: bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle); +put_tunnel: + bnxt_tc_put_tunnel_handle(bp, flow, new_node); put_l2: bnxt_tc_put_l2_node(bp, new_node); unlock: mutex_unlock(&tc_info->lock); free_node: - kfree(new_node); + kfree_rcu(new_node, rcu); done: netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d", __func__, tc_flow_cmd->cookie, rc); @@ -781,6 +1292,20 @@ static const struct rhashtable_params bnxt_tc_l2_ht_params = { .automatic_shrinking = true }; +static const struct rhashtable_params bnxt_tc_decap_l2_ht_params = { + .head_offset = offsetof(struct bnxt_tc_l2_node, node), + 
.key_offset = offsetof(struct bnxt_tc_l2_node, key), + .key_len = BNXT_TC_L2_KEY_LEN, + .automatic_shrinking = true +}; + +static const struct rhashtable_params bnxt_tc_tunnel_ht_params = { + .head_offset = offsetof(struct bnxt_tc_tunnel_node, node), + .key_offset = offsetof(struct bnxt_tc_tunnel_node, key), + .key_len = sizeof(struct ip_tunnel_key), + .automatic_shrinking = true +}; + /* convert counter width in bits to a mask */ #define mask(width) ((u64)~0 >> (64 - (width))) @@ -789,7 +1314,7 @@ int bnxt_init_tc(struct bnxt *bp) struct bnxt_tc_info *tc_info = &bp->tc_info; int rc; - if (bp->hwrm_spec_code < 0x10800) { + if (bp->hwrm_spec_code < 0x10803) { netdev_warn(bp->dev, "Firmware does not support TC flower offload.\n"); return -ENOTSUPP; @@ -810,11 +1335,35 @@ int bnxt_init_tc(struct bnxt *bp) if (rc) goto destroy_flow_table; + tc_info->decap_l2_ht_params = bnxt_tc_decap_l2_ht_params; + rc = rhashtable_init(&tc_info->decap_l2_table, + &tc_info->decap_l2_ht_params); + if (rc) + goto destroy_l2_table; + + tc_info->decap_ht_params = bnxt_tc_tunnel_ht_params; + rc = rhashtable_init(&tc_info->decap_table, + &tc_info->decap_ht_params); + if (rc) + goto destroy_decap_l2_table; + + tc_info->encap_ht_params = bnxt_tc_tunnel_ht_params; + rc = rhashtable_init(&tc_info->encap_table, + &tc_info->encap_ht_params); + if (rc) + goto destroy_decap_table; + tc_info->enabled = true; bp->dev->hw_features |= NETIF_F_HW_TC; bp->dev->features |= NETIF_F_HW_TC; return 0; +destroy_decap_table: + rhashtable_destroy(&tc_info->decap_table); +destroy_decap_l2_table: + rhashtable_destroy(&tc_info->decap_l2_table); +destroy_l2_table: + rhashtable_destroy(&tc_info->l2_table); destroy_flow_table: rhashtable_destroy(&tc_info->flow_table); return rc; @@ -829,4 +1378,7 @@ void bnxt_shutdown_tc(struct bnxt *bp) rhashtable_destroy(&tc_info->flow_table); rhashtable_destroy(&tc_info->l2_table); + rhashtable_destroy(&tc_info->decap_l2_table); + rhashtable_destroy(&tc_info->decap_table); + rhashtable_destroy(&tc_info->encap_table); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h index 6c4c1ed279ef..2beccd41c886 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h @@ -12,6 +12,8 @@ #ifdef CONFIG_BNXT_FLOWER_OFFLOAD +#include + /* Structs used for storing the filter/actions of the TC cmd. 
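 * (Extended by this patch with tunnel state: an ip_tunnel_key in both the
 *  flow match fields and the actions, plus the bnxt_tc_tunnel_node entries
 *  defined further below.)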
*/ struct bnxt_tc_l2_key { @@ -50,6 +52,13 @@ struct bnxt_tc_l4_key { }; }; +struct bnxt_tc_tunnel_key { + struct bnxt_tc_l2_key l2; + struct bnxt_tc_l3_key l3; + struct bnxt_tc_l4_key l4; + __be32 id; +}; + struct bnxt_tc_actions { u32 flags; #define BNXT_TC_ACTION_FLAG_FWD BIT(0) @@ -57,11 +66,16 @@ struct bnxt_tc_actions { #define BNXT_TC_ACTION_FLAG_PUSH_VLAN BIT(3) #define BNXT_TC_ACTION_FLAG_POP_VLAN BIT(4) #define BNXT_TC_ACTION_FLAG_DROP BIT(5) +#define BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP BIT(6) +#define BNXT_TC_ACTION_FLAG_TUNNEL_DECAP BIT(7) u16 dst_fid; struct net_device *dst_dev; __be16 push_vlan_tpid; __be16 push_vlan_tci; + + /* tunnel encap */ + struct ip_tunnel_key tun_encap_key; }; struct bnxt_tc_flow_stats { @@ -76,6 +90,16 @@ struct bnxt_tc_flow { #define BNXT_TC_FLOW_FLAGS_IPV6_ADDRS BIT(3) #define BNXT_TC_FLOW_FLAGS_PORTS BIT(4) #define BNXT_TC_FLOW_FLAGS_ICMP BIT(5) +#define BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS BIT(6) +#define BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS BIT(7) +#define BNXT_TC_FLOW_FLAGS_TUNL_IPV6_ADDRS BIT(8) +#define BNXT_TC_FLOW_FLAGS_TUNL_PORTS BIT(9) +#define BNXT_TC_FLOW_FLAGS_TUNL_ID BIT(10) +#define BNXT_TC_FLOW_FLAGS_TUNNEL (BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS | \ + BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS | \ + BNXT_TC_FLOW_FLAGS_TUNL_IPV6_ADDRS |\ + BNXT_TC_FLOW_FLAGS_TUNL_PORTS |\ + BNXT_TC_FLOW_FLAGS_TUNL_ID) /* flow applicable to pkts ingressing on this fid */ u16 src_fid; @@ -85,6 +109,8 @@ struct bnxt_tc_flow { struct bnxt_tc_l3_key l3_mask; struct bnxt_tc_l4_key l4_key; struct bnxt_tc_l4_key l4_mask; + struct ip_tunnel_key tun_key; + struct ip_tunnel_key tun_mask; struct bnxt_tc_actions actions; @@ -95,11 +121,33 @@ struct bnxt_tc_flow { unsigned long lastused; /* jiffies */ }; +/* Tunnel encap/decap hash table + * This table is used to maintain a list of flows that use + * the same tunnel encap/decap params (ip_daddrs, vni, udp_dport) + * and the FW returned handle. + * A separate table is maintained for encap and decap + */ +struct bnxt_tc_tunnel_node { + struct ip_tunnel_key key; + struct rhash_head node; + + /* tunnel l2 info */ + struct bnxt_tc_l2_key l2_info; + +#define INVALID_TUNNEL_HANDLE cpu_to_le32(0xffffffff) + /* tunnel handle returned by FW */ + __le32 tunnel_handle; + + u32 refcount; + struct rcu_head rcu; +}; + /* L2 hash table - * This data-struct is used for L2-flow table. - * The L2 part of a flow is stored in a hash table. + * The same data-struct is used for L2-flow table and L2-tunnel table. + * The L2 part of a flow or tunnel is stored in a hash table. * A flow that shares the same L2 key/mask with an - * already existing flow must refer to it's flow handle. + * already existing flow/tunnel must refer to it's flow handle or + * decap_filter_id respectively. 
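 * (Concretely, assuming two decap flows F1 and F2 that share an outer L2
 *  key: F1 allocates the decap filter from FW; F2 finds the shared l2_node
 *  with refcount > 0 and passes F1's handle as its ref_decap_handle, which
 *  is what bnxt_tc_get_ref_decap_handle() implements. F1 and F2 are
 *  illustrative names here, not driver symbols.)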
*/ struct bnxt_tc_l2_node { /* hash key: first 16b of key */ @@ -110,7 +158,7 @@ struct bnxt_tc_l2_node { /* a linked list of flows that share the same l2 key */ struct list_head common_l2_flows; - /* number of flows sharing the l2 key */ + /* number of flows/tunnels sharing the l2 key */ u16 refcount; struct rcu_head rcu; @@ -130,6 +178,16 @@ struct bnxt_tc_flow_node { /* for the shared_flows list maintained in l2_node */ struct list_head l2_list_node; + /* tunnel encap related */ + struct bnxt_tc_tunnel_node *encap_node; + + /* tunnel decap related */ + struct bnxt_tc_tunnel_node *decap_node; + /* L2 node in tunnel-l2 hashtable that shares flow's tunnel l2 key */ + struct bnxt_tc_l2_node *decap_l2_node; + /* for the shared_flows list maintained in tunnel decap l2_node */ + struct list_head decap_l2_list_node; + struct rcu_head rcu; }; -- cgit v1.2.3 From f484f6782e013138946122ae09c100c9e4b547e3 Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Thu, 26 Oct 2017 11:51:30 -0400 Subject: bnxt_en: add hwrm FW cmds for cfa_encap_record and decap_filter Add routines for issuing the hwrm_cfa_encap_record_alloc/free and hwrm_cfa_decap_filter_alloc/free FW cmds needed for supporting vxlan encap/decap offload. Signed-off-by: Sathya Perla Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 129 ++++++++++++++++++++++++++- 1 file changed, 125 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index f14edc9c1412..0d258d303eef 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -585,13 +585,85 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp, __le32 ref_decap_handle, __le32 *decap_filter_handle) { - return 0; + struct hwrm_cfa_decap_filter_alloc_output *resp = + bp->hwrm_cmd_resp_addr; + struct hwrm_cfa_decap_filter_alloc_input req = { 0 }; + struct ip_tunnel_key *tun_key = &flow->tun_key; + u32 enables = 0; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1); + + req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL); + enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE | + CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL; + req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; + req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP; + + if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) { + enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID; + /* tunnel_id is wrongly defined in hsi defn. 
as __le32 */ + req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id); + } + + if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) { + enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR | + CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR; + ether_addr_copy(req.dst_macaddr, l2_info->dmac); + ether_addr_copy(req.src_macaddr, l2_info->smac); + } + if (l2_info->num_vlans) { + enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID; + req.t_ivlan_vid = l2_info->inner_vlan_tci; + } + + enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE; + req.ethertype = htons(ETH_P_IP); + + if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) { + enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | + CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | + CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE; + req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; + req.dst_ipaddr[0] = tun_key->u.ipv4.dst; + req.src_ipaddr[0] = tun_key->u.ipv4.src; + } + + if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) { + enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT; + req.dst_port = tun_key->tp_dst; + } + + /* Eventhough the decap_handle returned by hwrm_cfa_decap_filter_alloc + * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16. + */ + req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle; + req.enables = cpu_to_le32(enables); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + *decap_filter_handle = resp->decap_filter_id; + else + netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + mutex_unlock(&bp->hwrm_cmd_lock); + + return rc; } static int hwrm_cfa_decap_filter_free(struct bnxt *bp, __le32 decap_filter_handle) { - return 0; + struct hwrm_cfa_decap_filter_free_input req = { 0 }; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1); + req.decap_filter_id = decap_filter_handle; + + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + return rc; } static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, @@ -599,13 +671,62 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, struct bnxt_tc_l2_key *l2_info, __le32 *encap_record_handle) { - return 0; + struct hwrm_cfa_encap_record_alloc_output *resp = + bp->hwrm_cmd_resp_addr; + struct hwrm_cfa_encap_record_alloc_input req = { 0 }; + struct hwrm_cfa_encap_data_vxlan *encap = + (struct hwrm_cfa_encap_data_vxlan *)&req.encap_data; + struct hwrm_vxlan_ipv4_hdr *encap_ipv4 = + (struct hwrm_vxlan_ipv4_hdr *)encap->l3; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1); + + req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN; + + ether_addr_copy(encap->dst_mac_addr, l2_info->dmac); + ether_addr_copy(encap->src_mac_addr, l2_info->smac); + if (l2_info->num_vlans) { + encap->num_vlan_tags = l2_info->num_vlans; + encap->ovlan_tci = l2_info->inner_vlan_tci; + encap->ovlan_tpid = l2_info->inner_vlan_tpid; + } + + encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT; + encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT; + encap_ipv4->ttl = encap_key->ttl; + + encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst; + encap_ipv4->src_ip_addr = encap_key->u.ipv4.src; + encap_ipv4->protocol = IPPROTO_UDP; + + encap->dst_port = encap_key->tp_dst; + encap->vni = tunnel_id_to_key32(encap_key->tun_id); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + 
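/* (note: resp points at the shared HWRM response buffer, so the encap record id is copied out below while hwrm_cmd_lock is still held) */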
if (!rc) + *encap_record_handle = resp->encap_record_id; + else + netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + mutex_unlock(&bp->hwrm_cmd_lock); + + return rc; } static int hwrm_cfa_encap_record_free(struct bnxt *bp, __le32 encap_record_handle) { - return 0; + struct hwrm_cfa_encap_record_free_input req = { 0 }; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1); + req.encap_record_id = encap_record_handle; + + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + return rc; } static int bnxt_tc_put_l2_node(struct bnxt *bp, -- cgit v1.2.3 From 5a84acbebb22f93dfc9ce1e5f0427c45c94acb33 Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Thu, 26 Oct 2017 11:51:31 -0400 Subject: bnxt_en: query cfa flow stats periodically to compute 'lastused' attribute This patch implements periodic querying of cfa flow stats in batches to compute the 'lastused' attribute of TC flow stats. Signed-off-by: Sathya Perla Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 9 + drivers/net/ethernet/broadcom/bnxt/bnxt.h | 14 ++ drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 256 ++++++++++++++++++--------- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h | 24 ++- 4 files changed, 218 insertions(+), 85 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index c76729122143..bbf6da389f86 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6979,6 +6979,11 @@ static void bnxt_timer(unsigned long data) set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); bnxt_queue_sp_work(bp); } + + if (bnxt_tc_flower_enabled(bp)) { + set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event); + bnxt_queue_sp_work(bp); + } bnxt_restart_timer: mod_timer(&bp->timer, jiffies + bp->current_interval); } @@ -7069,6 +7074,10 @@ static void bnxt_sp_task(struct work_struct *work) bnxt_get_port_module_status(bp); mutex_unlock(&bp->link_lock); } + + if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event)) + bnxt_tc_flow_stats_work(bp); + /* These functions below will clear BNXT_STATE_IN_SP_TASK. They * must be the last functions to be called before exiting. 
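 * (Hence the new BNXT_FLOW_STATS_SP_EVENT handler is added above this
 *  comment rather than after the final calls.)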
*/ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index d88d864db7d4..b8343ee4182c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -955,6 +955,11 @@ struct bnxt_coal { u8 budget; }; +struct bnxt_tc_flow_stats { + u64 packets; + u64 bytes; +}; + struct bnxt_tc_info { bool enabled; @@ -980,6 +985,14 @@ struct bnxt_tc_info { */ struct mutex lock; + /* Fields used for batching stats query */ + struct rhashtable_iter iter; +#define BNXT_FLOW_STATS_BATCH_MAX 10 + struct bnxt_tc_stats_batch { + void *flow_node; + struct bnxt_tc_flow_stats hw_stats; + } stats_batch[BNXT_FLOW_STATS_BATCH_MAX]; + /* Stat counter mask (width) */ u64 bytes_mask; u64 packets_mask; @@ -1282,6 +1295,7 @@ struct bnxt { #define BNXT_GENEVE_ADD_PORT_SP_EVENT 12 #define BNXT_GENEVE_DEL_PORT_SP_EVENT 13 #define BNXT_LINK_SPEED_CHNG_SP_EVENT 14 +#define BNXT_FLOW_STATS_SP_EVENT 15 struct bnxt_pf_info pf; #ifdef CONFIG_BNXT_SRIOV diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 0d258d303eef..71828a5beefe 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -504,81 +504,6 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, return rc; } -/* Add val to accum while handling a possible wraparound - * of val. Eventhough val is of type u64, its actual width - * is denoted by mask and will wrap-around beyond that width. - */ -static void accumulate_val(u64 *accum, u64 val, u64 mask) -{ -#define low_bits(x, mask) ((x) & (mask)) -#define high_bits(x, mask) ((x) & ~(mask)) - bool wrapped = val < low_bits(*accum, mask); - - *accum = high_bits(*accum, mask) + val; - if (wrapped) - *accum += (mask + 1); -} - -/* The HW counters' width is much less than 64bits. - * Handle possible wrap-around while updating the stat counters - */ -static void bnxt_flow_stats_fix_wraparound(struct bnxt_tc_info *tc_info, - struct bnxt_tc_flow_stats *stats, - struct bnxt_tc_flow_stats *hw_stats) -{ - accumulate_val(&stats->bytes, hw_stats->bytes, tc_info->bytes_mask); - accumulate_val(&stats->packets, hw_stats->packets, - tc_info->packets_mask); -} - -/* Fix possible wraparound of the stats queried from HW, calculate - * the delta from prev_stats, and also update the prev_stats. - * The HW flow stats are fetched under the hwrm_cmd_lock mutex. - * This routine is best called while under the mutex so that the - * stats processing happens atomically. 
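 * (Worked example of the wrap-around handling that this patch relocates
 *  into bnxt_flow_stats_accum(): with a 32-bit HW counter, mask is
 *  0xffffffff; if *accum = 0x100000010 and the HW now reports val = 0x8,
 *  then val < low_bits(*accum, mask), i.e. 0x8 < 0x10, so the counter
 *  wrapped and accumulate_val() yields 0x100000008 + (mask + 1) =
 *  0x200000008.)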
- */ -static void bnxt_flow_stats_calc(struct bnxt_tc_info *tc_info, - struct bnxt_tc_flow *flow, - struct bnxt_tc_flow_stats *stats) -{ - struct bnxt_tc_flow_stats *acc_stats, *prev_stats; - - acc_stats = &flow->stats; - bnxt_flow_stats_fix_wraparound(tc_info, acc_stats, stats); - - prev_stats = &flow->prev_stats; - stats->bytes = acc_stats->bytes - prev_stats->bytes; - stats->packets = acc_stats->packets - prev_stats->packets; - *prev_stats = *acc_stats; -} - -static int bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, - __le16 flow_handle, - struct bnxt_tc_flow *flow, - struct bnxt_tc_flow_stats *stats) -{ - struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_cfa_flow_stats_input req = { 0 }; - int rc; - - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1); - req.num_flows = cpu_to_le16(1); - req.flow_handle_0 = flow_handle; - - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) { - stats->packets = le64_to_cpu(resp->packet_0); - stats->bytes = le64_to_cpu(resp->byte_0); - bnxt_flow_stats_calc(&bp->tc_info, flow, stats); - } else { - netdev_info(bp->dev, "error rc=%d", rc); - } - - mutex_unlock(&bp->hwrm_cmd_lock); - return rc; -} - static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, struct bnxt_tc_l2_key *l2_info, @@ -1306,6 +1231,8 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, if (rc) goto put_tunnel; + flow->lastused = jiffies; + spin_lock_init(&flow->stats_lock); /* add new flow to flow-table */ rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node, tc_info->flow_ht_params); @@ -1352,10 +1279,11 @@ static int bnxt_tc_del_flow(struct bnxt *bp, static int bnxt_tc_get_flow_stats(struct bnxt *bp, struct tc_cls_flower_offload *tc_flow_cmd) { + struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats; struct bnxt_tc_info *tc_info = &bp->tc_info; struct bnxt_tc_flow_node *flow_node; - struct bnxt_tc_flow_stats stats; - int rc; + struct bnxt_tc_flow *flow; + unsigned long lastused; flow_node = rhashtable_lookup_fast(&tc_info->flow_table, &tc_flow_cmd->cookie, @@ -1366,15 +1294,183 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp, return -1; } - rc = bnxt_hwrm_cfa_flow_stats_get(bp, flow_node->flow_handle, - &flow_node->flow, &stats); + flow = &flow_node->flow; + curr_stats = &flow->stats; + prev_stats = &flow->prev_stats; + + spin_lock(&flow->stats_lock); + stats.packets = curr_stats->packets - prev_stats->packets; + stats.bytes = curr_stats->bytes - prev_stats->bytes; + *prev_stats = *curr_stats; + lastused = flow->lastused; + spin_unlock(&flow->stats_lock); + + tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets, + lastused); + return 0; +} + +static int +bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, + struct bnxt_tc_stats_batch stats_batch[]) +{ + struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_cfa_flow_stats_input req = { 0 }; + __le16 *req_flow_handles = &req.flow_handle_0; + int rc, i; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1); + req.num_flows = cpu_to_le16(num_flows); + for (i = 0; i < num_flows; i++) { + struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node; + + req_flow_handles[i] = flow_node->flow_handle; + } + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + __le64 *resp_packets = &resp->packet_0; + __le64 *resp_bytes = &resp->byte_0; + + for (i = 0; i < 
num_flows; i++) { + stats_batch[i].hw_stats.packets = + le64_to_cpu(resp_packets[i]); + stats_batch[i].hw_stats.bytes = + le64_to_cpu(resp_bytes[i]); + } + } else { + netdev_info(bp->dev, "error rc=%d", rc); + } + + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +/* Add val to accum while handling a possible wraparound + * of val. Eventhough val is of type u64, its actual width + * is denoted by mask and will wrap-around beyond that width. + */ +static void accumulate_val(u64 *accum, u64 val, u64 mask) +{ +#define low_bits(x, mask) ((x) & (mask)) +#define high_bits(x, mask) ((x) & ~(mask)) + bool wrapped = val < low_bits(*accum, mask); + + *accum = high_bits(*accum, mask) + val; + if (wrapped) + *accum += (mask + 1); +} + +/* The HW counters' width is much less than 64bits. + * Handle possible wrap-around while updating the stat counters + */ +static void bnxt_flow_stats_accum(struct bnxt_tc_info *tc_info, + struct bnxt_tc_flow_stats *acc_stats, + struct bnxt_tc_flow_stats *hw_stats) +{ + accumulate_val(&acc_stats->bytes, hw_stats->bytes, tc_info->bytes_mask); + accumulate_val(&acc_stats->packets, hw_stats->packets, + tc_info->packets_mask); +} + +static int +bnxt_tc_flow_stats_batch_update(struct bnxt *bp, int num_flows, + struct bnxt_tc_stats_batch stats_batch[]) +{ + struct bnxt_tc_info *tc_info = &bp->tc_info; + int rc, i; + + rc = bnxt_hwrm_cfa_flow_stats_get(bp, num_flows, stats_batch); if (rc) return rc; - tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets, 0); + for (i = 0; i < num_flows; i++) { + struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node; + struct bnxt_tc_flow *flow = &flow_node->flow; + + spin_lock(&flow->stats_lock); + bnxt_flow_stats_accum(tc_info, &flow->stats, + &stats_batch[i].hw_stats); + if (flow->stats.packets != flow->prev_stats.packets) + flow->lastused = jiffies; + spin_unlock(&flow->stats_lock); + } + return 0; } +static int +bnxt_tc_flow_stats_batch_prep(struct bnxt *bp, + struct bnxt_tc_stats_batch stats_batch[], + int *num_flows) +{ + struct bnxt_tc_info *tc_info = &bp->tc_info; + struct rhashtable_iter *iter = &tc_info->iter; + void *flow_node; + int rc, i; + + rc = rhashtable_walk_start(iter); + if (rc && rc != -EAGAIN) { + i = 0; + goto done; + } + + rc = 0; + for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) { + flow_node = rhashtable_walk_next(iter); + if (IS_ERR(flow_node)) { + i = 0; + if (PTR_ERR(flow_node) == -EAGAIN) { + continue; + } else { + rc = PTR_ERR(flow_node); + goto done; + } + } + + /* No more flows */ + if (!flow_node) + goto done; + + stats_batch[i].flow_node = flow_node; + } +done: + rhashtable_walk_stop(iter); + *num_flows = i; + return rc; +} + +void bnxt_tc_flow_stats_work(struct bnxt *bp) +{ + struct bnxt_tc_info *tc_info = &bp->tc_info; + int num_flows, rc; + + num_flows = atomic_read(&tc_info->flow_table.nelems); + if (!num_flows) + return; + + rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter); + + for (;;) { + rc = bnxt_tc_flow_stats_batch_prep(bp, tc_info->stats_batch, + &num_flows); + if (rc) { + if (rc == -EAGAIN) + continue; + break; + } + + if (!num_flows) + break; + + bnxt_tc_flow_stats_batch_update(bp, num_flows, + tc_info->stats_batch); + } + + rhashtable_walk_exit(&tc_info->iter); +} + int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, struct tc_cls_flower_offload *cls_flower) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h index 2beccd41c886..404f98dea0ab 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h 
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h @@ -78,11 +78,6 @@ struct bnxt_tc_actions { struct ip_tunnel_key tun_encap_key; }; -struct bnxt_tc_flow_stats { - u64 packets; - u64 bytes; -}; - struct bnxt_tc_flow { u32 flags; #define BNXT_TC_FLOW_FLAGS_ETH_ADDRS BIT(1) @@ -119,6 +114,10 @@ struct bnxt_tc_flow { /* previous snap-shot of stats */ struct bnxt_tc_flow_stats prev_stats; unsigned long lastused; /* jiffies */ + /* for calculating delta from prev_stats and + * updating prev_stats atomically. + */ + spinlock_t stats_lock; }; /* Tunnel encap/decap hash table @@ -195,6 +194,12 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid, struct tc_cls_flower_offload *cls_flower); int bnxt_init_tc(struct bnxt *bp); void bnxt_shutdown_tc(struct bnxt *bp); +void bnxt_tc_flow_stats_work(struct bnxt *bp); + +static inline bool bnxt_tc_flower_enabled(struct bnxt *bp) +{ + return bp->tc_info.enabled; +} #else /* CONFIG_BNXT_FLOWER_OFFLOAD */ @@ -212,5 +217,14 @@ static inline int bnxt_init_tc(struct bnxt *bp) static inline void bnxt_shutdown_tc(struct bnxt *bp) { } + +static inline void bnxt_tc_flow_stats_work(struct bnxt *bp) +{ +} + +static inline bool bnxt_tc_flower_enabled(struct bnxt *bp) +{ + return false; +} #endif /* CONFIG_BNXT_FLOWER_OFFLOAD */ #endif /* BNXT_TC_H */ -- cgit v1.2.3 From cd66358e52f74585f043ef63089727273b3421d3 Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Thu, 26 Oct 2017 11:51:32 -0400 Subject: bnxt_en: alloc tc_info{} struct only when tc flower is enabled TC flower is not enabled on VFs and when there's no FW support. Alloc the tc_info{} struct at init time only when TC flower is being enabled. Signed-off-by: Sathya Perla Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 47 ++++++++++++++++----------- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 3 ++ 5 files changed, 34 insertions(+), 22 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index bbf6da389f86..5ce950629ce9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7347,7 +7347,7 @@ static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, { struct bnxt *bp = cb_priv; - if (BNXT_VF(bp)) + if (!bnxt_tc_flower_enabled(bp)) return -EOPNOTSUPP; switch (type) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index b8343ee4182c..5359a1f0045f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1346,7 +1346,7 @@ struct bnxt { enum devlink_eswitch_mode eswitch_mode; struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */ u16 *cfa_code_map; /* cfa_code -> vf_idx map */ - struct bnxt_tc_info tc_info; + struct bnxt_tc_info *tc_info; }; #define BNXT_RX_STATS_OFFSET(counter) \ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 71828a5beefe..798d13964274 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -658,7 +658,7 @@ static int bnxt_tc_put_l2_node(struct bnxt *bp, struct bnxt_tc_flow_node *flow_node) { struct bnxt_tc_l2_node *l2_node = flow_node->l2_node; - struct bnxt_tc_info *tc_info = &bp->tc_info; + struct 
bnxt_tc_info *tc_info = bp->tc_info; int rc; /* remove flow_node from the L2 shared flow list */ @@ -714,7 +714,7 @@ bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow, struct bnxt_tc_flow_node *flow_node, __le16 *ref_flow_handle) { - struct bnxt_tc_info *tc_info = &bp->tc_info; + struct bnxt_tc_info *tc_info = bp->tc_info; struct bnxt_tc_flow_node *ref_flow_node; struct bnxt_tc_l2_node *l2_node; @@ -829,7 +829,7 @@ static int bnxt_tc_get_ref_decap_handle(struct bnxt *bp, struct bnxt_tc_flow_node *flow_node, __le32 *ref_decap_handle) { - struct bnxt_tc_info *tc_info = &bp->tc_info; + struct bnxt_tc_info *tc_info = bp->tc_info; struct bnxt_tc_flow_node *ref_flow_node; struct bnxt_tc_l2_node *decap_l2_node; @@ -867,7 +867,7 @@ static void bnxt_tc_put_decap_l2_node(struct bnxt *bp, struct bnxt_tc_flow_node *flow_node) { struct bnxt_tc_l2_node *decap_l2_node = flow_node->decap_l2_node; - struct bnxt_tc_info *tc_info = &bp->tc_info; + struct bnxt_tc_info *tc_info = bp->tc_info; int rc; /* remove flow_node from the decap L2 sharing flow list */ @@ -886,7 +886,7 @@ static void bnxt_tc_put_decap_handle(struct bnxt *bp, struct bnxt_tc_flow_node *flow_node) { __le32 decap_handle = flow_node->decap_node->tunnel_handle; - struct bnxt_tc_info *tc_info = &bp->tc_info; + struct bnxt_tc_info *tc_info = bp->tc_info; int rc; if (flow_node->decap_l2_node) @@ -973,7 +973,7 @@ static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow, __le32 *decap_filter_handle) { struct ip_tunnel_key *decap_key = &flow->tun_key; - struct bnxt_tc_info *tc_info = &bp->tc_info; + struct bnxt_tc_info *tc_info = bp->tc_info; struct bnxt_tc_l2_key l2_info = { {0} }; struct bnxt_tc_tunnel_node *decap_node; struct ip_tunnel_key tun_key = { 0 }; @@ -1051,7 +1051,7 @@ static void bnxt_tc_put_encap_handle(struct bnxt *bp, struct bnxt_tc_tunnel_node *encap_node) { __le32 encap_handle = encap_node->tunnel_handle; - struct bnxt_tc_info *tc_info = &bp->tc_info; + struct bnxt_tc_info *tc_info = bp->tc_info; int rc; rc = bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table, @@ -1070,7 +1070,7 @@ static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow, __le32 *encap_handle) { struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key; - struct bnxt_tc_info *tc_info = &bp->tc_info; + struct bnxt_tc_info *tc_info = bp->tc_info; struct bnxt_tc_tunnel_node *encap_node; int rc; @@ -1137,7 +1137,7 @@ static int bnxt_tc_get_tunnel_handle(struct bnxt *bp, static int __bnxt_tc_del_flow(struct bnxt *bp, struct bnxt_tc_flow_node *flow_node) { - struct bnxt_tc_info *tc_info = &bp->tc_info; + struct bnxt_tc_info *tc_info = bp->tc_info; int rc; /* send HWRM cmd to free the flow-id */ @@ -1180,7 +1180,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, struct tc_cls_flower_offload *tc_flow_cmd) { struct bnxt_tc_flow_node *new_node, *old_node; - struct bnxt_tc_info *tc_info = &bp->tc_info; + struct bnxt_tc_info *tc_info = bp->tc_info; struct bnxt_tc_flow *flow; __le32 tunnel_handle = 0; __le16 ref_flow_handle; @@ -1261,7 +1261,7 @@ done: static int bnxt_tc_del_flow(struct bnxt *bp, struct tc_cls_flower_offload *tc_flow_cmd) { - struct bnxt_tc_info *tc_info = &bp->tc_info; + struct bnxt_tc_info *tc_info = bp->tc_info; struct bnxt_tc_flow_node *flow_node; flow_node = rhashtable_lookup_fast(&tc_info->flow_table, @@ -1280,7 +1280,7 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp, struct tc_cls_flower_offload *tc_flow_cmd) { struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats; 
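/*
 * Editorial aside on the conversion running through this file: replacing
 * the embedded tc_info member with a pointer follows a common kernel idiom.
 * A minimal sketch of the idiom, reusing names from this patch (not a
 * drop-in; table setup and unwinding elided):
 *
 *      static inline bool bnxt_tc_flower_enabled(struct bnxt *bp)
 *      {
 *              return bp->tc_info && bp->tc_info->enabled;
 *      }
 *
 *      int bnxt_init_tc(struct bnxt *bp)
 *      {
 *              struct bnxt_tc_info *ti = kzalloc(sizeof(*ti), GFP_KERNEL);
 *
 *              if (!ti)
 *                      return -ENOMEM;
 *              // ...init hashtables; on failure unwind and kfree(ti)...
 *              ti->enabled = true;
 *              bp->tc_info = ti;       // publish only after setup succeeds
 *              return 0;
 *      }
 *
 * Gating every callback on one predicate that checks both the pointer and
 * the flag means VFs and old-firmware devices (where init never runs) fall
 * out with -EOPNOTSUPP instead of dereferencing a NULL tc_info.
 */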
- struct bnxt_tc_info *tc_info = &bp->tc_info; + struct bnxt_tc_info *tc_info = bp->tc_info; struct bnxt_tc_flow_node *flow_node; struct bnxt_tc_flow *flow; unsigned long lastused; @@ -1378,7 +1378,7 @@ static int bnxt_tc_flow_stats_batch_update(struct bnxt *bp, int num_flows, struct bnxt_tc_stats_batch stats_batch[]) { - struct bnxt_tc_info *tc_info = &bp->tc_info; + struct bnxt_tc_info *tc_info = bp->tc_info; int rc, i; rc = bnxt_hwrm_cfa_flow_stats_get(bp, num_flows, stats_batch); @@ -1405,7 +1405,7 @@ bnxt_tc_flow_stats_batch_prep(struct bnxt *bp, struct bnxt_tc_stats_batch stats_batch[], int *num_flows) { - struct bnxt_tc_info *tc_info = &bp->tc_info; + struct bnxt_tc_info *tc_info = bp->tc_info; struct rhashtable_iter *iter = &tc_info->iter; void *flow_node; int rc, i; @@ -1443,7 +1443,7 @@ done: void bnxt_tc_flow_stats_work(struct bnxt *bp) { - struct bnxt_tc_info *tc_info = &bp->tc_info; + struct bnxt_tc_info *tc_info = bp->tc_info; int num_flows, rc; num_flows = atomic_read(&tc_info->flow_table.nelems); @@ -1528,7 +1528,7 @@ static const struct rhashtable_params bnxt_tc_tunnel_ht_params = { int bnxt_init_tc(struct bnxt *bp) { - struct bnxt_tc_info *tc_info = &bp->tc_info; + struct bnxt_tc_info *tc_info; int rc; if (bp->hwrm_spec_code < 0x10803) { @@ -1536,6 +1536,10 @@ int bnxt_init_tc(struct bnxt *bp) "Firmware does not support TC flower offload.\n"); return -ENOTSUPP; } + + tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL); + if (!tc_info) + return -ENOMEM; mutex_init(&tc_info->lock); /* Counter widths are programmed by FW */ @@ -1545,7 +1549,7 @@ int bnxt_init_tc(struct bnxt *bp) tc_info->flow_ht_params = bnxt_tc_flow_ht_params; rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params); if (rc) - return rc; + goto free_tc_info; tc_info->l2_ht_params = bnxt_tc_l2_ht_params; rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params); @@ -1573,6 +1577,7 @@ int bnxt_init_tc(struct bnxt *bp) tc_info->enabled = true; bp->dev->hw_features |= NETIF_F_HW_TC; bp->dev->features |= NETIF_F_HW_TC; + bp->tc_info = tc_info; return 0; destroy_decap_table: @@ -1583,14 +1588,16 @@ destroy_l2_table: rhashtable_destroy(&tc_info->l2_table); destroy_flow_table: rhashtable_destroy(&tc_info->flow_table); +free_tc_info: + kfree(tc_info); return rc; } void bnxt_shutdown_tc(struct bnxt *bp) { - struct bnxt_tc_info *tc_info = &bp->tc_info; + struct bnxt_tc_info *tc_info = bp->tc_info; - if (!tc_info->enabled) + if (!bnxt_tc_flower_enabled(bp)) return; rhashtable_destroy(&tc_info->flow_table); @@ -1598,4 +1605,6 @@ void bnxt_shutdown_tc(struct bnxt *bp) rhashtable_destroy(&tc_info->decap_l2_table); rhashtable_destroy(&tc_info->decap_table); rhashtable_destroy(&tc_info->encap_table); + kfree(tc_info); + bp->tc_info = NULL; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h index 404f98dea0ab..97e09a880693 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h @@ -198,7 +198,7 @@ void bnxt_tc_flow_stats_work(struct bnxt *bp); static inline bool bnxt_tc_flower_enabled(struct bnxt *bp) { - return bp->tc_info.enabled; + return bp->tc_info && bp->tc_info->enabled; } #else /* CONFIG_BNXT_FLOWER_OFFLOAD */ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index 4ae935999ebe..c1761ed5785e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -124,6 +124,9 @@ static int 
bnxt_vf_rep_setup_tc_block_cb(enum tc_setup_type type, struct bnxt *bp = vf_rep->bp; int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid; + if (!bnxt_tc_flower_enabled(vf_rep->bp)) + return -EOPNOTSUPP; + switch (type) { case TC_SETUP_CLSFLOWER: return bnxt_tc_setup_flower(bp, vf_fid, type_data); -- cgit v1.2.3 From 2ffbbf0f91288f909b3d495cbf029d8e4cc7db66 Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Thu, 26 Oct 2017 11:06:46 -0600 Subject: net: qualcomm: rmnet: Fix the return value of rmnet_rx_handler() Since packet is always consumed by rmnet_rx_handler(), we always return RX_HANDLER_CONSUMED. There is no need to pass on this value through multiple functions. Signed-off-by: Subash Abhinov Kasiviswanathan Signed-off-by: David S. Miller --- .../net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 31 +++++++++------------- drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h | 3 +-- .../ethernet/qualcomm/rmnet/rmnet_map_command.c | 4 +-- 3 files changed, 14 insertions(+), 24 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index df3d2d16ce55..5dd186d4d0e4 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -43,7 +43,7 @@ static void rmnet_set_skb_proto(struct sk_buff *skb) /* Generic handler */ -static rx_handler_result_t +static void rmnet_deliver_skb(struct sk_buff *skb) { skb_reset_transport_header(skb); @@ -53,12 +53,11 @@ rmnet_deliver_skb(struct sk_buff *skb) skb->pkt_type = PACKET_HOST; skb_set_mac_header(skb, 0); netif_receive_skb(skb); - return RX_HANDLER_CONSUMED; } /* MAP handler */ -static rx_handler_result_t +static void __rmnet_map_ingress_handler(struct sk_buff *skb, struct rmnet_port *port) { @@ -91,31 +90,27 @@ __rmnet_map_ingress_handler(struct sk_buff *skb, skb_pull(skb, sizeof(struct rmnet_map_header)); skb_trim(skb, len); rmnet_set_skb_proto(skb); - return rmnet_deliver_skb(skb); + rmnet_deliver_skb(skb); + return; free_skb: kfree_skb(skb); - return RX_HANDLER_CONSUMED; } -static rx_handler_result_t +static void rmnet_map_ingress_handler(struct sk_buff *skb, struct rmnet_port *port) { struct sk_buff *skbn; - int rc; if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) { while ((skbn = rmnet_map_deaggregate(skb)) != NULL) __rmnet_map_ingress_handler(skbn, port); consume_skb(skb); - rc = RX_HANDLER_CONSUMED; } else { - rc = __rmnet_map_ingress_handler(skb, port); + __rmnet_map_ingress_handler(skb, port); } - - return rc; } static int rmnet_map_egress_handler(struct sk_buff *skb, @@ -149,15 +144,13 @@ static int rmnet_map_egress_handler(struct sk_buff *skb, return RMNET_MAP_SUCCESS; } -static rx_handler_result_t +static void rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev) { if (bridge_dev) { skb->dev = bridge_dev; dev_queue_xmit(skb); } - - return RX_HANDLER_CONSUMED; } /* Ingress / Egress Entry Points */ @@ -168,13 +161,12 @@ rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev) */ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) { - int rc = RX_HANDLER_CONSUMED; struct sk_buff *skb = *pskb; struct rmnet_port *port; struct net_device *dev; if (!skb) - return RX_HANDLER_CONSUMED; + goto done; dev = skb->dev; port = rmnet_get_port(dev); @@ -182,14 +174,15 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb) switch (port->rmnet_mode) { case RMNET_EPMODE_VND: if (port->ingress_data_format & 
RMNET_INGRESS_FORMAT_MAP) - rc = rmnet_map_ingress_handler(skb, port); + rmnet_map_ingress_handler(skb, port); break; case RMNET_EPMODE_BRIDGE: - rc = rmnet_bridge_handler(skb, port->bridge_ep); + rmnet_bridge_handler(skb, port->bridge_ep); break; } - return rc; +done: + return RX_HANDLER_CONSUMED; } /* Modifies packet as per logical endpoint configuration and egress data format diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h index ce2302c25b12..3af3fe7b5457 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h @@ -80,7 +80,6 @@ u8 rmnet_map_demultiplex(struct sk_buff *skb); struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb); struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, int hdrlen, int pad); -rx_handler_result_t rmnet_map_command(struct sk_buff *skb, - struct rmnet_port *port); +void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port); #endif /* _RMNET_MAP_H_ */ diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c index 74d362f71cce..51e604923ac1 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c @@ -76,8 +76,7 @@ static void rmnet_map_send_ack(struct sk_buff *skb, /* Process MAP command frame and send N/ACK message as appropriate. Message cmd * name is decoded here and appropriate handler is called. */ -rx_handler_result_t rmnet_map_command(struct sk_buff *skb, - struct rmnet_port *port) +void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port) { struct rmnet_map_control_command *cmd; unsigned char command_name; @@ -102,5 +101,4 @@ rx_handler_result_t rmnet_map_command(struct sk_buff *skb, } if (rc == RMNET_MAP_COMMAND_ACK) rmnet_map_send_ack(skb, rc); - return RX_HANDLER_CONSUMED; } -- cgit v1.2.3 From 85355d775ff70d9ba77a5f94dd786b23852e1c72 Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Thu, 26 Oct 2017 11:06:47 -0600 Subject: net: qualcomm: rmnet: Always assign rmnet dev in deaggregation path The rmnet device needs to assigned for all packets in the deaggregation path based on the mux id, so the check is not needed. Signed-off-by: Subash Abhinov Kasiviswanathan Signed-off-by: David S. Miller --- drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index 5dd186d4d0e4..1ea978335da3 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -83,8 +83,7 @@ __rmnet_map_ingress_handler(struct sk_buff *skb, if (!ep) goto free_skb; - if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEMUXING) - skb->dev = ep->egress_dev; + skb->dev = ep->egress_dev; /* Subtract MAP header */ skb_pull(skb, sizeof(struct rmnet_map_header)); -- cgit v1.2.3 From 192c4b5d48f2ae25a4ce323b4cb8b024fac3efd2 Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Thu, 26 Oct 2017 11:06:48 -0600 Subject: net: qualcomm: rmnet: Add support for 64 bit stats Implement 64 bit per cpu stats. Signed-off-by: Subash Abhinov Kasiviswanathan Signed-off-by: David S. 
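Two things are worth spelling out about the per-CPU counters this patch introduces. First, the writer side is cheap: u64_stats_update_begin()/end() compile away on 64-bit architectures and only take a seqcount on 32-bit ones. Second, the reader must visit every CPU's bucket; note that the aggregation loop below calls this_cpu_ptr() from inside for_each_possible_cpu(), which re-reads the local CPU's bucket on every iteration, whereas the conventional form is per_cpu_ptr(ptr, cpu). A hedged sketch of that conventional reader (kernel-style; demo_fold_stats is an illustrative name, and the structs from this patch are assumed in scope):

static void demo_fold_stats(struct rmnet_priv *priv,
                            struct rmnet_vnd_stats *total)
{
        unsigned int cpu, start;

        memset(total, 0, sizeof(*total));
        for_each_possible_cpu(cpu) {
                struct rmnet_pcpu_stats *p = per_cpu_ptr(priv->pcpu_stats, cpu);
                struct rmnet_vnd_stats snap;

                do {
                        start = u64_stats_fetch_begin_irq(&p->syncp);
                        snap = p->stats;        /* consistent 64-bit snapshot */
                } while (u64_stats_fetch_retry_irq(&p->syncp, start));

                total->rx_pkts  += snap.rx_pkts;
                total->rx_bytes += snap.rx_bytes;
                total->tx_pkts  += snap.tx_pkts;
                total->tx_bytes += snap.tx_bytes;
                total->tx_drops += p->stats.tx_drops; /* u32, bumped outside syncp */
        }
}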
Miller --- drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h | 14 ++++ drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 76 ++++++++++++++++++++-- 2 files changed, 85 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h index 60115e69e415..9586703d2d58 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -41,9 +41,23 @@ struct rmnet_port { extern struct rtnl_link_ops rmnet_link_ops; +struct rmnet_vnd_stats { + u64 rx_pkts; + u64 rx_bytes; + u64 tx_pkts; + u64 tx_bytes; + u32 tx_drops; +}; + +struct rmnet_pcpu_stats { + struct rmnet_vnd_stats stats; + struct u64_stats_sync syncp; +}; + struct rmnet_priv { u8 mux_id; struct net_device *real_dev; + struct rmnet_pcpu_stats __percpu *pcpu_stats; }; struct rmnet_port *rmnet_get_port(struct net_device *real_dev); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index 12bd0bbd5235..b0befa18cb10 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -27,14 +27,28 @@ void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev) { - dev->stats.rx_packets++; - dev->stats.rx_bytes += skb->len; + struct rmnet_priv *priv = netdev_priv(dev); + struct rmnet_pcpu_stats *pcpu_ptr; + + pcpu_ptr = this_cpu_ptr(priv->pcpu_stats); + + u64_stats_update_begin(&pcpu_ptr->syncp); + pcpu_ptr->stats.rx_pkts++; + pcpu_ptr->stats.rx_bytes += skb->len; + u64_stats_update_end(&pcpu_ptr->syncp); } void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev) { - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; + struct rmnet_priv *priv = netdev_priv(dev); + struct rmnet_pcpu_stats *pcpu_ptr; + + pcpu_ptr = this_cpu_ptr(priv->pcpu_stats); + + u64_stats_update_begin(&pcpu_ptr->syncp); + pcpu_ptr->stats.tx_pkts++; + pcpu_ptr->stats.tx_bytes += skb->len; + u64_stats_update_end(&pcpu_ptr->syncp); } /* Network Device Operations */ @@ -48,7 +62,7 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb, if (priv->real_dev) { rmnet_egress_handler(skb); } else { - dev->stats.tx_dropped++; + this_cpu_inc(priv->pcpu_stats->stats.tx_drops); kfree_skb(skb); } return NETDEV_TX_OK; @@ -70,12 +84,64 @@ static int rmnet_vnd_get_iflink(const struct net_device *dev) return priv->real_dev->ifindex; } +static int rmnet_vnd_init(struct net_device *dev) +{ + struct rmnet_priv *priv = netdev_priv(dev); + + priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats); + if (!priv->pcpu_stats) + return -ENOMEM; + + return 0; +} + +static void rmnet_vnd_uninit(struct net_device *dev) +{ + struct rmnet_priv *priv = netdev_priv(dev); + + free_percpu(priv->pcpu_stats); +} + +static void rmnet_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *s) +{ + struct rmnet_priv *priv = netdev_priv(dev); + struct rmnet_vnd_stats total_stats; + struct rmnet_pcpu_stats *pcpu_ptr; + unsigned int cpu, start; + + memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats)); + + for_each_possible_cpu(cpu) { + pcpu_ptr = this_cpu_ptr(priv->pcpu_stats); + + do { + start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp); + total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts; + total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes; + total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts; + total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes; + } while 
(u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start)); + + total_stats.tx_drops += pcpu_ptr->stats.tx_drops; + } + + s->rx_packets = total_stats.rx_pkts; + s->rx_bytes = total_stats.rx_bytes; + s->tx_packets = total_stats.tx_pkts; + s->tx_bytes = total_stats.tx_bytes; + s->tx_dropped = total_stats.tx_drops; +} + static const struct net_device_ops rmnet_vnd_ops = { .ndo_start_xmit = rmnet_vnd_start_xmit, .ndo_change_mtu = rmnet_vnd_change_mtu, .ndo_get_iflink = rmnet_vnd_get_iflink, .ndo_add_slave = rmnet_add_bridge, .ndo_del_slave = rmnet_del_bridge, + .ndo_init = rmnet_vnd_init, + .ndo_uninit = rmnet_vnd_uninit, + .ndo_get_stats64 = rmnet_get_stats64, }; /* Called by kernel whenever a new rmnet device is created. Sets MTU, -- cgit v1.2.3 From ca32fb034c19e00cfb5e0fd7217eb92f81302048 Mon Sep 17 00:00:00 2001 From: Subash Abhinov Kasiviswanathan Date: Thu, 26 Oct 2017 11:06:49 -0600 Subject: net: qualcomm: rmnet: Add support for GRO Add gro_cells so that rmnet devices can call gro_cells_receive instead of netif_receive_skb. Signed-off-by: Subash Abhinov Kasiviswanathan Cc: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/ethernet/qualcomm/rmnet/Kconfig | 1 + drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h | 2 ++ drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 4 +++- drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 8 ++++++++ 4 files changed, 14 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qualcomm/rmnet/Kconfig b/drivers/net/ethernet/qualcomm/rmnet/Kconfig index 6e2587af47a4..9bb06d284644 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/Kconfig +++ b/drivers/net/ethernet/qualcomm/rmnet/Kconfig @@ -5,6 +5,7 @@ menuconfig RMNET tristate "RmNet MAP driver" default n + select GRO_CELLS ---help--- If you select this, you will enable the RMNET module which is used for handling data in the multiplexing and aggregation protocol (MAP) diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h index 9586703d2d58..c19259eea99e 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h @@ -14,6 +14,7 @@ */ #include +#include #ifndef _RMNET_CONFIG_H_ #define _RMNET_CONFIG_H_ @@ -58,6 +59,7 @@ struct rmnet_priv { u8 mux_id; struct net_device *real_dev; struct rmnet_pcpu_stats __percpu *pcpu_stats; + struct gro_cells gro_cells; }; struct rmnet_port *rmnet_get_port(struct net_device *real_dev); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c index 1ea978335da3..29842ccc91a9 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c @@ -46,13 +46,15 @@ static void rmnet_set_skb_proto(struct sk_buff *skb) static void rmnet_deliver_skb(struct sk_buff *skb) { + struct rmnet_priv *priv = netdev_priv(skb->dev); + skb_reset_transport_header(skb); skb_reset_network_header(skb); rmnet_vnd_rx_fixup(skb, skb->dev); skb->pkt_type = PACKET_HOST; skb_set_mac_header(skb, 0); - netif_receive_skb(skb); + gro_cells_receive(&priv->gro_cells, skb); } /* MAP handler */ diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c index b0befa18cb10..9caa5e387450 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c @@ -87,11 +87,18 @@ static int rmnet_vnd_get_iflink(const struct net_device 
*dev) static int rmnet_vnd_init(struct net_device *dev) { struct rmnet_priv *priv = netdev_priv(dev); + int err; priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats); if (!priv->pcpu_stats) return -ENOMEM; + err = gro_cells_init(&priv->gro_cells, dev); + if (err) { + free_percpu(priv->pcpu_stats); + return err; + } + return 0; } @@ -99,6 +106,7 @@ static void rmnet_vnd_uninit(struct net_device *dev) { struct rmnet_priv *priv = netdev_priv(dev); + gro_cells_destroy(&priv->gro_cells); free_percpu(priv->pcpu_stats); } -- cgit v1.2.3 From c26eba03e4073bd32ef6c0ea2ba2a3ff5eed11da Mon Sep 17 00:00:00 2001 From: John Allen Date: Thu, 26 Oct 2017 16:23:25 -0500 Subject: ibmvnic: Update reset infrastructure to support tunable parameters Update ibmvnic reset infrastructure to include a new reset option that will allow changing of tunable parameters. There currently is no way to request different capabilities from the vnic server on the fly so this patch achieves this by resetting the driver and attempting to log in with the requested changes. If the reset operation fails, the old values of the tunable parameters are stored in the "fallback" struct and we attempt to login with the fallback values. Signed-off-by: John Allen Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 213 ++++++++++++++++++++++++++++++++----- drivers/net/ethernet/ibm/ibmvnic.h | 24 ++++- 2 files changed, 209 insertions(+), 28 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 11eba8277132..3d0280196fdc 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -115,6 +115,7 @@ static int init_sub_crqs(struct ibmvnic_adapter *); static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter); static int ibmvnic_init(struct ibmvnic_adapter *); static void release_crq_queue(struct ibmvnic_adapter *); +static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p); struct ibmvnic_stat { char name[ETH_GSTRING_LEN]; @@ -926,6 +927,11 @@ static int ibmvnic_open(struct net_device *netdev) mutex_lock(&adapter->reset_lock); + if (adapter->mac_change_pending) { + __ibmvnic_set_mac(netdev, &adapter->desired.mac); + adapter->mac_change_pending = false; + } + if (adapter->state != VNIC_CLOSED) { rc = ibmvnic_login(netdev); if (rc) { @@ -1426,7 +1432,7 @@ static void ibmvnic_set_multi(struct net_device *netdev) } } -static int ibmvnic_set_mac(struct net_device *netdev, void *p) +static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); struct sockaddr *addr = p; @@ -1444,6 +1450,22 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p) return 0; } +static int ibmvnic_set_mac(struct net_device *netdev, void *p) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (adapter->state != VNIC_OPEN) { + memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr)); + adapter->mac_change_pending = true; + return 0; + } + + __ibmvnic_set_mac(netdev, addr); + + return 0; +} + /** * do_reset returns zero if we are able to keep processing reset events, or * non-zero if we hit a fatal error and must halt. 
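/*
 * Editorial aside: the tunable-parameter flow added below reduces to a
 * request/complete handshake with one fallback retry. Condensed sketch
 * (names follow the patch; locking and error paths elided):
 *
 *      // caller side (ethtool set_ringparam/set_channels, change_mtu):
 *      save current req_* values into adapter->fallback;
 *      init_completion(&adapter->reset_done);
 *      ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
 *      adapter->wait_for_reset = true;
 *      wait_for_completion(&adapter->reset_done);
 *      if (adapter->reset_done_rc) {
 *              copy adapter->fallback back into adapter->desired;
 *              // request a second CHANGE_PARAM reset and wait again
 *      }
 *
 *      // reset worker (__ibmvnic_reset), once the rwi list drains:
 *      if (adapter->wait_for_reset) {
 *              adapter->wait_for_reset = false;
 *              adapter->reset_done_rc = rc;
 *              complete(&adapter->reset_done);
 *      }
 *
 * Because the vnic server offers no way to renegotiate capabilities on a
 * live login, "change a tunable" is implemented as "re-login with the
 * desired values, falling back to the previous ones if that login fails".
 */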
@@ -1470,6 +1492,13 @@ static int do_reset(struct ibmvnic_adapter *adapter, if (rc) return rc; + if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM || + adapter->wait_for_reset) { + release_resources(adapter); + release_sub_crqs(adapter); + release_crq_queue(adapter); + } + if (adapter->reset_reason != VNIC_RESET_NON_FATAL) { /* remove the closed state so when we call open it appears * we are coming from the probed state. @@ -1492,16 +1521,23 @@ static int do_reset(struct ibmvnic_adapter *adapter, return 0; } - rc = reset_tx_pools(adapter); - if (rc) - return rc; + if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM || + adapter->wait_for_reset) { + rc = init_resources(adapter); + if (rc) + return rc; + } else { + rc = reset_tx_pools(adapter); + if (rc) + return rc; - rc = reset_rx_pools(adapter); - if (rc) - return rc; + rc = reset_rx_pools(adapter); + if (rc) + return rc; - if (reset_state == VNIC_CLOSED) - return 0; + if (reset_state == VNIC_CLOSED) + return 0; + } } rc = __ibmvnic_open(netdev); @@ -1561,7 +1597,7 @@ static void __ibmvnic_reset(struct work_struct *work) struct ibmvnic_adapter *adapter; struct net_device *netdev; u32 reset_state; - int rc; + int rc = 0; adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); netdev = adapter->netdev; @@ -1580,6 +1616,12 @@ static void __ibmvnic_reset(struct work_struct *work) rwi = get_next_rwi(adapter); } + if (adapter->wait_for_reset) { + adapter->wait_for_reset = false; + adapter->reset_done_rc = rc; + complete(&adapter->reset_done); + } + if (rc) { netdev_dbg(adapter->netdev, "Reset failed\n"); free_all_rwi(adapter); @@ -1759,9 +1801,42 @@ static void ibmvnic_netpoll_controller(struct net_device *dev) } #endif +static int wait_for_reset(struct ibmvnic_adapter *adapter) +{ + adapter->fallback.mtu = adapter->req_mtu; + adapter->fallback.rx_queues = adapter->req_rx_queues; + adapter->fallback.tx_queues = adapter->req_tx_queues; + adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq; + adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq; + + init_completion(&adapter->reset_done); + ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); + adapter->wait_for_reset = true; + wait_for_completion(&adapter->reset_done); + + if (adapter->reset_done_rc) { + adapter->desired.mtu = adapter->fallback.mtu; + adapter->desired.rx_queues = adapter->fallback.rx_queues; + adapter->desired.tx_queues = adapter->fallback.tx_queues; + adapter->desired.rx_entries = adapter->fallback.rx_entries; + adapter->desired.tx_entries = adapter->fallback.tx_entries; + + init_completion(&adapter->reset_done); + ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM); + wait_for_completion(&adapter->reset_done); + } + adapter->wait_for_reset = false; + + return adapter->reset_done_rc; +} + static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) { - return -EOPNOTSUPP; + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + + adapter->desired.mtu = new_mtu + ETH_HLEN; + + return wait_for_reset(adapter); } static const struct net_device_ops ibmvnic_netdev_ops = { @@ -1849,6 +1924,27 @@ static void ibmvnic_get_ringparam(struct net_device *netdev, ring->rx_jumbo_pending = 0; } +static int ibmvnic_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + + if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq || + ring->tx_pending > adapter->max_tx_entries_per_subcrq) { + netdev_err(netdev, "Invalid request.\n"); + netdev_err(netdev, "Max tx 
buffers = %llu\n", + adapter->max_rx_add_entries_per_subcrq); + netdev_err(netdev, "Max rx buffers = %llu\n", + adapter->max_tx_entries_per_subcrq); + return -EINVAL; + } + + adapter->desired.rx_entries = ring->rx_pending; + adapter->desired.tx_entries = ring->tx_pending; + + return wait_for_reset(adapter); +} + static void ibmvnic_get_channels(struct net_device *netdev, struct ethtool_channels *channels) { @@ -1864,6 +1960,17 @@ static void ibmvnic_get_channels(struct net_device *netdev, channels->combined_count = 0; } +static int ibmvnic_set_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + + adapter->desired.rx_queues = channels->rx_count; + adapter->desired.tx_queues = channels->tx_count; + + return wait_for_reset(adapter); +} + static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) { struct ibmvnic_adapter *adapter = netdev_priv(dev); @@ -1960,7 +2067,9 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = { .set_msglevel = ibmvnic_set_msglevel, .get_link = ibmvnic_get_link, .get_ringparam = ibmvnic_get_ringparam, + .set_ringparam = ibmvnic_set_ringparam, .get_channels = ibmvnic_get_channels, + .set_channels = ibmvnic_set_channels, .get_strings = ibmvnic_get_strings, .get_sset_count = ibmvnic_get_sset_count, .get_ethtool_stats = ibmvnic_get_ethtool_stats, @@ -2426,6 +2535,7 @@ static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry) { struct device *dev = &adapter->vdev->dev; union ibmvnic_crq crq; + int max_entries; if (!retry) { /* Sub-CRQ entries are 32 byte long */ @@ -2437,21 +2547,60 @@ static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry) return; } - /* Get the minimum between the queried max and the entries - * that fit in our PAGE_SIZE - */ - adapter->req_tx_entries_per_subcrq = - adapter->max_tx_entries_per_subcrq > entries_page ? - entries_page : adapter->max_tx_entries_per_subcrq; - adapter->req_rx_add_entries_per_subcrq = - adapter->max_rx_add_entries_per_subcrq > entries_page ? 
- entries_page : adapter->max_rx_add_entries_per_subcrq; - - adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues; - adapter->req_rx_queues = adapter->opt_rx_comp_queues; - adapter->req_rx_add_queues = adapter->max_rx_add_queues; + if (adapter->desired.mtu) + adapter->req_mtu = adapter->desired.mtu; + else + adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN; + + if (!adapter->desired.tx_entries) + adapter->desired.tx_entries = + adapter->max_tx_entries_per_subcrq; + if (!adapter->desired.rx_entries) + adapter->desired.rx_entries = + adapter->max_rx_add_entries_per_subcrq; + + max_entries = IBMVNIC_MAX_LTB_SIZE / + (adapter->req_mtu + IBMVNIC_BUFFER_HLEN); + + if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * + adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) { + adapter->desired.tx_entries = max_entries; + } - adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN; + if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) * + adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) { + adapter->desired.rx_entries = max_entries; + } + + if (adapter->desired.tx_entries) + adapter->req_tx_entries_per_subcrq = + adapter->desired.tx_entries; + else + adapter->req_tx_entries_per_subcrq = + adapter->max_tx_entries_per_subcrq; + + if (adapter->desired.rx_entries) + adapter->req_rx_add_entries_per_subcrq = + adapter->desired.rx_entries; + else + adapter->req_rx_add_entries_per_subcrq = + adapter->max_rx_add_entries_per_subcrq; + + if (adapter->desired.tx_queues) + adapter->req_tx_queues = + adapter->desired.tx_queues; + else + adapter->req_tx_queues = + adapter->opt_tx_comp_sub_queues; + + if (adapter->desired.rx_queues) + adapter->req_rx_queues = + adapter->desired.rx_queues; + else + adapter->req_rx_queues = + adapter->opt_rx_comp_queues; + + adapter->req_rx_add_queues = adapter->max_rx_add_queues; } memset(&crq, 0, sizeof(crq)); @@ -3272,6 +3421,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, struct ibmvnic_adapter *adapter) { struct device *dev = &adapter->vdev->dev; + struct net_device *netdev = adapter->netdev; struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf; struct ibmvnic_login_buffer *login = adapter->login_buf; int i; @@ -3291,6 +3441,8 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, return 0; } + netdev->mtu = adapter->req_mtu - ETH_HLEN; + netdev_dbg(adapter->netdev, "Login Response Buffer:\n"); for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) { netdev_dbg(adapter->netdev, "%016lx\n", @@ -3846,7 +3998,7 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter) unsigned long timeout = msecs_to_jiffies(30000); int rc; - if (adapter->resetting) { + if (adapter->resetting && !adapter->wait_for_reset) { rc = ibmvnic_reset_crq(adapter); if (!rc) rc = vio_enable_interrupts(adapter->vdev); @@ -3880,7 +4032,7 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter) return -1; } - if (adapter->resetting) + if (adapter->resetting && !adapter->wait_for_reset) rc = reset_sub_crq_queues(adapter); else rc = init_sub_crqs(adapter); @@ -3949,6 +4101,8 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) mutex_init(&adapter->rwi_lock); adapter->resetting = false; + adapter->mac_change_pending = false; + do { rc = ibmvnic_init(adapter); if (rc && rc != EAGAIN) @@ -3956,6 +4110,8 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) } while (rc == EAGAIN); netdev->mtu = adapter->req_mtu - ETH_HLEN; + netdev->min_mtu = adapter->min_mtu - ETH_HLEN; + netdev->max_mtu = adapter->max_mtu - ETH_HLEN; 
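/*
 * Editorial aside: a worked example of the long-term-buffer clamp in
 * ibmvnic_send_req_caps() above. The bound is
 *
 *      max_entries = IBMVNIC_MAX_LTB_SIZE / (req_mtu + IBMVNIC_BUFFER_HLEN)
 *
 * with IBMVNIC_MAX_LTB_SIZE = (1 << (MAX_ORDER - 1)) * PAGE_SIZE, as defined
 * in ibmvnic.h below. Assuming 4 KiB pages and MAX_ORDER = 11 (pseries
 * machines typically run 64 KiB pages, which scales the limit up):
 *
 *      IBMVNIC_MAX_LTB_SIZE        = 1024 * 4096      = 4194304 bytes
 *      req_mtu for a 1500-byte MTU = 1500 + ETH_HLEN  = 1514
 *      per-buffer footprint        = 1514 + 500       = 2014
 *      max_entries                 = 4194304 / 2014   = 2082
 *
 * so a request for, say, 4096 rx entries per sub-CRQ would be clamped to
 * 2082 before the capability request is sent to the vnic server.
 */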
rc = device_create_file(&dev->dev, &dev_attr_failover); if (rc) @@ -3970,6 +4126,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) dev_info(&dev->dev, "ibmvnic registered\n"); adapter->state = VNIC_PROBED; + + adapter->wait_for_reset = false; + return 0; ibmvnic_register_fail: diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 7aa347a21e78..27107f33755b 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -42,6 +42,9 @@ #define IBMVNIC_TSO_BUF_SZ 65536 #define IBMVNIC_TSO_BUFS 64 +#define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE) +#define IBMVNIC_BUFFER_HLEN 500 + struct ibmvnic_login_buffer { __be32 len; __be32 version; @@ -945,13 +948,23 @@ enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1, VNIC_RESET_MOBILITY, VNIC_RESET_FATAL, VNIC_RESET_NON_FATAL, - VNIC_RESET_TIMEOUT}; + VNIC_RESET_TIMEOUT, + VNIC_RESET_CHANGE_PARAM}; struct ibmvnic_rwi { enum ibmvnic_reset_reason reset_reason; struct list_head list; }; +struct ibmvnic_tunables { + u64 rx_queues; + u64 tx_queues; + u64 rx_entries; + u64 tx_entries; + u64 mtu; + struct sockaddr mac; +}; + struct ibmvnic_adapter { struct vio_dev *vdev; struct net_device *netdev; @@ -1012,6 +1025,10 @@ struct ibmvnic_adapter { struct completion fw_done; int fw_done_rc; + struct completion reset_done; + int reset_done_rc; + bool wait_for_reset; + /* partner capabilities */ u64 min_tx_queues; u64 min_rx_queues; @@ -1056,4 +1073,9 @@ struct ibmvnic_adapter { struct work_struct ibmvnic_reset; bool resetting; bool napi_enabled, from_passive_init; + + bool mac_change_pending; + + struct ibmvnic_tunables desired; + struct ibmvnic_tunables fallback; }; -- cgit v1.2.3 From 2a1bf51111975846f412f47449edefdf6fa17ee4 Mon Sep 17 00:00:00 2001 From: John Allen Date: Thu, 26 Oct 2017 16:24:15 -0500 Subject: ibmvnic: Fix failover error path for non-fatal resets For all non-fatal reset conditions, the hypervisor will send a failover when we attempt to initialize the crq and the vnic client is expected to handle that failover instead of the existing non-fatal reset. To handle this, we need to return from init with a return code that indicates that we have hit this case. Signed-off-by: John Allen Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 4 ++-- drivers/net/ethernet/ibm/ibmvnic.h | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 3d0280196fdc..d0cff2807d0b 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1507,7 +1507,7 @@ static int do_reset(struct ibmvnic_adapter *adapter, rc = ibmvnic_init(adapter); if (rc) - return 0; + return IBMVNIC_INIT_FAILED; /* If the adapter was in PROBE state prior to the reset, * exit here. 
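/*
 * Editorial aside: IBMVNIC_INIT_FAILED (defined as 2 in ibmvnic.h below)
 * is a sentinel distinct from both 0 and negative errnos. The reset worker
 * consumes it as shown in the next hunk; condensed:
 *
 *      while (rwi) {
 *              rc = do_reset(adapter, rwi, reset_state);
 *              kfree(rwi);
 *              if (rc && rc != IBMVNIC_INIT_FAILED)
 *                      break;                  // truly fatal: stop
 *              rwi = get_next_rwi(adapter);    // expected failover: keep going
 *      }
 *
 * Returning the sentinel instead of 0 from the non-fatal path lets the
 * failover the hypervisor sends during CRQ init be processed as its own
 * reset event, rather than being silently swallowed.
 */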
@@ -1610,7 +1610,7 @@ static void __ibmvnic_reset(struct work_struct *work) while (rwi) { rc = do_reset(adapter, rwi, reset_state); kfree(rwi); - if (rc) + if (rc && rc != IBMVNIC_INIT_FAILED) break; rwi = get_next_rwi(adapter); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 27107f33755b..4670af80d612 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -30,6 +30,8 @@ #define IBMVNIC_DRIVER_VERSION "1.0.1" #define IBMVNIC_INVALID_MAP -1 #define IBMVNIC_STATS_TIMEOUT 1 +#define IBMVNIC_INIT_FAILED 2 + /* basic structures plus 100 2k buffers */ #define IBMVNIC_IO_ENTITLEMENT_DEFAULT 610305 -- cgit v1.2.3 From c859e21a35ce5604dde0b618169680aa3c7e3bdb Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Thu, 26 Oct 2017 16:18:20 -0700 Subject: liquidio: xmit_more support Defer ringing the Tx doorbell if skb->xmit_more is set unless the Tx queue is full or stopped. To keep latency low, use a deferral limit of 8 packets. We chose 8 because Octeon can fetch at most 8 packets in a single PCI read, and our tests show that 8 results in low latency. Signed-off-by: Intiyaz Basha Signed-off-by: Satanand Burla Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_core.c | 6 ++++-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 18 ++++++++++++------ drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 17 ++++++++++++----- drivers/net/ethernet/cavium/liquidio/octeon_config.h | 2 ++ drivers/net/ethernet/cavium/liquidio/octeon_iq.h | 3 +++ drivers/net/ethernet/cavium/liquidio/octeon_main.h | 2 +- drivers/net/ethernet/cavium/liquidio/octeon_nic.c | 5 +++-- drivers/net/ethernet/cavium/liquidio/octeon_nic.h | 3 ++- drivers/net/ethernet/cavium/liquidio/request_manager.c | 18 ++++++++++++++++-- 9 files changed, 55 insertions(+), 19 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 23f6b60030c5..b891d858e416 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -91,7 +91,7 @@ void octeon_update_tx_completion_counters(void *buf, int reqtype, *bytes_compl += skb->len; } -void octeon_report_sent_bytes_to_bql(void *buf, int reqtype) +int octeon_report_sent_bytes_to_bql(void *buf, int reqtype) { struct octnet_buf_free_info *finfo; struct sk_buff *skb; @@ -112,11 +112,13 @@ void octeon_report_sent_bytes_to_bql(void *buf, int reqtype) break; default: - return; + return 0; } txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb)); netdev_tx_sent_queue(txq, skb->len); + + return netif_xmit_stopped(txq); } void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index accd038f3f34..8ea24d68e824 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2594,7 +2594,8 @@ static void handle_timestamp(struct octeon_device *oct, */ static inline int send_nic_timestamp_pkt(struct octeon_device *oct, struct octnic_data_pkt *ndata, - struct octnet_buf_free_info *finfo) + struct octnet_buf_free_info *finfo, + int xmit_more) { int retval; struct octeon_soft_command *sc; @@ -2629,7 +2630,7 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct, len = (u32)((struct octeon_instr_ih2 *) (&sc->cmd.cmd2.ih2))->dlengsz; - ring_doorbell = 
1; + ring_doorbell = !xmit_more; retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, sc, len, ndata->reqtype); @@ -2663,7 +2664,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) union tx_info *tx_info; int status = 0; int q_idx = 0, iq_no = 0; - int j; + int j, xmit_more = 0; u64 dptr = 0; u32 tag = 0; @@ -2868,17 +2869,19 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) irh->vlan = skb_vlan_tag_get(skb) & 0xfff; } + xmit_more = skb->xmit_more; + if (unlikely(cmdsetup.s.timestamp)) - status = send_nic_timestamp_pkt(oct, &ndata, finfo); + status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); else - status = octnet_send_nic_data_pkt(oct, &ndata); + status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); if (status == IQ_SEND_FAILED) goto lio_xmit_failed; netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); if (status == IQ_SEND_STOP) - stop_q(lio->netdev, q_idx); + stop_q(netdev, q_idx); netif_trans_update(netdev); @@ -2897,6 +2900,9 @@ lio_xmit_failed: if (dptr) dma_unmap_single(&oct->pci_dev->dev, dptr, ndata.datasize, DMA_TO_DEVICE); + + octeon_ring_doorbell_locked(oct, iq_no); + tx_buffer_free(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 4c3b5688529b..00c19306ecee 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1690,7 +1690,8 @@ static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf) */ static int send_nic_timestamp_pkt(struct octeon_device *oct, struct octnic_data_pkt *ndata, - struct octnet_buf_free_info *finfo) + struct octnet_buf_free_info *finfo, + int xmit_more) { struct octeon_soft_command *sc; int ring_doorbell; @@ -1720,7 +1721,7 @@ static int send_nic_timestamp_pkt(struct octeon_device *oct, len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz; - ring_doorbell = 1; + ring_doorbell = !xmit_more; retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, sc, len, ndata->reqtype); @@ -1752,6 +1753,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) struct octeon_device *oct; int q_idx = 0, iq_no = 0; union tx_info *tx_info; + int xmit_more = 0; struct lio *lio; int status = 0; u64 dptr = 0; @@ -1940,10 +1942,12 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK; } + xmit_more = skb->xmit_more; + if (unlikely(cmdsetup.s.timestamp)) - status = send_nic_timestamp_pkt(oct, &ndata, finfo); + status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); else - status = octnet_send_nic_data_pkt(oct, &ndata); + status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); if (status == IQ_SEND_FAILED) goto lio_xmit_failed; @@ -1952,7 +1956,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) if (status == IQ_SEND_STOP) { dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n", iq_no); - stop_q(lio->netdev, q_idx); + stop_q(netdev, q_idx); } netif_trans_update(netdev); @@ -1972,6 +1976,9 @@ lio_xmit_failed: if (dptr) dma_unmap_single(&oct->pci_dev->dev, dptr, ndata.datasize, DMA_TO_DEVICE); + + octeon_ring_doorbell_locked(oct, iq_no); + tx_buffer_free(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h index 
63bd9c94e547..ceac74388e09 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h @@ -37,6 +37,8 @@ #define MAX_OCTEON_LINKS MAX_OCTEON_NICIF #define MAX_OCTEON_MULTICAST_ADDR 32 +#define MAX_OCTEON_FILL_COUNT 8 + /* CN6xxx IQ configuration macros */ #define CN6XXX_MAX_INPUT_QUEUES 32 #define CN6XXX_MAX_IQ_DESCRIPTORS 2048 diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h index 5c3c8da976f7..81c987682941 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h @@ -343,6 +343,9 @@ int octeon_delete_instr_queue(struct octeon_device *octeon_dev, u32 iq_no); int lio_wait_for_instr_fetch(struct octeon_device *oct); +void +octeon_ring_doorbell_locked(struct octeon_device *oct, u32 iq_no); + int octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype, void (*fn)(void *)); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h index 32ef3a7d88d8..c846eec11a45 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h @@ -63,7 +63,7 @@ struct octnet_buf_free_info { }; /* BQL-related functions */ -void octeon_report_sent_bytes_to_bql(void *buf, int reqtype); +int octeon_report_sent_bytes_to_bql(void *buf, int reqtype); void octeon_update_tx_completion_counters(void *buf, int reqtype, unsigned int *pkts_compl, unsigned int *bytes_compl); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c index b457cf23fce6..150609bd8849 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c @@ -82,9 +82,10 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct, } int octnet_send_nic_data_pkt(struct octeon_device *oct, - struct octnic_data_pkt *ndata) + struct octnic_data_pkt *ndata, + int xmit_more) { - int ring_doorbell = 1; + int ring_doorbell = !xmit_more; return octeon_send_command(oct, ndata->q_no, ring_doorbell, &ndata->cmd, ndata->buf, ndata->datasize, diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h index 6480ef863441..de4130d26a98 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h @@ -279,7 +279,8 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct, * queue should be stopped, and IQ_SEND_OK if it sent okay. 
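/*
 * Editorial aside: the doorbell policy this series introduces can be read
 * as one predicate (names follow the patch). The driver defers the MMIO
 * doorbell write while the stack promises more packets, but never defers
 * past 8 queued commands (the most Octeon fetches in a single PCI read),
 * a stopped netdev queue, or a nearly full instruction queue:
 *
 *      bool ring_now = iq->fill_cnt >= MAX_OCTEON_FILL_COUNT || // 8 pending
 *                      force_db ||     // caller insists, i.e. !skb->xmit_more
 *                      xmit_stopped || // BQL/netif stopped the tx queue
 *                      st.status == IQ_SEND_STOP; // IQ close to full
 *      if (ring_now)
 *              ring_doorbell(oct, iq);
 *
 * The error path must also flush any deferred doorbell, which is why the
 * lio_xmit_failed paths now call octeon_ring_doorbell_locked().
 */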
*/ int octnet_send_nic_data_pkt(struct octeon_device *oct, - struct octnic_data_pkt *ndata); + struct octnic_data_pkt *ndata, + int xmit_more); /** Send a NIC control packet to the device * @param oct - octeon device pointer diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index 1e0fbce86d60..a10459742ae4 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -278,6 +278,18 @@ ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq) } } +void +octeon_ring_doorbell_locked(struct octeon_device *oct, u32 iq_no) +{ + struct octeon_instr_queue *iq; + + iq = oct->instr_queue[iq_no]; + spin_lock(&iq->post_lock); + if (iq->fill_cnt) + ring_doorbell(oct, iq); + spin_unlock(&iq->post_lock); +} + static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq, u8 *cmd) { @@ -543,6 +555,7 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no, u32 force_db, void *cmd, void *buf, u32 datasize, u32 reqtype) { + int xmit_stopped; struct iq_post_status st; struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; @@ -554,12 +567,13 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no, st = __post_command2(iq, cmd); if (st.status != IQ_SEND_FAILED) { - octeon_report_sent_bytes_to_bql(buf, reqtype); + xmit_stopped = octeon_report_sent_bytes_to_bql(buf, reqtype); __add_to_request_list(iq, st.index, buf, reqtype); INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize); INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1); - if (force_db) + if (iq->fill_cnt >= MAX_OCTEON_FILL_COUNT || force_db || + xmit_stopped || st.status == IQ_SEND_STOP) ring_doorbell(oct, iq); } else { INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1); -- cgit v1.2.3 From 05f9d3e1ae6eaf7507e3bd95b0eef2acd4b84ea8 Mon Sep 17 00:00:00 2001 From: Andre Guedes Date: Mon, 16 Oct 2017 18:01:28 -0700 Subject: igb: Add support for CBS offload This patch adds support for Credit-Based Shaper (CBS) qdisc offload from Traffic Control system. This support enable us to leverage the Forwarding and Queuing for Time-Sensitive Streams (FQTSS) features from Intel i210 Ethernet Controller. FQTSS is the former 802.1Qav standard which was merged into 802.1Q in 2014. It enables traffic prioritization and bandwidth reservation via the Credit-Based Shaper which is implemented in hardware by i210 controller. The patch introduces the igb_setup_tc() function which implements the support for CBS qdisc hardware offload in the IGB driver. CBS offload is the only traffic control offload supported by the driver at the moment. FQTSS transmission mode from i210 controller is automatically enabled by the IGB driver when the CBS is enabled for the first hardware queue. Likewise, FQTSS mode is automatically disabled when CBS is disabled for the last hardware queue. Changing FQTSS mode requires NIC reset. FQTSS feature is supported by i210 controller only. 
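One concrete number helps ground the idleSlope encoding this patch derives (equations E1 through E6 appear as a comment in the diff below): the TQAVCC field is DIV_ROUND_UP(idleslope_kbps * 61034, 1000000), where 61034 is 2 * 0x7735 and link speed has cancelled out of the equation. A standalone check in plain C, using a hypothetical 20 Mbps Class A reservation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t idleslope_kbps = 20000;        /* reserve 20 Mbps for the queue */
        /* DIV_ROUND_UP(idleslope * 61034, 1000000) from equation E6 */
        uint64_t value = (idleslope_kbps * 61034ULL + 1000000 - 1) / 1000000;

        printf("TQAVCC idleSlope field = %llu\n",
               (unsigned long long)value);      /* prints 1221 */
        return 0;
}

Since link speed cancels out of E5, the same 1221 is programmed whether the link trains at 100 Mbps or 1 Gbps, matching the patch comment's observation; the controller interprets the field relative to the current link rate.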
Signed-off-by: Andre Guedes Tested-by: Aaron Brown Tested-by: Henrik Austad Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_defines.h | 23 ++ drivers/net/ethernet/intel/igb/e1000_regs.h | 8 + drivers/net/ethernet/intel/igb/igb.h | 6 + drivers/net/ethernet/intel/igb/igb_main.c | 347 +++++++++++++++++++++++++ 4 files changed, 384 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 1de82f247312..83cabff1e0ab 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -353,7 +353,18 @@ #define E1000_RXPBS_CFG_TS_EN 0x80000000 #define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I210_RXPBSIZE_MASK 0x0000003F +#define I210_RXPBSIZE_PB_32KB 0x00000020 #define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ +#define I210_TXPBSIZE_MASK 0xC0FFFFFF +#define I210_TXPBSIZE_PB0_8KB (8 << 0) +#define I210_TXPBSIZE_PB1_8KB (8 << 6) +#define I210_TXPBSIZE_PB2_4KB (4 << 12) +#define I210_TXPBSIZE_PB3_4KB (4 << 18) + +#define I210_DTXMXPKTSZ_DEFAULT 0x00000098 + +#define I210_SR_QUEUES_NUM 2 /* SerDes Control */ #define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 @@ -1051,4 +1062,16 @@ #define E1000_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4)) #define E1000_VLAPQF_QUEUE_MASK 0x03 +/* TX Qav Control fields */ +#define E1000_TQAVCTRL_XMIT_MODE BIT(0) +#define E1000_TQAVCTRL_DATAFETCHARB BIT(4) +#define E1000_TQAVCTRL_DATATRANARB BIT(8) + +/* TX Qav Credit Control fields */ +#define E1000_TQAVCC_IDLESLOPE_MASK 0xFFFF +#define E1000_TQAVCC_QUEUEMODE BIT(31) + +/* Transmit Descriptor Control fields */ +#define E1000_TXDCTL_PRIORITY BIT(27) + #endif diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 58adbf234e07..8eee081d395f 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -421,6 +421,14 @@ do { \ #define E1000_I210_FLA 0x1201C +#define E1000_I210_DTXMXPKTSZ 0x355C + +#define E1000_I210_TXDCTL(_n) (0x0E028 + ((_n) * 0x40)) + +#define E1000_I210_TQAVCTRL 0x3570 +#define E1000_I210_TQAVCC(_n) (0x3004 + ((_n) * 0x40)) +#define E1000_I210_TQAVHC(_n) (0x300C + ((_n) * 0x40)) + #define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) #define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 06ffb2bc713e..92845692087a 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -281,6 +281,11 @@ struct igb_ring { u16 count; /* number of desc. 
in the ring */ u8 queue_index; /* logical index of the ring*/ u8 reg_idx; /* physical index of the ring */ + bool cbs_enable; /* indicates if CBS is enabled */ + s32 idleslope; /* idleSlope in kbps */ + s32 sendslope; /* sendSlope in kbps */ + s32 hicredit; /* hiCredit in bytes */ + s32 locredit; /* loCredit in bytes */ /* everything past this point are written often */ u16 next_to_clean; @@ -621,6 +626,7 @@ struct igb_adapter { #define IGB_FLAG_EEE BIT(14) #define IGB_FLAG_VLAN_PROMISC BIT(15) #define IGB_FLAG_RX_LEGACY BIT(16) +#define IGB_FLAG_FQTSS BIT(17) /* Media Auto Sense */ #define IGB_MAS_ENABLE_0 0X0001 diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 58d01a211367..b3d730f4d695 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -62,6 +63,17 @@ #define BUILD 0 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ __stringify(BUILD) "-k" + +enum queue_mode { + QUEUE_MODE_STRICT_PRIORITY, + QUEUE_MODE_STREAM_RESERVATION, +}; + +enum tx_queue_prio { + TX_QUEUE_PRIO_HIGH, + TX_QUEUE_PRIO_LOW, +}; + char igb_driver_name[] = "igb"; char igb_driver_version[] = DRV_VERSION; static const char igb_driver_string[] = @@ -1271,6 +1283,12 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, ring->count = adapter->tx_ring_count; ring->queue_index = txr_idx; + ring->cbs_enable = false; + ring->idleslope = 0; + ring->sendslope = 0; + ring->hicredit = 0; + ring->locredit = 0; + u64_stats_init(&ring->tx_syncp); u64_stats_init(&ring->tx_syncp2); @@ -1598,6 +1616,284 @@ static void igb_get_hw_control(struct igb_adapter *adapter) ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); } +static void enable_fqtss(struct igb_adapter *adapter, bool enable) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + + WARN_ON(hw->mac.type != e1000_i210); + + if (enable) + adapter->flags |= IGB_FLAG_FQTSS; + else + adapter->flags &= ~IGB_FLAG_FQTSS; + + if (netif_running(netdev)) + schedule_work(&adapter->reset_task); +} + +static bool is_fqtss_enabled(struct igb_adapter *adapter) +{ + return (adapter->flags & IGB_FLAG_FQTSS) ? true : false; +} + +static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue, + enum tx_queue_prio prio) +{ + u32 val; + + WARN_ON(hw->mac.type != e1000_i210); + WARN_ON(queue < 0 || queue > 4); + + val = rd32(E1000_I210_TXDCTL(queue)); + + if (prio == TX_QUEUE_PRIO_HIGH) + val |= E1000_TXDCTL_PRIORITY; + else + val &= ~E1000_TXDCTL_PRIORITY; + + wr32(E1000_I210_TXDCTL(queue), val); +} + +static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode) +{ + u32 val; + + WARN_ON(hw->mac.type != e1000_i210); + WARN_ON(queue < 0 || queue > 1); + + val = rd32(E1000_I210_TQAVCC(queue)); + + if (mode == QUEUE_MODE_STREAM_RESERVATION) + val |= E1000_TQAVCC_QUEUEMODE; + else + val &= ~E1000_TQAVCC_QUEUEMODE; + + wr32(E1000_I210_TQAVCC(queue), val); +} + +/** + * igb_configure_cbs - Configure Credit-Based Shaper (CBS) + * @adapter: pointer to adapter struct + * @queue: queue number + * @enable: true = enable CBS, false = disable CBS + * @idleslope: idleSlope in kbps + * @sendslope: sendSlope in kbps + * @hicredit: hiCredit in bytes + * @locredit: loCredit in bytes + * + * Configure CBS for a given hardware queue. When disabling, idleslope, + * sendslope, hicredit, locredit arguments are ignored. Returns 0 if + * success. Negative otherwise. 
+ **/ +static void igb_configure_cbs(struct igb_adapter *adapter, int queue, + bool enable, int idleslope, int sendslope, + int hicredit, int locredit) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 tqavcc; + u16 value; + + WARN_ON(hw->mac.type != e1000_i210); + WARN_ON(queue < 0 || queue > 1); + + if (enable) { + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); + set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); + + /* According to i210 datasheet section 7.2.7.7, we should set + * the 'idleSlope' field of the TQAVCC register following the + * equation: + * + * For 100 Mbps link speed: + * + * value = BW * 0x7735 * 0.2 (E1) + * + * For 1000 Mbps link speed: + * + * value = BW * 0x7735 * 2 (E2) + * + * E1 and E2 can be merged into one equation as shown below. + * Note that 'link-speed' is in Mbps. + * + * value = BW * 0x7735 * 2 * link-speed + * -------------- (E3) + * 1000 + * + * 'BW' is the percentage bandwidth out of full link speed + * which can be found with the following equation. Note that + * idleSlope here is the parameter from this function which + * is in kbps. + * + * BW = idleSlope + * ----------------- (E4) + * link-speed * 1000 + * + * That said, we can come up with a generic equation to + * calculate the value we should set in the TQAVCC register by + * replacing 'BW' in E3 with E4. The resulting equation is: + * + * value = idleSlope * 0x7735 * 2 * link-speed + * ----------------- -------------- (E5) + * link-speed * 1000 1000 + * + * 'link-speed' is present on both sides of the fraction so + * it is canceled out. The final equation is the following: + * + * value = idleSlope * 61034 + * ----------------- (E6) + * 1000000 + */ + value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000); + + tqavcc = rd32(E1000_I210_TQAVCC(queue)); + tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK; + tqavcc |= value; + wr32(E1000_I210_TQAVCC(queue), tqavcc); + + wr32(E1000_I210_TQAVHC(queue), 0x80000000 + hicredit * 0x7735); + } else { + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW); + set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY); + + /* Set idleSlope to zero. */ + tqavcc = rd32(E1000_I210_TQAVCC(queue)); + tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK; + wr32(E1000_I210_TQAVCC(queue), tqavcc); + + /* Set hiCredit to zero. */ + wr32(E1000_I210_TQAVHC(queue), 0); + } + + /* XXX: In i210 controller the sendSlope and loCredit parameters from + * CBS are not configurable by software so we don't do any 'controller + * configuration' with respect to these parameters. + */ + + netdev_dbg(netdev, "CBS %s: queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n", + (enable) ?
"enabled" : "disabled", queue, + idleslope, sendslope, hicredit, locredit); +} + +static int igb_save_cbs_params(struct igb_adapter *adapter, int queue, + bool enable, int idleslope, int sendslope, + int hicredit, int locredit) +{ + struct igb_ring *ring; + + if (queue < 0 || queue > adapter->num_tx_queues) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + + ring->cbs_enable = enable; + ring->idleslope = idleslope; + ring->sendslope = sendslope; + ring->hicredit = hicredit; + ring->locredit = locredit; + + return 0; +} + +static bool is_any_cbs_enabled(struct igb_adapter *adapter) +{ + struct igb_ring *ring; + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + ring = adapter->tx_ring[i]; + + if (ring->cbs_enable) + return true; + } + + return false; +} + +static void igb_setup_tx_mode(struct igb_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 val; + + /* Only i210 controller supports changing the transmission mode. */ + if (hw->mac.type != e1000_i210) + return; + + if (is_fqtss_enabled(adapter)) { + int i, max_queue; + + /* Configure TQAVCTRL register: set transmit mode to 'Qav', + * set data fetch arbitration to 'round robin' and set data + * transfer arbitration to 'credit shaper algorithm. + */ + val = rd32(E1000_I210_TQAVCTRL); + val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_DATATRANARB; + val &= ~E1000_TQAVCTRL_DATAFETCHARB; + wr32(E1000_I210_TQAVCTRL, val); + + /* Configure Tx and Rx packet buffers sizes as described in + * i210 datasheet section 7.2.7.7. + */ + val = rd32(E1000_TXPBS); + val &= ~I210_TXPBSIZE_MASK; + val |= I210_TXPBSIZE_PB0_8KB | I210_TXPBSIZE_PB1_8KB | + I210_TXPBSIZE_PB2_4KB | I210_TXPBSIZE_PB3_4KB; + wr32(E1000_TXPBS, val); + + val = rd32(E1000_RXPBS); + val &= ~I210_RXPBSIZE_MASK; + val |= I210_RXPBSIZE_PB_32KB; + wr32(E1000_RXPBS, val); + + /* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ + * register should not exceed the buffer size programmed in + * TXPBS. The smallest buffer size programmed in TXPBS is 4kB + * so according to the datasheet we should set MAX_TPKT_SIZE to + * 4kB / 64. + * + * However, when we do so, no frame from queue 2 and 3 are + * transmitted. It seems the MAX_TPKT_SIZE should not be great + * or _equal_ to the buffer size programmed in TXPBS. For this + * reason, we set set MAX_ TPKT_SIZE to (4kB - 1) / 64. + */ + val = (4096 - 1) / 64; + wr32(E1000_I210_DTXMXPKTSZ, val); + + /* Since FQTSS mode is enabled, apply any CBS configuration + * previously set. If no previous CBS configuration has been + * done, then the initial configuration is applied, which means + * CBS is disabled. + */ + max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ? + adapter->num_tx_queues : I210_SR_QUEUES_NUM; + + for (i = 0; i < max_queue; i++) { + struct igb_ring *ring = adapter->tx_ring[i]; + + igb_configure_cbs(adapter, i, ring->cbs_enable, + ring->idleslope, ring->sendslope, + ring->hicredit, ring->locredit); + } + } else { + wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT); + wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT); + wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT); + + val = rd32(E1000_I210_TQAVCTRL); + /* According to Section 8.12.21, the other flags we've set when + * enabling FQTSS are not relevant when disabling FQTSS so we + * don't set they here. + */ + val &= ~E1000_TQAVCTRL_XMIT_MODE; + wr32(E1000_I210_TQAVCTRL, val); + } + + netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ? 
+ "enabled" : "disabled"); +} + /** * igb_configure - configure the hardware for RX and TX * @adapter: private board structure @@ -1609,6 +1905,7 @@ static void igb_configure(struct igb_adapter *adapter) igb_get_hw_control(adapter); igb_set_rx_mode(netdev); + igb_setup_tx_mode(adapter); igb_restore_vlan(adapter); @@ -2150,6 +2447,55 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev, return features; } +static int igb_offload_cbs(struct igb_adapter *adapter, + struct tc_cbs_qopt_offload *qopt) +{ + struct e1000_hw *hw = &adapter->hw; + int err; + + /* CBS offloading is only supported by i210 controller. */ + if (hw->mac.type != e1000_i210) + return -EOPNOTSUPP; + + /* CBS offloading is only supported by queue 0 and queue 1. */ + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable, + qopt->idleslope, qopt->sendslope, + qopt->hicredit, qopt->locredit); + if (err) + return err; + + if (is_fqtss_enabled(adapter)) { + igb_configure_cbs(adapter, qopt->queue, qopt->enable, + qopt->idleslope, qopt->sendslope, + qopt->hicredit, qopt->locredit); + + if (!is_any_cbs_enabled(adapter)) + enable_fqtss(adapter, false); + + } else { + enable_fqtss(adapter, true); + } + + return 0; +} + +static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct igb_adapter *adapter = netdev_priv(dev); + + switch (type) { + case TC_SETUP_CBS: + return igb_offload_cbs(adapter, type_data); + + default: + return -EOPNOTSUPP; + } +} + static const struct net_device_ops igb_netdev_ops = { .ndo_open = igb_open, .ndo_stop = igb_close, @@ -2175,6 +2521,7 @@ static const struct net_device_ops igb_netdev_ops = { .ndo_set_features = igb_set_features, .ndo_fdb_add = igb_ndo_fdb_add, .ndo_features_check = igb_features_check, + .ndo_setup_tc = igb_setup_tc, }; /** -- cgit v1.2.3 From 952c5719aac6587f1e0add97dca79f9e73887f9b Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Sat, 28 Oct 2017 01:56:10 -0400 Subject: bnxt_en: Fix randconfig build errors. Fix undefined symbols when CONFIG_VLAN_8021Q or CONFIG_INET is not set. Fixes: 8c95f773b4a3 ("bnxt_en: add support for Flower based vxlan encap/decap offload") Reported-by: Jakub Kicinski Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 798d13964274..d5031f436f83 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -904,6 +904,7 @@ static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp, struct bnxt_tc_l2_key *l2_info, struct net_device *real_dst_dev) { +#ifdef CONFIG_INET struct flowi4 flow = { {0} }; struct net_device *dst_dev; struct neighbour *nbr; @@ -925,6 +926,7 @@ static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp, */ dst_dev = rt->dst.dev; if (is_vlan_dev(dst_dev)) { +#if IS_ENABLED(CONFIG_VLAN_8021Q) struct vlan_dev_priv *vlan = vlan_dev_priv(dst_dev); if (vlan->real_dev != real_dst_dev) { @@ -938,6 +940,7 @@ static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp, l2_info->inner_vlan_tci = htons(vlan->vlan_id); l2_info->inner_vlan_tpid = vlan->vlan_proto; l2_info->num_vlans = 1; +#endif } else if (dst_dev != real_dst_dev) { netdev_info(bp->dev, "dst_dev(%s) for %pI4b is not PF-if(%s)", @@ -966,6 +969,9 @@ static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp, put_rt: ip_rt_put(rt); return rc; +#else + return -EOPNOTSUPP; +#endif } static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow, -- cgit v1.2.3 From aa28667cfbe4ff6f14454dda210b1f2e485f99b5 Mon Sep 17 00:00:00 2001 From: Felix Manlunas Date: Thu, 26 Oct 2017 16:46:36 -0700 Subject: liquidio: fix kernel panic in VF driver Doing ifconfig down on VF driver in the middle of receiving line rate traffic causes a kernel panic: LiquidIO_VF 0000:02:00.3: should not come here should not get rx when poll mode = 0 for vf BUG: unable to handle kernel NULL pointer dereference at (null) . . . Call Trace: ? tasklet_action+0x102/0x120 __do_softirq+0x91/0x292 irq_exit+0xb6/0xc0 do_IRQ+0x4f/0xd0 common_interrupt+0x93/0x93 RIP: 0010:cpuidle_enter_state+0x142/0x2f0 RSP: 0018:ffffffffa6403e20 EFLAGS: 00000246 ORIG_RAX: ffffffffffffff59 RAX: 0000000000000000 RBX: 0000000000000003 RCX: 000000000000001f RDX: 0000000000000000 RSI: 000000002ab7519f RDI: 0000000000000000 RBP: ffffffffa6403e58 R08: 0000000000000084 R09: 0000000000000018 R10: ffffffffa6403df0 R11: 00000000000003c7 R12: 0000000000000003 R13: ffffd27ebd806800 R14: ffffffffa64d40d8 R15: 0000007be072823f cpuidle_enter+0x17/0x20 call_cpuidle+0x23/0x40 do_idle+0x18c/0x1f0 cpu_startup_entry+0x64/0x70 rest_init+0xa5/0xb0 start_kernel+0x45e/0x46b x86_64_start_reservations+0x24/0x26 x86_64_start_kernel+0x6f/0x72 secondary_startup_64+0xa5/0xa5 Code: Bad RIP value. RIP: (null) RSP: ffff9246ed003f28 CR2: 0000000000000000 ---[ end trace 92731e80f31b7d7d ]--- Kernel panic - not syncing: Fatal exception in interrupt Kernel Offset: 0x24000000 from 0xffffffff81000000 (relocation range: 0xffffffff80000000-0xffffffffbfffffff) ---[ end Kernel panic - not syncing: Fatal exception in interrupt Reason is: in the function assigned to net_device_ops->ndo_stop, the steps for bringing down the interface are done in the wrong order. The step that notifies the NIC firmware to stop forwarding packets to host is done too late. Fix it by moving that step to the beginning. Signed-off-by: Felix Manlunas Signed-off-by: Raghu Vatsavayi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 00c19306ecee..fd70a4844e2d 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1288,6 +1288,9 @@ static int liquidio_stop(struct net_device *netdev) struct octeon_device *oct = lio->oct_dev; struct napi_struct *napi, *n; + /* tell Octeon to stop forwarding packets to host */ + send_rx_ctrl_cmd(lio, 0); + if (oct->props[lio->ifidx].napi_enabled) { list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) napi_disable(napi); @@ -1305,9 +1308,6 @@ static int liquidio_stop(struct net_device *netdev) netif_carrier_off(netdev); lio->link_changes++; - /* tell Octeon to stop forwarding packets to host */ - send_rx_ctrl_cmd(lio, 0); - ifstate_reset(lio, LIO_IFSTATE_RUNNING); txqs_stop(netdev); -- cgit v1.2.3 From a267eaebfcaeb27ad3b83303b6c9f8f739d757aa Mon Sep 17 00:00:00 2001 From: Pablo Cascón Date: Thu, 26 Oct 2017 17:35:38 -0700 Subject: nfp: inform the VF driver needs to be restarted after changing the MAC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a message to inform the user that the VF MAC was changed and that the VF driver needs to be restarted for the change to take effect. Signed-off-by: Pablo Cascón Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c index e6d2e06b050c..8b1b962cf1d1 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c @@ -112,7 +112,13 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) writew(get_unaligned_be16(mac + 4), app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_MAC_LO); - return nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_MAC, "MAC"); + err = nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_MAC, "MAC"); + if (!err) + nfp_info(app->pf->cpp, + "MAC %pM set on VF %d, reload the VF driver to make this change effective.\n", + mac, vf); + + return err; } int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, -- cgit v1.2.3 From a830405ee452ddc4101c3c9334e6fedd42c6b357 Mon Sep 17 00:00:00 2001 From: Bhadram Varka Date: Fri, 27 Oct 2017 08:22:02 +0530 Subject: stmmac: copy unicast mac address to MAC registers Currently the stmmac driver does not copy the valid ethernet MAC address to the MAC registers. This patch takes care of updating the MAC registers with the MAC address. Signed-off-by: Bhadram Varka Signed-off-by: David S.
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index c7a894ead274..ff4fb5eae1af 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3749,6 +3749,20 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return ret; } +static int stmmac_set_mac_address(struct net_device *ndev, void *addr) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + int ret = 0; + + ret = eth_mac_addr(ndev, addr); + if (ret) + return ret; + + priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0); + + return ret; +} + #ifdef CONFIG_DEBUG_FS static struct dentry *stmmac_fs_dir; @@ -3976,7 +3990,7 @@ static const struct net_device_ops stmmac_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = stmmac_poll_controller, #endif - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = stmmac_set_mac_address, }; /** -- cgit v1.2.3 From c63144e4dda7967f2419fa3c2cc5db1228a7fccf Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 26 Oct 2017 22:54:25 -0700 Subject: drivers/net: 8390: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/8390/axnet_cs.c | 10 +++++----- drivers/net/ethernet/8390/pcnet_cs.c | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c index 3da1fc539ef9..7bddb8efb6d5 100644 --- a/drivers/net/ethernet/8390/axnet_cs.c +++ b/drivers/net/ethernet/8390/axnet_cs.c @@ -85,7 +85,7 @@ static struct net_device_stats *get_stats(struct net_device *dev); static void set_multicast_list(struct net_device *dev); static void axnet_tx_timeout(struct net_device *dev); static irqreturn_t ei_irq_wrapper(int irq, void *dev_id); -static void ei_watchdog(u_long arg); +static void ei_watchdog(struct timer_list *t); static void axnet_reset_8390(struct net_device *dev); static int mdio_read(unsigned int addr, int phy_id, int loc); @@ -483,7 +483,7 @@ static int axnet_open(struct net_device *dev) link->open++; info->link_status = 0x00; - setup_timer(&info->watchdog, ei_watchdog, (u_long)dev); + timer_setup(&info->watchdog, ei_watchdog, 0); mod_timer(&info->watchdog, jiffies + HZ); return ax_open(dev); @@ -547,10 +547,10 @@ static irqreturn_t ei_irq_wrapper(int irq, void *dev_id) return ax_interrupt(irq, dev_id); } -static void ei_watchdog(u_long arg) +static void ei_watchdog(struct timer_list *t) { - struct net_device *dev = (struct net_device *)(arg); - struct axnet_dev *info = PRIV(dev); + struct axnet_dev *info = from_timer(info, t, watchdog); + struct net_device *dev = info->p_dev->priv; unsigned int nic_base = dev->base_addr; unsigned int mii_addr = nic_base + AXNET_MII_EEP; u_short link; diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c index bd0a2a14b649..eae9827035dc 100644 --- a/drivers/net/ethernet/8390/pcnet_cs.c +++ b/drivers/net/ethernet/8390/pcnet_cs.c @@ -99,7 +99,7 @@ static int pcnet_open(struct net_device *dev); 
static int pcnet_close(struct net_device *dev); static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static irqreturn_t ei_irq_wrapper(int irq, void *dev_id); -static void ei_watchdog(u_long arg); +static void ei_watchdog(struct timer_list *t); static void pcnet_reset_8390(struct net_device *dev); static int set_config(struct net_device *dev, struct ifmap *map); static int setup_shmem_window(struct pcmcia_device *link, int start_pg, @@ -917,7 +917,7 @@ static int pcnet_open(struct net_device *dev) info->phy_id = info->eth_phy; info->link_status = 0x00; - setup_timer(&info->watchdog, ei_watchdog, (u_long)dev); + timer_setup(&info->watchdog, ei_watchdog, 0); mod_timer(&info->watchdog, jiffies + HZ); return ei_open(dev); @@ -1006,10 +1006,10 @@ static irqreturn_t ei_irq_wrapper(int irq, void *dev_id) return ret; } -static void ei_watchdog(u_long arg) +static void ei_watchdog(struct timer_list *t) { - struct net_device *dev = (struct net_device *)arg; - struct pcnet_dev *info = PRIV(dev); + struct pcnet_dev *info = from_timer(info, t, watchdog); + struct net_device *dev = info->p_dev->priv; unsigned int nic_base = dev->base_addr; unsigned int mii_addr = nic_base + DLINK_GPIO; u_short link; -- cgit v1.2.3 From c6c52ba1514120db3ad2d36391ed37bafcfc43d7 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 26 Oct 2017 22:54:38 -0700 Subject: drivers/net: amd: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: Tom Lendacky Cc: "David S. Miller" Cc: Allen Pais Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/a2065.c | 13 ++++++++++--- drivers/net/ethernet/amd/am79c961a.c | 9 +++++---- drivers/net/ethernet/amd/am79c961a.h | 1 + drivers/net/ethernet/amd/declance.c | 10 ++++++---- drivers/net/ethernet/amd/pcnet32.c | 10 +++++----- drivers/net/ethernet/amd/sunlance.c | 8 ++++---- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 14 ++++++-------- 7 files changed, 37 insertions(+), 28 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c index 998d30e050a6..212fe72a190b 100644 --- a/drivers/net/ethernet/amd/a2065.c +++ b/drivers/net/ethernet/amd/a2065.c @@ -123,6 +123,7 @@ struct lance_private { int burst_sizes; /* ledma SBus burst sizes */ #endif struct timer_list multicast_timer; + struct net_device *dev; }; #define LANCE_ADDR(x) ((int)(x) & ~0xff000000) @@ -638,6 +639,13 @@ static void lance_set_multicast(struct net_device *dev) netif_wake_queue(dev); } +static void lance_set_multicast_retry(struct timer_list *t) +{ + struct lance_private *lp = from_timer(lp, t, multicast_timer); + + lance_set_multicast(lp->dev); +} + static int a2065_init_one(struct zorro_dev *z, const struct zorro_device_id *ent); static void a2065_remove_one(struct zorro_dev *z); @@ -728,14 +736,13 @@ static int a2065_init_one(struct zorro_dev *z, priv->lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS; priv->rx_ring_mod_mask = RX_RING_MOD_MASK; priv->tx_ring_mod_mask = TX_RING_MOD_MASK; + priv->dev = dev; dev->netdev_ops = &lance_netdev_ops; dev->watchdog_timeo = 5*HZ; dev->dma = 0; - setup_timer(&priv->multicast_timer, - (void(*)(unsigned long))lance_set_multicast, - (unsigned long)dev); + timer_setup(&priv->multicast_timer, lance_set_multicast_retry, 0); err = register_netdev(dev); if (err) { diff --git 
a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c index 0612dbee00d2..01d132c02ff9 100644 --- a/drivers/net/ethernet/amd/am79c961a.c +++ b/drivers/net/ethernet/amd/am79c961a.c @@ -302,10 +302,10 @@ am79c961_init_for_open(struct net_device *dev) write_rreg (dev->base_addr, CSR0, CSR0_IENA|CSR0_STRT); } -static void am79c961_timer(unsigned long data) +static void am79c961_timer(struct timer_list *t) { - struct net_device *dev = (struct net_device *)data; - struct dev_priv *priv = netdev_priv(dev); + struct dev_priv *priv = from_timer(priv, t, timer); + struct net_device *dev = priv->dev; unsigned int lnkstat, carrier; unsigned long flags; @@ -728,7 +728,8 @@ static int am79c961_probe(struct platform_device *pdev) am79c961_banner(); spin_lock_init(&priv->chip_lock); - setup_timer(&priv->timer, am79c961_timer, (unsigned long)dev); + priv->dev = dev; + timer_setup(&priv->timer, am79c961_timer, 0); if (am79c961_hw_init(dev)) goto release; diff --git a/drivers/net/ethernet/amd/am79c961a.h b/drivers/net/ethernet/amd/am79c961a.h index 9f384b79507b..fc5088c70731 100644 --- a/drivers/net/ethernet/amd/am79c961a.h +++ b/drivers/net/ethernet/amd/am79c961a.h @@ -140,6 +140,7 @@ struct dev_priv { unsigned long txhdr; spinlock_t chip_lock; struct timer_list timer; + struct net_device *dev; }; #endif diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c index 9bdf81c2cd00..116997a8b593 100644 --- a/drivers/net/ethernet/amd/declance.c +++ b/drivers/net/ethernet/amd/declance.c @@ -260,6 +260,7 @@ struct lance_private { unsigned short busmaster_regval; struct timer_list multicast_timer; + struct net_device *dev; /* Pointers to the ring buffers as seen from the CPU */ char *rx_buf_ptr_cpu[RX_RING_SIZE]; @@ -1000,9 +1001,10 @@ static void lance_set_multicast(struct net_device *dev) netif_wake_queue(dev); } -static void lance_set_multicast_retry(unsigned long _opaque) +static void lance_set_multicast_retry(struct timer_list *t) { - struct net_device *dev = (struct net_device *) _opaque; + struct lance_private *lp = from_timer(lp, t, multicast_timer); + struct net_device *dev = lp->dev; lance_set_multicast(dev); } @@ -1246,8 +1248,8 @@ static int dec_lance_probe(struct device *bdev, const int type) * can occur from interrupts (ex. IPv6). So we * use a timer to try again later when necessary. 
-DaveM */ - setup_timer(&lp->multicast_timer, lance_set_multicast_retry, - (unsigned long)dev); + lp->dev = dev; + timer_setup(&lp->multicast_timer, lance_set_multicast_retry, 0); ret = register_netdev(dev); diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index e46153654016..a561705f232c 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -321,7 +321,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *); static void pcnet32_load_multicast(struct net_device *dev); static void pcnet32_set_multicast_list(struct net_device *); static int pcnet32_ioctl(struct net_device *, struct ifreq *, int); -static void pcnet32_watchdog(struct net_device *); +static void pcnet32_watchdog(struct timer_list *); static int mdio_read(struct net_device *dev, int phy_id, int reg_num); static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val); @@ -1970,8 +1970,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) lp->options |= PCNET32_PORT_MII; } - setup_timer(&lp->watchdog_timer, (void *)&pcnet32_watchdog, - (unsigned long)dev); + timer_setup(&lp->watchdog_timer, pcnet32_watchdog, 0); /* The PCNET32-specific entries in the device structure. */ dev->netdev_ops = &pcnet32_netdev_ops; @@ -2901,9 +2900,10 @@ static void pcnet32_check_media(struct net_device *dev, int verbose) * Could possibly be changed to use mii_check_media instead. */ -static void pcnet32_watchdog(struct net_device *dev) +static void pcnet32_watchdog(struct timer_list *t) { - struct pcnet32_private *lp = netdev_priv(dev); + struct pcnet32_private *lp = from_timer(lp, t, watchdog_timer); + struct net_device *dev = lp->dev; unsigned long flags; /* Print the link status if it has changed */ diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c index 0183ffb9d3ba..cdd7a611479b 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ b/drivers/net/ethernet/amd/sunlance.c @@ -1248,9 +1248,10 @@ static void lance_set_multicast(struct net_device *dev) netif_wake_queue(dev); } -static void lance_set_multicast_retry(unsigned long _opaque) +static void lance_set_multicast_retry(struct timer_list *t) { - struct net_device *dev = (struct net_device *) _opaque; + struct lance_private *lp = from_timer(lp, t, multicast_timer); + struct net_device *dev = lp->dev; lance_set_multicast(dev); } @@ -1459,8 +1460,7 @@ no_link_test: * can occur from interrupts (ex. IPv6). So we * use a timer to try again later when necessary. 
-DaveM */ - setup_timer(&lp->multicast_timer, lance_set_multicast_retry, - (unsigned long)dev); + timer_setup(&lp->multicast_timer, lance_set_multicast_retry, 0); if (register_netdev(dev)) { printk(KERN_ERR "SunLance: Cannot register device.\n"); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 608693d11bd7..3d53153ce751 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -642,9 +642,9 @@ static irqreturn_t xgbe_dma_isr(int irq, void *data) return IRQ_HANDLED; } -static void xgbe_tx_timer(unsigned long data) +static void xgbe_tx_timer(struct timer_list *t) { - struct xgbe_channel *channel = (struct xgbe_channel *)data; + struct xgbe_channel *channel = from_timer(channel, t, tx_timer); struct xgbe_prv_data *pdata = channel->pdata; struct napi_struct *napi; @@ -680,9 +680,9 @@ static void xgbe_service(struct work_struct *work) pdata->phy_if.phy_status(pdata); } -static void xgbe_service_timer(unsigned long data) +static void xgbe_service_timer(struct timer_list *t) { - struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data; + struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer); queue_work(pdata->dev_workqueue, &pdata->service_work); @@ -694,16 +694,14 @@ static void xgbe_init_timers(struct xgbe_prv_data *pdata) struct xgbe_channel *channel; unsigned int i; - setup_timer(&pdata->service_timer, xgbe_service_timer, - (unsigned long)pdata); + timer_setup(&pdata->service_timer, xgbe_service_timer, 0); for (i = 0; i < pdata->channel_count; i++) { channel = pdata->channel[i]; if (!channel->tx_ring) break; - setup_timer(&channel->tx_timer, xgbe_tx_timer, - (unsigned long)channel); + timer_setup(&channel->tx_timer, xgbe_tx_timer, 0); } } -- cgit v1.2.3 From 0e23daeb640773adf5528e5e08e7cb81fc12775d Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 26 Oct 2017 22:54:53 -0700 Subject: drivers/net: chelsio/cxgb*: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: Santosh Raspatur Cc: Ganesh Goudar Cc: Casey Leedom Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb3/sge.c | 12 ++++++------ drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 7 +++---- drivers/net/ethernet/chelsio/cxgb4/sge.c | 12 ++++++------ drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 12 ++++++------ 4 files changed, 21 insertions(+), 22 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index e2d342647b19..e3d28ae75360 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -2853,9 +2853,9 @@ void t3_sge_err_intr_handler(struct adapter *adapter) * bother cleaning them up here. * */ -static void sge_timer_tx(unsigned long data) +static void sge_timer_tx(struct timer_list *t) { - struct sge_qset *qs = (struct sge_qset *)data; + struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer); struct port_info *pi = netdev_priv(qs->netdev); struct adapter *adap = pi->adapter; unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0}; @@ -2893,10 +2893,10 @@ static void sge_timer_tx(unsigned long data) * starved. 
* */ -static void sge_timer_rx(unsigned long data) +static void sge_timer_rx(struct timer_list *t) { spinlock_t *lock; - struct sge_qset *qs = (struct sge_qset *)data; + struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer); struct port_info *pi = netdev_priv(qs->netdev); struct adapter *adap = pi->adapter; u32 status; @@ -2976,8 +2976,8 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports, struct sge_qset *q = &adapter->sge.qs[id]; init_qset_cntxt(q, id); - setup_timer(&q->tx_reclaim_timer, sge_timer_tx, (unsigned long)q); - setup_timer(&q->rx_reclaim_timer, sge_timer_rx, (unsigned long)q); + timer_setup(&q->tx_reclaim_timer, sge_timer_tx, 0); + timer_setup(&q->rx_reclaim_timer, sge_timer_rx, 0); q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size, sizeof(struct rx_desc), diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 9b6aabe4f963..614db014ef18 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -741,9 +741,9 @@ err: return ret; } -static void ch_flower_stats_cb(unsigned long data) +static void ch_flower_stats_cb(struct timer_list *t) { - struct adapter *adap = (struct adapter *)data; + struct adapter *adap = from_timer(adap, t, flower_stats_timer); struct ch_tc_flower_entry *flower_entry; struct ch_tc_flower_stats *ofld_stats; unsigned int i; @@ -815,8 +815,7 @@ err: void cxgb4_init_tc_flower(struct adapter *adap) { hash_init(adap->flower_anymatch_tbl); - setup_timer(&adap->flower_stats_timer, ch_flower_stats_cb, - (unsigned long)adap); + timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0); mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 4ef68f69b58c..486b01fe23bd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2583,11 +2583,11 @@ irq_handler_t t4_intr_handler(struct adapter *adap) return t4_intr_intx; } -static void sge_rx_timer_cb(unsigned long data) +static void sge_rx_timer_cb(struct timer_list *t) { unsigned long m; unsigned int i; - struct adapter *adap = (struct adapter *)data; + struct adapter *adap = from_timer(adap, t, sge.rx_timer); struct sge *s = &adap->sge; for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) @@ -2620,11 +2620,11 @@ done: mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); } -static void sge_tx_timer_cb(unsigned long data) +static void sge_tx_timer_cb(struct timer_list *t) { unsigned long m; unsigned int i, budget; - struct adapter *adap = (struct adapter *)data; + struct adapter *adap = from_timer(adap, t, sge.tx_timer); struct sge *s = &adap->sge; for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) @@ -3458,8 +3458,8 @@ int t4_sge_init(struct adapter *adap) /* Set up timers used for recuring callbacks to process RX and TX * administrative tasks. 
*/ - setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap); - setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap); + timer_setup(&s->rx_timer, sge_rx_timer_cb, 0); + timer_setup(&s->tx_timer, sge_tx_timer_cb, 0); spin_lock_init(&s->intrq_lock); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 05498e7f2840..14d7e673c656 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -2058,9 +2058,9 @@ irq_handler_t t4vf_intr_handler(struct adapter *adapter) * when out of memory a queue can become empty. We schedule NAPI to do * the actual refill. */ -static void sge_rx_timer_cb(unsigned long data) +static void sge_rx_timer_cb(struct timer_list *t) { - struct adapter *adapter = (struct adapter *)data; + struct adapter *adapter = from_timer(adapter, t, sge.rx_timer); struct sge *s = &adapter->sge; unsigned int i; @@ -2117,9 +2117,9 @@ static void sge_rx_timer_cb(unsigned long data) * when no new packets are being submitted. This is essential for pktgen, * at least. */ -static void sge_tx_timer_cb(unsigned long data) +static void sge_tx_timer_cb(struct timer_list *t) { - struct adapter *adapter = (struct adapter *)data; + struct adapter *adapter = from_timer(adapter, t, sge.tx_timer); struct sge *s = &adapter->sge; unsigned int i, budget; @@ -2676,8 +2676,8 @@ int t4vf_sge_init(struct adapter *adapter) /* * Set up tasklet timers. */ - setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adapter); - setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adapter); + timer_setup(&s->rx_timer, sge_rx_timer_cb, 0); + timer_setup(&s->tx_timer, sge_tx_timer_cb, 0); /* * Initialize Forwarded Interrupt Queue lock. -- cgit v1.2.3 From 9cb618c295016966b8781a57616b07d8b4d9cb21 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 26 Oct 2017 22:54:59 -0700 Subject: drivers/net: dlink: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: Denis Kirjanov Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Acked-by: Denis Kirjanov Signed-off-by: David S. Miller --- drivers/net/ethernet/dlink/sundance.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index 6ca9e981ad57..1a27176381fb 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -431,7 +431,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val static int mdio_wait_link(struct net_device *dev, int wait); static int netdev_open(struct net_device *dev); static void check_duplex(struct net_device *dev); -static void netdev_timer(unsigned long data); +static void netdev_timer(struct timer_list *t); static void tx_timeout(struct net_device *dev); static void init_ring(struct net_device *dev); static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev); @@ -913,7 +913,7 @@ static int netdev_open(struct net_device *dev) ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0)); /* Set the timer to check for link beat. 
*/ - setup_timer(&np->timer, netdev_timer, (unsigned long)dev); + timer_setup(&np->timer, netdev_timer, 0); np->timer.expires = jiffies + 3*HZ; add_timer(&np->timer); @@ -951,10 +951,10 @@ static void check_duplex(struct net_device *dev) } } -static void netdev_timer(unsigned long data) +static void netdev_timer(struct timer_list *t) { - struct net_device *dev = (struct net_device *)data; - struct netdev_private *np = netdev_priv(dev); + struct netdev_private *np = from_timer(np, t, timer); + struct net_device *dev = np->mii_if.dev; void __iomem *ioaddr = np->base; int next_tick = 10*HZ; -- cgit v1.2.3 From 8b3718dc2c3cf3043f474e067c374546ba17c403 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 26 Oct 2017 22:55:07 -0700 Subject: drivers/net: fealnx: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: "David S. Miller" Cc: "yuval.shaia@oracle.com" Cc: Allen Pais Cc: Stephen Hemminger Cc: Philippe Reynes Cc: Johannes Berg Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/fealnx.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index c8982313d850..23053919ebf5 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -426,8 +426,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val static int netdev_open(struct net_device *dev); static void getlinktype(struct net_device *dev); static void getlinkstatus(struct net_device *dev); -static void netdev_timer(unsigned long data); -static void reset_timer(unsigned long data); +static void netdev_timer(struct timer_list *t); +static void reset_timer(struct timer_list *t); static void fealnx_tx_timeout(struct net_device *dev); static void init_ring(struct net_device *dev); static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev); @@ -909,13 +909,13 @@ static int netdev_open(struct net_device *dev) printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name); /* Set the timer to check for link beat. 
*/ - setup_timer(&np->timer, netdev_timer, (unsigned long)dev); + timer_setup(&np->timer, netdev_timer, 0); np->timer.expires = RUN_AT(3 * HZ); /* timer handler */ add_timer(&np->timer); - setup_timer(&np->reset_timer, reset_timer, (unsigned long)dev); + timer_setup(&np->reset_timer, reset_timer, 0); np->reset_timer_armed = 0; return rc; } @@ -1078,10 +1078,10 @@ static void allocate_rx_buffers(struct net_device *dev) } -static void netdev_timer(unsigned long data) +static void netdev_timer(struct timer_list *t) { - struct net_device *dev = (struct net_device *) data; - struct netdev_private *np = netdev_priv(dev); + struct netdev_private *np = from_timer(np, t, timer); + struct net_device *dev = np->mii.dev; void __iomem *ioaddr = np->mem; int old_crvalue = np->crvalue; unsigned int old_linkok = np->linkok; @@ -1167,10 +1167,10 @@ static void enable_rxtx(struct net_device *dev) } -static void reset_timer(unsigned long data) +static void reset_timer(struct timer_list *t) { - struct net_device *dev = (struct net_device *) data; - struct netdev_private *np = netdev_priv(dev); + struct netdev_private *np = from_timer(np, t, reset_timer); + struct net_device *dev = np->mii.dev; unsigned long flags; printk(KERN_WARNING "%s: resetting tx and rx machinery\n", dev->name); -- cgit v1.2.3 From 34309b36e4f1ab53fcc696275c1fa2849bc80709 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 26 Oct 2017 22:55:13 -0700 Subject: drivers/net: korina: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: "David S. Miller" Cc: Roman Yeryomin Cc: Florian Fainelli Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/korina.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index 7cecd9dbc111..ae195f8adff5 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -653,10 +653,10 @@ static void korina_check_media(struct net_device *dev, unsigned int init_media) &lp->eth_regs->ethmac2); } -static void korina_poll_media(unsigned long data) +static void korina_poll_media(struct timer_list *t) { - struct net_device *dev = (struct net_device *) data; - struct korina_private *lp = netdev_priv(dev); + struct korina_private *lp = from_timer(lp, t, media_check_timer); + struct net_device *dev = lp->dev; korina_check_media(dev, 0); mod_timer(&lp->media_check_timer, jiffies + HZ); @@ -1103,7 +1103,7 @@ static int korina_probe(struct platform_device *pdev) ": cannot register net device: %d\n", rc); goto probe_err_register; } - setup_timer(&lp->media_check_timer, korina_poll_media, (unsigned long) dev); + timer_setup(&lp->media_check_timer, korina_poll_media, 0); INIT_WORK(&lp->restart_task, korina_restart_task); -- cgit v1.2.3 From 0365b047dea70ae931f99594bf8e5976ffec7fae Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 26 Oct 2017 22:55:20 -0700 Subject: drivers/net: mellanox: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. 
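Since every conversion in this series follows the same before/after shape, here is a minimal standalone sketch of the pattern; the mydev names and the watchdog field are hypothetical and not taken from any driver in this series:

#include <linux/netdevice.h>
#include <linux/timer.h>

struct mydev_priv {
	struct net_device *dev;	/* back-pointer kept in priv, because the
				 * callback no longer receives the
				 * net_device through an unsigned long */
	struct timer_list watchdog;
};

/* Old style: the callback took an opaque unsigned long:
 *
 *	static void mydev_watchdog(unsigned long data)
 *	{
 *		struct net_device *dev = (struct net_device *)data;
 *		...
 *	}
 *	setup_timer(&priv->watchdog, mydev_watchdog, (unsigned long)dev);
 */

/* New style: the callback receives the timer_list pointer and recovers
 * its container with from_timer(), a container_of() wrapper.
 */
static void mydev_watchdog(struct timer_list *t)
{
	struct mydev_priv *priv = from_timer(priv, t, watchdog);
	struct net_device *dev = priv->dev;

	netdev_dbg(dev, "watchdog fired\n");
	mod_timer(&priv->watchdog, jiffies + HZ);	/* re-arm */
}

static void mydev_start_watchdog(struct mydev_priv *priv)
{
	timer_setup(&priv->watchdog, mydev_watchdog, 0);
	mod_timer(&priv->watchdog, jiffies + HZ);
}

Note how a back-pointer in the private struct (or an existing relation such as pci_get_drvdata(), as the individual patches choose) replaces the old cast of the unsigned long argument.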
Cc: Saeed Mahameed Cc: Matan Barak Cc: Leon Romanovsky Cc: netdev@vger.kernel.org Cc: linux-rdma@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/health.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index a89a68ce53ad..185dcac0abe7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -285,9 +285,9 @@ void mlx5_trigger_health_work(struct mlx5_core_dev *dev) spin_unlock_irqrestore(&health->wq_lock, flags); } -static void poll_health(unsigned long data) +static void poll_health(struct timer_list *t) { - struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data; + struct mlx5_core_dev *dev = from_timer(dev, t, priv.health.timer); struct mlx5_core_health *health = &dev->priv.health; u32 count; @@ -320,7 +320,7 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; - setup_timer(&health->timer, poll_health, (unsigned long)dev); + timer_setup(&health->timer, poll_health, 0); health->sick = 0; clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags); -- cgit v1.2.3 From 15735c9d8a36cadb23ac5e9e29ea083f517767e4 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 26 Oct 2017 22:55:27 -0700 Subject: drivers/net: natsemi: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: "David S. Miller" Cc: Allen Pais Cc: Eric Dumazet Cc: Philippe Reynes Cc: Wei Yongjun Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/natsemi/natsemi.c | 10 +++++----- drivers/net/ethernet/natsemi/ns83820.c | 8 ++++---- 2 files changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index dedeacd0bbca..b9a1a9f999ea 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -610,7 +610,7 @@ static int netdev_open(struct net_device *dev); static void do_cable_magic(struct net_device *dev); static void undo_cable_magic(struct net_device *dev); static void check_link(struct net_device *dev); -static void netdev_timer(unsigned long data); +static void netdev_timer(struct timer_list *t); static void dump_ring(struct net_device *dev); static void ns_tx_timeout(struct net_device *dev); static int alloc_ring(struct net_device *dev); @@ -1571,7 +1571,7 @@ static int netdev_open(struct net_device *dev) dev->name, (int)readl(ioaddr + ChipCmd)); /* Set the timer to check for link beat. */ - setup_timer(&np->timer, netdev_timer, (unsigned long)dev); + timer_setup(&np->timer, netdev_timer, 0); np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ); add_timer(&np->timer); @@ -1787,10 +1787,10 @@ static void init_registers(struct net_device *dev) * this check via dspcfg_workaround sysfs option. 
* 3) check of death of the RX path due to OOM */ -static void netdev_timer(unsigned long data) +static void netdev_timer(struct timer_list *t) { - struct net_device *dev = (struct net_device *)data; - struct netdev_private *np = netdev_priv(dev); + struct netdev_private *np = from_timer(np, t, timer); + struct net_device *dev = np->dev; void __iomem * ioaddr = ns_ioaddr(dev); int next_tick = NATSEMI_TIMER_FREQ; const int irq = np->pci_dev->irq; diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c index 99d3c7884a4a..958fced4dacf 100644 --- a/drivers/net/ethernet/natsemi/ns83820.c +++ b/drivers/net/ethernet/natsemi/ns83820.c @@ -1600,10 +1600,10 @@ static void ns83820_tx_timeout(struct net_device *ndev) spin_unlock_irqrestore(&dev->tx_lock, flags); } -static void ns83820_tx_watch(unsigned long data) +static void ns83820_tx_watch(struct timer_list *t) { - struct net_device *ndev = (void *)data; - struct ns83820 *dev = PRIV(ndev); + struct ns83820 *dev = from_timer(dev, t, tx_watchdog); + struct net_device *ndev = dev->ndev; #if defined(DEBUG) printk("ns83820_tx_watch: %u %u %d\n", @@ -1652,7 +1652,7 @@ static int ns83820_open(struct net_device *ndev) writel(0, dev->base + TXDP_HI); writel(desc, dev->base + TXDP); - setup_timer(&dev->tx_watchdog, ns83820_tx_watch, (unsigned long)ndev); + timer_setup(&dev->tx_watchdog, ns83820_tx_watch, 0); mod_timer(&dev->tx_watchdog, jiffies + 2*HZ); netif_start_queue(ndev); /* FIXME: wait for phy to come up */ -- cgit v1.2.3 From 8089c6f4777f394407d63e217314f934b85a7947 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 26 Oct 2017 22:55:34 -0700 Subject: drivers/net: packetengines: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: "David S. Miller" Cc: Allen Pais Cc: yuan linyu Cc: Philippe Reynes Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/packetengines/hamachi.c | 14 +++++++------- drivers/net/ethernet/packetengines/yellowfin.c | 10 +++++----- 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c index 77bc7cca8980..c9529c29a0a7 100644 --- a/drivers/net/ethernet/packetengines/hamachi.c +++ b/drivers/net/ethernet/packetengines/hamachi.c @@ -413,13 +413,13 @@ that case. /* The rest of these values should never change. 
*/ -static void hamachi_timer(unsigned long data); +static void hamachi_timer(struct timer_list *t); enum capability_flags {CanHaveMII=1, }; static const struct chip_info { u16 vendor_id, device_id, device_id_mask, pad; const char *name; - void (*media_timer)(unsigned long data); + void (*media_timer)(struct timer_list *t); int flags; } chip_tbl[] = { {0x1318, 0x0911, 0xffff, 0, "Hamachi GNIC-II", hamachi_timer, 0}, @@ -547,7 +547,7 @@ static int mdio_read(struct net_device *dev, int phy_id, int location); static void mdio_write(struct net_device *dev, int phy_id, int location, int value); static int hamachi_open(struct net_device *dev); static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); -static void hamachi_timer(unsigned long data); +static void hamachi_timer(struct timer_list *t); static void hamachi_tx_timeout(struct net_device *dev); static void hamachi_init_ring(struct net_device *dev); static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb, @@ -979,7 +979,7 @@ static int hamachi_open(struct net_device *dev) dev->name, readw(ioaddr + RxStatus), readw(ioaddr + TxStatus)); } /* Set the timer to check for link beat. */ - setup_timer(&hmp->timer, hamachi_timer, (unsigned long)dev); + timer_setup(&hmp->timer, hamachi_timer, 0); hmp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */ add_timer(&hmp->timer); @@ -1017,10 +1017,10 @@ static inline int hamachi_tx(struct net_device *dev) return 0; } -static void hamachi_timer(unsigned long data) +static void hamachi_timer(struct timer_list *t) { - struct net_device *dev = (struct net_device *)data; - struct hamachi_private *hmp = netdev_priv(dev); + struct hamachi_private *hmp = from_timer(hmp, t, timer); + struct net_device *dev = hmp->mii_if.dev; void __iomem *ioaddr = hmp->base; int next_tick = 10*HZ; diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c index 33c241f52a71..54224d1822e3 100644 --- a/drivers/net/ethernet/packetengines/yellowfin.c +++ b/drivers/net/ethernet/packetengines/yellowfin.c @@ -343,7 +343,7 @@ static int mdio_read(void __iomem *ioaddr, int phy_id, int location); static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value); static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static int yellowfin_open(struct net_device *dev); -static void yellowfin_timer(unsigned long data); +static void yellowfin_timer(struct timer_list *t); static void yellowfin_tx_timeout(struct net_device *dev); static int yellowfin_init_ring(struct net_device *dev); static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb, @@ -632,7 +632,7 @@ static int yellowfin_open(struct net_device *dev) } /* Set the timer to check for link beat. 
*/ - setup_timer(&yp->timer, yellowfin_timer, (unsigned long)dev); + timer_setup(&yp->timer, yellowfin_timer, 0); yp->timer.expires = jiffies + 3*HZ; add_timer(&yp->timer); out: @@ -643,10 +643,10 @@ err_free_irq: goto out; } -static void yellowfin_timer(unsigned long data) +static void yellowfin_timer(struct timer_list *t) { - struct net_device *dev = (struct net_device *)data; - struct yellowfin_private *yp = netdev_priv(dev); + struct yellowfin_private *yp = from_timer(yp, t, timer); + struct net_device *dev = pci_get_drvdata(yp->pci_dev); void __iomem *ioaddr = yp->base; int next_tick = 60*HZ; -- cgit v1.2.3 From 267146d44718771aa0b375e78b33c81d137db09d Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 26 Oct 2017 22:55:42 -0700 Subject: drivers/net: smsc: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: "David S. Miller" Cc: "yuval.shaia@oracle.com" Cc: Eric Dumazet Cc: Philippe Reynes Cc: Allen Pais Cc: Tobias Klauser Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller --- drivers/net/ethernet/smsc/epic100.c | 10 +++++----- drivers/net/ethernet/smsc/smc91c92_cs.c | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 2a9724898fcf..949aaef390b6 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -290,7 +290,7 @@ static int read_eeprom(struct epic_private *, int); static int mdio_read(struct net_device *dev, int phy_id, int location); static void mdio_write(struct net_device *dev, int phy_id, int loc, int val); static void epic_restart(struct net_device *dev); -static void epic_timer(unsigned long data); +static void epic_timer(struct timer_list *t); static void epic_tx_timeout(struct net_device *dev); static void epic_init_ring(struct net_device *dev); static netdev_tx_t epic_start_xmit(struct sk_buff *skb, @@ -739,7 +739,7 @@ static int epic_open(struct net_device *dev) /* Set the timer to switch to check for link beat and perhaps switch to an alternate media type. 
*/ - setup_timer(&ep->timer, epic_timer, (unsigned long)dev); + timer_setup(&ep->timer, epic_timer, 0); ep->timer.expires = jiffies + 3*HZ; add_timer(&ep->timer); @@ -843,10 +843,10 @@ static void check_media(struct net_device *dev) } } -static void epic_timer(unsigned long data) +static void epic_timer(struct timer_list *t) { - struct net_device *dev = (struct net_device *)data; - struct epic_private *ep = netdev_priv(dev); + struct epic_private *ep = from_timer(ep, t, timer); + struct net_device *dev = ep->mii.dev; void __iomem *ioaddr = ep->ioaddr; int next_tick = 5*HZ; diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c index 92c927aec66d..a55f430f6a7b 100644 --- a/drivers/net/ethernet/smsc/smc91c92_cs.c +++ b/drivers/net/ethernet/smsc/smc91c92_cs.c @@ -280,7 +280,7 @@ static void set_rx_mode(struct net_device *dev); static int s9k_config(struct net_device *dev, struct ifmap *map); static void smc_set_xcvr(struct net_device *dev, int if_port); static void smc_reset(struct net_device *dev); -static void media_check(u_long arg); +static void media_check(struct timer_list *t); static void mdio_sync(unsigned int addr); static int mdio_read(struct net_device *dev, int phy_id, int loc); static void mdio_write(struct net_device *dev, int phy_id, int loc, int value); @@ -1070,7 +1070,7 @@ static int smc_open(struct net_device *dev) smc->packets_waiting = 0; smc_reset(dev); - setup_timer(&smc->media, media_check, (u_long)dev); + timer_setup(&smc->media, media_check, 0); mod_timer(&smc->media, jiffies + HZ); return 0; @@ -1708,10 +1708,10 @@ static void smc_reset(struct net_device *dev) ======================================================================*/ -static void media_check(u_long arg) +static void media_check(struct timer_list *t) { - struct net_device *dev = (struct net_device *) arg; - struct smc_private *smc = netdev_priv(dev); + struct smc_private *smc = from_timer(smc, t, media); + struct net_device *dev = smc->mii_if.dev; unsigned int ioaddr = dev->base_addr; u_short i, media, saved_bank; u_short link; -- cgit v1.2.3 From 509708310cf917a05fbceb41ad67da1416b81bd0 Mon Sep 17 00:00:00 2001 From: Francois Romieu Date: Fri, 27 Oct 2017 13:24:49 +0300 Subject: r8169: Add support for interrupt coalesce tuning (ethtool -C) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Kirr: In particular with ethtool -C rx-usecs 0 rx-frames 0 now it is possible to disable RX delays when NIC usage requires low latency. See this thread for context: https://www.spinics.net/lists/netdev/msg217665.html My specific case is that: We have many computers with gigabit Realtek NICs. For 2 such computers connected to a gigabit store-and-forward switch the minimum round-trip time for small pings (`ping -i 0 -w 3 -s 56 -q peer`) is ~ 30μs. However it turned out that when Ethernet frame length transitions 127 -> 128 bytes (`ping -i 0 -w 3 -s {81 -> 82} -q peer`) the lowest RTT transitions step-wise to ~ 270μs. As David Laight said this is RX interrupt mitigation done by the NIC which creates the latency. For workloads where low latency is required, with e.g. Intel, BCM etc. NIC drivers one just uses `ethtool -C rx-usecs ...` to reduce the time the NIC delays before interrupting the CPU, but it turned out `ethtool -C` is not supported by the r8169 driver.
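To make the hardware side concrete: per the 'Undocumented corner' comment quoted just below, the 16-bit IntrMitigate register supposedly packs four 4-bit fields. A small illustrative helper under that assumption follows (standalone C, not part of the patch; the field meanings are inferred from that comment, not from vendor documentation):

#include <stdint.h>
#include <stdio.h>

/* Packs the four supposed 4-bit IntrMitigate fields following the
 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
 * layout quoted from the driver below.
 */
static uint16_t intr_mitigate_pack(unsigned int tx_timer,
				   unsigned int tx_packets,
				   unsigned int rx_timer,
				   unsigned int rx_packets)
{
	return (uint16_t)(((tx_timer & 0xf) << 12) |
			  ((tx_packets & 0xf) << 8) |
			  ((rx_timer & 0xf) << 4) |
			  (rx_packets & 0xf));
}

int main(void)
{
	/* 0x5151, the value rtl_hw_start_8168() hardcodes today */
	printf("0x%04x\n", intr_mitigate_pack(5, 1, 5, 1));
	/* 0x0000, i.e. mitigation fully off, as rtl_hw_start_8169() uses */
	printf("0x%04x\n", intr_mitigate_pack(0, 0, 0, 0));
	return 0;
}

Under this reading, the patch below essentially lets ethtool -C read and write these fields, scaled to microseconds via the speed-dependent factors it introduces.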
Like Stéphane ANCELOT I've traced the problem down to IntrMitigate being hardcoded to != 0 for our chips (we have 8168 based NICs): https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/net/ethernet/realtek/r8169.c#n5460 static void rtl_hw_start_8169(struct net_device *dev) { ... /* * Undocumented corner. Supposedly: * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets */ RTL_W16(IntrMitigate, 0x0000); https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/net/ethernet/realtek/r8169.c#n6346 static void rtl_hw_start_8168(struct net_device *dev) { ... RTL_W16(IntrMitigate, 0x5151); and then I've also found https://www.spinics.net/lists/netdev/msg217665.html and original Francois' patch: https://www.spinics.net/lists/netdev/msg217984.html https://www.spinics.net/lists/netdev/msg218207.html So could we please finally get support for tuning r8169 interrupt coalescing in tree? (so that the next poor soul who hits the problem does not need to go all the way to dig wildly into driver sources and the internet and finally patch locally -RTL_W16(IntrMitigate, 0x5151); +RTL_W16(IntrMitigate, 0x5100); guessing whether it is right or not and also having to care to deploy the patch everywhere it needs to be used, etc...). To do so I took Francois's original patch from 2012 and reworked it a bit: - updated to latest net-next.git; - adjusted scaling setup based on feedback from Hayes to pick up scaling vector depending not only on link speed but also on CPlusCmd[0:1] and to adjust CPlusCmd[0:1] correspondingly when setting timings; - improved the error handling a bit (I think so). I've tested the patch on "RTL8168d/8111d" (XID 083000c0) and with it and `ethtool -C rx-usecs 0 rx-frames 0` on both ends it improves: - minimum RTT latency: ~270μs -> ~30μs (small packet), ~330μs -> ~110μs (full 1.5K ethernet frame) - average RTT latency: ~480μs -> ~50μs (small packet), ~560μs -> ~125μs (full 1.5K ethernet frame) ( before: root@neo1:# ping -i 0 -w 3 -s 82 -q neo2 PING neo2.kirr.nexedi.com (192.168.102.21) 82(110) bytes of data. --- neo2.kirr.nexedi.com ping statistics --- 5906 packets transmitted, 5905 received, 0% packet loss, time 2999ms rtt min/avg/max/mdev = 0.274/0.485/0.607/0.026 ms, ipg/ewma 0.508/0.489 ms root@neo1:# ping -i 0 -w 3 -s 1472 -q neo2 PING neo2.kirr.nexedi.com (192.168.102.21) 1472(1500) bytes of data. --- neo2.kirr.nexedi.com ping statistics --- 5073 packets transmitted, 5073 received, 0% packet loss, time 2999ms rtt min/avg/max/mdev = 0.330/0.566/0.710/0.028 ms, ipg/ewma 0.591/0.544 ms after: root@neo1# ping -i 0 -w 3 -s 82 -q neo2 PING neo2.kirr.nexedi.com (192.168.102.21) 82(110) bytes of data. --- neo2.kirr.nexedi.com ping statistics --- 45815 packets transmitted, 45815 received, 0% packet loss, time 3000ms rtt min/avg/max/mdev = 0.036/0.051/0.368/0.010 ms, ipg/ewma 0.065/0.053 ms root@neo1:# ping -i 0 -w 3 -s 1472 -q neo2 PING neo2.kirr.nexedi.com (192.168.102.21) 1472(1500) bytes of data. --- neo2.kirr.nexedi.com ping statistics --- 21250 packets transmitted, 21250 received, 0% packet loss, time 3000ms rtt min/avg/max/mdev = 0.112/0.125/0.390/0.007 ms, ipg/ewma 0.141/0.125 ms the small -> 1.5K latency growth is understandable as it takes ~15μs to transmit 1.5K on 1Gbps on the wire and with 2 hosts and 1 switch and ICMP ECHO + ECHO reply the packet has to travel 4 ethernet segments which is already 60μs; probably something else is also at play, as e.g.
on Linux, even with `cpupower frequency-set -g performance`, on some computers I've noticed the kernel can spend more time in software-only mode when incoming packets arrive less frequently. E.g. this program can demonstrate the effect for ICMP ECHO processing: https://lab.nexedi.com/kirr/bcc/blob/43cfc13b/tools/pinglat.py (later this was found to be partly due to C-state exit latencies) )
We have had this patch running in our testing setup for a month already without any issues observed. It remains to be clarified whether the RX and TX timers use the same base. For now I've set them equally, but Francois's original patch version suggests it might not be the same. I got no feedback at all to my original posting of this patch and questions (https://www.spinics.net/lists/netdev/msg457173.html), neither from Francois nor from anyone at Realtek, in one month. So I suggest we simply apply it to net-next.git now.
Cc: Francois Romieu Cc: Hayes Wang Cc: Realtek linux nic maintainers Cc: David Laight Cc: Stéphane ANCELOT Cc: Eric Dumazet Signed-off-by: Kirill Smelkov Tested-by: Holger Hoffstätte Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 231 +++++++++++++++++++++++++++++++++++ 1 file changed, 231 insertions(+) (limited to 'drivers/net/ethernet')
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 7dc4b6de31e6..fd218fd9ef3c 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -399,6 +399,12 @@ enum rtl_registers { RxMaxSize = 0xda, CPlusCmd = 0xe0, IntrMitigate = 0xe2, + +#define RTL_COALESCE_MASK 0x0f +#define RTL_COALESCE_SHIFT 4 +#define RTL_COALESCE_T_MAX (RTL_COALESCE_MASK) +#define RTL_COALESCE_FRAME_MAX (RTL_COALESCE_MASK << 2) + RxDescAddrLow = 0xe4, RxDescAddrHigh = 0xe8, EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */ @@ -795,6 +801,7 @@ struct rtl8169_private { u16 cp_cmd; u16 event_slow; + const struct rtl_coalesce_info *coalesce_info; struct mdio_ops { void (*write)(struct rtl8169_private *, int, int); @@ -2363,10 +2370,229 @@ static int rtl8169_nway_reset(struct net_device *dev) return mii_nway_restart(&tp->mii); } +/* + * Interrupt coalescing + * + * > 1 - the availability of the IntrMitigate (0xe2) register through the + * > 8169, 8168 and 810x line of chipsets + * + * 8169, 8168, and 8136(810x) serial chipsets support it. + * + * > 2 - the Tx timer unit at gigabit speed + * + * The unit of the timer depends on both the speed and the setting of CPlusCmd + * (0xe0) bit 1 and bit 0.
+ * + * For 8169 + * bit[1:0] \ speed 1000M 100M 10M + * 0 0 320ns 2.56us 40.96us + * 0 1 2.56us 20.48us 327.7us + * 1 0 5.12us 40.96us 655.4us + * 1 1 10.24us 81.92us 1.31ms + * + * For the other + * bit[1:0] \ speed 1000M 100M 10M + * 0 0 5us 2.56us 40.96us + * 0 1 40us 20.48us 327.7us + * 1 0 80us 40.96us 655.4us + * 1 1 160us 81.92us 1.31ms + */ + +/* rx/tx scale factors for one particular CPlusCmd[0:1] value */ +struct rtl_coalesce_scale { + /* Rx / Tx */ + u32 nsecs[2]; +}; + +/* rx/tx scale factors for all CPlusCmd[0:1] cases */ +struct rtl_coalesce_info { + u32 speed; + struct rtl_coalesce_scale scalev[4]; /* each CPlusCmd[0:1] case */ +}; + +/* produce (r,t) pairs with each being in series of *1, *8, *8*2, *8*2*2 */ +#define rxtx_x1822(r, t) { \ + {{(r), (t)}}, \ + {{(r)*8, (t)*8}}, \ + {{(r)*8*2, (t)*8*2}}, \ + {{(r)*8*2*2, (t)*8*2*2}}, \ +} +static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = { + /* speed delays: rx00 tx00 */ + { SPEED_10, rxtx_x1822(40960, 40960) }, + { SPEED_100, rxtx_x1822( 2560, 2560) }, + { SPEED_1000, rxtx_x1822( 320, 320) }, + { 0 }, +}; + +static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = { + /* speed delays: rx00 tx00 */ + { SPEED_10, rxtx_x1822(40960, 40960) }, + { SPEED_100, rxtx_x1822( 2560, 2560) }, + { SPEED_1000, rxtx_x1822( 5000, 5000) }, + { 0 }, +}; +#undef rxtx_x1822 + +/* get rx/tx scale vector corresponding to current speed */ +static const struct rtl_coalesce_info *rtl_coalesce_info(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct ethtool_link_ksettings ecmd; + const struct rtl_coalesce_info *ci; + int rc; + + rc = rtl8169_get_link_ksettings(dev, &ecmd); + if (rc < 0) + return ERR_PTR(rc); + + for (ci = tp->coalesce_info; ci->speed != 0; ci++) { + if (ecmd.base.speed == ci->speed) { + return ci; + } + } + + return ERR_PTR(-ELNRNG); +} + +static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + const struct rtl_coalesce_info *ci; + const struct rtl_coalesce_scale *scale; + struct { + u32 *max_frames; + u32 *usecs; + } coal_settings [] = { + { &ec->rx_max_coalesced_frames, &ec->rx_coalesce_usecs }, + { &ec->tx_max_coalesced_frames, &ec->tx_coalesce_usecs } + }, *p = coal_settings; + int i; + u16 w; + + memset(ec, 0, sizeof(*ec)); + + /* get rx/tx scale corresponding to current speed and CPlusCmd[0:1] */ + ci = rtl_coalesce_info(dev); + if (IS_ERR(ci)) + return PTR_ERR(ci); + + scale = &ci->scalev[RTL_R16(CPlusCmd) & 3]; + + /* read IntrMitigate and adjust according to scale */ + for (w = RTL_R16(IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) { + *p->max_frames = (w & RTL_COALESCE_MASK) << 2; + w >>= RTL_COALESCE_SHIFT; + *p->usecs = w & RTL_COALESCE_MASK; + } + + for (i = 0; i < 2; i++) { + p = coal_settings + i; + *p->usecs = (*p->usecs * scale->nsecs[i]) / 1000; + + /* + * ethtool_coalesce says it is illegal to set both usecs and + * max_frames to 0. 
+ */ + if (!*p->usecs && !*p->max_frames) + *p->max_frames = 1; + } + + return 0; +} + +/* choose appropriate scale factor and CPlusCmd[0:1] for (speed, nsec) */ +static const struct rtl_coalesce_scale *rtl_coalesce_choose_scale( + struct net_device *dev, u32 nsec, u16 *cp01) +{ + const struct rtl_coalesce_info *ci; + u16 i; + + ci = rtl_coalesce_info(dev); + if (IS_ERR(ci)) + return ERR_CAST(ci); + + for (i = 0; i < 4; i++) { + u32 rxtx_maxscale = max(ci->scalev[i].nsecs[0], + ci->scalev[i].nsecs[1]); + if (nsec <= rxtx_maxscale * RTL_COALESCE_T_MAX) { + *cp01 = i; + return &ci->scalev[i]; + } + } + + return ERR_PTR(-EINVAL); +} + +static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + const struct rtl_coalesce_scale *scale; + struct { + u32 frames; + u32 usecs; + } coal_settings [] = { + { ec->rx_max_coalesced_frames, ec->rx_coalesce_usecs }, + { ec->tx_max_coalesced_frames, ec->tx_coalesce_usecs } + }, *p = coal_settings; + u16 w = 0, cp01; + int i; + + scale = rtl_coalesce_choose_scale(dev, + max(p[0].usecs, p[1].usecs) * 1000, &cp01); + if (IS_ERR(scale)) + return PTR_ERR(scale); + + for (i = 0; i < 2; i++, p++) { + u32 units; + + /* + * accept max_frames=1 we returned in rtl_get_coalesce. + * accept it not only when usecs=0 because of e.g. the following scenario: + * + * - both rx_usecs=0 & rx_frames=0 in hardware (no delay on RX) + * - rtl_get_coalesce returns rx_usecs=0, rx_frames=1 + * - then user does `ethtool -C eth0 rx-usecs 100` + * + * since ethtool sends to kernel whole ethtool_coalesce + * settings, if we do not handle rx_usecs=!0, rx_frames=1 + * we'll reject it below in `frames % 4 != 0`. + */ + if (p->frames == 1) { + p->frames = 0; + } + + units = p->usecs * 1000 / scale->nsecs[i]; + if (p->frames > RTL_COALESCE_FRAME_MAX || p->frames % 4) + return -EINVAL; + + w <<= RTL_COALESCE_SHIFT; + w |= units; + w <<= RTL_COALESCE_SHIFT; + w |= p->frames >> 2; + } + + rtl_lock_work(tp); + + RTL_W16(IntrMitigate, swab16(w)); + + tp->cp_cmd = (tp->cp_cmd & ~3) | cp01; + RTL_W16(CPlusCmd, tp->cp_cmd); + RTL_R16(CPlusCmd); + + rtl_unlock_work(tp); + + return 0; +} + static const struct ethtool_ops rtl8169_ethtool_ops = { .get_drvinfo = rtl8169_get_drvinfo, .get_regs_len = rtl8169_get_regs_len, .get_link = ethtool_op_get_link, + .get_coalesce = rtl_get_coalesce, + .set_coalesce = rtl_set_coalesce, .set_settings = rtl8169_set_settings, .get_msglevel = rtl8169_get_msglevel, .set_msglevel = rtl8169_set_msglevel, @@ -8061,6 +8287,7 @@ static const struct rtl_cfg_info { unsigned int align; u16 event_slow; unsigned features; + const struct rtl_coalesce_info *coalesce_info; u8 default_ver; } rtl_cfg_infos [] = { [RTL_CFG_0] = { @@ -8069,6 +8296,7 @@ static const struct rtl_cfg_info { .align = 0, .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver, .features = RTL_FEATURE_GMII, + .coalesce_info = rtl_coalesce_info_8169, .default_ver = RTL_GIGA_MAC_VER_01, }, [RTL_CFG_1] = { @@ -8077,6 +8305,7 @@ static const struct rtl_cfg_info { .align = 8, .event_slow = SYSErr | LinkChg | RxOverflow, .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, + .coalesce_info = rtl_coalesce_info_8168_8136, .default_ver = RTL_GIGA_MAC_VER_11, }, [RTL_CFG_2] = { @@ -8086,6 +8315,7 @@ static const struct rtl_cfg_info { .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver | PCSTimeout, .features = RTL_FEATURE_MSI, + .coalesce_info = rtl_coalesce_info_8168_8136, .default_ver = RTL_GIGA_MAC_VER_13, 
} }; @@ -8449,6 +8679,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->hw_start = cfg->hw_start; tp->event_slow = cfg->event_slow; + tp->coalesce_info = cfg->coalesce_info; tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ? ~(RxBOVF | RxFOVF) : ~0; -- cgit v1.2.3
From c69fe407803d4b554b7397fad9598a04717ac255 Mon Sep 17 00:00:00 2001 From: Arjun Vynipadath Date: Fri, 27 Oct 2017 18:08:21 +0530 Subject: cxgb3: Check and handle the dma mapping errors
This patch adds checks at appropriate places to verify whether the *dma_map*() calls have succeeded. Original work by: Santosh Rastapur Signed-off-by: Arjun Vynipadath Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb3/sge.c | 118 ++++++++++++++++++++++++------- 1 file changed, 92 insertions(+), 26 deletions(-) (limited to 'drivers/net/ethernet')
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index e3d28ae75360..e988caa797cb 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -455,6 +455,11 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, q->pg_chunk.offset = 0; mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, 0, q->alloc_size, PCI_DMA_FROMDEVICE); + if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) { + __free_pages(q->pg_chunk.page, order); + q->pg_chunk.page = NULL; + return -EIO; + } q->pg_chunk.mapping = mapping; } sd->pg_chunk = q->pg_chunk; @@ -949,40 +954,78 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb) return flits_to_desc(flits); } +/* map_skb - map a packet main body and its page fragments + * @pdev: the PCI device + * @skb: the packet + * @addr: placeholder to save the mapped addresses + * + * map the main body of an sk_buff and its page fragments, if any. + */ +static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb, + dma_addr_t *addr) +{ + const skb_frag_t *fp, *end; + const struct skb_shared_info *si; + + if (skb_headlen(skb)) { + *addr = pci_map_single(pdev, skb->data, skb_headlen(skb), + PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(pdev, *addr)) + goto out_err; + addr++; + } + + si = skb_shinfo(skb); + end = &si->frags[si->nr_frags]; + + for (fp = si->frags; fp < end; fp++) { + *addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp), + DMA_TO_DEVICE); + if (pci_dma_mapping_error(pdev, *addr)) + goto unwind; + addr++; + } + return 0; + +unwind: + while (fp-- > si->frags) + dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp), + DMA_TO_DEVICE); + + pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE); +out_err: + return -ENOMEM; +} + /** - * make_sgl - populate a scatter/gather list for a packet + * write_sgl - populate a scatter/gather list for a packet * @skb: the packet * @sgp: the SGL to populate * @start: start address of skb main body data to include in the SGL * @len: length of skb main body data to include in the SGL - * @pdev: the PCI device + * @addr: the list of the mapped addresses * - * Generates a scatter/gather list for the buffers that make up a packet + * Copies the scatter/gather list for the buffers that make up a packet * and returns the SGL size in 8-byte words. The caller must size the SGL * appropriately.
*/ -static inline unsigned int make_sgl(const struct sk_buff *skb, - struct sg_ent *sgp, unsigned char *start, - unsigned int len, struct pci_dev *pdev) +static inline unsigned int write_sgl(const struct sk_buff *skb, + struct sg_ent *sgp, unsigned char *start, + unsigned int len, const dma_addr_t *addr) { - dma_addr_t mapping; - unsigned int i, j = 0, nfrags; + unsigned int i, j = 0, k = 0, nfrags; if (len) { - mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE); sgp->len[0] = cpu_to_be32(len); - sgp->addr[0] = cpu_to_be64(mapping); - j = 1; + sgp->addr[j++] = cpu_to_be64(addr[k++]); } nfrags = skb_shinfo(skb)->nr_frags; for (i = 0; i < nfrags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), - DMA_TO_DEVICE); sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); - sgp->addr[j] = cpu_to_be64(mapping); + sgp->addr[j] = cpu_to_be64(addr[k++]); j ^= 1; if (j == 0) ++sgp; @@ -1138,7 +1181,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, const struct port_info *pi, unsigned int pidx, unsigned int gen, struct sge_txq *q, unsigned int ndesc, - unsigned int compl) + unsigned int compl, const dma_addr_t *addr) { unsigned int flits, sgl_flits, cntrl, tso_info; struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; @@ -1196,7 +1239,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, } sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; - sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev); + sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr); write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), @@ -1227,6 +1270,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) struct netdev_queue *txq; struct sge_qset *qs; struct sge_txq *q; + dma_addr_t addr[MAX_SKB_FRAGS + 1]; /* * The chip min packet length is 9 octets but play safe and reject @@ -1255,6 +1299,14 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_BUSY; } + /* Check if ethernet packet can't be sent as immediate data */ + if (skb->len > (WR_LEN - sizeof(struct cpl_tx_pkt))) { + if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + } + q->in_use += ndesc; if (unlikely(credits - ndesc < q->stop_thres)) { t3_stop_tx_queue(txq, qs, q); @@ -1312,7 +1364,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) if (likely(!skb_shared(skb))) skb_orphan(skb); - write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl); + write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr); check_ring_tx_db(adap, q); return NETDEV_TX_OK; } @@ -1577,7 +1629,8 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev, */ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, struct sge_txq *q, unsigned int pidx, - unsigned int gen, unsigned int ndesc) + unsigned int gen, unsigned int ndesc, + const dma_addr_t *addr) { unsigned int sgl_flits, flits; struct work_request_hdr *from; @@ -1598,10 +1651,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, flits = skb_transport_offset(skb) / 8; sgp = ndesc == 1 ? 
(struct sg_ent *)&d->flit[flits] : sgl; - sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), - skb_tail_pointer(skb) - - skb_transport_header(skb), - adap->pdev); + sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb), + skb_tail_pointer(skb) - skb_transport_header(skb), + addr); if (need_skb_unmap()) { setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); skb->destructor = deferred_unmap_destructor; @@ -1659,6 +1711,12 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); goto again; } + if (!immediate(skb) && + map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) { + spin_unlock(&q->lock); + return NET_XMIT_SUCCESS; + } + gen = q->gen; q->in_use += ndesc; pidx = q->pidx; @@ -1669,7 +1727,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); } spin_unlock(&q->lock); - write_ofld_wr(adap, skb, q, pidx, gen, ndesc); + write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head); check_ring_tx_db(adap, q); return NET_XMIT_SUCCESS; } @@ -1687,6 +1745,7 @@ static void restart_offloadq(unsigned long data) struct sge_txq *q = &qs->txq[TXQ_OFLD]; const struct port_info *pi = netdev_priv(qs->netdev); struct adapter *adap = pi->adapter; + unsigned int written = 0; spin_lock(&q->lock); again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); @@ -1706,10 +1765,15 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); break; } + if (!immediate(skb) && + map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) + break; + gen = q->gen; q->in_use += ndesc; pidx = q->pidx; q->pidx += ndesc; + written += ndesc; if (q->pidx >= q->size) { q->pidx -= q->size; q->gen ^= 1; @@ -1717,7 +1781,8 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); __skb_unlink(skb, &q->sendq); spin_unlock(&q->lock); - write_ofld_wr(adap, skb, q, pidx, gen, ndesc); + write_ofld_wr(adap, skb, q, pidx, gen, ndesc, + (dma_addr_t *)skb->head); spin_lock(&q->lock); } spin_unlock(&q->lock); @@ -1727,8 +1792,9 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); set_bit(TXQ_LAST_PKT_DB, &q->flags); #endif wmb(); - t3_write_reg(adap, A_SG_KDOORBELL, - F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); + if (likely(written)) + t3_write_reg(adap, A_SG_KDOORBELL, + F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); } /** -- cgit v1.2.3 From 641da8ed3d8c54a5357d85a0f18d1d537c4205b9 Mon Sep 17 00:00:00 2001 From: Felix Manlunas Date: Fri, 27 Oct 2017 14:37:03 -0700 Subject: liquidio: get rid of false alarm "Unknown cmd 27" in dmesg Creating a macvtap interface with the liquidio VF driver as lower device causes this alarming message to show up in dmesg: liquidio_link_ctrl_cmd_completion Unknown cmd 27 That's actually a false alarm because cmd 27 is the value of the macro OCTNET_CMD_SET_UC_LIST which is known. It's a control command sent from host to NIC firmware to set the unicast MAC address list of the macvtap lower device. Make the false alarm go away by adding a case for OCTNET_CMD_SET_UC_LIST in liquidio_link_ctrl_cmd_completion(). Signed-off-by: Felix Manlunas Signed-off-by: Raghu Vatsavayi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_core.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index b891d858e416..89b7820d59ce 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -143,6 +143,7 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr) switch (nctrl->ncmd.s.cmd) { case OCTNET_CMD_CHANGE_DEVFLAGS: case OCTNET_CMD_SET_MULTI_LIST: + case OCTNET_CMD_SET_UC_LIST: break; case OCTNET_CMD_CHANGE_MACADDR: -- cgit v1.2.3 From 2660d226d9901c2c82c81f0b3dc5e6737eed2dfe Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Sat, 28 Oct 2017 05:03:38 +0000 Subject: net: aquantia: Make local functions static Fixes the following sparse warnings: drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c:224:5: warning: symbol 'aq_ethtool_get_coalesce' was not declared. Should it be static? drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c:245:5: warning: symbol 'aq_ethtool_set_coalesce' was not declared. Should it be static? Signed-off-by: Wei Yongjun Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index d5e99b468870..70efb7467bf3 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -221,8 +221,8 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev, return err; } -int aq_ethtool_get_coalesce(struct net_device *ndev, - struct ethtool_coalesce *coal) +static int aq_ethtool_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *coal) { struct aq_nic_s *aq_nic = netdev_priv(ndev); struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); @@ -242,8 +242,8 @@ int aq_ethtool_get_coalesce(struct net_device *ndev, return 0; } -int aq_ethtool_set_coalesce(struct net_device *ndev, - struct ethtool_coalesce *coal) +static int aq_ethtool_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *coal) { struct aq_nic_s *aq_nic = netdev_priv(ndev); struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); -- cgit v1.2.3 From 254d152a216750f508442cc3e502130e5f539ab4 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 10 Oct 2017 10:17:38 +0200 Subject: i40e: mark PM functions as __maybe_unused A cleanup of the PM code left an incorrect #ifdef in place, leading to a harmless build warning: drivers/net/ethernet/intel/i40e/i40e_main.c:12223:12: error: 'i40e_resume' defined but not used [-Werror=unused-function] drivers/net/ethernet/intel/i40e/i40e_main.c:12185:12: error: 'i40e_suspend' defined but not used [-Werror=unused-function] It's easier to use __maybe_unused attributes here, since you can't pick the wrong one. 
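For illustration, a minimal sketch of the pattern this fix relies on (a hypothetical foo driver, not the i40e code itself):

    #include <linux/device.h>
    #include <linux/pm.h>

    /* With CONFIG_PM disabled, SIMPLE_DEV_PM_OPS() wires up no callbacks,
     * so these static functions would be defined but unreferenced;
     * __maybe_unused silences the unused-function warning (an error under
     * -Werror) without any #ifdef bracketing to get wrong.
     */
    static int __maybe_unused foo_suspend(struct device *dev)
    {
            return 0;       /* quiesce the device here */
    }

    static int __maybe_unused foo_resume(struct device *dev)
    {
            return 0;       /* re-initialize the device here */
    }

    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);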
Fixes: 0e5d3da40055 ("i40e: use newer generic PM support instead of legacy PM callbacks") Signed-off-by: Arnd Bergmann Tested-by: Andrew Bowers Acked-by: Jacob Keller Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 54ff34faca37..2a087319c09b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -9557,7 +9557,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) return 0; } -#ifdef CONFIG_PM /** * i40e_restore_interrupt_scheme - Restore the interrupt scheme * @pf: private board data structure @@ -9606,7 +9605,6 @@ err_unwind: return err; } -#endif /* CONFIG_PM */ /** * i40e_setup_misc_vector - Setup the misc vector to handle non queue events @@ -13285,12 +13283,11 @@ static void i40e_shutdown(struct pci_dev *pdev) } } -#ifdef CONFIG_PM /** * i40e_suspend - PM callback for moving to D3 * @dev: generic device information structure **/ -static int i40e_suspend(struct device *dev) +static int __maybe_unused i40e_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct i40e_pf *pf = pci_get_drvdata(pdev); @@ -13328,7 +13325,7 @@ static int i40e_suspend(struct device *dev) * i40e_resume - PM callback for waking up from D3 * @dev: generic device information structure **/ -static int i40e_resume(struct device *dev) +static int __maybe_unused i40e_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct i40e_pf *pf = pci_get_drvdata(pdev); @@ -13360,8 +13357,6 @@ static int i40e_resume(struct device *dev) return 0; } -#endif /* CONFIG_PM */ - static const struct pci_error_handlers i40e_err_handler = { .error_detected = i40e_pci_error_detected, .slot_reset = i40e_pci_error_slot_reset, @@ -13377,11 +13372,9 @@ static struct pci_driver i40e_driver = { .id_table = i40e_pci_tbl, .probe = i40e_probe, .remove = i40e_remove, -#ifdef CONFIG_PM .driver = { .pm = &i40e_pm_ops, }, -#endif /* CONFIG_PM */ .shutdown = i40e_shutdown, .err_handler = &i40e_err_handler, .sriov_configure = i40e_pci_sriov_configure, -- cgit v1.2.3 From 3e6b1cf7613393a7c8648133daf0e853c4e05220 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 10 Oct 2017 14:56:58 -0700 Subject: i40e: only redistribute MSI-X vectors when needed Whether or not there are vectors_left, we only need to redistribute our vectors if we didn't get as many as we requested. With the current check, the code will try to redistribute even if we did in fact get all the vectors we requested - this can happen when we have more CPUs than we do vectors. This restores an earlier check to be sure we only redistribute if we didn't get the full count we requested. 
Fixes: 4ce20abc645f (i40e: fix MSI-X vector redistribution if hw limit is reached) Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 2a087319c09b..1cf9ba2d9a41 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -9346,7 +9346,7 @@ static int i40e_init_msix(struct i40e_pf *pf) pf->num_lan_qps = 1; pf->num_lan_msix = 1; - } else if (!vectors_left) { + } else if (v_actual != v_budget) { /* If we have limited resources, we will start with no vectors * for the special features and then allocate vectors to some * of these features based on the policy and at the end disable @@ -9355,7 +9355,8 @@ static int i40e_init_msix(struct i40e_pf *pf) int vec; dev_info(&pf->pdev->dev, - "MSI-X vector limit reached, attempting to redistribute vectors\n"); + "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n", + v_actual, v_budget); /* reserve the misc vector */ vec = v_actual - 1; -- cgit v1.2.3 From aa250f1186319f1e0b9b4a1d99022fe32251b8b6 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sat, 21 Oct 2017 17:51:50 -0700 Subject: i40e/i40evf: Revert "i40e/i40evf: bump tail only in multiples of 8" This reverts commit 11f29003d6376fb123b7c3779dba49bb56fb0815. I am reverting this as I am fairly certain this can result in a memory leak when combined with the current page recycling scheme. Specifically we end up attempting to allocate fewer buffers than we recycled and this results in us rewinding the next to alloc pointer which leads to leaks when we overwrite the rx_buffer_info when processing the next frame. Fixes: 11f29003d637 ("i40e/i40evf: bump tail only in multiples of 8") Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 9 --------- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 9 --------- 2 files changed, 18 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index c5cd233c8fee..d6d352a6e6ea 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1407,15 +1407,6 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) union i40e_rx_desc *rx_desc; struct i40e_rx_buffer *bi; - /* Hardware only fetches new descriptors in cache lines of 8, - * essentially ignoring the lower 3 bits of the tail register. We want - * to ensure our tail writes are aligned to avoid unnecessary work. We - * can't simply round down the cleaned count, since we might fail to - * allocate some buffers. What we really want is to ensure that - * next_to_used + cleaned_count produces an aligned value. 
- */ - cleaned_count -= (ntu + cleaned_count) & 0x7; - /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) return false;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 6806ada11490..fe817e2b6fef 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -711,15 +711,6 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count) union i40e_rx_desc *rx_desc; struct i40e_rx_buffer *bi; - /* Hardware only fetches new descriptors in cache lines of 8, - * essentially ignoring the lower 3 bits of the tail register. We want - * to ensure our tail writes are aligned to avoid unnecessary work. We - * can't simply round down the cleaned count, since we might fail to - * allocate some buffers. What we really want is to ensure that - * next_to_used + cleaned_count produces an aligned value. - */ - cleaned_count -= (ntu + cleaned_count) & 0x7; - /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) return false; -- cgit v1.2.3
From aa5cb02ae938d450be882adac4023d8116a5acd5 Mon Sep 17 00:00:00 2001 From: Amritha Nambiar Date: Fri, 27 Oct 2017 02:35:40 -0700 Subject: i40e: Map TCs with the VSI seids
Add mapping of TCs with the seids of the channel VSIs. TC0 will be mapped to the main VSI seid and all other TCs are mapped to the seid of the corresponding channel VSI. Signed-off-by: Amritha Nambiar Acked-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 1 + drivers/net/ethernet/intel/i40e/i40e_main.c | 2 ++ 2 files changed, 3 insertions(+) (limited to 'drivers/net/ethernet')
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index eb017763646d..f3c501efddc8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -739,6 +739,7 @@ struct i40e_vsi { u16 next_base_queue; /* next queue to be used for channel setup */ struct list_head ch_list; + u16 tc_seid_map[I40E_MAX_TRAFFIC_CLASS]; void *priv; /* client driver data reference. */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 1cf9ba2d9a41..2ff7384cb24b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6100,6 +6100,7 @@ static int i40e_configure_queue_channels(struct i40e_vsi *vsi) int ret = 0, i; /* Create app vsi with the TCs. Main VSI with TC0 is already set up */ + vsi->tc_seid_map[0] = vsi->seid; for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (vsi->tc_config.enabled_tc & BIT(i)) { ch = kzalloc(sizeof(*ch), GFP_KERNEL); @@ -6130,6 +6131,7 @@ static int i40e_configure_queue_channels(struct i40e_vsi *vsi) i, ch->num_queue_pairs); goto err_free; } + vsi->tc_seid_map[i] = ch->seid; } } return ret; -- cgit v1.2.3
From 5efe0c6c2cafa7f458d793c85a1298e713af50e4 Mon Sep 17 00:00:00 2001 From: Amritha Nambiar Date: Fri, 27 Oct 2017 02:35:45 -0700 Subject: i40e: Cloud filter mode for set_switch_config command
Add definitions for L4 filters and switch modes based on cloud filter modes, and extend the set switch config command to include the additional cloud filter mode.
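To make the new interface concrete, here is a hedged sketch of a caller composing the mode byte from these definitions (the choice of L4-port mode keyed on the UDP destination port is illustrative, not part of the patch):

    /* Illustrative call site: bit 7 validates the request, bits 5..4
     * select the L4 type, bits 3..0 select the cloud filter mode;
     * bit 6 stays clear to match on the destination port.
     */
    u8 mode = I40E_AQ_SET_SWITCH_BIT7_VALID |
              I40E_AQ_SET_SWITCH_L4_TYPE_UDP |
              I40E_AQ_SET_SWITCH_MODE_L4_PORT;

    ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, mode, NULL);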
Signed-off-by: Amritha Nambiar Signed-off-by: Kiran Patil Acked-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 30 ++++++++++++++++++++++- drivers/net/ethernet/intel/i40e/i40e_common.c | 4 ++- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 2 +- drivers/net/ethernet/intel/i40e/i40e_type.h | 9 +++++++ 6 files changed, 44 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 6a5db1b33fa2..444447d1a93f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -790,7 +790,35 @@ struct i40e_aqc_set_switch_config { */ __le16 first_tag; __le16 second_tag; - u8 reserved[6]; + /* Next byte is split into following: + * Bit 7 : 0 : No action, 1: Switch to mode defined by bits 6:0 + * Bit 6 : 0 : Destination Port, 1: source port + * Bit 5..4 : L4 type + * 0: rsvd + * 1: TCP + * 2: UDP + * 3: Both TCP and UDP + * Bits 3:0 Mode + * 0: default mode + * 1: L4 port only mode + * 2: non-tunneled mode + * 3: tunneled mode + */ +#define I40E_AQ_SET_SWITCH_BIT7_VALID 0x80 + +#define I40E_AQ_SET_SWITCH_L4_SRC_PORT 0x40 + +#define I40E_AQ_SET_SWITCH_L4_TYPE_RSVD 0x00 +#define I40E_AQ_SET_SWITCH_L4_TYPE_TCP 0x10 +#define I40E_AQ_SET_SWITCH_L4_TYPE_UDP 0x20 +#define I40E_AQ_SET_SWITCH_L4_TYPE_BOTH 0x30 + +#define I40E_AQ_SET_SWITCH_MODE_DEFAULT 0x00 +#define I40E_AQ_SET_SWITCH_MODE_L4_PORT 0x01 +#define I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL 0x02 +#define I40E_AQ_SET_SWITCH_MODE_TUNNEL 0x03 + u8 mode; + u8 rsvd5[5]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config); diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 8d0ee006606b..a9460e0e9cb7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -2407,13 +2407,14 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, * @hw: pointer to the hardware structure * @flags: bit flag values to set * @valid_flags: which bit flags to set + * @mode: cloud filter mode * @cmd_details: pointer to command details structure or NULL * * Set switch configuration bits **/ enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, u16 flags, - u16 valid_flags, + u16 valid_flags, u8 mode, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; @@ -2425,6 +2426,7 @@ enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, i40e_aqc_opc_set_switch_config); scfg->flags = cpu_to_le16(flags); scfg->valid_flags = cpu_to_le16(valid_flags); + scfg->mode = mode; if (hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) { scfg->switch_tag = cpu_to_le16(hw->switch_tag); scfg->first_tag = cpu_to_le16(hw->first_tag); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 9eb618799a30..dc9b8dcf4a1e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -4343,7 +4343,7 @@ flags_complete: sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags, - NULL); + 0, NULL); if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) { dev_info(&pf->pdev->dev, "couldn't set 
switch config bits, err %s aq_err %s\n", diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 2ff7384cb24b..128f2595fcd1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -12166,7 +12166,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) u16 valid_flags; valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; - ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, + ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0, NULL); if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) { dev_info(&pf->pdev->dev, diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 01502561035c..92869f57b52b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -190,7 +190,7 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw, u16 flags, - u16 valid_flags, + u16 valid_flags, u8 mode, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_request_resource(struct i40e_hw *hw, enum i40e_aq_resources_ids resource, diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 17a99b53acd9..e4e5a0c864b7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -283,6 +283,15 @@ struct i40e_hw_capabilities { #define I40E_NVM_IMAGE_TYPE_CLOUD 0x2 #define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3 + /* Cloud filter modes: + * Mode1: Filter on L4 port only + * Mode2: Filter for non-tunneled traffic + * Mode3: Filter for tunnel traffic + */ +#define I40E_CLOUD_FILTER_MODE1 0x6 +#define I40E_CLOUD_FILTER_MODE2 0x7 +#define I40E_CLOUD_FILTER_MODE3 0x8 + u32 management_mode; u32 mng_protocols_over_mctp; #define I40E_MNG_PROTOCOL_PLDM 0x2 -- cgit v1.2.3 From 2c0015238f7d357f179249f101d6ed0327bc642a Mon Sep 17 00:00:00 2001 From: Amritha Nambiar Date: Fri, 27 Oct 2017 02:35:51 -0700 Subject: i40e: Admin queue definitions for cloud filters Add new admin queue definitions and extended fields for cloud filter support. Define big buffer for extended general fields in Add/Remove Cloud filters command. 
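As a rough sketch of how these definitions fit together (the general-field usage shown is an assumption for illustration; the exact word layout is firmware-defined):

    /* Assumed usage: a big-buffer element embeds the regular element and
     * appends 32 words of general fields; setting big_buffer_flag to
     * I40E_AQC_ADD_CLOUD_CMD_BB in the command tells firmware to expect
     * this larger layout (0x80 bytes, as I40E_CHECK_STRUCT_LEN asserts).
     */
    struct i40e_aqc_cloud_filters_element_bb bb = {};

    /* one of the new custom filter types: Dest MAC + VLAN + L4 port */
    bb.element.flags = cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
    /* assumption: the L4 destination port is carried in one of the
     * extended general-field words */
    bb.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] = cpu_to_le16(25);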
Signed-off-by: Amritha Nambiar Signed-off-by: Kiran Patil Signed-off-by: Jingjing Wu Acked-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 107 ++++++++++++++++++++- .../net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 107 ++++++++++++++++++++- 2 files changed, 210 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 444447d1a93f..9f1f5786dcc2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1371,14 +1371,16 @@ struct i40e_aqc_add_remove_cloud_filters { #define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 #define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) - u8 reserved2[4]; + u8 big_buffer_flag; +#define I40E_AQC_ADD_CLOUD_CMD_BB 1 + u8 reserved2[3]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); -struct i40e_aqc_add_remove_cloud_filters_element_data { +struct i40e_aqc_cloud_filters_element_data { u8 outer_mac[6]; u8 inner_mac[6]; __le16 inner_vlan; @@ -1408,6 +1410,10 @@ struct i40e_aqc_add_remove_cloud_filters_element_data { #define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A #define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B #define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C +/* 0x0010 to 0x0017 is for custom filters */ +#define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */ +#define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */ +#define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */ #define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 #define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 @@ -1442,6 +1448,49 @@ struct i40e_aqc_add_remove_cloud_filters_element_data { u8 response_reserved[7]; }; +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data); + +/* i40e_aqc_cloud_filters_element_bb is used when + * I40E_AQC_CLOUD_CMD_BB flag is set. 
+ */ +struct i40e_aqc_cloud_filters_element_bb { + struct i40e_aqc_cloud_filters_element_data element; + u16 general_fields[32]; +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30 +}; + +I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb); + struct i40e_aqc_remove_cloud_filters_completion { __le16 perfect_ovlan_used; __le16 perfect_ovlan_free; @@ -1453,6 +1502,60 @@ struct i40e_aqc_remove_cloud_filters_completion { I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion); +/* Replace filter Command 0x025F + * uses the i40e_aqc_replace_cloud_filters, + * and the generic indirect completion structure + */ +struct i40e_filter_data { + u8 filter_type; + u8 input[3]; +}; + +I40E_CHECK_STRUCT_LEN(4, i40e_filter_data); + +struct i40e_aqc_replace_cloud_filters_cmd { + u8 valid_flags; +#define I40E_AQC_REPLACE_L1_FILTER 0x0 +#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1 +#define I40E_AQC_GET_CLOUD_FILTERS 0x2 +#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4 +#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8 + u8 old_filter_type; + u8 new_filter_type; + u8 tr_bit; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd); + +struct i40e_aqc_replace_cloud_filters_cmd_buf { + u8 data[32]; +/* Filter type INPUT codes*/ +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED BIT(7) + +/* Field Vector offsets */ +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12 +/* big FLU */ +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14 +/* big FLU */ +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15 + +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37 + struct 
i40e_filter_data filters[8]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_replace_cloud_filters_cmd_buf); + /* Add Mirror Rule (indirect or direct 0x0260) * Delete Mirror Rule (indirect or direct 0x0261) * note: some rule types (4,5) do not use an external buffer. diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index 463e331a70a9..af82c303de7b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -1339,14 +1339,16 @@ struct i40e_aqc_add_remove_cloud_filters { #define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 #define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) - u8 reserved2[4]; + u8 big_buffer_flag; +#define I40E_AQC_ADD_CLOUD_CMD_BB 1 + u8 reserved2[3]; __le32 addr_high; __le32 addr_low; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); -struct i40e_aqc_add_remove_cloud_filters_element_data { +struct i40e_aqc_cloud_filters_element_data { u8 outer_mac[6]; u8 inner_mac[6]; __le16 inner_vlan; @@ -1376,6 +1378,10 @@ struct i40e_aqc_add_remove_cloud_filters_element_data { #define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A #define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B #define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C +/* 0x0010 to 0x0017 is for custom filters */ +#define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */ +#define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */ +#define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */ #define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 #define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 @@ -1410,6 +1416,49 @@ struct i40e_aqc_add_remove_cloud_filters_element_data { u8 response_reserved[7]; }; +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data); + +/* i40e_aqc_cloud_filters_element_bb is used when + * I40E_AQC_ADD_CLOUD_CMD_BB flag is set. 
+ */ +struct i40e_aqc_cloud_filters_element_bb { + struct i40e_aqc_cloud_filters_element_data element; + u16 general_fields[32]; +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29 +#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30 +}; + +I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb); + struct i40e_aqc_remove_cloud_filters_completion { __le16 perfect_ovlan_used; __le16 perfect_ovlan_free; @@ -1421,6 +1470,60 @@ struct i40e_aqc_remove_cloud_filters_completion { I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion); +/* Replace filter Command 0x025F + * uses the i40e_aqc_replace_cloud_filters, + * and the generic indirect completion structure + */ +struct i40e_filter_data { + u8 filter_type; + u8 input[3]; +}; + +I40E_CHECK_STRUCT_LEN(4, i40e_filter_data); + +struct i40e_aqc_replace_cloud_filters_cmd { + u8 valid_flags; +#define I40E_AQC_REPLACE_L1_FILTER 0x0 +#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1 +#define I40E_AQC_GET_CLOUD_FILTERS 0x2 +#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4 +#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8 + u8 old_filter_type; + u8 new_filter_type; + u8 tr_bit; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd); + +struct i40e_aqc_replace_cloud_filters_cmd_buf { + u8 data[32]; +/* Filter type INPUT codes*/ +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED BIT(7) + +/* Field Vector offsets */ +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11 +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12 +/* big FLU */ +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14 +/* big FLU */ +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15 + +#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37 + struct 
i40e_filter_data filters[8]; +}; + +I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_replace_cloud_filters_cmd_buf); + /* Add Mirror Rule (indirect or direct 0x0260) * Delete Mirror Rule (indirect or direct 0x0261) * note: some rule types (4,5) do not use an external buffer. -- cgit v1.2.3 From aaf66502b624784c2ff3cd54834e2598d1c40027 Mon Sep 17 00:00:00 2001 From: Amritha Nambiar Date: Fri, 27 Oct 2017 02:35:56 -0700 Subject: i40e: Clean up of cloud filters Introduce the cloud filter data structure and cleanup of cloud filters associated with the device. Signed-off-by: Amritha Nambiar Acked-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 9 +++++++++ drivers/net/ethernet/intel/i40e/i40e_main.c | 24 ++++++++++++++++++++++++ 2 files changed, 33 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index f3c501efddc8..b938bb4a70f7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -253,6 +253,12 @@ struct i40e_fdir_filter { u32 fd_id; }; +struct i40e_cloud_filter { + struct hlist_node cloud_node; + unsigned long cookie; + u16 seid; /* filter control */ +}; + #define I40E_ETH_P_LLDP 0x88cc #define I40E_DCB_PRIO_TYPE_STRICT 0 @@ -420,6 +426,9 @@ struct i40e_pf { struct i40e_udp_port_config udp_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS]; u16 pending_udp_bitmap; + struct hlist_head cloud_filter_list; + u16 num_cloud_filters; + enum i40e_interrupt_policy int_policy; u16 rx_itr_default; u16 tx_itr_default; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 128f2595fcd1..fbe34500ded2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6936,6 +6936,26 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf) I40E_L3_SRC_MASK | I40E_L3_DST_MASK); } +/** + * i40e_cloud_filter_exit - Cleans up the cloud filters + * @pf: Pointer to PF + * + * This function destroys the hlist where all the cloud filters + * were saved. 
+ **/ +static void i40e_cloud_filter_exit(struct i40e_pf *pf) +{ + struct i40e_cloud_filter *cfilter; + struct hlist_node *node; + + hlist_for_each_entry_safe(cfilter, node, + &pf->cloud_filter_list, cloud_node) { + hlist_del(&cfilter->cloud_node); + kfree(cfilter); + } + pf->num_cloud_filters = 0; +} + /** * i40e_close - Disables a network interface * @netdev: network interface device structure @@ -12196,6 +12216,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); if (!vsi) { dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); + i40e_cloud_filter_exit(pf); i40e_fdir_teardown(pf); return -EAGAIN; } @@ -13030,6 +13051,8 @@ static void i40e_remove(struct pci_dev *pdev) if (pf->vsi[pf->lan_vsi]) i40e_vsi_release(pf->vsi[pf->lan_vsi]); + i40e_cloud_filter_exit(pf); + /* remove attached clients */ if (pf->flags & I40E_FLAG_IWARP_ENABLED) { ret_code = i40e_lan_del_device(pf); @@ -13261,6 +13284,7 @@ static void i40e_shutdown(struct pci_dev *pdev) del_timer_sync(&pf->service_timer); cancel_work_sync(&pf->service_task); + i40e_cloud_filter_exit(pf); i40e_fdir_teardown(pf); /* Client close must be called explicitly here because the timer -- cgit v1.2.3
From 2f4b411a3d6766e6362ffbf00e0495a2dfe92507 Mon Sep 17 00:00:00 2001 From: Amritha Nambiar Date: Fri, 27 Oct 2017 02:36:01 -0700 Subject: i40e: Enable cloud filters via tc-flower
This patch enables tc-flower based hardware offloads. A tc flower filter provided by the kernel is configured as a driver-specific cloud filter. The patch implements the functions and admin queue commands needed to support cloud filters in the driver and adds cloud filters to configure these tc-flower filters. The classification function of the filter is to direct matched packets to a traffic class. The hardware traffic class is set based on the classid reserved in the range :ffe0 - :ffef.
Match Dst MAC and route to TC0:
  prio 1 flower dst_mac 3c:fd:fe:a0:d6:70 skip_sw\
  hw_tc 1
Match Dst IPv4, Dst Port and route to TC1:
  prio 2 flower dst_ip 192.168.3.5/32\
  ip_proto udp dst_port 25 skip_sw\
  hw_tc 2
Match Dst IPv6, Dst Port and route to TC1:
  prio 3 flower dst_ip fe8::200:1\
  ip_proto udp dst_port 66 skip_sw\
  hw_tc 2
Delete tc flower filter: Example:
Flow Director Sideband is disabled while cloud filters are being configured via tc-flower, and stays disabled for as long as any cloud filter exists. Unsupported matches when cloud filters are added using the enhanced big buffer cloud filter mode of the underlying switch include: 1. Source port and source IP. 2. Combined MAC address and IP fields. 3. Not specifying an L4 port. These filter matches can, however, be used to redirect traffic to the main VSI (tc 0), which does not require the enhanced big buffer cloud filter support.
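The match examples above appear to have lost their leading tc invocations in this digest; a hedged reconstruction of complete commands follows (the device name eth0, the ingress qdisc setup, and the delete handle are assumptions, not taken from the patch):

    # assumed setup: i40e TCs already configured, plus an ingress
    # qdisc to attach the flower filters to
    tc qdisc add dev eth0 ingress
    tc filter add dev eth0 protocol ip parent ffff: \
        prio 1 flower dst_mac 3c:fd:fe:a0:d6:70 skip_sw \
        hw_tc 1
    tc filter add dev eth0 protocol ip parent ffff: \
        prio 2 flower dst_ip 192.168.3.5/32 \
        ip_proto udp dst_port 25 skip_sw \
        hw_tc 2
    tc filter add dev eth0 protocol ipv6 parent ffff: \
        prio 3 flower dst_ip fe8::200:1 \
        ip_proto udp dst_port 66 skip_sw \
        hw_tc 2
    # deleting a flower filter by priority and handle
    tc filter del dev eth0 parent ffff: protocol ip prio 1 handle 0x1 flower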
Signed-off-by: Amritha Nambiar Signed-off-by: Kiran Patil Signed-off-by: Anjali Singhai Jain Signed-off-by: Jingjing Wu Acked-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 54 +- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 3 + drivers/net/ethernet/intel/i40e/i40e_common.c | 189 ++++ drivers/net/ethernet/intel/i40e/i40e_main.c | 957 ++++++++++++++++++++- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 16 + drivers/net/ethernet/intel/i40e/i40e_type.h | 1 + .../net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 3 + 7 files changed, 1192 insertions(+), 31 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index b938bb4a70f7..5829715fa342 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -55,6 +55,8 @@ #include #include #include +#include +#include #include "i40e_type.h" #include "i40e_prototype.h" #include "i40e_client.h" @@ -253,10 +255,56 @@ struct i40e_fdir_filter { u32 fd_id; }; +#define I40E_CLOUD_FIELD_OMAC 0x01 +#define I40E_CLOUD_FIELD_IMAC 0x02 +#define I40E_CLOUD_FIELD_IVLAN 0x04 +#define I40E_CLOUD_FIELD_TEN_ID 0x08 +#define I40E_CLOUD_FIELD_IIP 0x10 + +#define I40E_CLOUD_FILTER_FLAGS_OMAC I40E_CLOUD_FIELD_OMAC +#define I40E_CLOUD_FILTER_FLAGS_IMAC I40E_CLOUD_FIELD_IMAC +#define I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN (I40E_CLOUD_FIELD_IMAC | \ + I40E_CLOUD_FIELD_IVLAN) +#define I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID (I40E_CLOUD_FIELD_IMAC | \ + I40E_CLOUD_FIELD_TEN_ID) +#define I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC (I40E_CLOUD_FIELD_OMAC | \ + I40E_CLOUD_FIELD_IMAC | \ + I40E_CLOUD_FIELD_TEN_ID) +#define I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID (I40E_CLOUD_FIELD_IMAC | \ + I40E_CLOUD_FIELD_IVLAN | \ + I40E_CLOUD_FIELD_TEN_ID) +#define I40E_CLOUD_FILTER_FLAGS_IIP I40E_CLOUD_FIELD_IIP + struct i40e_cloud_filter { struct hlist_node cloud_node; unsigned long cookie; - u16 seid; /* filter control */ + /* cloud filter input set follows */ + u8 dst_mac[ETH_ALEN]; + u8 src_mac[ETH_ALEN]; + __be16 vlan_id; + u16 seid; /* filter control */ + __be16 dst_port; + __be16 src_port; + u32 tenant_id; + union { + struct { + struct in_addr dst_ip; + struct in_addr src_ip; + } v4; + struct { + struct in6_addr dst_ip6; + struct in6_addr src_ip6; + } v6; + } ip; +#define dst_ipv6 ip.v6.dst_ip6.s6_addr32 +#define src_ipv6 ip.v6.src_ip6.s6_addr32 +#define dst_ipv4 ip.v4.dst_ip.s_addr +#define src_ipv4 ip.v4.src_ip.s_addr + u16 n_proto; /* Ethernet Protocol */ + u8 ip_proto; /* IPPROTO value */ + u8 flags; +#define I40E_CLOUD_TNL_TYPE_NONE 0xff + u8 tunnel_type; }; #define I40E_ETH_P_LLDP 0x88cc @@ -492,6 +540,8 @@ struct i40e_pf { #define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT(27) #define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT(28) #define I40E_FLAG_TC_MQPRIO BIT(29) +#define I40E_FLAG_FD_SB_INACTIVE BIT(30) +#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT(31) struct i40e_client_instance *cinst; bool stat_offsets_loaded; @@ -574,6 +624,8 @@ struct i40e_pf { u16 phy_led_val; u16 override_q_count; + u16 last_sw_conf_flags; + u16 last_sw_conf_valid_flags; }; /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 9f1f5786dcc2..b0188b8f91ba 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1392,6 +1392,9 @@ struct i40e_aqc_cloud_filters_element_data { struct { u8 
data[16]; } v6; + struct { + __le16 data[8]; + } raw_v6; } ipaddr; __le16 flags; #define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index a9460e0e9cb7..0203665cb53c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -5436,5 +5436,194 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw, status = i40e_aq_write_ppp(hw, (void *)sec, sec->data_end, track_id, &offset, &info, NULL); + + return status; +} + +/** + * i40e_aq_add_cloud_filters + * @hw: pointer to the hardware structure + * @seid: VSI seid to add cloud filters from + * @filters: Buffer which contains the filters to be added + * @filter_count: number of filters contained in the buffer + * + * Set the cloud filters for a given VSI. The contents of the + * i40e_aqc_cloud_filters_element_data are filled in by the caller + * of the function. + * + **/ +enum i40e_status_code +i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_cloud_filters_element_data *filters, + u8 filter_count) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_cloud_filters *cmd = + (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; + enum i40e_status_code status; + u16 buff_len; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_add_cloud_filters); + + buff_len = filter_count * sizeof(*filters); + desc.datalen = cpu_to_le16(buff_len); + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + cmd->num_filters = filter_count; + cmd->seid = cpu_to_le16(seid); + + status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); + + return status; +} + +/** + * i40e_aq_add_cloud_filters_bb + * @hw: pointer to the hardware structure + * @seid: VSI seid to add cloud filters from + * @filters: Buffer which contains the filters in big buffer to be added + * @filter_count: number of filters contained in the buffer + * + * Set the big buffer cloud filters for a given VSI. The contents of the + * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the + * function. + * + **/ +i40e_status +i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_cloud_filters_element_bb *filters, + u8 filter_count) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_cloud_filters *cmd = + (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; + i40e_status status; + u16 buff_len; + int i; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_add_cloud_filters); + + buff_len = filter_count * sizeof(*filters); + desc.datalen = cpu_to_le16(buff_len); + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + cmd->num_filters = filter_count; + cmd->seid = cpu_to_le16(seid); + cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; + + for (i = 0; i < filter_count; i++) { + u16 tnl_type; + u32 ti; + + tnl_type = (le16_to_cpu(filters[i].element.flags) & + I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> + I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; + + /* Due to hardware eccentricities, the VNI for Geneve is shifted + * one more byte further than normally used for Tenant ID in + * other tunnel types. 
+ */ + if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { + ti = le32_to_cpu(filters[i].element.tenant_id); + filters[i].element.tenant_id = cpu_to_le32(ti << 8); + } + } + + status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); + + return status; +} + +/** + * i40e_aq_rem_cloud_filters + * @hw: pointer to the hardware structure + * @seid: VSI seid to remove cloud filters from + * @filters: Buffer which contains the filters to be removed + * @filter_count: number of filters contained in the buffer + * + * Remove the cloud filters for a given VSI. The contents of the + * i40e_aqc_cloud_filters_element_data are filled in by the caller + * of the function. + * + **/ +enum i40e_status_code +i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_cloud_filters_element_data *filters, + u8 filter_count) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_cloud_filters *cmd = + (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; + enum i40e_status_code status; + u16 buff_len; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_remove_cloud_filters); + + buff_len = filter_count * sizeof(*filters); + desc.datalen = cpu_to_le16(buff_len); + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + cmd->num_filters = filter_count; + cmd->seid = cpu_to_le16(seid); + + status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); + + return status; +} + +/** + * i40e_aq_rem_cloud_filters_bb + * @hw: pointer to the hardware structure + * @seid: VSI seid to remove cloud filters from + * @filters: Buffer which contains the filters in big buffer to be removed + * @filter_count: number of filters contained in the buffer + * + * Remove the big buffer cloud filters for a given VSI. The contents of the + * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the + * function. + * + **/ +i40e_status +i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_cloud_filters_element_bb *filters, + u8 filter_count) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_remove_cloud_filters *cmd = + (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; + i40e_status status; + u16 buff_len; + int i; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_remove_cloud_filters); + + buff_len = filter_count * sizeof(*filters); + desc.datalen = cpu_to_le16(buff_len); + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); + cmd->num_filters = filter_count; + cmd->seid = cpu_to_le16(seid); + cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB; + + for (i = 0; i < filter_count; i++) { + u16 tnl_type; + u32 ti; + + tnl_type = (le16_to_cpu(filters[i].element.flags) & + I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> + I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; + + /* Due to hardware eccentricities, the VNI for Geneve is shifted + * one more byte further than normally used for Tenant ID in + * other tunnel types. 
+ */ + if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { + ti = le32_to_cpu(filters[i].element.tenant_id); + filters[i].element.tenant_id = cpu_to_le32(ti << 8); + } + } + + status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); + return status; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index fbe34500ded2..dfecaeda0654 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -69,6 +69,15 @@ static int i40e_reset(struct i40e_pf *pf); static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired); static void i40e_fdir_sb_setup(struct i40e_pf *pf); static int i40e_veb_get_bw_info(struct i40e_veb *veb); +static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, + struct i40e_cloud_filter *filter, + bool add); +static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, + struct i40e_cloud_filter *filter, + bool add); +static int i40e_get_capabilities(struct i40e_pf *pf, + enum i40e_admin_queue_opc list_type); + /* i40e_pci_tbl - PCI Device ID Table * @@ -5480,7 +5489,11 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate) **/ static void i40e_remove_queue_channels(struct i40e_vsi *vsi) { + enum i40e_admin_queue_err last_aq_status; + struct i40e_cloud_filter *cfilter; struct i40e_channel *ch, *ch_tmp; + struct i40e_pf *pf = vsi->back; + struct hlist_node *node; int ret, i; /* Reset rss size that was stored when reconfiguring rss for @@ -5521,6 +5534,29 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi) "Failed to reset tx rate for ch->seid %u\n", ch->seid); + /* delete cloud filters associated with this channel */ + hlist_for_each_entry_safe(cfilter, node, + &pf->cloud_filter_list, cloud_node) { + if (cfilter->seid != ch->seid) + continue; + + hash_del(&cfilter->cloud_node); + if (cfilter->dst_port) + ret = i40e_add_del_cloud_filter_big_buf(vsi, + cfilter, + false); + else + ret = i40e_add_del_cloud_filter(vsi, cfilter, + false); + last_aq_status = pf->hw.aq.asq_last_status; + if (ret) + dev_info(&pf->pdev->dev, + "Failed to delete cloud filter, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, last_aq_status)); + kfree(cfilter); + } + /* delete VSI from FW */ ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid, NULL); @@ -5971,6 +6007,63 @@ static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi, return ch->initialized ? true : false; } +/** + * i40e_validate_and_set_switch_mode - sets up switch mode correctly + * @vsi: ptr to VSI which has PF backing + * + * Sets up switch mode correctly if it needs to be changed and perform + * what are allowed modes. 
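+ * (Concretely: it queries the device capabilities, accepts switch
+ * mode2 (non-tunneled cloud filters) as-is, rejects the other cloud
+ * filter modes, and otherwise programs the non-tunneled mode with
+ * both TCP and UDP L4 types via set_switch_config.)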
+ **/ +static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi) +{ + u8 mode; + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + int ret; + + ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities); + if (ret) + return -EINVAL; + + if (hw->dev_caps.switch_mode) { + /* if switch mode is set, support mode2 (non-tunneled for + * cloud filter) for now + */ + u32 switch_mode = hw->dev_caps.switch_mode & + I40E_SWITCH_MODE_MASK; + if (switch_mode >= I40E_CLOUD_FILTER_MODE1) { + if (switch_mode == I40E_CLOUD_FILTER_MODE2) + return 0; + dev_err(&pf->pdev->dev, + "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n", + hw->dev_caps.switch_mode); + return -EINVAL; + } + } + + /* Set Bit 7 to be valid */ + mode = I40E_AQ_SET_SWITCH_BIT7_VALID; + + /* Set L4type to both TCP and UDP support */ + mode |= I40E_AQ_SET_SWITCH_L4_TYPE_BOTH; + + /* Set cloud filter mode */ + mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL; + + /* Prep mode field for set_switch_config */ + ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags, + pf->last_sw_conf_valid_flags, + mode, NULL); + if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH) + dev_err(&pf->pdev->dev, + "couldn't set switch config bits, err %s aq_err %s\n", + i40e_stat_str(hw, ret), + i40e_aq_str(hw, + hw->aq.asq_last_status)); + + return ret; +} + /** * i40e_create_queue_channel - function to create channel * @vsi: VSI to be configured @@ -6750,13 +6843,720 @@ exit: return ret; } +/** + * i40e_set_cld_element - sets cloud filter element data + * @filter: cloud filter rule + * @cld: ptr to cloud filter element data + * + * This is helper function to copy data into cloud filter element + **/ +static inline void +i40e_set_cld_element(struct i40e_cloud_filter *filter, + struct i40e_aqc_cloud_filters_element_data *cld) +{ + int i, j; + u32 ipa; + + memset(cld, 0, sizeof(*cld)); + ether_addr_copy(cld->outer_mac, filter->dst_mac); + ether_addr_copy(cld->inner_mac, filter->src_mac); + + if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6) + return; + + if (filter->n_proto == ETH_P_IPV6) { +#define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1) + for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6); + i++, j += 2) { + ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]); + ipa = cpu_to_le32(ipa); + memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa)); + } + } else { + ipa = be32_to_cpu(filter->dst_ipv4); + memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa)); + } + + cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id)); + + /* tenant_id is not supported by FW now, once the support is enabled + * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id) + */ + if (filter->tenant_id) + return; +} + +/** + * i40e_add_del_cloud_filter - Add/del cloud filter + * @vsi: pointer to VSI + * @filter: cloud filter rule + * @add: if true, add, if false, delete + * + * Add or delete a cloud filter for a specific flow spec. + * Returns 0 if the filter were successfully added. 
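+ * On failure a nonzero status is returned and the admin queue
+ * error is logged via dev_dbg.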
+ **/ +static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, + struct i40e_cloud_filter *filter, bool add) +{ + struct i40e_aqc_cloud_filters_element_data cld_filter; + struct i40e_pf *pf = vsi->back; + int ret; + static const u16 flag_table[128] = { + [I40E_CLOUD_FILTER_FLAGS_OMAC] = + I40E_AQC_ADD_CLOUD_FILTER_OMAC, + [I40E_CLOUD_FILTER_FLAGS_IMAC] = + I40E_AQC_ADD_CLOUD_FILTER_IMAC, + [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN] = + I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN, + [I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] = + I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID, + [I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] = + I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC, + [I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] = + I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID, + [I40E_CLOUD_FILTER_FLAGS_IIP] = + I40E_AQC_ADD_CLOUD_FILTER_IIP, + }; + + if (filter->flags >= ARRAY_SIZE(flag_table)) + return I40E_ERR_CONFIG; + + /* copy element needed to add cloud filter from filter */ + i40e_set_cld_element(filter, &cld_filter); + + if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE) + cld_filter.flags = cpu_to_le16(filter->tunnel_type << + I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT); + + if (filter->n_proto == ETH_P_IPV6) + cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] | + I40E_AQC_ADD_CLOUD_FLAGS_IPV6); + else + cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] | + I40E_AQC_ADD_CLOUD_FLAGS_IPV4); + + if (add) + ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid, + &cld_filter, 1); + else + ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid, + &cld_filter, 1); + if (ret) + dev_dbg(&pf->pdev->dev, + "Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n", + add ? "add" : "delete", filter->dst_port, ret, + pf->hw.aq.asq_last_status); + else + dev_info(&pf->pdev->dev, + "%s cloud filter for VSI: %d\n", + add ? "Added" : "Deleted", filter->seid); + return ret; +} + +/** + * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf + * @vsi: pointer to VSI + * @filter: cloud filter rule + * @add: if true, add, if false, delete + * + * Add or delete a cloud filter for a specific flow spec using big buffer. + * Returns 0 if the filter were successfully added. 
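+ * Unlike the plain variant above, this path requires a non-zero
+ * destination L4 port and rejects filters that match on source
+ * port or source IP.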
+ **/ +static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi, + struct i40e_cloud_filter *filter, + bool add) +{ + struct i40e_aqc_cloud_filters_element_bb cld_filter; + struct i40e_pf *pf = vsi->back; + int ret; + + /* Both (src/dst) valid mac_addr are not supported */ + if ((is_valid_ether_addr(filter->dst_mac) && + is_valid_ether_addr(filter->src_mac)) || + (is_multicast_ether_addr(filter->dst_mac) && + is_multicast_ether_addr(filter->src_mac))) + return -EINVAL; + + /* Make sure port is specified, otherwise bail out, for channel + * specific cloud filter needs 'L4 port' to be non-zero + */ + if (!filter->dst_port) + return -EINVAL; + + /* adding filter using src_port/src_ip is not supported at this stage */ + if (filter->src_port || filter->src_ipv4 || + !ipv6_addr_any(&filter->ip.v6.src_ip6)) + return -EINVAL; + + /* copy element needed to add cloud filter from filter */ + i40e_set_cld_element(filter, &cld_filter.element); + + if (is_valid_ether_addr(filter->dst_mac) || + is_valid_ether_addr(filter->src_mac) || + is_multicast_ether_addr(filter->dst_mac) || + is_multicast_ether_addr(filter->src_mac)) { + /* MAC + IP : unsupported mode */ + if (filter->dst_ipv4) + return -EINVAL; + + /* since we validated that L4 port must be valid before + * we get here, start with respective "flags" value + * and update if vlan is present or not + */ + cld_filter.element.flags = + cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT); + + if (filter->vlan_id) { + cld_filter.element.flags = + cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT); + } + + } else if (filter->dst_ipv4 || + !ipv6_addr_any(&filter->ip.v6.dst_ip6)) { + cld_filter.element.flags = + cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT); + if (filter->n_proto == ETH_P_IPV6) + cld_filter.element.flags |= + cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6); + else + cld_filter.element.flags |= + cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4); + } else { + dev_err(&pf->pdev->dev, + "either mac or ip has to be valid for cloud filter\n"); + return -EINVAL; + } + + /* Now copy L4 port in Byte 6..7 in general fields */ + cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] = + be16_to_cpu(filter->dst_port); + + if (add) { + /* Validate current device switch mode, change if necessary */ + ret = i40e_validate_and_set_switch_mode(vsi); + if (ret) { + dev_err(&pf->pdev->dev, + "failed to set switch mode, ret %d\n", + ret); + return ret; + } + + ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid, + &cld_filter, 1); + } else { + ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid, + &cld_filter, 1); + } + + if (ret) + dev_dbg(&pf->pdev->dev, + "Failed to %s cloud filter(big buffer) err %d aq_err %d\n", + add ? "add" : "delete", ret, pf->hw.aq.asq_last_status); + else + dev_info(&pf->pdev->dev, + "%s cloud filter for VSI: %d, L4 port: %d\n", + add ? 
"add" : "delete", filter->seid, + ntohs(filter->dst_port)); + return ret; +} + +/** + * i40e_parse_cls_flower - Parse tc flower filters provided by kernel + * @vsi: Pointer to VSI + * @cls_flower: Pointer to struct tc_cls_flower_offload + * @filter: Pointer to cloud filter structure + * + **/ +static int i40e_parse_cls_flower(struct i40e_vsi *vsi, + struct tc_cls_flower_offload *f, + struct i40e_cloud_filter *filter) +{ + u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0; + struct i40e_pf *pf = vsi->back; + u8 field_flags = 0; + + if (f->dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_VLAN) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_PORTS) | + BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) { + dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n", + f->dissector->used_keys); + return -EOPNOTSUPP; + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_dissector_key_keyid *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_KEYID, + f->key); + + struct flow_dissector_key_keyid *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_KEYID, + f->mask); + + if (mask->keyid != 0) + field_flags |= I40E_CLOUD_FIELD_TEN_ID; + + filter->tenant_id = be32_to_cpu(key->keyid); + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->key); + + struct flow_dissector_key_basic *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->mask); + + n_proto_key = ntohs(key->n_proto); + n_proto_mask = ntohs(mask->n_proto); + + if (n_proto_key == ETH_P_ALL) { + n_proto_key = 0; + n_proto_mask = 0; + } + filter->n_proto = n_proto_key & n_proto_mask; + filter->ip_proto = key->ip_proto; + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_dissector_key_eth_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ETH_ADDRS, + f->key); + + struct flow_dissector_key_eth_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ETH_ADDRS, + f->mask); + + /* use is_broadcast and is_zero to check for all 0xf or 0 */ + if (!is_zero_ether_addr(mask->dst)) { + if (is_broadcast_ether_addr(mask->dst)) { + field_flags |= I40E_CLOUD_FIELD_OMAC; + } else { + dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n", + mask->dst); + return I40E_ERR_CONFIG; + } + } + + if (!is_zero_ether_addr(mask->src)) { + if (is_broadcast_ether_addr(mask->src)) { + field_flags |= I40E_CLOUD_FIELD_IMAC; + } else { + dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n", + mask->src); + return I40E_ERR_CONFIG; + } + } + ether_addr_copy(filter->dst_mac, key->dst); + ether_addr_copy(filter->src_mac, key->src); + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_dissector_key_vlan *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_VLAN, + f->key); + struct flow_dissector_key_vlan *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_VLAN, + f->mask); + + if (mask->vlan_id) { + if (mask->vlan_id == VLAN_VID_MASK) { + field_flags |= I40E_CLOUD_FIELD_IVLAN; + + } else { + dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n", + mask->vlan_id); + return I40E_ERR_CONFIG; + } + } + + filter->vlan_id = cpu_to_be16(key->vlan_id); + } 
+ + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_dissector_key_control *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_CONTROL, + f->key); + + addr_type = key->addr_type; + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_dissector_key_ipv4_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + f->key); + struct flow_dissector_key_ipv4_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + f->mask); + + if (mask->dst) { + if (mask->dst == cpu_to_be32(0xffffffff)) { + field_flags |= I40E_CLOUD_FIELD_IIP; + } else { + mask->dst = be32_to_cpu(mask->dst); + dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4\n", + &mask->dst); + return I40E_ERR_CONFIG; + } + } + + if (mask->src) { + if (mask->src == cpu_to_be32(0xffffffff)) { + field_flags |= I40E_CLOUD_FIELD_IIP; + } else { + mask->src = be32_to_cpu(mask->src); + dev_err(&pf->pdev->dev, "Bad ip src mask %pI4\n", + &mask->src); + return I40E_ERR_CONFIG; + } + } + + if (field_flags & I40E_CLOUD_FIELD_TEN_ID) { + dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n"); + return I40E_ERR_CONFIG; + } + filter->dst_ipv4 = key->dst; + filter->src_ipv4 = key->src; + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_dissector_key_ipv6_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + f->key); + struct flow_dissector_key_ipv6_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + f->mask); + + /* src and dest IPV6 address should not be LOOPBACK + * (0:0:0:0:0:0:0:1), which can be represented as ::1 + */ + if (ipv6_addr_loopback(&key->dst) || + ipv6_addr_loopback(&key->src)) { + dev_err(&pf->pdev->dev, + "Bad ipv6, addr is LOOPBACK\n"); + return I40E_ERR_CONFIG; + } + if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src)) + field_flags |= I40E_CLOUD_FIELD_IIP; + + memcpy(&filter->src_ipv6, &key->src.s6_addr32, + sizeof(filter->src_ipv6)); + memcpy(&filter->dst_ipv6, &key->dst.s6_addr32, + sizeof(filter->dst_ipv6)); + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_dissector_key_ports *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_PORTS, + f->key); + struct flow_dissector_key_ports *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_PORTS, + f->mask); + + if (mask->src) { + if (mask->src == cpu_to_be16(0xffff)) { + field_flags |= I40E_CLOUD_FIELD_IIP; + } else { + dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n", + be16_to_cpu(mask->src)); + return I40E_ERR_CONFIG; + } + } + + if (mask->dst) { + if (mask->dst == cpu_to_be16(0xffff)) { + field_flags |= I40E_CLOUD_FIELD_IIP; + } else { + dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n", + be16_to_cpu(mask->dst)); + return I40E_ERR_CONFIG; + } + } + + filter->dst_port = key->dst; + filter->src_port = key->src; + + switch (filter->ip_proto) { + case IPPROTO_TCP: + case IPPROTO_UDP: + break; + default: + dev_err(&pf->pdev->dev, + "Only UDP and TCP transport are supported\n"); + return -EINVAL; + } + } + filter->flags = field_flags; + return 0; +} + +/** + * i40e_handle_tclass: Forward to a traffic class on the device + * @vsi: Pointer to VSI + * @tc: traffic class index on the device + * @filter: Pointer to cloud filter structure + * + **/ +static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc, + struct i40e_cloud_filter *filter) +{ + struct i40e_channel *ch, 
*ch_tmp; + + /* direct to a traffic class on the same device */ + if (tc == 0) { + filter->seid = vsi->seid; + return 0; + } else if (vsi->tc_config.enabled_tc & BIT(tc)) { + if (!filter->dst_port) { + dev_err(&vsi->back->pdev->dev, + "Specify destination port to direct to traffic class that is not default\n"); + return -EINVAL; + } + if (list_empty(&vsi->ch_list)) + return -EINVAL; + list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, + list) { + if (ch->seid == vsi->tc_seid_map[tc]) + filter->seid = ch->seid; + } + return 0; + } + dev_err(&vsi->back->pdev->dev, "TC is not enabled\n"); + return -EINVAL; +} + +/** + * i40e_configure_clsflower - Configure tc flower filters + * @vsi: Pointer to VSI + * @cls_flower: Pointer to struct tc_cls_flower_offload + * + **/ +static int i40e_configure_clsflower(struct i40e_vsi *vsi, + struct tc_cls_flower_offload *cls_flower) +{ + int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid); + struct i40e_cloud_filter *filter = NULL; + struct i40e_pf *pf = vsi->back; + int err = 0; + + if (tc < 0) { + dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n"); + return -EINVAL; + } + + if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) + return -EBUSY; + + if (pf->fdir_pf_active_filters || + (!hlist_empty(&pf->fdir_filter_list))) { + dev_err(&vsi->back->pdev->dev, + "Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n"); + return -EINVAL; + } + + if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) { + dev_err(&vsi->back->pdev->dev, + "Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n"); + vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED; + vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER; + } + + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) + return -ENOMEM; + + filter->cookie = cls_flower->cookie; + + err = i40e_parse_cls_flower(vsi, cls_flower, filter); + if (err < 0) + goto err; + + err = i40e_handle_tclass(vsi, tc, filter); + if (err < 0) + goto err; + + /* Add cloud filter */ + if (filter->dst_port) + err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true); + else + err = i40e_add_del_cloud_filter(vsi, filter, true); + + if (err) { + dev_err(&pf->pdev->dev, + "Failed to add cloud filter, err %s\n", + i40e_stat_str(&pf->hw, err)); + err = i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status); + goto err; + } + + /* add filter to the ordered list */ + INIT_HLIST_NODE(&filter->cloud_node); + + hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list); + + pf->num_cloud_filters++; + + return err; +err: + kfree(filter); + return err; +} + +/** + * i40e_find_cloud_filter - Find the could filter in the list + * @vsi: Pointer to VSI + * @cookie: filter specific cookie + * + **/ +static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi, + unsigned long *cookie) +{ + struct i40e_cloud_filter *filter = NULL; + struct hlist_node *node2; + + hlist_for_each_entry_safe(filter, node2, + &vsi->back->cloud_filter_list, cloud_node) + if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie))) + return filter; + return NULL; +} + +/** + * i40e_delete_clsflower - Remove tc flower filters + * @vsi: Pointer to VSI + * @cls_flower: Pointer to struct tc_cls_flower_offload + * + **/ +static int i40e_delete_clsflower(struct i40e_vsi *vsi, + struct tc_cls_flower_offload *cls_flower) +{ + struct i40e_cloud_filter *filter = NULL; + struct i40e_pf *pf = vsi->back; + int err = 0; + + filter = i40e_find_cloud_filter(vsi, 
&cls_flower->cookie); + + if (!filter) + return -EINVAL; + + hash_del(&filter->cloud_node); + + if (filter->dst_port) + err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false); + else + err = i40e_add_del_cloud_filter(vsi, filter, false); + + kfree(filter); + if (err) { + dev_err(&pf->pdev->dev, + "Failed to delete cloud filter, err %s\n", + i40e_stat_str(&pf->hw, err)); + return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status); + } + + pf->num_cloud_filters--; + if (!pf->num_cloud_filters) + if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) && + !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) { + pf->flags |= I40E_FLAG_FD_SB_ENABLED; + pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER; + pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; + } + return 0; +} + +/** + * i40e_setup_tc_cls_flower - flower classifier offloads + * @netdev: net device to configure + * @type_data: offload data + **/ +static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np, + struct tc_cls_flower_offload *cls_flower) +{ + struct i40e_vsi *vsi = np->vsi; + + if (cls_flower->common.chain_index) + return -EOPNOTSUPP; + + switch (cls_flower->command) { + case TC_CLSFLOWER_REPLACE: + return i40e_configure_clsflower(vsi, cls_flower); + case TC_CLSFLOWER_DESTROY: + return i40e_delete_clsflower(vsi, cls_flower); + case TC_CLSFLOWER_STATS: + return -EOPNOTSUPP; + default: + return -EINVAL; + } +} + +static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct i40e_netdev_priv *np = cb_priv; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return i40e_setup_tc_cls_flower(np, type_data); + + default: + return -EOPNOTSUPP; + } +} + +static int i40e_setup_tc_block(struct net_device *dev, + struct tc_block_offload *f) +{ + struct i40e_netdev_priv *np = netdev_priv(dev); + + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb, + np, np); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np); + return 0; + default: + return -EOPNOTSUPP; + } +} + static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data) { - if (type != TC_SETUP_MQPRIO) + switch (type) { + case TC_SETUP_MQPRIO: + return i40e_setup_tc(netdev, type_data); + case TC_SETUP_BLOCK: + return i40e_setup_tc_block(netdev, type_data); + default: return -EOPNOTSUPP; - - return i40e_setup_tc(netdev, type_data); + } } /** @@ -6954,6 +7754,13 @@ static void i40e_cloud_filter_exit(struct i40e_pf *pf) kfree(cfilter); } pf->num_cloud_filters = 0; + + if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) && + !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) { + pf->flags |= I40E_FLAG_FD_SB_ENABLED; + pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER; + pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; + } } /** @@ -8061,7 +8868,8 @@ end_reconstitute: * i40e_get_capabilities - get info about the HW * @pf: the PF struct **/ -static int i40e_get_capabilities(struct i40e_pf *pf) +static int i40e_get_capabilities(struct i40e_pf *pf, + enum i40e_admin_queue_opc list_type) { struct i40e_aqc_list_capabilities_element_resp *cap_buf; u16 data_size; @@ -8076,9 +8884,8 @@ static int i40e_get_capabilities(struct i40e_pf *pf) /* this loads the data into the hw struct for us */ err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, - &data_size, - i40e_aqc_opc_list_func_capabilities, - NULL); + &data_size, list_type, + NULL); /* data loaded, buffer no longer needed */ 
kfree(cap_buf); @@ -8095,26 +8902,44 @@ static int i40e_get_capabilities(struct i40e_pf *pf) } } while (err); - if (pf->hw.debug_mask & I40E_DEBUG_USER) - dev_info(&pf->pdev->dev, - "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", - pf->hw.pf_id, pf->hw.func_caps.num_vfs, - pf->hw.func_caps.num_msix_vectors, - pf->hw.func_caps.num_msix_vectors_vf, - pf->hw.func_caps.fd_filters_guaranteed, - pf->hw.func_caps.fd_filters_best_effort, - pf->hw.func_caps.num_tx_qp, - pf->hw.func_caps.num_vsis); - + if (pf->hw.debug_mask & I40E_DEBUG_USER) { + if (list_type == i40e_aqc_opc_list_func_capabilities) { + dev_info(&pf->pdev->dev, + "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", + pf->hw.pf_id, pf->hw.func_caps.num_vfs, + pf->hw.func_caps.num_msix_vectors, + pf->hw.func_caps.num_msix_vectors_vf, + pf->hw.func_caps.fd_filters_guaranteed, + pf->hw.func_caps.fd_filters_best_effort, + pf->hw.func_caps.num_tx_qp, + pf->hw.func_caps.num_vsis); + } else if (list_type == i40e_aqc_opc_list_dev_capabilities) { + dev_info(&pf->pdev->dev, + "switch_mode=0x%04x, function_valid=0x%08x\n", + pf->hw.dev_caps.switch_mode, + pf->hw.dev_caps.valid_functions); + dev_info(&pf->pdev->dev, + "SR-IOV=%d, num_vfs for all function=%u\n", + pf->hw.dev_caps.sr_iov_1_1, + pf->hw.dev_caps.num_vfs); + dev_info(&pf->pdev->dev, + "num_vsis=%u, num_rx:%u, num_tx=%u\n", + pf->hw.dev_caps.num_vsis, + pf->hw.dev_caps.num_rx_qp, + pf->hw.dev_caps.num_tx_qp); + } + } + if (list_type == i40e_aqc_opc_list_func_capabilities) { #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ + pf->hw.func_caps.num_vfs) - if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) { - dev_info(&pf->pdev->dev, - "got num_vsis %d, setting num_vsis to %d\n", - pf->hw.func_caps.num_vsis, DEF_NUM_VSI); - pf->hw.func_caps.num_vsis = DEF_NUM_VSI; + if (pf->hw.revision_id == 0 && + pf->hw.func_caps.num_vsis < DEF_NUM_VSI) { + dev_info(&pf->pdev->dev, + "got num_vsis %d, setting num_vsis to %d\n", + pf->hw.func_caps.num_vsis, DEF_NUM_VSI); + pf->hw.func_caps.num_vsis = DEF_NUM_VSI; + } } - return 0; } @@ -8156,6 +8981,7 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf) if (!vsi) { dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + pf->flags |= I40E_FLAG_FD_SB_INACTIVE; return; } } @@ -8177,6 +9003,45 @@ static void i40e_fdir_teardown(struct i40e_pf *pf) i40e_vsi_release(vsi); } +/** + * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs + * @vsi: PF main vsi + * @seid: seid of main or channel VSIs + * + * Rebuilds cloud filters associated with main VSI and channel VSIs if they + * existed before reset + **/ +static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid) +{ + struct i40e_cloud_filter *cfilter; + struct i40e_pf *pf = vsi->back; + struct hlist_node *node; + i40e_status ret; + + /* Add cloud filters back if they exist */ + hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list, + cloud_node) { + if (cfilter->seid != seid) + continue; + + if (cfilter->dst_port) + ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, + true); + else + ret = i40e_add_del_cloud_filter(vsi, cfilter, true); + + if (ret) { + dev_dbg(&pf->pdev->dev, + "Failed to rebuild cloud filter, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); + return ret; + } + } + return 0; +} + /** * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before 
reset * @vsi: PF main vsi @@ -8216,6 +9081,13 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi) credits, ch->seid); } + ret = i40e_rebuild_cloud_filters(vsi, ch->seid); + if (ret) { + dev_dbg(&vsi->back->pdev->dev, + "Failed to rebuild cloud filters for channel VSI %u\n", + ch->seid); + return ret; + } } return 0; } @@ -8382,7 +9254,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) i40e_verify_eeprom(pf); i40e_clear_pxe_mode(hw); - ret = i40e_get_capabilities(pf); + ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities); if (ret) goto end_core_reset; @@ -8503,6 +9375,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) vsi->seid); } + ret = i40e_rebuild_cloud_filters(vsi, vsi->seid); + if (ret) + goto end_unlock; + /* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs * for this main VSI if they exist */ @@ -9426,6 +10302,7 @@ static int i40e_init_msix(struct i40e_pf *pf) (pf->num_fdsb_msix == 0)) { dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n"); pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + pf->flags |= I40E_FLAG_FD_SB_INACTIVE; } if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && (pf->num_vmdq_msix == 0)) { @@ -9543,6 +10420,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf) I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_VMDQ_ENABLED); + pf->flags |= I40E_FLAG_FD_SB_INACTIVE; /* rework the queue expectations without MSIX */ i40e_determine_queue_usage(pf); @@ -10283,9 +11161,13 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) /* Enable filters and mark for reset */ if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) need_reset = true; - /* enable FD_SB only if there is MSI-X vector */ - if (pf->num_fdsb_msix > 0) + /* enable FD_SB only if there is MSI-X vector and no cloud + * filters exist + */ + if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) { pf->flags |= I40E_FLAG_FD_SB_ENABLED; + pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE; + } } else { /* turn off filters, mark for reset and clear SW filter list */ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { @@ -10294,6 +11176,8 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) } pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_SB_AUTO_DISABLED); + pf->flags |= I40E_FLAG_FD_SB_INACTIVE; + /* reset fd counters */ pf->fd_add_err = 0; pf->fd_atr_cnt = 0; @@ -10355,6 +11239,12 @@ static int i40e_set_features(struct net_device *netdev, else i40e_vlan_stripping_disable(vsi); + if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) { + dev_err(&pf->pdev->dev, + "Offloaded tc filters active, can't turn hw_tc_offload off"); + return -EINVAL; + } + need_reset = i40e_set_ntuple(pf, features); if (need_reset) @@ -10874,7 +11764,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) - netdev->hw_features |= NETIF_F_NTUPLE; + netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC; + hw_features = hw_enc_features | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; @@ -12179,8 +13070,10 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) */ if ((pf->hw.pf_id == 0) && - !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) + !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) { flags = I40E_AQ_SET_SWITCH_CFG_PROMISC; + pf->last_sw_conf_flags = flags; + } if (pf->hw.pf_id == 0) { u16 valid_flags; @@ -12196,6 +13089,7 @@ static int i40e_setup_pf_switch(struct 
i40e_pf *pf, bool reinit) pf->hw.aq.asq_last_status)); /* not a fatal problem, just keep going */ } + pf->last_sw_conf_valid_flags = valid_flags; } /* first time setup */ @@ -12293,6 +13187,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) I40E_FLAG_DCB_ENABLED | I40E_FLAG_SRIOV_ENABLED | I40E_FLAG_VMDQ_ENABLED); + pf->flags |= I40E_FLAG_FD_SB_INACTIVE; } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED | @@ -12307,6 +13202,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) I40E_FLAG_FD_ATR_ENABLED | I40E_FLAG_DCB_ENABLED | I40E_FLAG_VMDQ_ENABLED); + pf->flags |= I40E_FLAG_FD_SB_INACTIVE; } else { /* Not enough queues for all TCs */ if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && @@ -12330,6 +13226,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf) queues_left -= 1; /* save 1 queue for FD */ } else { pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; + pf->flags |= I40E_FLAG_FD_SB_INACTIVE; dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n"); } } @@ -12633,7 +13530,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); i40e_clear_pxe_mode(hw); - err = i40e_get_capabilities(pf); + err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities); if (err) goto err_adminq_setup; diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 92869f57b52b..3bb6659db822 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -283,6 +283,22 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); +i40e_status +i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_cloud_filters_element_bb *filters, + u8 filter_count); +enum i40e_status_code +i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 vsi, + struct i40e_aqc_cloud_filters_element_data *filters, + u8 filter_count); +enum i40e_status_code +i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 vsi, + struct i40e_aqc_cloud_filters_element_data *filters, + u8 filter_count); +i40e_status +i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid, + struct i40e_aqc_cloud_filters_element_bb *filters, + u8 filter_count); i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw, struct i40e_lldp_variables *lldp_cfg); /* i40e_common */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index e4e5a0c864b7..00d4833e9925 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -291,6 +291,7 @@ struct i40e_hw_capabilities { #define I40E_CLOUD_FILTER_MODE1 0x6 #define I40E_CLOUD_FILTER_MODE2 0x7 #define I40E_CLOUD_FILTER_MODE3 0x8 +#define I40E_SWITCH_MODE_MASK 0xF u32 management_mode; u32 mng_protocols_over_mctp; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index af82c303de7b..06b04572c518 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ 
b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -1360,6 +1360,9 @@ struct i40e_aqc_cloud_filters_element_data { struct { u8 data[16]; } v6; + struct { + __le16 data[8]; + } raw_v6; } ipaddr; __le16 flags; #define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 -- cgit v1.2.3 From c0752f2bd6ee77aa9334da4f69e8f54a325d282b Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Wed, 23 Aug 2017 14:42:03 +0300 Subject: net/mlx5e: Introduce stats group API Currently the mlx5e driver has multiple groups of stats, each group is used for different purposes and it may depend on hardware capabilities or not. The problem with the current implementation is that there is no clear API to create a new group of stats. This change define a new API to create a group of stats and simplifies the way of handling them by defining a new struct "mlx5e_stats_grp" which have the following three function pointers: - get_num_stats() - return the number of counters in the group. - fill_strings() - fill counters strings within the group. - fill_stats() - fill counters values within the group. The above function pointers are used within the ethtool callbaks while calling "ethtool -S" from userspace. This change also switch the SW group to use the new API. Signed-off-by: Kamal Heib Reviewed-by: Gal Pressman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2 +- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 16 +-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 107 +++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 50 ++-------- 4 files changed, 127 insertions(+), 48 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 100fe4ecad9b..f391c7cb7656 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -13,7 +13,7 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ fpga/ipsec.o mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ - en_tx.o en_rx.o en_rx_am.o en_txrx.o vxlan.o \ + en_tx.o en_rx.o en_rx_am.o en_txrx.o en_stats.o vxlan.o \ en_arfs.o en_fs_ethtool.o en_selftest.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 81a112e40fe3..dfc440d7278d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -176,9 +176,13 @@ static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv) int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset) { + int i, num_stats = 0; + switch (sset) { case ETH_SS_STATS: - return NUM_SW_COUNTERS + + for (i = 0; i < mlx5e_num_stats_grps; i++) + num_stats += mlx5e_stats_grps[i].get_num_stats(priv); + return num_stats + MLX5E_NUM_Q_CNTRS(priv) + NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS(priv) + NUM_PCIE_COUNTERS(priv) + @@ -211,9 +215,8 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data) int i, j, tc, prio, idx = 0; unsigned long pfc_combined; - /* SW counters */ - for (i = 0; i < NUM_SW_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format); + for (i = 0; i < mlx5e_num_stats_grps; i++) + idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx); /* Q counters */ for (i = 0; i < 
MLX5E_NUM_Q_CNTRS(priv); i++) @@ -354,9 +357,8 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, channels = &priv->channels; mutex_unlock(&priv->state_lock); - for (i = 0; i < NUM_SW_COUNTERS; i++) - data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, - sw_stats_desc, i); + for (i = 0; i < mlx5e_num_stats_grps; i++) + idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx); for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++) data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c new file mode 100644 index 000000000000..25a1a6d8aa9d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include "en.h" + +static const struct counter_desc sw_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) }, +}; + +#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc) + +static int mlx5e_grp_sw_get_num_stats(struct mlx5e_priv *priv) +{ + return NUM_SW_COUNTERS; +} + +static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx) +{ + int i; + + for (i = 0; i < NUM_SW_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format); + return idx; +} + +static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) +{ + int i; + + for (i = 0; i < NUM_SW_COUNTERS; i++) + data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i); + return idx; +} + +const struct mlx5e_stats_grp mlx5e_stats_grps[] = { + { + .get_num_stats = mlx5e_grp_sw_get_num_stats, + .fill_strings = mlx5e_grp_sw_fill_strings, + .fill_stats = mlx5e_grp_sw_fill_stats, + } +}; + +const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index f8637213afc0..6d2d8abf1929 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -91,45 +91,6 @@ struct mlx5e_sw_stats { u64 link_down_events_phy; }; -static const struct counter_desc sw_stats_desc[] = { - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) }, -}; - struct mlx5e_qcounter_stats { u32 rx_out_of_buffer; }; @@ -423,7 +384,6 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, }; -#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc) #define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc) #define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc) #define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc) @@ -470,6 +430,16 @@ struct mlx5e_stats { struct mlx5e_pcie_stats pcie; }; +struct mlx5e_priv; +struct mlx5e_stats_grp { + int (*get_num_stats)(struct mlx5e_priv *priv); + int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx); + int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx); +}; + +extern const struct mlx5e_stats_grp mlx5e_stats_grps[]; +extern const int mlx5e_num_stats_grps; + static const struct counter_desc mlx5e_pme_status_desc[] = { { "module_unplug", 8 }, }; -- cgit v1.2.3 From fd8dcdb8d2e72f6ab7caf63392bece01344b4b47 Mon Sep 17 00:00:00 2001 From: Kamal Heib 
Date: Wed, 23 Aug 2017 14:46:33 +0300 Subject: net/mlx5e: Switch Q counters to use the stats group API Switch the Q counters to use the new stats group API. Signed-off-by: Kamal Heib Reviewed-by: Gal Pressman Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 10 ------ drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 36 +++++++++++++++++++++- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 5 --- 3 files changed, 35 insertions(+), 16 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index dfc440d7278d..35ba40af2a73 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -166,7 +166,6 @@ static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv) return err ? false : rx_pause | tx_pause; } -#define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter)) #define MLX5E_NUM_RQ_STATS(priv) (NUM_RQ_STATS * (priv)->channels.num) #define MLX5E_NUM_SQ_STATS(priv) \ (NUM_SQ_STATS * (priv)->channels.num * (priv)->channels.params.num_tc) @@ -183,7 +182,6 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset) for (i = 0; i < mlx5e_num_stats_grps; i++) num_stats += mlx5e_stats_grps[i].get_num_stats(priv); return num_stats + - MLX5E_NUM_Q_CNTRS(priv) + NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS(priv) + NUM_PCIE_COUNTERS(priv) + MLX5E_NUM_RQ_STATS(priv) + @@ -218,10 +216,6 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data) for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx); - /* Q counters */ - for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format); - /* VPORT counters */ for (i = 0; i < NUM_VPORT_COUNTERS; i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, @@ -360,10 +354,6 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx); - for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++) - data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, - q_stats_desc, i); - for (i = 0; i < NUM_VPORT_COUNTERS; i++) data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out, vport_stats_desc, i); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 25a1a6d8aa9d..64b344720a31 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -96,12 +96,46 @@ static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) return idx; } +static const struct counter_desc q_stats_desc[] = { + { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) }, +}; + +#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc) + +static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv) +{ + return priv->q_counter ? 
NUM_Q_COUNTERS : 0; +} + +static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx) +{ + int i; + + for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format); + return idx; +} + +static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) +{ + int i; + + for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++) + data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, q_stats_desc, i); + return idx; +} + const struct mlx5e_stats_grp mlx5e_stats_grps[] = { { .get_num_stats = mlx5e_grp_sw_get_num_stats, .fill_strings = mlx5e_grp_sw_fill_strings, .fill_stats = mlx5e_grp_sw_fill_stats, - } + }, + { + .get_num_stats = mlx5e_grp_q_get_num_stats, + .fill_strings = mlx5e_grp_q_fill_strings, + .fill_stats = mlx5e_grp_q_fill_stats, + }, }; const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 6d2d8abf1929..b82ecb1fa353 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -95,10 +95,6 @@ struct mlx5e_qcounter_stats { u32 rx_out_of_buffer; }; -static const struct counter_desc q_stats_desc[] = { - { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) }, -}; - #define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c) #define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \ vstats->query_vport_out, c) @@ -384,7 +380,6 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, }; -#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc) #define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc) #define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc) #define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc) -- cgit v1.2.3 From 40cab9f16cc378f61f5cff0710cdd6caa7db549b Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Wed, 23 Aug 2017 14:47:58 +0300 Subject: net/mlx5e: Switch vport counters to use the stats group API Switch the vport counters to use the new stats group API. 
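As with the SW and Q groups, the vport descriptor table and its NUM_VPORT_COUNTERS define move into en_stats.c, so the ethtool string and value loops no longer reference them directly.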
Signed-off-by: Kamal Heib Reviewed-by: Gal Pressman Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 11 +--- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 77 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 45 ------------- 3 files changed, 78 insertions(+), 55 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 35ba40af2a73..08089f1d2c4c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -182,7 +182,7 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset) for (i = 0; i < mlx5e_num_stats_grps; i++) num_stats += mlx5e_stats_grps[i].get_num_stats(priv); return num_stats + - NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS(priv) + + NUM_PPORT_COUNTERS(priv) + NUM_PCIE_COUNTERS(priv) + MLX5E_NUM_RQ_STATS(priv) + MLX5E_NUM_SQ_STATS(priv) + @@ -216,11 +216,6 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data) for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx); - /* VPORT counters */ - for (i = 0; i < NUM_VPORT_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - vport_stats_desc[i].format); - /* PPORT counters */ for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, @@ -354,10 +349,6 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx); - for (i = 0; i < NUM_VPORT_COUNTERS; i++) - data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out, - vport_stats_desc, i); - for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++) data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters, pport_802_3_stats_desc, i); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 64b344720a31..199e34f204a6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -125,6 +125,78 @@ static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) return idx; } +#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c) +static const struct counter_desc vport_stats_desc[] = { + { "rx_vport_unicast_packets", + VPORT_COUNTER_OFF(received_eth_unicast.packets) }, + { "rx_vport_unicast_bytes", + VPORT_COUNTER_OFF(received_eth_unicast.octets) }, + { "tx_vport_unicast_packets", + VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) }, + { "tx_vport_unicast_bytes", + VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) }, + { "rx_vport_multicast_packets", + VPORT_COUNTER_OFF(received_eth_multicast.packets) }, + { "rx_vport_multicast_bytes", + VPORT_COUNTER_OFF(received_eth_multicast.octets) }, + { "tx_vport_multicast_packets", + VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) }, + { "tx_vport_multicast_bytes", + VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) }, + { "rx_vport_broadcast_packets", + VPORT_COUNTER_OFF(received_eth_broadcast.packets) }, + { "rx_vport_broadcast_bytes", + VPORT_COUNTER_OFF(received_eth_broadcast.octets) }, + { "tx_vport_broadcast_packets", + VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) }, + { "tx_vport_broadcast_bytes", + VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) }, + { "rx_vport_rdma_unicast_packets", + 
VPORT_COUNTER_OFF(received_ib_unicast.packets) }, + { "rx_vport_rdma_unicast_bytes", + VPORT_COUNTER_OFF(received_ib_unicast.octets) }, + { "tx_vport_rdma_unicast_packets", + VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) }, + { "tx_vport_rdma_unicast_bytes", + VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) }, + { "rx_vport_rdma_multicast_packets", + VPORT_COUNTER_OFF(received_ib_multicast.packets) }, + { "rx_vport_rdma_multicast_bytes", + VPORT_COUNTER_OFF(received_ib_multicast.octets) }, + { "tx_vport_rdma_multicast_packets", + VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) }, + { "tx_vport_rdma_multicast_bytes", + VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) }, +}; + +#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc) + +static int mlx5e_grp_vport_get_num_stats(struct mlx5e_priv *priv) +{ + return NUM_VPORT_COUNTERS; +} + +static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data, + int idx) +{ + int i; + + for (i = 0; i < NUM_VPORT_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format); + return idx; +} + +static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data, + int idx) +{ + int i; + + for (i = 0; i < NUM_VPORT_COUNTERS; i++) + data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out, + vport_stats_desc, i); + return idx; +} + const struct mlx5e_stats_grp mlx5e_stats_grps[] = { { .get_num_stats = mlx5e_grp_sw_get_num_stats, @@ -136,6 +208,11 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = { .fill_strings = mlx5e_grp_q_fill_strings, .fill_stats = mlx5e_grp_q_fill_stats, }, + { + .get_num_stats = mlx5e_grp_vport_get_num_stats, + .fill_strings = mlx5e_grp_vport_fill_strings, + .fill_stats = mlx5e_grp_vport_fill_stats, + }, }; const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index b82ecb1fa353..610208aed767 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -95,7 +95,6 @@ struct mlx5e_qcounter_stats { u32 rx_out_of_buffer; }; -#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c) #define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \ vstats->query_vport_out, c) @@ -103,49 +102,6 @@ struct mlx5e_vport_stats { __be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)]; }; -static const struct counter_desc vport_stats_desc[] = { - { "rx_vport_unicast_packets", - VPORT_COUNTER_OFF(received_eth_unicast.packets) }, - { "rx_vport_unicast_bytes", - VPORT_COUNTER_OFF(received_eth_unicast.octets) }, - { "tx_vport_unicast_packets", - VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) }, - { "tx_vport_unicast_bytes", - VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) }, - { "rx_vport_multicast_packets", - VPORT_COUNTER_OFF(received_eth_multicast.packets) }, - { "rx_vport_multicast_bytes", - VPORT_COUNTER_OFF(received_eth_multicast.octets) }, - { "tx_vport_multicast_packets", - VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) }, - { "tx_vport_multicast_bytes", - VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) }, - { "rx_vport_broadcast_packets", - VPORT_COUNTER_OFF(received_eth_broadcast.packets) }, - { "rx_vport_broadcast_bytes", - VPORT_COUNTER_OFF(received_eth_broadcast.octets) }, - { "tx_vport_broadcast_packets", - VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) }, - { "tx_vport_broadcast_bytes", - 
VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) }, - { "rx_vport_rdma_unicast_packets", - VPORT_COUNTER_OFF(received_ib_unicast.packets) }, - { "rx_vport_rdma_unicast_bytes", - VPORT_COUNTER_OFF(received_ib_unicast.octets) }, - { "tx_vport_rdma_unicast_packets", - VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) }, - { "tx_vport_rdma_unicast_bytes", - VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) }, - { "rx_vport_rdma_multicast_packets", - VPORT_COUNTER_OFF(received_ib_multicast.packets) }, - { "rx_vport_rdma_multicast_bytes", - VPORT_COUNTER_OFF(received_ib_multicast.octets) }, - { "tx_vport_rdma_multicast_packets", - VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) }, - { "tx_vport_rdma_multicast_bytes", - VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) }, -}; - #define PPORT_802_3_OFF(c) \ MLX5_BYTE_OFF(ppcnt_reg, \ counter_set.eth_802_3_cntrs_grp_data_layout.c##_high) @@ -380,7 +336,6 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, }; -#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc) #define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc) #define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc) #define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc) -- cgit v1.2.3 From 6e6ef814d27b1fb3cd30fe62d700208065656401 Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Wed, 23 Aug 2017 14:49:57 +0300 Subject: net/mlx5e: Switch IEEE 802.3 counters to use stats group API Switch the IEEE 802.3 counters to use the new stats group API. Signed-off-by: Kamal Heib Reviewed-by: Gal Pressman Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 9 ---- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 57 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 28 +---------- 3 files changed, 58 insertions(+), 36 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 08089f1d2c4c..65e7c0a986ca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -216,11 +216,6 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data) for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx); - /* PPORT counters */ - for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - pport_802_3_stats_desc[i].format); - for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format); @@ -349,10 +344,6 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx); - for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++) - data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters, - pport_802_3_stats_desc, i); - for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++) data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters, pport_2863_stats_desc, i); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 199e34f204a6..1ce296e4c9f5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -197,6 +197,58 @@ static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data, return idx; } 
+#define PPORT_802_3_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.eth_802_3_cntrs_grp_data_layout.c##_high) +static const struct counter_desc pport_802_3_stats_desc[] = { + { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) }, + { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) }, + { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) }, + { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) }, + { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) }, + { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) }, + { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) }, + { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) }, + { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) }, + { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) }, + { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) }, + { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) }, + { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) }, + { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) }, + { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) }, + { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) }, + { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) }, + { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) }, +}; + +#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc) + +static int mlx5e_grp_802_3_get_num_stats(struct mlx5e_priv *priv) +{ + return NUM_PPORT_802_3_COUNTERS; +} + +static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data, + int idx) +{ + int i; + + for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format); + return idx; +} + +static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data, + int idx) +{ + int i; + + for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++) + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters, + pport_802_3_stats_desc, i); + return idx; +} + const struct mlx5e_stats_grp mlx5e_stats_grps[] = { { .get_num_stats = mlx5e_grp_sw_get_num_stats, @@ -213,6 +265,11 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = { .fill_strings = mlx5e_grp_vport_fill_strings, .fill_stats = mlx5e_grp_vport_fill_stats, }, + { + .get_num_stats = mlx5e_grp_802_3_get_num_stats, + .fill_strings = mlx5e_grp_802_3_fill_strings, + .fill_stats = mlx5e_grp_802_3_fill_stats, + }, }; const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 610208aed767..5a489aa8fef3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -102,9 +102,6 @@ struct mlx5e_vport_stats { __be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)]; }; -#define PPORT_802_3_OFF(c) \ - MLX5_BYTE_OFF(ppcnt_reg, \ - counter_set.eth_802_3_cntrs_grp_data_layout.c##_high) #define PPORT_802_3_GET(pstats, c) \ MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters, \ counter_set.eth_802_3_cntrs_grp_data_layout.c##_high) @@ -150,27 +147,6 @@ struct mlx5e_pport_stats { __be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; }; -static const struct counter_desc pport_802_3_stats_desc[] = { - { "tx_packets_phy", 
PPORT_802_3_OFF(a_frames_transmitted_ok) }, - { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) }, - { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) }, - { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) }, - { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) }, - { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) }, - { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) }, - { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) }, - { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) }, - { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) }, - { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) }, - { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) }, - { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) }, - { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) }, - { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) }, - { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) }, - { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) }, - { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) }, -}; - static const struct counter_desc pport_2863_stats_desc[] = { { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) }, { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) }, @@ -336,7 +312,6 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, }; -#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc) #define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc) #define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc) #define NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) \ @@ -358,8 +333,7 @@ static const struct counter_desc sq_stats_desc[] = { #define NUM_PPORT_ETH_EXT_COUNTERS(priv) \ (ARRAY_SIZE(pport_eth_ext_stats_desc) * \ MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters)) -#define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_802_3_COUNTERS + \ - NUM_PPORT_2863_COUNTERS + \ +#define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_2863_COUNTERS + \ NUM_PPORT_2819_COUNTERS + \ NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \ NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \ -- cgit v1.2.3 From fc8e64a3118ee13bc4cafa6a31ea74daf2d644d9 Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Wed, 23 Aug 2017 14:51:27 +0300 Subject: net/mlx5e: Switch RFC 2863 counters to use stats group API Switch the RFC 2863 counters to use the new stats group API. 
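Each counter_desc pairs an ethtool name with a byte offset into the raw register dump returned by firmware, and MLX5E_READ_CTR64_BE pulls a big-endian 64-bit value out at that offset. A self-contained model of that read follows; it assumes a Linux/glibc host for be64toh(), and the dump layout and value are invented:

#include <endian.h>	/* be64toh(): Linux/glibc */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct counter_desc {
	const char *format;	/* ethtool name */
	size_t offset;		/* byte offset into the register dump */
};

/* Models MLX5E_READ_CTR64_BE: fetch 8 bytes at the descriptor's offset
 * from the raw big-endian dump and convert to host order. */
static uint64_t read_ctr64_be(const void *regs, const struct counter_desc *d)
{
	uint64_t be;

	memcpy(&be, (const char *)regs + d->offset, sizeof(be));
	return be64toh(be);
}

int main(void)
{
	/* invented dump: one 64-bit counter at offset 8, value 3 */
	unsigned char regs[16] = { 0 };
	struct counter_desc d = { "rx_discards_phy", 8 };

	regs[15] = 3;
	printf("%s = %llu\n", d.format,
	       (unsigned long long)read_ctr64_be(regs, &d));
	return 0;
}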
Signed-off-by: Kamal Heib Reviewed-by: Gal Pressman Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 8 ----- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 42 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 13 +------ 3 files changed, 43 insertions(+), 20 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 65e7c0a986ca..79ac92a2834f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -216,10 +216,6 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data) for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx); - for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - pport_2863_stats_desc[i].format); - for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format); @@ -344,10 +340,6 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx); - for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++) - data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters, - pport_2863_stats_desc, i); - for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++) data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters, pport_2819_stats_desc, i); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 1ce296e4c9f5..1b2fc0eb1b3f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -249,6 +249,43 @@ static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data, return idx; } +#define PPORT_2863_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.eth_2863_cntrs_grp_data_layout.c##_high) +static const struct counter_desc pport_2863_stats_desc[] = { + { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) }, + { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) }, + { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) }, +}; + +#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc) + +static int mlx5e_grp_2863_get_num_stats(struct mlx5e_priv *priv) +{ + return NUM_PPORT_2863_COUNTERS; +} + +static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data, + int idx) +{ + int i; + + for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format); + return idx; +} + +static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data, + int idx) +{ + int i; + + for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++) + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters, + pport_2863_stats_desc, i); + return idx; +} + const struct mlx5e_stats_grp mlx5e_stats_grps[] = { { .get_num_stats = mlx5e_grp_sw_get_num_stats, @@ -270,6 +307,11 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = { .fill_strings = mlx5e_grp_802_3_fill_strings, .fill_stats = mlx5e_grp_802_3_fill_stats, }, + { + .get_num_stats = mlx5e_grp_2863_get_num_stats, + .fill_strings = mlx5e_grp_2863_fill_strings, + .fill_stats = mlx5e_grp_2863_fill_stats, + }, }; const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h 
b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 5a489aa8fef3..58dc22e5ac53 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -105,9 +105,6 @@ struct mlx5e_vport_stats { #define PPORT_802_3_GET(pstats, c) \ MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters, \ counter_set.eth_802_3_cntrs_grp_data_layout.c##_high) -#define PPORT_2863_OFF(c) \ - MLX5_BYTE_OFF(ppcnt_reg, \ - counter_set.eth_2863_cntrs_grp_data_layout.c##_high) #define PPORT_2863_GET(pstats, c) \ MLX5_GET64(ppcnt_reg, pstats->RFC_2863_counters, \ counter_set.eth_2863_cntrs_grp_data_layout.c##_high) @@ -147,12 +144,6 @@ struct mlx5e_pport_stats { __be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; }; -static const struct counter_desc pport_2863_stats_desc[] = { - { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) }, - { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) }, - { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) }, -}; - static const struct counter_desc pport_2819_stats_desc[] = { { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) }, { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) }, @@ -312,7 +303,6 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, }; -#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc) #define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc) #define NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) \ (ARRAY_SIZE(pport_phy_statistical_stats_desc) * \ @@ -333,8 +323,7 @@ static const struct counter_desc sq_stats_desc[] = { #define NUM_PPORT_ETH_EXT_COUNTERS(priv) \ (ARRAY_SIZE(pport_eth_ext_stats_desc) * \ MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters)) -#define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_2863_COUNTERS + \ - NUM_PPORT_2819_COUNTERS + \ +#define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_2819_COUNTERS + \ NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \ NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \ NUM_PPORT_PRIO + \ -- cgit v1.2.3 From e0e0def9e2a8c6320b5d8cb5b79b795a5a7df732 Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Wed, 23 Aug 2017 14:56:48 +0300 Subject: net/mlx5e: Switch RFC 2819 counters to use stats group API Switch the RFC 2819 counters to use the new stats group API. 
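The PPORT_*_OFF macros moved in these patches rely on token pasting: 64-bit PPCNT counters are declared as _high/_low 32-bit halves, and c##_high turns a counter name into the offset of its high word, where the 64-bit big-endian read begins. A sketch of just the macro trick, with an invented two-counter layout:

#include <stddef.h>
#include <stdio.h>

/* Invented layout: 64-bit counters stored as _high/_low 32-bit halves,
 * mirroring how the ppcnt_reg counter sets are declared. */
struct demo_layout {
	unsigned int ether_stats_undersize_pkts_high;
	unsigned int ether_stats_undersize_pkts_low;
	unsigned int ether_stats_fragments_high;
	unsigned int ether_stats_fragments_low;
};

/* Like PPORT_2819_OFF(c): paste "_high" onto the counter name to get
 * the offset where the 64-bit big-endian read starts. */
#define DEMO_OFF(c) offsetof(struct demo_layout, c##_high)

int main(void)
{
	printf("undersize_pkts at %zu, fragments at %zu\n",
	       DEMO_OFF(ether_stats_undersize_pkts),
	       DEMO_OFF(ether_stats_fragments));
	return 0;
}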
Signed-off-by: Kamal Heib Reviewed-by: Gal Pressman Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 8 ---- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 52 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 23 +--------- 3 files changed, 53 insertions(+), 30 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 79ac92a2834f..b5a9de1150f9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -216,10 +216,6 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data) for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx); - for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - pport_2819_stats_desc[i].format); - for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_phy_statistical_stats_desc[i].format); @@ -340,10 +336,6 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx); - for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++) - data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters, - pport_2819_stats_desc, i); - for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++) data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, pport_phy_statistical_stats_desc, i); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 1b2fc0eb1b3f..7cdcc60e913b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -286,6 +286,53 @@ static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data, return idx; } +#define PPORT_2819_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.eth_2819_cntrs_grp_data_layout.c##_high) +static const struct counter_desc pport_2819_stats_desc[] = { + { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) }, + { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) }, + { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) }, + { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) }, + { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) }, + { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) }, + { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) }, + { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) }, + { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) }, + { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) }, + { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) }, + { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) }, + { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) }, +}; + +#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc) + +static int mlx5e_grp_2819_get_num_stats(struct mlx5e_priv *priv) +{ + return NUM_PPORT_2819_COUNTERS; +} + +static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data, + int idx) +{ + int i; + + for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++) + 
strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format); + return idx; +} + +static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data, + int idx) +{ + int i; + + for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++) + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters, + pport_2819_stats_desc, i); + return idx; +} + const struct mlx5e_stats_grp mlx5e_stats_grps[] = { { .get_num_stats = mlx5e_grp_sw_get_num_stats, @@ -312,6 +359,11 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = { .fill_strings = mlx5e_grp_2863_fill_strings, .fill_stats = mlx5e_grp_2863_fill_stats, }, + { + .get_num_stats = mlx5e_grp_2819_get_num_stats, + .fill_strings = mlx5e_grp_2819_fill_strings, + .fill_stats = mlx5e_grp_2819_fill_stats, + }, }; const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 58dc22e5ac53..168fad3ab2aa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -108,9 +108,6 @@ struct mlx5e_vport_stats { #define PPORT_2863_GET(pstats, c) \ MLX5_GET64(ppcnt_reg, pstats->RFC_2863_counters, \ counter_set.eth_2863_cntrs_grp_data_layout.c##_high) -#define PPORT_2819_OFF(c) \ - MLX5_BYTE_OFF(ppcnt_reg, \ - counter_set.eth_2819_cntrs_grp_data_layout.c##_high) #define PPORT_2819_GET(pstats, c) \ MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \ counter_set.eth_2819_cntrs_grp_data_layout.c##_high) @@ -144,22 +141,6 @@ struct mlx5e_pport_stats { __be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; }; -static const struct counter_desc pport_2819_stats_desc[] = { - { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) }, - { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) }, - { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) }, - { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) }, - { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) }, - { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) }, - { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) }, - { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) }, - { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) }, - { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) }, - { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) }, - { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) }, - { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) }, -}; - static const struct counter_desc pport_phy_statistical_stats_desc[] = { { "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) }, { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) }, @@ -303,7 +284,6 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, }; -#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc) #define NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) \ (ARRAY_SIZE(pport_phy_statistical_stats_desc) * \ MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) @@ -323,8 +303,7 @@ static const struct counter_desc sq_stats_desc[] = { #define NUM_PPORT_ETH_EXT_COUNTERS(priv) \ (ARRAY_SIZE(pport_eth_ext_stats_desc) * \ MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters)) 
-#define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_2819_COUNTERS + \ - NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \ +#define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \ NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \ NUM_PPORT_PRIO + \ NUM_PPORT_ETH_EXT_COUNTERS(priv)) -- cgit v1.2.3 From 2e4df0b2415744548f612f26a33c701bd2ce37d8 Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Wed, 23 Aug 2017 14:58:47 +0300 Subject: net/mlx5e: Switch physical statistical counters to use stats group API Switch the physical statistical counters to use the new stats group API. Signed-off-by: Kamal Heib Reviewed-by: Gal Pressman Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 8 ---- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 45 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 14 +------ 3 files changed, 46 insertions(+), 21 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index b5a9de1150f9..547b7fa48637 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -216,10 +216,6 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data) for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx); - for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - pport_phy_statistical_stats_desc[i].format); - for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS(priv); i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_eth_ext_stats_desc[i].format); @@ -336,10 +332,6 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx); - for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++) - data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, - pport_phy_statistical_stats_desc, i); - for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS(priv); i++) data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters, pport_eth_ext_stats_desc, i); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 7cdcc60e913b..3838f109ceb1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -333,6 +333,46 @@ static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data, return idx; } +#define PPORT_PHY_STATISTICAL_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.phys_layer_statistical_cntrs.c##_high) +static const struct counter_desc pport_phy_statistical_stats_desc[] = { + { "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) }, + { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) }, +}; + +#define NUM_PPORT_PHY_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc) + +static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv) +{ + return MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group) ? 
+ NUM_PPORT_PHY_COUNTERS : 0; +} + +static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data, + int idx) +{ + int i; + + if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) + for (i = 0; i < NUM_PPORT_PHY_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pport_phy_statistical_stats_desc[i].format); + return idx; +} + +static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) +{ + int i; + + if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) + for (i = 0; i < NUM_PPORT_PHY_COUNTERS; i++) + data[idx++] = + MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, + pport_phy_statistical_stats_desc, i); + return idx; +} + const struct mlx5e_stats_grp mlx5e_stats_grps[] = { { .get_num_stats = mlx5e_grp_sw_get_num_stats, @@ -364,6 +404,11 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = { .fill_strings = mlx5e_grp_2819_fill_strings, .fill_stats = mlx5e_grp_2819_fill_stats, }, + { + .get_num_stats = mlx5e_grp_phy_get_num_stats, + .fill_strings = mlx5e_grp_phy_fill_strings, + .fill_stats = mlx5e_grp_phy_fill_stats, + }, }; const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 168fad3ab2aa..964a1c69cb51 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -111,9 +111,6 @@ struct mlx5e_vport_stats { #define PPORT_2819_GET(pstats, c) \ MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \ counter_set.eth_2819_cntrs_grp_data_layout.c##_high) -#define PPORT_PHY_STATISTICAL_OFF(c) \ - MLX5_BYTE_OFF(ppcnt_reg, \ - counter_set.phys_layer_statistical_cntrs.c##_high) #define PPORT_PHY_STATISTICAL_GET(pstats, c) \ MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \ counter_set.phys_layer_statistical_cntrs.c##_high) @@ -141,11 +138,6 @@ struct mlx5e_pport_stats { __be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; }; -static const struct counter_desc pport_phy_statistical_stats_desc[] = { - { "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) }, - { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) }, -}; - static const struct counter_desc pport_per_prio_traffic_stats_desc[] = { { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) }, { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) }, @@ -284,9 +276,6 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, }; -#define NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) \ - (ARRAY_SIZE(pport_phy_statistical_stats_desc) * \ - MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group)) #define NUM_PCIE_PERF_COUNTERS(priv) \ (ARRAY_SIZE(pcie_perf_stats_desc) * \ MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group)) @@ -303,8 +292,7 @@ static const struct counter_desc sq_stats_desc[] = { #define NUM_PPORT_ETH_EXT_COUNTERS(priv) \ (ARRAY_SIZE(pport_eth_ext_stats_desc) * \ MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters)) -#define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \ - NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \ +#define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \ NUM_PPORT_PRIO + \ NUM_PPORT_ETH_EXT_COUNTERS(priv)) #define NUM_PCIE_COUNTERS(priv) (NUM_PCIE_PERF_COUNTERS(priv) + \ -- cgit v1.2.3 From 3488bd4c3549ea805c48de522a8ede1edc902e4b Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Wed, 23 Aug 2017 
15:00:40 +0300 Subject: net/mlx5e: Switch ethernet extended counters to use stats group API Switch the ethernet extended counters to use the new stats group API. Signed-off-by: Kamal Heib Reviewed-by: Gal Pressman Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 8 ---- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 47 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 13 +----- 3 files changed, 48 insertions(+), 20 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 547b7fa48637..2da0bb88fa39 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -216,10 +216,6 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data) for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx); - for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS(priv); i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - pport_eth_ext_stats_desc[i].format); - for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, pcie_perf_stats_desc[i].format); @@ -332,10 +328,6 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx); - for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS(priv); i++) - data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters, - pport_eth_ext_stats_desc, i); - for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++) data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters, pcie_perf_stats_desc, i); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 3838f109ceb1..a4edfd8726f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -373,6 +373,48 @@ static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) return idx; } +#define PPORT_ETH_EXT_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.eth_extended_cntrs_grp_data_layout.c##_high) +static const struct counter_desc pport_eth_ext_stats_desc[] = { + { "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) }, +}; + +#define NUM_PPORT_ETH_EXT_COUNTERS ARRAY_SIZE(pport_eth_ext_stats_desc) + +static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv) +{ + if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters)) + return NUM_PPORT_ETH_EXT_COUNTERS; + + return 0; +} + +static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data, + int idx) +{ + int i; + + if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters)) + for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pport_eth_ext_stats_desc[i].format); + return idx; +} + +static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data, + int idx) +{ + int i; + + if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters)) + for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++) + data[idx++] = + MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters, + pport_eth_ext_stats_desc, i); + return idx; +} + const struct mlx5e_stats_grp mlx5e_stats_grps[] = { { .get_num_stats = mlx5e_grp_sw_get_num_stats, @@ -409,6 +451,11 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = { 
.fill_strings = mlx5e_grp_phy_fill_strings, .fill_stats = mlx5e_grp_phy_fill_stats, }, + { + .get_num_stats = mlx5e_grp_eth_ext_get_num_stats, + .fill_strings = mlx5e_grp_eth_ext_fill_strings, + .fill_stats = mlx5e_grp_eth_ext_fill_stats, + } }; const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 964a1c69cb51..c0e84394c3fc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -121,9 +121,6 @@ struct mlx5e_vport_stats { MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \ counter_set.eth_per_prio_grp_data_layout.c##_high) #define NUM_PPORT_PRIO 8 -#define PPORT_ETH_EXT_OFF(c) \ - MLX5_BYTE_OFF(ppcnt_reg, \ - counter_set.eth_extended_cntrs_grp_data_layout.c##_high) #define PPORT_ETH_EXT_GET(pstats, c) \ MLX5_GET64(ppcnt_reg, (pstats)->eth_ext_counters, \ counter_set.eth_extended_cntrs_grp_data_layout.c##_high) @@ -154,10 +151,6 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = { { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, }; -static const struct counter_desc pport_eth_ext_stats_desc[] = { - { "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) }, -}; - #define PCIE_PERF_OFF(c) \ MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c) #define PCIE_PERF_GET(pcie_stats, c) \ @@ -289,12 +282,8 @@ static const struct counter_desc sq_stats_desc[] = { ARRAY_SIZE(pport_per_prio_traffic_stats_desc) #define NUM_PPORT_PER_PRIO_PFC_COUNTERS \ ARRAY_SIZE(pport_per_prio_pfc_stats_desc) -#define NUM_PPORT_ETH_EXT_COUNTERS(priv) \ - (ARRAY_SIZE(pport_eth_ext_stats_desc) * \ - MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters)) #define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \ - NUM_PPORT_PRIO + \ - NUM_PPORT_ETH_EXT_COUNTERS(priv)) + NUM_PPORT_PRIO) #define NUM_PCIE_COUNTERS(priv) (NUM_PCIE_PERF_COUNTERS(priv) + \ NUM_PCIE_PERF_COUNTERS64(priv) +\ NUM_PCIE_PERF_STALL_COUNTERS(priv)) -- cgit v1.2.3 From 9fd2b5f137f5f723c03f5018acb822dcebfba8f0 Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Wed, 23 Aug 2017 15:01:58 +0300 Subject: net/mlx5e: Switch pcie counters to use stats group API Switch the pcie counters to use the new stats group API. 
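This is the first group in the series whose size varies at runtime: three MCAM capability bits gate three descriptor tables. The old header macros multiplied each table size by the 0/1 capability bit, while the new callbacks use explicit tests, and all three callbacks must gate identically or the ethtool string and value arrays drift out of step. A toy comparison of the two styles (table sizes and cap flags are invented):

#include <stdio.h>

#define N_PERF   2	/* ARRAY_SIZE(pcie_perf_stats_desc) */
#define N_PERF64 1	/* ARRAY_SIZE(pcie_perf_stats_desc64) */
#define N_STALL  4	/* ARRAY_SIZE(pcie_perf_stall_stats_desc) */

/* Old header style: multiply each table size by a 0/1 capability bit. */
static int num_old(int perf, int ovfl, int stall)
{
	return N_PERF * perf + N_PERF64 * ovfl + N_STALL * stall;
}

/* New en_stats.c style: one explicit gate per sub-block; fill_strings()
 * and fill_stats() must walk the same gates in the same order. */
static int num_new(int perf, int ovfl, int stall)
{
	int n = 0;

	if (perf)
		n += N_PERF;
	if (ovfl)
		n += N_PERF64;
	if (stall)
		n += N_STALL;
	return n;
}

int main(void)
{
	int p, o, s;

	for (p = 0; p < 2; p++)
		for (o = 0; o < 2; o++)
			for (s = 0; s < 2; s++)
				printf("caps %d%d%d -> old %d, new %d\n", p, o,
				       s, num_old(p, o, s), num_new(p, o, s));
	return 0;
}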
Signed-off-by: Kamal Heib Reviewed-by: Gal Pressman Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 25 ------ drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 94 +++++++++++++++++++++- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 32 -------- 3 files changed, 93 insertions(+), 58 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 2da0bb88fa39..8515ae815cbc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -183,7 +183,6 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset) num_stats += mlx5e_stats_grps[i].get_num_stats(priv); return num_stats + NUM_PPORT_COUNTERS(priv) + - NUM_PCIE_COUNTERS(priv) + MLX5E_NUM_RQ_STATS(priv) + MLX5E_NUM_SQ_STATS(priv) + MLX5E_NUM_PFC_COUNTERS(priv) + @@ -216,18 +215,6 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data) for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx); - for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - pcie_perf_stats_desc[i].format); - - for (i = 0; i < NUM_PCIE_PERF_COUNTERS64(priv); i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - pcie_perf_stats_desc64[i].format); - - for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS(priv); i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, - pcie_perf_stall_stats_desc[i].format); - for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) sprintf(data + (idx++) * ETH_GSTRING_LEN, @@ -328,18 +315,6 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx); - for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++) - data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters, - pcie_perf_stats_desc, i); - - for (i = 0; i < NUM_PCIE_PERF_COUNTERS64(priv); i++) - data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters, - pcie_perf_stats_desc64, i); - - for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS(priv); i++) - data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters, - pcie_perf_stall_stats_desc, i); - for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio], diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index a4edfd8726f3..700362a00fd0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -415,6 +415,93 @@ static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data, return idx; } +#define PCIE_PERF_OFF(c) \ + MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c) +static const struct counter_desc pcie_perf_stats_desc[] = { + { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) }, + { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) }, +}; + +#define PCIE_PERF_OFF64(c) \ + MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high) +static const struct counter_desc pcie_perf_stats_desc64[] = { + { "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) }, +}; + +static const struct counter_desc 
pcie_perf_stall_stats_desc[] = { + { "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) }, + { "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) }, + { "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) }, + { "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) }, +}; + +#define NUM_PCIE_PERF_COUNTERS ARRAY_SIZE(pcie_perf_stats_desc) +#define NUM_PCIE_PERF_COUNTERS64 ARRAY_SIZE(pcie_perf_stats_desc64) +#define NUM_PCIE_PERF_STALL_COUNTERS ARRAY_SIZE(pcie_perf_stall_stats_desc) + +static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv) +{ + int num_stats = 0; + + if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group)) + num_stats += NUM_PCIE_PERF_COUNTERS; + + if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt)) + num_stats += NUM_PCIE_PERF_COUNTERS64; + + if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled)) + num_stats += NUM_PCIE_PERF_STALL_COUNTERS; + + return num_stats; +} + +static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data, + int idx) +{ + int i; + + if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group)) + for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pcie_perf_stats_desc[i].format); + + if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt)) + for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pcie_perf_stats_desc64[i].format); + + if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled)) + for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pcie_perf_stall_stats_desc[i].format); + return idx; +} + +static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data, + int idx) +{ + int i; + + if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group)) + for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++) + data[idx++] = + MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters, + pcie_perf_stats_desc, i); + + if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt)) + for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++) + data[idx++] = + MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters, + pcie_perf_stats_desc64, i); + + if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled)) + for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++) + data[idx++] = + MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters, + pcie_perf_stall_stats_desc, i); + return idx; +} + const struct mlx5e_stats_grp mlx5e_stats_grps[] = { { .get_num_stats = mlx5e_grp_sw_get_num_stats, @@ -455,7 +542,12 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = { .get_num_stats = mlx5e_grp_eth_ext_get_num_stats, .fill_strings = mlx5e_grp_eth_ext_fill_strings, .fill_stats = mlx5e_grp_eth_ext_fill_stats, - } + }, + { + .get_num_stats = mlx5e_grp_pcie_get_num_stats, + .fill_strings = mlx5e_grp_pcie_fill_strings, + .fill_stats = mlx5e_grp_pcie_fill_stats, + }, }; const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index c0e84394c3fc..8afff1981b25 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -151,14 +151,10 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = { { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, }; -#define PCIE_PERF_OFF(c) \ - MLX5_BYTE_OFF(mpcnt_reg, 
counter_set.pcie_perf_cntrs_grp_data_layout.c) #define PCIE_PERF_GET(pcie_stats, c) \ MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \ counter_set.pcie_perf_cntrs_grp_data_layout.c) -#define PCIE_PERF_OFF64(c) \ - MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high) #define PCIE_PERF_GET64(pcie_stats, c) \ MLX5_GET64(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \ counter_set.pcie_perf_cntrs_grp_data_layout.c##_high) @@ -167,22 +163,6 @@ struct mlx5e_pcie_stats { __be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)]; }; -static const struct counter_desc pcie_perf_stats_desc[] = { - { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) }, - { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) }, -}; - -static const struct counter_desc pcie_perf_stats_desc64[] = { - { "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) }, -}; - -static const struct counter_desc pcie_perf_stall_stats_desc[] = { - { "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) }, - { "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) }, - { "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) }, - { "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) }, -}; - struct mlx5e_rq_stats { u64 packets; u64 bytes; @@ -269,24 +249,12 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, }; -#define NUM_PCIE_PERF_COUNTERS(priv) \ - (ARRAY_SIZE(pcie_perf_stats_desc) * \ - MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group)) -#define NUM_PCIE_PERF_COUNTERS64(priv) \ - (ARRAY_SIZE(pcie_perf_stats_desc64) * \ - MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt)) -#define NUM_PCIE_PERF_STALL_COUNTERS(priv) \ - (ARRAY_SIZE(pcie_perf_stall_stats_desc) * \ - MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled)) #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \ ARRAY_SIZE(pport_per_prio_traffic_stats_desc) #define NUM_PPORT_PER_PRIO_PFC_COUNTERS \ ARRAY_SIZE(pport_per_prio_pfc_stats_desc) #define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \ NUM_PPORT_PRIO) -#define NUM_PCIE_COUNTERS(priv) (NUM_PCIE_PERF_COUNTERS(priv) + \ - NUM_PCIE_PERF_COUNTERS64(priv) +\ - NUM_PCIE_PERF_STALL_COUNTERS(priv)) #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) -- cgit v1.2.3 From e6000651cf009280fedee6cbf951747a0beaffb4 Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Wed, 23 Aug 2017 15:03:03 +0300 Subject: net/mlx5e: Switch per prio traffic counters to use stats group API Switch the per prio traffic counters to use the new stats group API. 
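Unlike the flat groups, the per-prio descriptors are printf templates: fill_strings() stamps the priority number into "rx_prio%d_bytes" and friends, emitting one set of strings per priority (8 priorities x 4 formats = 32 names). A toy expansion using the same four formats, with an invented buffer:

#include <stdio.h>

#define ETH_GSTRING_LEN 32
#define NUM_PPORT_PRIO  8

static const char *const fmts[] = {
	"rx_prio%d_bytes", "rx_prio%d_packets",
	"tx_prio%d_bytes", "tx_prio%d_packets",
};
#define NUM_FMTS ((int)(sizeof(fmts) / sizeof(fmts[0])))

int main(void)
{
	char data[NUM_PPORT_PRIO * NUM_FMTS][ETH_GSTRING_LEN];
	int prio, i, idx = 0;

	/* one pass per priority, as in the group's fill_strings() */
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++)
		for (i = 0; i < NUM_FMTS; i++)
			snprintf(data[idx++], ETH_GSTRING_LEN, fmts[i], prio);

	printf("%d strings, e.g. data[5] = \"%s\"\n", idx, data[5]);
	return 0;
}

The per-prio PFC group converted in the next patch reuses the same idea with a %s conversion, substituting "prio%d" or "global" into the format.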
Signed-off-by: Kamal Heib Reviewed-by: Gal Pressman Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 13 ------ drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 50 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 11 ----- 3 files changed, 50 insertions(+), 24 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 8515ae815cbc..b17460e14b29 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -182,7 +182,6 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset) for (i = 0; i < mlx5e_num_stats_grps; i++) num_stats += mlx5e_stats_grps[i].get_num_stats(priv); return num_stats + - NUM_PPORT_COUNTERS(priv) + MLX5E_NUM_RQ_STATS(priv) + MLX5E_NUM_SQ_STATS(priv) + MLX5E_NUM_PFC_COUNTERS(priv) + @@ -215,12 +214,6 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data) for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx); - for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { - for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, - pport_per_prio_traffic_stats_desc[i].format, prio); - } - pfc_combined = mlx5e_query_pfc_combined(priv); for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) { for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { @@ -315,12 +308,6 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx); - for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { - for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) - data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio], - pport_per_prio_traffic_stats_desc, i); - } - pfc_combined = mlx5e_query_pfc_combined(priv); for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) { for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 700362a00fd0..5d00d38f9bd0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -502,6 +502,51 @@ static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data, return idx; } +static const struct counter_desc pport_per_prio_traffic_stats_desc[] = { + { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) }, + { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) }, + { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) }, + { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) }, +}; + +#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS ARRAY_SIZE(pport_per_prio_traffic_stats_desc) + +static int mlx5e_grp_per_prio_traffic_get_num_stats(struct mlx5e_priv *priv) +{ + return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO; +} + +static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv, + u8 *data, + int idx) +{ + int i, prio; + + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { + for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + pport_per_prio_traffic_stats_desc[i].format, prio); + } + + return idx; +} + +static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv, + u64 *data, + int idx) +{ + int i, prio; + + for (prio = 0; prio < 
NUM_PPORT_PRIO; prio++) { + for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) + data[idx++] = + MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio], + pport_per_prio_traffic_stats_desc, i); + } + + return idx; +} + const struct mlx5e_stats_grp mlx5e_stats_grps[] = { { .get_num_stats = mlx5e_grp_sw_get_num_stats, @@ -548,6 +593,11 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = { .fill_strings = mlx5e_grp_pcie_fill_strings, .fill_stats = mlx5e_grp_pcie_fill_stats, }, + { + .get_num_stats = mlx5e_grp_per_prio_traffic_get_num_stats, + .fill_strings = mlx5e_grp_per_prio_traffic_fill_strings, + .fill_stats = mlx5e_grp_per_prio_traffic_fill_stats, + }, }; const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 8afff1981b25..8b5b622306a9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -135,13 +135,6 @@ struct mlx5e_pport_stats { __be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; }; -static const struct counter_desc pport_per_prio_traffic_stats_desc[] = { - { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) }, - { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) }, - { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) }, - { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) }, -}; - static const struct counter_desc pport_per_prio_pfc_stats_desc[] = { /* %s is "global" or "prio{i}" */ { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) }, @@ -249,12 +242,8 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, }; -#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \ - ARRAY_SIZE(pport_per_prio_traffic_stats_desc) #define NUM_PPORT_PER_PRIO_PFC_COUNTERS \ ARRAY_SIZE(pport_per_prio_pfc_stats_desc) -#define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \ - NUM_PPORT_PRIO) #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) -- cgit v1.2.3 From 4377bea27696f45834d77b8e3c7206874d5a66e6 Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Wed, 23 Aug 2017 15:04:47 +0300 Subject: net/mlx5e: Switch per prio pfc counters to use stats group API Switch the per prio pfc counters to use the new stats group API. Signed-off-by: Kamal Heib Reviewed-by: Gal Pressman Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 73 +------------- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 111 +++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 14 --- 3 files changed, 113 insertions(+), 85 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index b17460e14b29..6680b24bf1c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -136,42 +136,9 @@ void mlx5e_build_ptys2ethtool_map(void) ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT); } -static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv) -{ - struct mlx5_core_dev *mdev = priv->mdev; - u8 pfc_en_tx; - u8 pfc_en_rx; - int err; - - if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) - return 0; - - err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx); - - return err ? 
0 : pfc_en_tx | pfc_en_rx; -} - -static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv) -{ - struct mlx5_core_dev *mdev = priv->mdev; - u32 rx_pause; - u32 tx_pause; - int err; - - if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) - return false; - - err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause); - - return err ? false : rx_pause | tx_pause; -} - #define MLX5E_NUM_RQ_STATS(priv) (NUM_RQ_STATS * (priv)->channels.num) #define MLX5E_NUM_SQ_STATS(priv) \ (NUM_SQ_STATS * (priv)->channels.num * (priv)->channels.params.num_tc) -#define MLX5E_NUM_PFC_COUNTERS(priv) \ - ((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \ - NUM_PPORT_PER_PRIO_PFC_COUNTERS) int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset) { @@ -184,7 +151,6 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset) return num_stats + MLX5E_NUM_RQ_STATS(priv) + MLX5E_NUM_SQ_STATS(priv) + - MLX5E_NUM_PFC_COUNTERS(priv) + ARRAY_SIZE(mlx5e_pme_status_desc) + ARRAY_SIZE(mlx5e_pme_error_desc) + mlx5e_ipsec_get_count(priv); @@ -208,30 +174,11 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset) static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data) { - int i, j, tc, prio, idx = 0; - unsigned long pfc_combined; + int i, j, tc, idx = 0; for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx); - pfc_combined = mlx5e_query_pfc_combined(priv); - for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) { - for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { - char pfc_string[ETH_GSTRING_LEN]; - - snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio); - sprintf(data + (idx++) * ETH_GSTRING_LEN, - pport_per_prio_pfc_stats_desc[i].format, pfc_string); - } - } - - if (mlx5e_query_global_pause_combined(priv)) { - for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { - sprintf(data + (idx++) * ETH_GSTRING_LEN, - pport_per_prio_pfc_stats_desc[i].format, "global"); - } - } - /* port module event counters */ for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format); @@ -293,8 +240,7 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, { struct mlx5e_channels *channels; struct mlx5_priv *mlx5_priv; - int i, j, tc, prio, idx = 0; - unsigned long pfc_combined; + int i, j, tc, idx = 0; if (!data) return; @@ -308,21 +254,6 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx); - pfc_combined = mlx5e_query_pfc_combined(priv); - for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) { - for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { - data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio], - pport_per_prio_pfc_stats_desc, i); - } - } - - if (mlx5e_query_global_pause_combined(priv)) { - for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { - data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0], - pport_per_prio_pfc_stats_desc, i); - } - } - /* port module event counters */ mlx5_priv = &priv->mdev->priv; for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 5d00d38f9bd0..c9f3be940934 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -502,6 
+502,9 @@ static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data, return idx; } +#define PPORT_PER_PRIO_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.eth_per_prio_grp_data_layout.c##_high) static const struct counter_desc pport_per_prio_traffic_stats_desc[] = { { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) }, { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) }, @@ -547,6 +550,109 @@ static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv, return idx; } +static const struct counter_desc pport_per_prio_pfc_stats_desc[] = { + /* %s is "global" or "prio{i}" */ + { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) }, + { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) }, + { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) }, + { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) }, + { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, +}; + +#define NUM_PPORT_PER_PRIO_PFC_COUNTERS ARRAY_SIZE(pport_per_prio_pfc_stats_desc) + +static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u8 pfc_en_tx; + u8 pfc_en_rx; + int err; + + if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + return 0; + + err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx); + + return err ? 0 : pfc_en_tx | pfc_en_rx; +} + +static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u32 rx_pause; + u32 tx_pause; + int err; + + if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + return false; + + err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause); + + return err ? false : rx_pause | tx_pause; +} + +static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv) +{ + return (mlx5e_query_global_pause_combined(priv) + + hweight8(mlx5e_query_pfc_combined(priv))) * + NUM_PPORT_PER_PRIO_PFC_COUNTERS; +} + +static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv, + u8 *data, + int idx) +{ + unsigned long pfc_combined; + int i, prio; + + pfc_combined = mlx5e_query_pfc_combined(priv); + for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) { + for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { + char pfc_string[ETH_GSTRING_LEN]; + + snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio); + sprintf(data + (idx++) * ETH_GSTRING_LEN, + pport_per_prio_pfc_stats_desc[i].format, pfc_string); + } + } + + if (mlx5e_query_global_pause_combined(priv)) { + for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { + sprintf(data + (idx++) * ETH_GSTRING_LEN, + pport_per_prio_pfc_stats_desc[i].format, "global"); + } + } + + return idx; +} + +static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv, + u64 *data, + int idx) +{ + unsigned long pfc_combined; + int i, prio; + + pfc_combined = mlx5e_query_pfc_combined(priv); + for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) { + for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { + data[idx++] = + MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio], + pport_per_prio_pfc_stats_desc, i); + } + } + + if (mlx5e_query_global_pause_combined(priv)) { + for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) { + data[idx++] = + MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0], + pport_per_prio_pfc_stats_desc, i); + } + } + + return idx; +} + const struct mlx5e_stats_grp mlx5e_stats_grps[] = { { .get_num_stats = mlx5e_grp_sw_get_num_stats, @@ -598,6 +704,11 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = { 
.fill_strings = mlx5e_grp_per_prio_traffic_fill_strings, .fill_stats = mlx5e_grp_per_prio_traffic_fill_stats, }, + { + .get_num_stats = mlx5e_grp_per_prio_pfc_get_num_stats, + .fill_strings = mlx5e_grp_per_prio_pfc_fill_strings, + .fill_stats = mlx5e_grp_per_prio_pfc_fill_stats, + }, }; const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 8b5b622306a9..8e92cd80c638 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -114,9 +114,6 @@ struct mlx5e_vport_stats { #define PPORT_PHY_STATISTICAL_GET(pstats, c) \ MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \ counter_set.phys_layer_statistical_cntrs.c##_high) -#define PPORT_PER_PRIO_OFF(c) \ - MLX5_BYTE_OFF(ppcnt_reg, \ - counter_set.eth_per_prio_grp_data_layout.c##_high) #define PPORT_PER_PRIO_GET(pstats, prio, c) \ MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \ counter_set.eth_per_prio_grp_data_layout.c##_high) @@ -135,15 +132,6 @@ struct mlx5e_pport_stats { __be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; }; -static const struct counter_desc pport_per_prio_pfc_stats_desc[] = { - /* %s is "global" or "prio{i}" */ - { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) }, - { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) }, - { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) }, - { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) }, - { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, -}; - #define PCIE_PERF_GET(pcie_stats, c) \ MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \ counter_set.pcie_perf_cntrs_grp_data_layout.c) @@ -242,8 +230,6 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, }; -#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \ - ARRAY_SIZE(pport_per_prio_pfc_stats_desc) #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) -- cgit v1.2.3 From 0e6f01a49d9c006e3ee3104ca5d4ccf722e154db Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Wed, 23 Aug 2017 15:05:58 +0300 Subject: net/mlx5e: Switch pme counters to use stats group API Switch the pme counters to use the new stats group API. 
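Every conversion in this series follows the same recipe: describe the counters in a counter_desc table, then expose them through the three mlx5e_stats_grp callbacks, each of which consumes the running slot index and returns the advanced one so that groups can be chained in any order. A minimal sketch of what a new group looks like (mlx5e_foo_stats, the priv->stats.foo field and the foo_events counter are hypothetical, purely for illustration):

static const struct counter_desc foo_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_foo_stats, foo_events) },
};

#define NUM_FOO_STATS ARRAY_SIZE(foo_stats_desc)

static int mlx5e_grp_foo_get_num_stats(struct mlx5e_priv *priv)
{
        return NUM_FOO_STATS; /* how many string/value slots to reserve */
}

static int mlx5e_grp_foo_fill_strings(struct mlx5e_priv *priv, u8 *data,
                                      int idx)
{
        int i;

        for (i = 0; i < NUM_FOO_STATS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN,
                       foo_stats_desc[i].format);
        return idx; /* first free slot for the next group */
}

static int mlx5e_grp_foo_fill_stats(struct mlx5e_priv *priv, u64 *data,
                                    int idx)
{
        int i;

        for (i = 0; i < NUM_FOO_STATS; i++)
                data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.foo, /* hypothetical field */
                                                   foo_stats_desc, i);
        return idx;
}

The group then becomes one more entry in mlx5e_stats_grps[], exactly as the hunks below do for the port module event counters.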
Signed-off-by: Kamal Heib Reviewed-by: Gal Pressman Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 20 -------- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 54 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 10 ---- 3 files changed, 54 insertions(+), 30 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 6680b24bf1c7..6de948819034 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -151,8 +151,6 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset) return num_stats + MLX5E_NUM_RQ_STATS(priv) + MLX5E_NUM_SQ_STATS(priv) + - ARRAY_SIZE(mlx5e_pme_status_desc) + - ARRAY_SIZE(mlx5e_pme_error_desc) + mlx5e_ipsec_get_count(priv); case ETH_SS_PRIV_FLAGS: @@ -179,13 +177,6 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data) for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx); - /* port module event counters */ - for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format); - - for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++) - strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format); - /* IPSec counters */ idx += mlx5e_ipsec_get_strings(priv, data + idx * ETH_GSTRING_LEN); @@ -239,7 +230,6 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, struct ethtool_stats *stats, u64 *data) { struct mlx5e_channels *channels; - struct mlx5_priv *mlx5_priv; int i, j, tc, idx = 0; if (!data) @@ -254,16 +244,6 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx); - /* port module event counters */ - mlx5_priv = &priv->mdev->priv; - for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++) - data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters, - mlx5e_pme_status_desc, i); - - for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++) - data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters, - mlx5e_pme_error_desc, i); - /* IPSec counters */ idx += mlx5e_ipsec_get_stats(priv, data + idx); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index c9f3be940934..b120957ea940 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -653,6 +653,55 @@ static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv, return idx; } +static const struct counter_desc mlx5e_pme_status_desc[] = { + { "module_unplug", 8 }, +}; + +static const struct counter_desc mlx5e_pme_error_desc[] = { + { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */ + { "module_high_temp", 48 }, /* high temperature */ + { "module_bad_shorted", 56 }, /* bad or shorted cable/module */ +}; + +#define NUM_PME_STATUS_STATS ARRAY_SIZE(mlx5e_pme_status_desc) +#define NUM_PME_ERR_STATS ARRAY_SIZE(mlx5e_pme_error_desc) + +static int mlx5e_grp_pme_get_num_stats(struct mlx5e_priv *priv) +{ + return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS; +} + +static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data, + int idx) +{ + int i; + + for (i = 0; i < NUM_PME_STATUS_STATS; i++) + strcpy(data + (idx++) * 
ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format); + + for (i = 0; i < NUM_PME_ERR_STATS; i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format); + + return idx; +} + +static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data, + int idx) +{ + struct mlx5_priv *mlx5_priv = &priv->mdev->priv; + int i; + + for (i = 0; i < NUM_PME_STATUS_STATS; i++) + data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters, + mlx5e_pme_status_desc, i); + + for (i = 0; i < NUM_PME_ERR_STATS; i++) + data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters, + mlx5e_pme_error_desc, i); + + return idx; +} + const struct mlx5e_stats_grp mlx5e_stats_grps[] = { { .get_num_stats = mlx5e_grp_sw_get_num_stats, @@ -709,6 +758,11 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = { .fill_strings = mlx5e_grp_per_prio_pfc_fill_strings, .fill_stats = mlx5e_grp_per_prio_pfc_fill_stats, }, + { + .get_num_stats = mlx5e_grp_pme_get_num_stats, + .fill_strings = mlx5e_grp_pme_fill_strings, + .fill_stats = mlx5e_grp_pme_fill_stats, + }, }; const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 8e92cd80c638..800d72a22d91 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -252,14 +252,4 @@ struct mlx5e_stats_grp { extern const struct mlx5e_stats_grp mlx5e_stats_grps[]; extern const int mlx5e_num_stats_grps; -static const struct counter_desc mlx5e_pme_status_desc[] = { - { "module_unplug", 8 }, -}; - -static const struct counter_desc mlx5e_pme_error_desc[] = { - { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */ - { "module_high_temp", 48 }, /* high temperature */ - { "module_bad_shorted", 56 }, /* bad or shorted cable/module */ -}; - #endif /* __MLX5_EN_STATS_H__ */ -- cgit v1.2.3 From e185d43f59ccde68dff474f1f2b38b62f915d74c Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Wed, 23 Aug 2017 15:07:20 +0300 Subject: net/mlx5e: Switch ipsec counters to use stats group API Switch the ipsec counters to use the new stats group API. 
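All of these descriptor tables lean on the same small mechanism: a counter_desc pairs an ethtool string (possibly containing a %d or %s placeholder, as in the per-prio tables above) with the byte offset of the counter inside its backing structure, and the MLX5E_READ_CTR64_CPU/MLX5E_READ_CTR64_BE helpers fetch the value at that offset. Roughly, paraphrased rather than copied from en_stats.h:

struct counter_desc {
        char format[ETH_GSTRING_LEN]; /* name, may embed %d or %s */
        size_t offset;                /* byte offset of the u64 counter */
};

/* read a host-endian 64-bit counter at dsc[i].offset inside ptr */
#define MLX5E_READ_CTR64_CPU(ptr, dsc, i) \
        (*(u64 *)((char *)(ptr) + (dsc)[i].offset))

This is why a counter set can be moved between files wholesale: the table plus the three callbacks carry everything needed to enumerate, name and read its counters.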
Signed-off-by: Kamal Heib Reviewed-by: Gal Pressman Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 10 +-------- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 24 ++++++++++++++++++++++ 2 files changed, 25 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 6de948819034..ff21348b7623 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -31,7 +31,6 @@ */ #include "en.h" -#include "en_accel/ipsec.h" void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv, struct ethtool_drvinfo *drvinfo) @@ -150,8 +149,7 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset) num_stats += mlx5e_stats_grps[i].get_num_stats(priv); return num_stats + MLX5E_NUM_RQ_STATS(priv) + - MLX5E_NUM_SQ_STATS(priv) + - mlx5e_ipsec_get_count(priv); + MLX5E_NUM_SQ_STATS(priv); case ETH_SS_PRIV_FLAGS: return ARRAY_SIZE(mlx5e_priv_flags); @@ -177,9 +175,6 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data) for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx); - /* IPSec counters */ - idx += mlx5e_ipsec_get_strings(priv, data + idx * ETH_GSTRING_LEN); - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return; @@ -244,9 +239,6 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx); - /* IPSec counters */ - idx += mlx5e_ipsec_get_stats(priv, data + idx); - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index b120957ea940..930a8224e013 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -31,6 +31,7 @@ */ #include "en.h" +#include "en_accel/ipsec.h" static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) }, @@ -702,6 +703,24 @@ static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data, return idx; } +static int mlx5e_grp_ipsec_get_num_stats(struct mlx5e_priv *priv) +{ + return mlx5e_ipsec_get_count(priv); +} + +static int mlx5e_grp_ipsec_fill_strings(struct mlx5e_priv *priv, u8 *data, + int idx) +{ + return idx + mlx5e_ipsec_get_strings(priv, + data + idx * ETH_GSTRING_LEN); +} + +static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data, + int idx) +{ + return idx + mlx5e_ipsec_get_stats(priv, data + idx); +} + const struct mlx5e_stats_grp mlx5e_stats_grps[] = { { .get_num_stats = mlx5e_grp_sw_get_num_stats, @@ -763,6 +782,11 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = { .fill_strings = mlx5e_grp_pme_fill_strings, .fill_stats = mlx5e_grp_pme_fill_stats, }, + { + .get_num_stats = mlx5e_grp_ipsec_get_num_stats, + .fill_strings = mlx5e_grp_ipsec_fill_strings, + .fill_stats = mlx5e_grp_ipsec_fill_stats, + }, }; const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps); -- cgit v1.2.3 From 1fe850062c9ee15a3bea1ae90aef386a492a1c5e Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Wed, 23 Aug 2017 15:08:19 +0300 Subject: net/mlx5e: Switch channels counters to use stats group API Switch the channels counters to use the new stats group API. 
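With the channels counters converted, nothing stat-specific is left in en_ethtool.c: the three ethtool entry points reduce to plain iteration over the group table, as the hunks below show. Condensed (not verbatim), the whole dispatch is:

/* ETH_SS_STATS count: sum what each group reports */
num_stats = 0;
for (i = 0; i < mlx5e_num_stats_grps; i++)
        num_stats += mlx5e_stats_grps[i].get_num_stats(priv);

/* strings: each group appends at idx and returns the new idx */
idx = 0;
for (i = 0; i < mlx5e_num_stats_grps; i++)
        idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx);

/* values: same contract, u64 slots instead of strings */
idx = 0;
for (i = 0; i < mlx5e_num_stats_grps; i++)
        idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx);

Per-channel naming still works because the channels group instantiates its format strings per ring (sprintf of rq_stats_desc[j].format with the channel index), so adding or removing a counter set from here on touches only en_stats.c.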
Signed-off-by: Kamal Heib Reviewed-by: Gal Pressman Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 47 +--------- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 103 +++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 45 --------- 3 files changed, 106 insertions(+), 89 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index ff21348b7623..b34aa8efb036 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -135,10 +135,6 @@ void mlx5e_build_ptys2ethtool_map(void) ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT); } -#define MLX5E_NUM_RQ_STATS(priv) (NUM_RQ_STATS * (priv)->channels.num) -#define MLX5E_NUM_SQ_STATS(priv) \ - (NUM_SQ_STATS * (priv)->channels.num * (priv)->channels.params.num_tc) - int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset) { int i, num_stats = 0; @@ -147,10 +143,7 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset) case ETH_SS_STATS: for (i = 0; i < mlx5e_num_stats_grps; i++) num_stats += mlx5e_stats_grps[i].get_num_stats(priv); - return num_stats + - MLX5E_NUM_RQ_STATS(priv) + - MLX5E_NUM_SQ_STATS(priv); - + return num_stats; case ETH_SS_PRIV_FLAGS: return ARRAY_SIZE(mlx5e_priv_flags); case ETH_SS_TEST: @@ -170,26 +163,10 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset) static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data) { - int i, j, tc, idx = 0; + int i, idx = 0; for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx); - - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) - return; - - /* per channel counters */ - for (i = 0; i < priv->channels.num; i++) - for (j = 0; j < NUM_RQ_STATS; j++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, - rq_stats_desc[j].format, i); - - for (tc = 0; tc < priv->channels.params.num_tc; tc++) - for (i = 0; i < priv->channels.num; i++) - for (j = 0; j < NUM_SQ_STATS; j++) - sprintf(data + (idx++) * ETH_GSTRING_LEN, - sq_stats_desc[j].format, - priv->channel_tc2txq[i][tc]); } void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data) @@ -224,8 +201,7 @@ static void mlx5e_get_strings(struct net_device *dev, u32 stringset, u8 *data) void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, struct ethtool_stats *stats, u64 *data) { - struct mlx5e_channels *channels; - int i, j, tc, idx = 0; + int i, idx = 0; if (!data) return; @@ -233,27 +209,10 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, mutex_lock(&priv->state_lock); if (test_bit(MLX5E_STATE_OPENED, &priv->state)) mlx5e_update_stats(priv, true); - channels = &priv->channels; mutex_unlock(&priv->state_lock); for (i = 0; i < mlx5e_num_stats_grps; i++) idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx); - - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) - return; - - /* per channel counters */ - for (i = 0; i < channels->num; i++) - for (j = 0; j < NUM_RQ_STATS; j++) - data[idx++] = - MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats, - rq_stats_desc, j); - - for (tc = 0; tc < priv->channels.params.num_tc; tc++) - for (i = 0; i < channels->num; i++) - for (j = 0; j < NUM_SQ_STATS; j++) - data[idx++] = MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats, - sq_stats_desc, j); } static void mlx5e_get_ethtool_stats(struct net_device *dev, diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 930a8224e013..8bc30484ecc1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -721,6 +721,104 @@ static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data, return idx + mlx5e_ipsec_get_stats(priv, data + idx); } +static const struct counter_desc rq_stats_desc[] = { + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) }, +}; + +static const struct counter_desc sq_stats_desc[] = { + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, +}; + +#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) +#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) + +static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv) +{ + return (NUM_RQ_STATS * priv->channels.num) + + (NUM_SQ_STATS * priv->channels.num * priv->channels.params.num_tc); +} + +static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, + int idx) +{ + int i, j, tc; + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + return idx; + + for (i = 0; i < priv->channels.num; i++) + for (j = 0; j < NUM_RQ_STATS; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i); + + for (tc = 0; tc < priv->channels.params.num_tc; tc++) + for (i = 0; i 
< priv->channels.num; i++) + for (j = 0; j < NUM_SQ_STATS; j++) + sprintf(data + (idx++) * ETH_GSTRING_LEN, + sq_stats_desc[j].format, + priv->channel_tc2txq[i][tc]); + + return idx; +} + +static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data, + int idx) +{ + struct mlx5e_channels *channels = &priv->channels; + int i, j, tc; + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + return idx; + + for (i = 0; i < channels->num; i++) + for (j = 0; j < NUM_RQ_STATS; j++) + data[idx++] = + MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats, + rq_stats_desc, j); + + for (tc = 0; tc < priv->channels.params.num_tc; tc++) + for (i = 0; i < channels->num; i++) + for (j = 0; j < NUM_SQ_STATS; j++) + data[idx++] = + MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats, + sq_stats_desc, j); + + return idx; +} + const struct mlx5e_stats_grp mlx5e_stats_grps[] = { { .get_num_stats = mlx5e_grp_sw_get_num_stats, @@ -787,6 +885,11 @@ const struct mlx5e_stats_grp mlx5e_stats_grps[] = { .fill_strings = mlx5e_grp_ipsec_fill_strings, .fill_stats = mlx5e_grp_ipsec_fill_stats, }, + { + .get_num_stats = mlx5e_grp_channels_get_num_stats, + .fill_strings = mlx5e_grp_channels_fill_strings, + .fill_stats = mlx5e_grp_channels_fill_stats, + } }; const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 800d72a22d91..d094663edd9b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -169,31 +169,6 @@ struct mlx5e_rq_stats { u64 cache_waive; }; -static const struct counter_desc rq_stats_desc[] = { - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) }, -}; - struct mlx5e_sq_stats { /* commonly accessed in data path */ u64 packets; @@ -213,26 +188,6 @@ struct mlx5e_sq_stats { u64 dropped; }; -static const struct counter_desc sq_stats_desc[] = { - { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) }, - { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) }, - { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) }, - { MLX5E_DECLARE_TX_STAT(struct 
mlx5e_sq_stats, tso_bytes) }, - { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) }, - { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) }, - { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) }, - { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) }, - { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) }, - { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) }, - { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) }, - { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) }, - { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) }, - { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) }, -}; - -#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) -#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) - struct mlx5e_stats { struct mlx5e_sw_stats sw; struct mlx5e_qcounter_stats qcnt; -- cgit v1.2.3 From 1f233f327913f3dee0602cba9c64df1903772b55 Mon Sep 17 00:00:00 2001 From: Vijaya Mohan Guvva Date: Tue, 31 Oct 2017 16:04:53 -0700 Subject: liquidio: switchdev support for LiquidIO NIC Enable switchdev for SRIOV capable LiquidIO NIC. It registers a representor netdev (with switchdev_ops) for each SRIOV VF created. It also has changes to send representor interface configurations like admin state and MTU to LiquidIO firmware and to retrieve HW counted VF stats for VF representor. Signed-off-by: Vijaya Mohan Guvva Signed-off-by: Satanand Burla Signed-off-by: Raghu Vatsavayi Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/Makefile | 2 +- drivers/net/ethernet/cavium/liquidio/lio_main.c | 25 + drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c | 621 +++++++++++++++++++++ drivers/net/ethernet/cavium/liquidio/lio_vf_rep.h | 47 ++ .../net/ethernet/cavium/liquidio/liquidio_common.h | 49 ++ .../net/ethernet/cavium/liquidio/octeon_device.h | 7 + 6 files changed, 750 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c create mode 100644 drivers/net/ethernet/cavium/liquidio/lio_vf_rep.h (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cavium/liquidio/Makefile b/drivers/net/ethernet/cavium/liquidio/Makefile index c4d411d1aa28..cad4fe1ffe55 100644 --- a/drivers/net/ethernet/cavium/liquidio/Makefile +++ b/drivers/net/ethernet/cavium/liquidio/Makefile @@ -17,7 +17,7 @@ liquidio-$(CONFIG_LIQUIDIO) += lio_ethtool.o \ octeon_droq.o \ octeon_nic.o -liquidio-objs := lio_main.o octeon_console.o $(liquidio-y) +liquidio-objs := lio_main.o octeon_console.o lio_vf_rep.o $(liquidio-y) obj-$(CONFIG_LIQUIDIO_VF) += liquidio_vf.o diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 8ea24d68e824..80784122e6e9 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "liquidio_common.h" #include "octeon_droq.h" #include "octeon_iq.h" @@ -3309,6 +3310,29 @@ static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, return 0; } +static int +lio_pf_switchdev_attr_get(struct net_device *dev, struct switchdev_attr *attr) +{ + struct lio *lio = GET_LIO(dev); + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: + attr->u.ppid.id_len = ETH_ALEN; + ether_addr_copy(attr->u.ppid.id, + (void *)&lio->linfo.hw_addr + 2); + break; + + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static const struct switchdev_ops lio_pf_switchdev_ops = { + 
.switchdev_port_attr_get = lio_pf_switchdev_attr_get, +}; + static const struct net_device_ops lionetdevops = { .ndo_open = liquidio_open, .ndo_stop = liquidio_stop, @@ -3583,6 +3607,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) * netdev tasks. */ netdev->netdev_ops = &lionetdevops; + SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops); lio = GET_LIO(netdev); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c new file mode 100644 index 000000000000..67ff7a143e9e --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c @@ -0,0 +1,621 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2017 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ +#include +#include +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_nic.h" +#include "octeon_main.h" +#include "octeon_network.h" +#include +#include "lio_vf_rep.h" +#include "octeon_network.h" + +static int lio_vf_rep_open(struct net_device *ndev); +static int lio_vf_rep_stop(struct net_device *ndev); +static int lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev); +static void lio_vf_rep_tx_timeout(struct net_device *netdev); +static int lio_vf_rep_phys_port_name(struct net_device *dev, + char *buf, size_t len); +static void lio_vf_rep_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats64); +static int lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu); + +static const struct net_device_ops lio_vf_rep_ndev_ops = { + .ndo_open = lio_vf_rep_open, + .ndo_stop = lio_vf_rep_stop, + .ndo_start_xmit = lio_vf_rep_pkt_xmit, + .ndo_tx_timeout = lio_vf_rep_tx_timeout, + .ndo_get_phys_port_name = lio_vf_rep_phys_port_name, + .ndo_get_stats64 = lio_vf_rep_get_stats64, + .ndo_change_mtu = lio_vf_rep_change_mtu, +}; + +static void +lio_vf_rep_send_sc_complete(struct octeon_device *oct, + u32 status, void *ptr) +{ + struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; + struct lio_vf_rep_sc_ctx *ctx = + (struct lio_vf_rep_sc_ctx *)sc->ctxptr; + struct lio_vf_rep_resp *resp = + (struct lio_vf_rep_resp *)sc->virtrptr; + + if (status != OCTEON_REQUEST_TIMEOUT && READ_ONCE(resp->status)) + WRITE_ONCE(resp->status, 0); + + complete(&ctx->complete); +} + +static int +lio_vf_rep_send_soft_command(struct octeon_device *oct, + void *req, int req_size, + void *resp, int resp_size) +{ + int tot_resp_size = sizeof(struct lio_vf_rep_resp) + resp_size; + int ctx_size = sizeof(struct lio_vf_rep_sc_ctx); + struct octeon_soft_command *sc = NULL; + struct lio_vf_rep_resp *rep_resp; + struct lio_vf_rep_sc_ctx *ctx; + void *sc_req; + int err; + + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct, req_size, + tot_resp_size, ctx_size); + if (!sc) + return 
-ENOMEM; + + ctx = (struct lio_vf_rep_sc_ctx *)sc->ctxptr; + memset(ctx, 0, ctx_size); + init_completion(&ctx->complete); + + sc_req = (struct lio_vf_rep_req *)sc->virtdptr; + memcpy(sc_req, req, req_size); + + rep_resp = (struct lio_vf_rep_resp *)sc->virtrptr; + memset(rep_resp, 0, tot_resp_size); + WRITE_ONCE(rep_resp->status, 1); + + sc->iq_no = 0; + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_VF_REP_CMD, 0, 0, 0); + sc->callback = lio_vf_rep_send_sc_complete; + sc->callback_arg = sc; + sc->wait_time = LIO_VF_REP_REQ_TMO_MS; + + err = octeon_send_soft_command(oct, sc); + if (err == IQ_SEND_FAILED) + goto free_buff; + + wait_for_completion_timeout(&ctx->complete, + msecs_to_jiffies + (2 * LIO_VF_REP_REQ_TMO_MS)); + err = READ_ONCE(rep_resp->status) ? -EBUSY : 0; + if (err) + dev_err(&oct->pci_dev->dev, "VF rep send config failed\n"); + + if (resp) + memcpy(resp, (rep_resp + 1), resp_size); +free_buff: + octeon_free_soft_command(oct, sc); + + return err; +} + +static int +lio_vf_rep_open(struct net_device *ndev) +{ + struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev); + struct lio_vf_rep_req rep_cfg; + struct octeon_device *oct; + int ret; + + oct = vf_rep->oct; + + memset(&rep_cfg, 0, sizeof(rep_cfg)); + rep_cfg.req_type = LIO_VF_REP_REQ_STATE; + rep_cfg.ifidx = vf_rep->ifidx; + rep_cfg.rep_state.state = LIO_VF_REP_STATE_UP; + + ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, + sizeof(rep_cfg), NULL, 0); + + if (ret) { + dev_err(&oct->pci_dev->dev, + "VF_REP open failed with err %d\n", ret); + return -EIO; + } + + atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) | + LIO_IFSTATE_RUNNING)); + + netif_carrier_on(ndev); + netif_start_queue(ndev); + + return 0; +} + +static int +lio_vf_rep_stop(struct net_device *ndev) +{ + struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev); + struct lio_vf_rep_req rep_cfg; + struct octeon_device *oct; + int ret; + + oct = vf_rep->oct; + + memset(&rep_cfg, 0, sizeof(rep_cfg)); + rep_cfg.req_type = LIO_VF_REP_REQ_STATE; + rep_cfg.ifidx = vf_rep->ifidx; + rep_cfg.rep_state.state = LIO_VF_REP_STATE_DOWN; + + ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, + sizeof(rep_cfg), NULL, 0); + + if (ret) { + dev_err(&oct->pci_dev->dev, + "VF_REP dev stop failed with err %d\n", ret); + return -EIO; + } + + atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) & + ~LIO_IFSTATE_RUNNING)); + + netif_tx_disable(ndev); + netif_carrier_off(ndev); + + return 0; +} + +static void +lio_vf_rep_tx_timeout(struct net_device *ndev) +{ + netif_trans_update(ndev); + + netif_wake_queue(ndev); +} + +static void +lio_vf_rep_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats64) +{ + struct lio_vf_rep_desc *vf_rep = netdev_priv(dev); + + stats64->tx_packets = vf_rep->stats.tx_packets; + stats64->tx_bytes = vf_rep->stats.tx_bytes; + stats64->tx_dropped = vf_rep->stats.tx_dropped; + + stats64->rx_packets = vf_rep->stats.rx_packets; + stats64->rx_bytes = vf_rep->stats.rx_bytes; + stats64->rx_dropped = vf_rep->stats.rx_dropped; +} + +static int +lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu) +{ + struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev); + struct lio_vf_rep_req rep_cfg; + struct octeon_device *oct; + int ret; + + oct = vf_rep->oct; + + memset(&rep_cfg, 0, sizeof(rep_cfg)); + rep_cfg.req_type = LIO_VF_REP_REQ_MTU; + rep_cfg.ifidx = vf_rep->ifidx; + rep_cfg.rep_mtu.mtu = cpu_to_be32(new_mtu); + + ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, + sizeof(rep_cfg), NULL, 0); + if (ret) { + dev_err(&oct->pci_dev->dev, 
+ "Change MTU failed with err %d\n", ret); + return -EIO; + } + + ndev->mtu = new_mtu; + + return 0; +} + +static int +lio_vf_rep_phys_port_name(struct net_device *dev, + char *buf, size_t len) +{ + struct lio_vf_rep_desc *vf_rep = netdev_priv(dev); + struct octeon_device *oct = vf_rep->oct; + int ret; + + ret = snprintf(buf, len, "pf%dvf%d", oct->pf_num, + vf_rep->ifidx - oct->pf_num * 64 - 1); + if (ret >= len) + return -EOPNOTSUPP; + + return 0; +} + +static struct net_device * +lio_vf_rep_get_ndev(struct octeon_device *oct, int ifidx) +{ + int vf_id, max_vfs = CN23XX_MAX_VFS_PER_PF + 1; + int vfid_mask = max_vfs - 1; + + if (ifidx <= oct->pf_num * max_vfs || + ifidx >= oct->pf_num * max_vfs + max_vfs) + return NULL; + + /* ifidx 1-63 for PF0 VFs + * ifidx 65-127 for PF1 VFs + */ + vf_id = (ifidx & vfid_mask) - 1; + + return oct->vf_rep_list.ndev[vf_id]; +} + +static void +lio_vf_rep_copy_packet(struct octeon_device *oct, + struct sk_buff *skb, + int len) +{ + if (likely(len > MIN_SKB_SIZE)) { + struct octeon_skb_page_info *pg_info; + unsigned char *va; + + pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + if (pg_info->page) { + va = page_address(pg_info->page) + + pg_info->page_offset; + memcpy(skb->data, va, MIN_SKB_SIZE); + skb_put(skb, MIN_SKB_SIZE); + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + pg_info->page, + pg_info->page_offset + MIN_SKB_SIZE, + len - MIN_SKB_SIZE, + LIO_RXBUFFER_SZ); + } else { + struct octeon_skb_page_info *pg_info = + ((struct octeon_skb_page_info *)(skb->cb)); + + skb_copy_to_linear_data(skb, page_address(pg_info->page) + + pg_info->page_offset, len); + skb_put(skb, len); + put_page(pg_info->page); + } +} + +static int +lio_vf_rep_pkt_recv(struct octeon_recv_info *recv_info, void *buf) +{ + struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; + struct lio_vf_rep_desc *vf_rep; + struct net_device *vf_ndev; + struct octeon_device *oct; + union octeon_rh *rh; + struct sk_buff *skb; + int i, ifidx; + + oct = lio_get_device(recv_pkt->octeon_id); + if (!oct) + goto free_buffers; + + skb = recv_pkt->buffer_ptr[0]; + rh = &recv_pkt->rh; + ifidx = rh->r.ossp; + + vf_ndev = lio_vf_rep_get_ndev(oct, ifidx); + if (!vf_ndev) + goto free_buffers; + + vf_rep = netdev_priv(vf_ndev); + if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) || + recv_pkt->buffer_count > 1) + goto free_buffers; + + skb->dev = vf_ndev; + + /* Multiple buffers are not used for vf_rep packets. + * So just buffer_size[0] is valid. 
+ */ + lio_vf_rep_copy_packet(oct, skb, recv_pkt->buffer_size[0]); + + skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT); + skb->protocol = eth_type_trans(skb, skb->dev); + skb->ip_summed = CHECKSUM_NONE; + + netif_rx(skb); + + octeon_free_recv_info(recv_info); + + return 0; + +free_buffers: + for (i = 0; i < recv_pkt->buffer_count; i++) + recv_buffer_free(recv_pkt->buffer_ptr[i]); + + octeon_free_recv_info(recv_info); + + return 0; +} + +static void +lio_vf_rep_packet_sent_callback(struct octeon_device *oct, + u32 status, void *buf) +{ + struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; + struct sk_buff *skb = sc->ctxptr; + struct net_device *ndev = skb->dev; + + dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr, + sc->datasize, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + octeon_free_soft_command(oct, sc); + + if (octnet_iq_is_full(oct, sc->iq_no)) + return; + + if (netif_queue_stopped(ndev)) + netif_wake_queue(ndev); +} + +static int +lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev); + struct net_device *parent_ndev = vf_rep->parent_ndev; + struct octeon_device *oct = vf_rep->oct; + struct octeon_instr_pki_ih3 *pki_ih3; + struct octeon_soft_command *sc; + struct lio *parent_lio; + int status; + + parent_lio = GET_LIO(parent_ndev); + + if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) || + skb->len <= 0) + goto xmit_failed; + + if (octnet_iq_is_full(vf_rep->oct, parent_lio->txq)) { + dev_err(&oct->pci_dev->dev, "VF rep: Device IQ full\n"); + netif_stop_queue(ndev); + return NETDEV_TX_BUSY; + } + + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct, 0, 0, 0); + if (!sc) { + dev_err(&oct->pci_dev->dev, "VF rep: Soft command alloc failed\n"); + goto xmit_failed; + } + + /* Multiple buffers are not used for vf_rep packets. */ + if (skb_shinfo(skb)->nr_frags != 0) { + dev_err(&oct->pci_dev->dev, "VF rep: nr_frags != 0. 
Dropping packet\n"); + goto xmit_failed; + } + + sc->dmadptr = dma_map_single(&oct->pci_dev->dev, + skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&oct->pci_dev->dev, sc->dmadptr)) { + dev_err(&oct->pci_dev->dev, "VF rep: DMA mapping failed\n"); + goto xmit_failed; + } + + sc->virtdptr = skb->data; + sc->datasize = skb->len; + sc->ctxptr = skb; + sc->iq_no = parent_lio->txq; + + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_VF_REP_PKT, + vf_rep->ifidx, 0, 0); + pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3; + pki_ih3->tagtype = ORDERED_TAG; + + sc->callback = lio_vf_rep_packet_sent_callback; + sc->callback_arg = sc; + + status = octeon_send_soft_command(oct, sc); + if (status == IQ_SEND_FAILED) { + dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr, + sc->datasize, DMA_TO_DEVICE); + goto xmit_failed; + } + + if (status == IQ_SEND_STOP) + netif_stop_queue(ndev); + + netif_trans_update(ndev); + + return NETDEV_TX_OK; + +xmit_failed: + dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; +} + +static int +lio_vf_rep_attr_get(struct net_device *dev, struct switchdev_attr *attr) +{ + struct lio_vf_rep_desc *vf_rep = netdev_priv(dev); + struct net_device *parent_ndev = vf_rep->parent_ndev; + struct lio *lio = GET_LIO(parent_ndev); + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: + attr->u.ppid.id_len = ETH_ALEN; + ether_addr_copy(attr->u.ppid.id, + (void *)&lio->linfo.hw_addr + 2); + break; + + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static const struct switchdev_ops lio_vf_rep_switchdev_ops = { + .switchdev_port_attr_get = lio_vf_rep_attr_get, +}; + +static void +lio_vf_rep_fetch_stats(struct work_struct *work) +{ + struct cavium_wk *wk = (struct cavium_wk *)work; + struct lio_vf_rep_desc *vf_rep = wk->ctxptr; + struct lio_vf_rep_stats stats; + struct lio_vf_rep_req rep_cfg; + struct octeon_device *oct; + int ret; + + oct = vf_rep->oct; + + memset(&rep_cfg, 0, sizeof(rep_cfg)); + rep_cfg.req_type = LIO_VF_REP_REQ_STATS; + rep_cfg.ifidx = vf_rep->ifidx; + + ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, sizeof(rep_cfg), + &stats, sizeof(stats)); + + if (!ret) { + octeon_swap_8B_data((u64 *)&stats, (sizeof(stats) >> 3)); + memcpy(&vf_rep->stats, &stats, sizeof(stats)); + } + + schedule_delayed_work(&vf_rep->stats_wk.work, + msecs_to_jiffies(LIO_VF_REP_STATS_POLL_TIME_MS)); +} + +int +lio_vf_rep_create(struct octeon_device *oct) +{ + struct lio_vf_rep_desc *vf_rep; + struct net_device *ndev; + int i, num_vfs; + + if (!oct->sriov_info.sriov_enabled) + return 0; + + num_vfs = oct->sriov_info.num_vfs_alloced; + + oct->vf_rep_list.num_vfs = 0; + for (i = 0; i < num_vfs; i++) { + ndev = alloc_etherdev(sizeof(struct lio_vf_rep_desc)); + + if (!ndev) { + dev_err(&oct->pci_dev->dev, + "VF rep device %d creation failed\n", i); + goto cleanup; + } + + ndev->min_mtu = LIO_MIN_MTU_SIZE; + ndev->max_mtu = LIO_MAX_MTU_SIZE; + ndev->netdev_ops = &lio_vf_rep_ndev_ops; + SWITCHDEV_SET_OPS(ndev, &lio_vf_rep_switchdev_ops); + + vf_rep = netdev_priv(ndev); + memset(vf_rep, 0, sizeof(*vf_rep)); + + vf_rep->ndev = ndev; + vf_rep->oct = oct; + vf_rep->parent_ndev = oct->props[0].netdev; + vf_rep->ifidx = (oct->pf_num * 64) + i + 1; + + eth_hw_addr_random(ndev); + + if (register_netdev(ndev)) { + dev_err(&oct->pci_dev->dev, "VF rep nerdev registration failed\n"); + + free_netdev(ndev); + goto cleanup; + } + + netif_carrier_off(ndev); + + INIT_DELAYED_WORK(&vf_rep->stats_wk.work, + lio_vf_rep_fetch_stats); + vf_rep->stats_wk.ctxptr = (void *)vf_rep; 
+ schedule_delayed_work(&vf_rep->stats_wk.work, + msecs_to_jiffies + (LIO_VF_REP_STATS_POLL_TIME_MS)); + oct->vf_rep_list.num_vfs++; + oct->vf_rep_list.ndev[i] = ndev; + } + + if (octeon_register_dispatch_fn(oct, OPCODE_NIC, + OPCODE_NIC_VF_REP_PKT, + lio_vf_rep_pkt_recv, oct)) { + dev_err(&oct->pci_dev->dev, "VF rep Dispatch func registration failed\n"); + + goto cleanup; + } + + return 0; + +cleanup: + for (i = 0; i < oct->vf_rep_list.num_vfs; i++) { + ndev = oct->vf_rep_list.ndev[i]; + oct->vf_rep_list.ndev[i] = NULL; + if (ndev) { + vf_rep = netdev_priv(ndev); + cancel_delayed_work_sync + (&vf_rep->stats_wk.work); + unregister_netdev(ndev); + free_netdev(ndev); + } + } + + oct->vf_rep_list.num_vfs = 0; + + return -1; +} + +void +lio_vf_rep_destroy(struct octeon_device *oct) +{ + struct lio_vf_rep_desc *vf_rep; + struct net_device *ndev; + int i; + + if (!oct->sriov_info.sriov_enabled) + return; + + for (i = 0; i < oct->vf_rep_list.num_vfs; i++) { + ndev = oct->vf_rep_list.ndev[i]; + oct->vf_rep_list.ndev[i] = NULL; + if (ndev) { + vf_rep = netdev_priv(ndev); + cancel_delayed_work_sync + (&vf_rep->stats_wk.work); + netif_tx_disable(ndev); + netif_carrier_off(ndev); + + unregister_netdev(ndev); + free_netdev(ndev); + } + } + + oct->vf_rep_list.num_vfs = 0; +} diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.h b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.h new file mode 100644 index 000000000000..5a9ec9851426 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.h @@ -0,0 +1,47 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2017 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium, Inc. for more information + **********************************************************************/ + +/*! 
\file octeon_vf_main.h + * \brief Host Driver: This file defines vf_rep related macros and structures + */ +#ifndef __LIO_VF_REP_H__ +#define __LIO_VF_REP_H__ +#define LIO_VF_REP_REQ_TMO_MS 5000 +#define LIO_VF_REP_STATS_POLL_TIME_MS 200 + +struct lio_vf_rep_desc { + struct net_device *parent_ndev; + struct net_device *ndev; + struct octeon_device *oct; + struct lio_vf_rep_stats stats; + struct cavium_wk stats_wk; + atomic_t ifstate; + int ifidx; +}; + +struct lio_vf_rep_sc_ctx { + struct completion complete; +}; + +int lio_vf_rep_create(struct octeon_device *oct); +void lio_vf_rep_destroy(struct octeon_device *oct); +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 2033a65cd97a..3aceb78caa86 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -89,6 +89,9 @@ enum octeon_tag_type { #define VF_DRV_REMOVED -1 #define VF_DRV_MACADDR_CHANGED 2 +#define OPCODE_NIC_VF_REP_PKT 0x15 +#define OPCODE_NIC_VF_REP_CMD 0x16 + #define CORE_DRV_TEST_SCATTER_OP 0xFFF5 /* Application codes advertised by the core driver initialization packet. */ @@ -909,4 +912,50 @@ struct lio_time { s64 sec; /* seconds */ s64 nsec; /* nanoseconds */ }; + +struct lio_vf_rep_stats { + u64 tx_packets; + u64 tx_bytes; + u64 tx_dropped; + + u64 rx_packets; + u64 rx_bytes; + u64 rx_dropped; +}; + +enum lio_vf_rep_req_type { + LIO_VF_REP_REQ_NONE, + LIO_VF_REP_REQ_STATE, + LIO_VF_REP_REQ_MTU, + LIO_VF_REP_REQ_STATS +}; + +enum { + LIO_VF_REP_STATE_DOWN, + LIO_VF_REP_STATE_UP +}; + +struct lio_vf_rep_req { + u8 req_type; + u8 ifidx; + u8 rsvd[6]; + + union { + struct lio_vf_rep_mtu { + u32 mtu; + u32 rsvd; + } rep_mtu; + + struct lio_vf_rep_state { + u8 state; + u8 rsvd[7]; + } rep_state; + }; +}; + +struct lio_vf_rep_resp { + u64 rh; + u8 status; + u8 rsvd[7]; +}; #endif diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index 33d19c4509bc..c97e067a8974 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -391,6 +391,11 @@ struct octeon_ioq_vector { u32 ioq_num; }; +struct lio_vf_rep_list { + int num_vfs; + struct net_device *ndev[CN23XX_MAX_VFS_PER_PF]; +}; + /** The Octeon device. * Each Octeon device has this structure to represent all its * components. @@ -568,6 +573,8 @@ struct octeon_device { atomic_t *adapter_fw_state; /* per-adapter, lio_fw_state */ bool ptp_enable; + + struct lio_vf_rep_list vf_rep_list; }; #define OCT_DRV_ONLINE 1 -- cgit v1.2.3 From d4be8ebefb449c43b7daa5c9d23b22cd20c17258 Mon Sep 17 00:00:00 2001 From: Vijaya Mohan Guvva Date: Tue, 31 Oct 2017 16:04:57 -0700 Subject: liquidio: Configure switchdev with devlink Enable and disable switchdev on SRIOV capable LiquidIO NIC with devlink. Create representor netdev for each SRIOV VF function on SRIOV enable and do the cleanup on SRIOV disable. Signed-off-by: Vijaya Mohan Guvva Signed-off-by: Satanand Burla Signed-off-by: Raghu Vatsavayi Signed-off-by: Felix Manlunas Signed-off-by: David S.
Miller --- drivers/net/ethernet/cavium/Kconfig | 1 + drivers/net/ethernet/cavium/liquidio/lio_main.c | 93 ++++++++++++++++++++++ drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c | 6 ++ .../net/ethernet/cavium/liquidio/liquidio_common.h | 1 + .../net/ethernet/cavium/liquidio/octeon_device.h | 7 ++ 5 files changed, 108 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index dcbce6cac63e..63be75eb34d2 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -53,6 +53,7 @@ config THUNDER_NIC_RGX config LIQUIDIO tristate "Cavium LiquidIO support" depends on 64BIT + depends on MAY_USE_DEVLINK imply PTP_1588_CLOCK select FW_LOADER select LIBCRC32C diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 80784122e6e9..f27f0afd0ecf 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -35,6 +35,7 @@ #include "cn68xx_device.h" #include "cn23xx_pf_device.h" #include "liquidio_image.h" +#include "lio_vf_rep.h" MODULE_AUTHOR("Cavium Networks, "); MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver"); @@ -1603,6 +1604,8 @@ static int liquidio_stop_nic_module(struct octeon_device *oct) oct->cmd_resp_state = OCT_DRV_OFFLINE; spin_unlock_bh(&oct->cmd_resp_wqlock); + lio_vf_rep_destroy(oct); + for (i = 0; i < oct->ifcount; i++) { lio = GET_LIO(oct->props[i].netdev); for (j = 0; j < oct->num_oqs; j++) @@ -1613,6 +1616,12 @@ static int liquidio_stop_nic_module(struct octeon_device *oct) for (i = 0; i < oct->ifcount; i++) liquidio_destroy_nic_device(oct, i); + if (oct->devlink) { + devlink_unregister(oct->devlink); + devlink_free(oct->devlink); + oct->devlink = NULL; + } + dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); return 0; } @@ -3310,10 +3319,67 @@ static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, return 0; } +static int +liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + struct lio_devlink_priv *priv; + struct octeon_device *oct; + + priv = devlink_priv(devlink); + oct = priv->oct; + + *mode = oct->eswitch_mode; + + return 0; +} + +static int +liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode) +{ + struct lio_devlink_priv *priv; + struct octeon_device *oct; + int ret = 0; + + priv = devlink_priv(devlink); + oct = priv->oct; + + if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)) + return -EINVAL; + + if (oct->eswitch_mode == mode) + return 0; + + switch (mode) { + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + oct->eswitch_mode = mode; + ret = lio_vf_rep_create(oct); + break; + + case DEVLINK_ESWITCH_MODE_LEGACY: + lio_vf_rep_destroy(oct); + oct->eswitch_mode = mode; + break; + + default: + ret = -EINVAL; + } + + return ret; +} + +static const struct devlink_ops liquidio_devlink_ops = { + .eswitch_mode_get = liquidio_eswitch_mode_get, + .eswitch_mode_set = liquidio_eswitch_mode_set, +}; + static int lio_pf_switchdev_attr_get(struct net_device *dev, struct switchdev_attr *attr) { struct lio *lio = GET_LIO(dev); + struct octeon_device *oct = lio->oct_dev; + + if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return -EOPNOTSUPP; switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: @@ -3462,6 +3528,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) u32 resp_size, ctx_size, data_size; u32 ifidx_or_pfnum; struct lio_version *vdata; + struct devlink 
*devlink; + struct lio_devlink_priv *lio_devlink; /* This is to handle link status changes */ octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, @@ -3794,6 +3862,26 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) octeon_free_soft_command(octeon_dev, sc); } + devlink = devlink_alloc(&liquidio_devlink_ops, + sizeof(struct lio_devlink_priv)); + if (!devlink) { + dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n"); + goto setup_nic_wait_intr; + } + + lio_devlink = devlink_priv(devlink); + lio_devlink->oct = octeon_dev; + + if (devlink_register(devlink, &octeon_dev->pci_dev->dev)) { + devlink_free(devlink); + dev_err(&octeon_dev->pci_dev->dev, + "devlink registration failed\n"); + goto setup_nic_wait_intr; + } + + octeon_dev->devlink = devlink; + octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + return 0; setup_nic_dev_fail: @@ -3888,6 +3976,7 @@ static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs) } if (!num_vfs) { + lio_vf_rep_destroy(oct); ret = lio_pci_sriov_disable(oct); } else if (num_vfs > oct->sriov_info.max_vfs) { dev_err(&oct->pci_dev->dev, @@ -3899,6 +3988,10 @@ static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs) ret = octeon_enable_sriov(oct); dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n", oct->pf_num, num_vfs); + ret = lio_vf_rep_create(oct); + if (ret) + dev_info(&oct->pci_dev->dev, + "vf representor create failed"); } return ret; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c index 67ff7a143e9e..de0c80d150f3 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c @@ -515,6 +515,9 @@ lio_vf_rep_create(struct octeon_device *oct) struct net_device *ndev; int i, num_vfs; + if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return 0; + if (!oct->sriov_info.sriov_enabled) return 0; @@ -599,6 +602,9 @@ lio_vf_rep_destroy(struct octeon_device *oct) struct net_device *ndev; int i; + if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return; + if (!oct->sriov_info.sriov_enabled) return; diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 3aceb78caa86..441cc78faff1 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -114,6 +114,7 @@ enum octeon_tag_type { /* App specific capabilities from firmware to pf driver */ #define LIQUIDIO_TIME_SYNC_CAP 0x1 +#define LIQUIDIO_SWITCHDEV_CAP 0x2 static inline u32 incr_index(u32 index, u32 count, u32 max) { diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index c97e067a8974..63b0c758a0a6 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -23,6 +23,7 @@ #define _OCTEON_DEVICE_H_ #include +#include /** PCI VendorId Device Id */ #define OCTEON_CN68XX_PCIID 0x91177d @@ -396,6 +397,10 @@ struct lio_vf_rep_list { struct net_device *ndev[CN23XX_MAX_VFS_PER_PF]; }; +struct lio_devlink_priv { + struct octeon_device *oct; +}; + /** The Octeon device. * Each Octeon device has this structure to represent all its * components. 
From 638f5b90d46016372a8e3e0a434f199cc5e12b8c Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Tue, 31 Oct 2017 18:16:05 -0700 Subject: bpf: reduce verifier memory consumption The verifier got progressively smarter over time, and the size of its internal state grew as well. Time to reduce the memory consumption. Before: sizeof(struct bpf_verifier_state) = 6520 After: sizeof(struct bpf_verifier_state) = 896 This is done by observing that the majority of BPF programs use little to no stack, whereas the verifier always kept all 512 stack slots ready. Instead, dynamically reallocate the verifier state when stack access is detected. The runtime difference before vs. after is within the noise. The number of processed instructions stays the same. Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 8 +- include/linux/bpf_verifier.h | 16 +- kernel/bpf/verifier.c | 437 ++++++++++++++-------- 3 files changed, 305 insertions(+), 156 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 3d3dcac1c942..a8c7615546a9 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -76,9 +76,9 @@ nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, static int nfp_bpf_check_exit(struct nfp_prog *nfp_prog, - const struct bpf_verifier_env *env) + struct bpf_verifier_env *env) { - const struct bpf_reg_state *reg0 = &env->cur_state.regs[0]; + const struct bpf_reg_state *reg0 = cur_regs(env) + BPF_REG_0; u64 imm; if (nfp_prog->act == NN_ACT_XDP) @@ -144,9 +144,9 @@ nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog, static int nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, - const struct bpf_verifier_env *env, u8 reg_no) + struct bpf_verifier_env *env, u8 reg_no) { - const struct bpf_reg_state *reg = &env->cur_state.regs[reg_no]; + const struct bpf_reg_state *reg = cur_regs(env) + reg_no; int err; if (reg->type != PTR_TO_CTX && diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index feeaea93d959..3b0976aaac75 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -88,14 +88,19 @@ enum bpf_stack_slot_type { #define BPF_REG_SIZE 8 /* size of eBPF register in bytes */ +struct bpf_stack_state { + struct bpf_reg_state spilled_ptr; + u8 slot_type[BPF_REG_SIZE]; +}; + /* state of the program: * type of all registers and stack info */ struct bpf_verifier_state { struct bpf_reg_state regs[MAX_BPF_REG]; - u8 stack_slot_type[MAX_BPF_STACK]; - struct bpf_reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE]; struct bpf_verifier_state *parent; + int allocated_stack; + struct bpf_stack_state *stack; }; /* linked list of verifier states used to prune search */ @@ -145,7 +150,7 @@ struct bpf_verifier_env { struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ int stack_size; /* number of states to be processed */ bool strict_alignment; /* perform strict pointer alignment checks */ - struct bpf_verifier_state cur_state; /* current verifier state */ + struct bpf_verifier_state *cur_state; /* current verifier state */ struct bpf_verifier_state_list
**explored_states; /* search pruning optimization */ const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */ void *analyzer_priv; /* pointer to external analyzer's private data */ @@ -159,6 +164,11 @@ struct bpf_verifier_env { struct bpf_verifer_log log; }; +static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) +{ + return env->cur_state->regs; +} + int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops, void *priv); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index d906775e12c1..5f26f7ad124f 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -276,43 +276,132 @@ static void print_verifier_state(struct bpf_verifier_env *env, verbose(env, ")"); } } - for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { - if (state->stack_slot_type[i] == STACK_SPILL) - verbose(env, " fp%d=%s", -MAX_BPF_STACK + i, - reg_type_str[state->spilled_regs[i / BPF_REG_SIZE].type]); + for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { + if (state->stack[i].slot_type[0] == STACK_SPILL) + verbose(env, " fp%d=%s", + -MAX_BPF_STACK + i * BPF_REG_SIZE, + reg_type_str[state->stack[i].spilled_ptr.type]); } verbose(env, "\n"); } -static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx) +static int copy_stack_state(struct bpf_verifier_state *dst, + const struct bpf_verifier_state *src) { - struct bpf_verifier_stack_elem *elem; - int insn_idx; + if (!src->stack) + return 0; + if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) { + /* internal bug, make state invalid to reject the program */ + memset(dst, 0, sizeof(*dst)); + return -EFAULT; + } + memcpy(dst->stack, src->stack, + sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE)); + return 0; +} + +/* do_check() starts with zero-sized stack in struct bpf_verifier_state to + * make it consume minimal amount of memory. check_stack_write() access from + * the program calls into realloc_verifier_state() to grow the stack size. + * Note there is a non-zero 'parent' pointer inside bpf_verifier_state + * which this function copies over. 
It points to previous bpf_verifier_state + * which is never reallocated + */ +static int realloc_verifier_state(struct bpf_verifier_state *state, int size, + bool copy_old) +{ + u32 old_size = state->allocated_stack; + struct bpf_stack_state *new_stack; + int slot = size / BPF_REG_SIZE; + + if (size <= old_size || !size) { + if (copy_old) + return 0; + state->allocated_stack = slot * BPF_REG_SIZE; + if (!size && old_size) { + kfree(state->stack); + state->stack = NULL; + } + return 0; + } + new_stack = kmalloc_array(slot, sizeof(struct bpf_stack_state), + GFP_KERNEL); + if (!new_stack) + return -ENOMEM; + if (copy_old) { + if (state->stack) + memcpy(new_stack, state->stack, + sizeof(*new_stack) * (old_size / BPF_REG_SIZE)); + memset(new_stack + old_size / BPF_REG_SIZE, 0, + sizeof(*new_stack) * (size - old_size) / BPF_REG_SIZE); + } + state->allocated_stack = slot * BPF_REG_SIZE; + kfree(state->stack); + state->stack = new_stack; + return 0; +} + +static void free_verifier_state(struct bpf_verifier_state *state) +{ + kfree(state->stack); + kfree(state); +} + +/* copy verifier state from src to dst growing dst stack space + * when necessary to accommodate larger src stack + */ +static int copy_verifier_state(struct bpf_verifier_state *dst, + const struct bpf_verifier_state *src) +{ + int err; + + err = realloc_verifier_state(dst, src->allocated_stack, false); + if (err) + return err; + memcpy(dst, src, offsetof(struct bpf_verifier_state, allocated_stack)); + return copy_stack_state(dst, src); +} + +static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx, + int *insn_idx) +{ + struct bpf_verifier_state *cur = env->cur_state; + struct bpf_verifier_stack_elem *elem, *head = env->head; + int err; if (env->head == NULL) - return -1; + return -ENOENT; - memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state)); - insn_idx = env->head->insn_idx; + if (cur) { + err = copy_verifier_state(cur, &head->st); + if (err) + return err; + } + if (insn_idx) + *insn_idx = head->insn_idx; if (prev_insn_idx) - *prev_insn_idx = env->head->prev_insn_idx; - elem = env->head->next; - kfree(env->head); + *prev_insn_idx = head->prev_insn_idx; + elem = head->next; + kfree(head); env->head = elem; env->stack_size--; - return insn_idx; + return 0; } static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) { + struct bpf_verifier_state *cur = env->cur_state; struct bpf_verifier_stack_elem *elem; + int err; - elem = kmalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); + elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); if (!elem) goto err; - memcpy(&elem->st, &env->cur_state, sizeof(env->cur_state)); + err = copy_verifier_state(&elem->st, cur); + if (err) + return NULL; elem->insn_idx = insn_idx; elem->prev_insn_idx = prev_insn_idx; elem->next = env->head; @@ -325,7 +414,7 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, return &elem->st; err: /* pop all elements and return */ - while (pop_stack(env, NULL) >= 0); + while (!pop_stack(env, NULL, NULL)); return NULL; } @@ -550,7 +639,7 @@ static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno) static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, enum reg_arg_type t) { - struct bpf_reg_state *regs = env->cur_state.regs; + struct bpf_reg_state *regs = env->cur_state->regs; if (regno >= MAX_BPF_REG) { verbose(env, "R%d is invalid\n", regno); @@ -563,7 +652,7 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 
regno, verbose(env, "R%d !read_ok\n", regno); return -EACCES; } - mark_reg_read(&env->cur_state, regno); + mark_reg_read(env->cur_state, regno); } else { /* check whether register used as dest operand can be written to */ if (regno == BPF_REG_FP) { @@ -601,10 +690,21 @@ static int check_stack_write(struct bpf_verifier_env *env, struct bpf_verifier_state *state, int off, int size, int value_regno) { - int i, spi = (MAX_BPF_STACK + off) / BPF_REG_SIZE; + int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; + + err = realloc_verifier_state(state, round_up(slot + 1, BPF_REG_SIZE), + true); + if (err) + return err; /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, * so it's aligned access and [off, off + size) are within stack limits */ + if (!env->allow_ptr_leaks && + state->stack[spi].slot_type[0] == STACK_SPILL && + size != BPF_REG_SIZE) { + verbose(env, "attempt to corrupt spilled pointer on stack\n"); + return -EACCES; + } if (value_regno >= 0 && is_spillable_regtype(state->regs[value_regno].type)) { @@ -616,17 +716,18 @@ static int check_stack_write(struct bpf_verifier_env *env, } /* save register state */ - state->spilled_regs[spi] = state->regs[value_regno]; - state->spilled_regs[spi].live |= REG_LIVE_WRITTEN; + state->stack[spi].spilled_ptr = state->regs[value_regno]; + state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; for (i = 0; i < BPF_REG_SIZE; i++) - state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL; + state->stack[spi].slot_type[i] = STACK_SPILL; } else { /* regular write of data into stack */ - state->spilled_regs[spi] = (struct bpf_reg_state) {}; + state->stack[spi].spilled_ptr = (struct bpf_reg_state) {}; for (i = 0; i < size; i++) - state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC; + state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = + STACK_MISC; } return 0; } @@ -637,10 +738,10 @@ static void mark_stack_slot_read(const struct bpf_verifier_state *state, int slo while (parent) { /* if read wasn't screened by an earlier write ... */ - if (state->spilled_regs[slot].live & REG_LIVE_WRITTEN) + if (state->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN) break; /* ... 
then we depend on parent's value */ - parent->spilled_regs[slot].live |= REG_LIVE_READ; + parent->stack[slot].spilled_ptr.live |= REG_LIVE_READ; state = parent; parent = state->parent; } @@ -650,34 +751,37 @@ static int check_stack_read(struct bpf_verifier_env *env, struct bpf_verifier_state *state, int off, int size, int value_regno) { - u8 *slot_type; - int i, spi; + int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; + u8 *stype; - slot_type = &state->stack_slot_type[MAX_BPF_STACK + off]; + if (state->allocated_stack <= slot) { + verbose(env, "invalid read from stack off %d+0 size %d\n", + off, size); + return -EACCES; + } + stype = state->stack[spi].slot_type; - if (slot_type[0] == STACK_SPILL) { + if (stype[0] == STACK_SPILL) { if (size != BPF_REG_SIZE) { verbose(env, "invalid size of register spill\n"); return -EACCES; } for (i = 1; i < BPF_REG_SIZE; i++) { - if (slot_type[i] != STACK_SPILL) { + if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) { verbose(env, "corrupted spill memory\n"); return -EACCES; } } - spi = (MAX_BPF_STACK + off) / BPF_REG_SIZE; - if (value_regno >= 0) { /* restore register state from stack */ - state->regs[value_regno] = state->spilled_regs[spi]; + state->regs[value_regno] = state->stack[spi].spilled_ptr; mark_stack_slot_read(state, spi); } return 0; } else { for (i = 0; i < size; i++) { - if (slot_type[i] != STACK_MISC) { + if (stype[(slot - i) % BPF_REG_SIZE] != STACK_MISC) { verbose(env, "invalid read from stack off %d+%d size %d\n", off, i, size); return -EACCES; @@ -694,7 +798,8 @@ static int check_stack_read(struct bpf_verifier_env *env, static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off, int size) { - struct bpf_map *map = env->cur_state.regs[regno].map_ptr; + struct bpf_reg_state *regs = cur_regs(env); + struct bpf_map *map = regs[regno].map_ptr; if (off < 0 || size <= 0 || off + size > map->value_size) { verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", @@ -706,9 +811,9 @@ static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off, /* check read/write into a map element with possible variable offset */ static int check_map_access(struct bpf_verifier_env *env, u32 regno, - int off, int size) + int off, int size) { - struct bpf_verifier_state *state = &env->cur_state; + struct bpf_verifier_state *state = env->cur_state; struct bpf_reg_state *reg = &state->regs[regno]; int err; @@ -783,7 +888,7 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, static int __check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, int size) { - struct bpf_reg_state *regs = env->cur_state.regs; + struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = ®s[regno]; if (off < 0 || size <= 0 || (u64)off + size > reg->range) { @@ -797,7 +902,7 @@ static int __check_packet_access(struct bpf_verifier_env *env, u32 regno, static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, int size) { - struct bpf_reg_state *regs = env->cur_state.regs; + struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = ®s[regno]; int err; @@ -866,7 +971,7 @@ static bool __is_pointer_value(bool allow_ptr_leaks, static bool is_pointer_value(struct bpf_verifier_env *env, int regno) { - return __is_pointer_value(env->allow_ptr_leaks, &env->cur_state.regs[regno]); + return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno); } static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, @@ -968,8 +1073,9 @@ static int 
check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn int bpf_size, enum bpf_access_type t, int value_regno) { - struct bpf_verifier_state *state = &env->cur_state; - struct bpf_reg_state *reg = &state->regs[regno]; + struct bpf_verifier_state *state = env->cur_state; + struct bpf_reg_state *regs = cur_regs(env); + struct bpf_reg_state *reg = regs + regno; int size, err = 0; size = bpf_size_to_bytes(bpf_size); @@ -993,7 +1099,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn err = check_map_access(env, regno, off, size); if (!err && t == BPF_READ && value_regno >= 0) - mark_reg_unknown(env, state->regs, value_regno); + mark_reg_unknown(env, regs, value_regno); } else if (reg->type == PTR_TO_CTX) { enum bpf_reg_type reg_type = SCALAR_VALUE; @@ -1028,14 +1134,14 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn * case, we know the offset is zero. */ if (reg_type == SCALAR_VALUE) - mark_reg_unknown(env, state->regs, value_regno); + mark_reg_unknown(env, regs, value_regno); else - mark_reg_known_zero(env, state->regs, + mark_reg_known_zero(env, regs, value_regno); - state->regs[value_regno].id = 0; - state->regs[value_regno].off = 0; - state->regs[value_regno].range = 0; - state->regs[value_regno].type = reg_type; + regs[value_regno].id = 0; + regs[value_regno].off = 0; + regs[value_regno].range = 0; + regs[value_regno].type = reg_type; } } else if (reg->type == PTR_TO_STACK) { @@ -1061,19 +1167,12 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn if (env->prog->aux->stack_depth < -off) env->prog->aux->stack_depth = -off; - if (t == BPF_WRITE) { - if (!env->allow_ptr_leaks && - state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL && - size != BPF_REG_SIZE) { - verbose(env, "attempt to corrupt spilled pointer on stack\n"); - return -EACCES; - } + if (t == BPF_WRITE) err = check_stack_write(env, state, off, size, value_regno); - } else { + else err = check_stack_read(env, state, off, size, value_regno); - } } else if (reg_is_pkt_pointer(reg)) { if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { verbose(env, "cannot write into packet\n"); @@ -1087,7 +1186,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn } err = check_packet_access(env, regno, off, size); if (!err && t == BPF_READ && value_regno >= 0) - mark_reg_unknown(env, state->regs, value_regno); + mark_reg_unknown(env, regs, value_regno); } else { verbose(env, "R%d invalid mem access '%s'\n", regno, reg_type_str[reg->type]); @@ -1095,11 +1194,11 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn } if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && - state->regs[value_regno].type == SCALAR_VALUE) { + regs[value_regno].type == SCALAR_VALUE) { /* b/h/w load zero-extends, mark upper bits as known 0 */ - state->regs[value_regno].var_off = tnum_cast( - state->regs[value_regno].var_off, size); - __update_reg_bounds(&state->regs[value_regno]); + regs[value_regno].var_off = + tnum_cast(regs[value_regno].var_off, size); + __update_reg_bounds(®s[value_regno]); } return err; } @@ -1156,9 +1255,9 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { - struct bpf_verifier_state *state = &env->cur_state; + struct bpf_verifier_state *state = env->cur_state; struct bpf_reg_state *regs = state->regs; - int off, i; + int off, i, slot, 
spi; if (regs[regno].type != PTR_TO_STACK) { /* Allow zero-byte read from NULL, regardless of pointer type */ @@ -1198,7 +1297,11 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, } for (i = 0; i < access_size; i++) { - if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) { + slot = -(off + i) - 1; + spi = slot / BPF_REG_SIZE; + if (state->allocated_stack <= slot || + state->stack[spi].slot_type[slot % BPF_REG_SIZE] != + STACK_MISC) { verbose(env, "invalid indirect read from stack off %d+%d size %d\n", off, i, access_size); return -EACCES; @@ -1211,7 +1314,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { - struct bpf_reg_state *regs = env->cur_state.regs, *reg = ®s[regno]; + struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; switch (reg->type) { case PTR_TO_PACKET: @@ -1229,7 +1332,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, enum bpf_arg_type arg_type, struct bpf_call_arg_meta *meta) { - struct bpf_reg_state *regs = env->cur_state.regs, *reg = ®s[regno]; + struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; enum bpf_reg_type expected_type, type = reg->type; int err = 0; @@ -1514,7 +1617,7 @@ static int check_raw_mode(const struct bpf_func_proto *fn) */ static void clear_all_pkt_pointers(struct bpf_verifier_env *env) { - struct bpf_verifier_state *state = &env->cur_state; + struct bpf_verifier_state *state = env->cur_state; struct bpf_reg_state *regs = state->regs, *reg; int i; @@ -1522,10 +1625,10 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env) if (reg_is_pkt_pointer_any(®s[i])) mark_reg_unknown(env, regs, i); - for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { - if (state->stack_slot_type[i] != STACK_SPILL) + for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { + if (state->stack[i].slot_type[0] != STACK_SPILL) continue; - reg = &state->spilled_regs[i / BPF_REG_SIZE]; + reg = &state->stack[i].spilled_ptr; if (reg_is_pkt_pointer_any(reg)) __mark_reg_unknown(reg); } @@ -1533,9 +1636,8 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env) static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx) { - struct bpf_verifier_state *state = &env->cur_state; const struct bpf_func_proto *fn = NULL; - struct bpf_reg_state *regs = state->regs; + struct bpf_reg_state *regs; struct bpf_call_arg_meta meta; bool changes_data; int i, err; @@ -1603,6 +1705,7 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx) return err; } + regs = cur_regs(env); /* reset caller saved regs */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); @@ -1691,7 +1794,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, const struct bpf_reg_state *ptr_reg, const struct bpf_reg_state *off_reg) { - struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg; + struct bpf_reg_state *regs = cur_regs(env), *dst_reg; bool known = tnum_is_const(off_reg->var_off); s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; @@ -1703,13 +1806,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, dst_reg = ®s[dst]; if (WARN_ON_ONCE(known && (smin_val != smax_val))) { - print_verifier_state(env, &env->cur_state); + print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: known but bad 
sbounds\n"); return -EINVAL; } if (WARN_ON_ONCE(known && (umin_val != umax_val))) { - print_verifier_state(env, &env->cur_state); + print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: known but bad ubounds\n"); return -EINVAL; @@ -1890,7 +1993,7 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, struct bpf_reg_state *dst_reg, struct bpf_reg_state src_reg) { - struct bpf_reg_state *regs = env->cur_state.regs; + struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); bool src_known, dst_known; s64 smin_val, smax_val; @@ -2111,7 +2214,7 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn) { - struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg, *src_reg; + struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg; struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; u8 opcode = BPF_OP(insn->code); int rc; @@ -2185,12 +2288,12 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, /* Got here implies adding two SCALAR_VALUEs */ if (WARN_ON_ONCE(ptr_reg)) { - print_verifier_state(env, &env->cur_state); + print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: unexpected ptr_reg\n"); return -EINVAL; } if (WARN_ON(!src_reg)) { - print_verifier_state(env, &env->cur_state); + print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: no src_reg\n"); return -EINVAL; } @@ -2200,7 +2303,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, /* check validity of 32-bit and 64-bit arithmetic operations */ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) { - struct bpf_reg_state *regs = env->cur_state.regs; + struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); int err; @@ -2421,10 +2524,10 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state, /* keep the maximum range already checked */ regs[i].range = max(regs[i].range, new_range); - for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { - if (state->stack_slot_type[i] != STACK_SPILL) + for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { + if (state->stack[i].slot_type[0] != STACK_SPILL) continue; - reg = &state->spilled_regs[i / BPF_REG_SIZE]; + reg = &state->stack[i].spilled_ptr; if (reg->type == type && reg->id == dst_reg->id) reg->range = max_t(u16, reg->range, new_range); } @@ -2674,17 +2777,17 @@ static void mark_map_regs(struct bpf_verifier_state *state, u32 regno, for (i = 0; i < MAX_BPF_REG; i++) mark_map_reg(regs, i, id, is_null); - for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { - if (state->stack_slot_type[i] != STACK_SPILL) + for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { + if (state->stack[i].slot_type[0] != STACK_SPILL) continue; - mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE, id, is_null); + mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null); } } static int check_cond_jmp_op(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx) { - struct bpf_verifier_state *other_branch, *this_branch = &env->cur_state; + struct bpf_verifier_state *other_branch, *this_branch = env->cur_state; struct bpf_reg_state *regs = this_branch->regs, *dst_reg; u8 opcode = BPF_OP(insn->code); int err; @@ -2876,7 +2979,7 @@ static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn) /* verify BPF_LD_IMM64 instruction */ static int check_ld_imm(struct bpf_verifier_env *env, struct 
bpf_insn *insn) { - struct bpf_reg_state *regs = env->cur_state.regs; + struct bpf_reg_state *regs = cur_regs(env); int err; if (BPF_SIZE(insn->code) != BPF_DW) { @@ -2937,7 +3040,7 @@ static bool may_access_skb(enum bpf_prog_type type) */ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) { - struct bpf_reg_state *regs = env->cur_state.regs; + struct bpf_reg_state *regs = cur_regs(env); u8 mode = BPF_MODE(insn->code); int i, err; @@ -2999,7 +3102,7 @@ static int check_return_code(struct bpf_verifier_env *env) return 0; } - reg = &env->cur_state.regs[BPF_REG_0]; + reg = cur_regs(env) + BPF_REG_0; if (reg->type != SCALAR_VALUE) { verbose(env, "At program exit the register R0 is not a known value (%s)\n", reg_type_str[reg->type]); @@ -3363,6 +3466,57 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, return false; } +static bool stacksafe(struct bpf_verifier_state *old, + struct bpf_verifier_state *cur, + struct idpair *idmap) +{ + int i, spi; + + /* if explored stack has more populated slots than current stack + * such stacks are not equivalent + */ + if (old->allocated_stack > cur->allocated_stack) + return false; + + /* walk slots of the explored stack and ignore any additional + * slots in the current stack, since explored(safe) state + * didn't use them + */ + for (i = 0; i < old->allocated_stack; i++) { + spi = i / BPF_REG_SIZE; + + if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) + continue; + if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != + cur->stack[spi].slot_type[i % BPF_REG_SIZE]) + /* Ex: old explored (safe) state has STACK_SPILL in + * this stack slot, but current has has STACK_MISC -> + * this verifier states are not equivalent, + * return false to continue verification of this path + */ + return false; + if (i % BPF_REG_SIZE) + continue; + if (old->stack[spi].slot_type[0] != STACK_SPILL) + continue; + if (!regsafe(&old->stack[spi].spilled_ptr, + &cur->stack[spi].spilled_ptr, + idmap)) + /* when explored and current stack slot are both storing + * spilled registers, check that stored pointers types + * are the same as well. + * Ex: explored safe path could have stored + * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} + * but current path has stored: + * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} + * such verifier states are not equivalent. + * return false to continue verification of this path + */ + return false; + } + return true; +} + /* compare two verifier states * * all states stored in state_list are known to be valid, since @@ -3407,37 +3561,8 @@ static bool states_equal(struct bpf_verifier_env *env, goto out_free; } - for (i = 0; i < MAX_BPF_STACK; i++) { - if (old->stack_slot_type[i] == STACK_INVALID) - continue; - if (old->stack_slot_type[i] != cur->stack_slot_type[i]) - /* Ex: old explored (safe) state has STACK_SPILL in - * this stack slot, but current has has STACK_MISC -> - * this verifier states are not equivalent, - * return false to continue verification of this path - */ - goto out_free; - if (i % BPF_REG_SIZE) - continue; - if (old->stack_slot_type[i] != STACK_SPILL) - continue; - if (!regsafe(&old->spilled_regs[i / BPF_REG_SIZE], - &cur->spilled_regs[i / BPF_REG_SIZE], - idmap)) - /* when explored and current stack slot are both storing - * spilled registers, check that stored pointers types - * are the same as well. 
- * Ex: explored safe path could have stored - * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} - * but current path has stored: - * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} - * such verifier states are not equivalent. - * return false to continue verification of this path - */ - goto out_free; - else - continue; - } + if (!stacksafe(old, cur, idmap)) + goto out_free; ret = true; out_free: kfree(idmap); @@ -3473,17 +3598,19 @@ static bool do_propagate_liveness(const struct bpf_verifier_state *state, } } /* ... and stack slots */ - for (i = 0; i < MAX_BPF_STACK / BPF_REG_SIZE; i++) { - if (parent->stack_slot_type[i * BPF_REG_SIZE] != STACK_SPILL) + for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && + i < parent->allocated_stack / BPF_REG_SIZE; i++) { + if (parent->stack[i].slot_type[0] != STACK_SPILL) continue; - if (state->stack_slot_type[i * BPF_REG_SIZE] != STACK_SPILL) + if (state->stack[i].slot_type[0] != STACK_SPILL) continue; - if (parent->spilled_regs[i].live & REG_LIVE_READ) + if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ) continue; - if (writes && (state->spilled_regs[i].live & REG_LIVE_WRITTEN)) + if (writes && + (state->stack[i].spilled_ptr.live & REG_LIVE_WRITTEN)) continue; - if (state->spilled_regs[i].live & REG_LIVE_READ) { - parent->spilled_regs[i].live |= REG_LIVE_READ; + if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) { + parent->stack[i].spilled_ptr.live |= REG_LIVE_READ; touched = true; } } @@ -3513,6 +3640,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) { struct bpf_verifier_state_list *new_sl; struct bpf_verifier_state_list *sl; + struct bpf_verifier_state *cur = env->cur_state; int i; sl = env->explored_states[insn_idx]; @@ -3523,7 +3651,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) return 0; while (sl != STATE_LIST_MARK) { - if (states_equal(env, &sl->state, &env->cur_state)) { + if (states_equal(env, &sl->state, cur)) { /* reached equivalent register/stack state, * prune the search. * Registers read by the continuation are read by us. @@ -3534,7 +3662,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) * they'll be immediately forgotten as we're pruning * this state and will pop a new one. */ - propagate_liveness(&sl->state, &env->cur_state); + propagate_liveness(&sl->state, cur); return 1; } sl = sl->next; @@ -3546,16 +3674,16 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) * it will be rejected. Since there are no loops, we won't be * seeing this 'insn_idx' instruction again on the way to bpf_exit */ - new_sl = kmalloc(sizeof(struct bpf_verifier_state_list), GFP_USER); + new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL); if (!new_sl) return -ENOMEM; /* add new state to the head of linked list */ - memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state)); + copy_verifier_state(&new_sl->state, cur); new_sl->next = env->explored_states[insn_idx]; env->explored_states[insn_idx] = new_sl; /* connect new state to parentage chain */ - env->cur_state.parent = &new_sl->state; + cur->parent = &new_sl->state; /* clear write marks in current state: the writes we did are not writes * our child did, so they don't screen off its reads from us. * (There are no read marks in current state, because reads always mark @@ -3563,10 +3691,10 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) * explored_states can get read marks.) 
*/ for (i = 0; i < BPF_REG_FP; i++) - env->cur_state.regs[i].live = REG_LIVE_NONE; - for (i = 0; i < MAX_BPF_STACK / BPF_REG_SIZE; i++) - if (env->cur_state.stack_slot_type[i * BPF_REG_SIZE] == STACK_SPILL) - env->cur_state.spilled_regs[i].live = REG_LIVE_NONE; + cur->regs[i].live = REG_LIVE_NONE; + for (i = 0; i < cur->allocated_stack / BPF_REG_SIZE; i++) + if (cur->stack[i].slot_type[0] == STACK_SPILL) + cur->stack[i].spilled_ptr.live = REG_LIVE_NONE; return 0; } @@ -3581,15 +3709,19 @@ static int ext_analyzer_insn_hook(struct bpf_verifier_env *env, static int do_check(struct bpf_verifier_env *env) { - struct bpf_verifier_state *state = &env->cur_state; struct bpf_insn *insns = env->prog->insnsi; - struct bpf_reg_state *regs = state->regs; + struct bpf_verifier_state *state; struct bpf_insn *insns = env->prog->insnsi; + struct bpf_reg_state *regs; int insn_cnt = env->prog->len; int insn_idx, prev_insn_idx = 0; int insn_processed = 0; bool do_print_state = false; - init_reg_state(env, regs); + state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); + if (!state) + return -ENOMEM; + env->cur_state = state; + init_reg_state(env, state->regs); state->parent = NULL; insn_idx = 0; for (;;) { @@ -3637,7 +3769,7 @@ static int do_check(struct bpf_verifier_env *env) else verbose(env, "\nfrom %d to %d:", prev_insn_idx, insn_idx); - print_verifier_state(env, &env->cur_state); + print_verifier_state(env, state); do_print_state = false; } @@ -3651,6 +3783,7 @@ static int do_check(struct bpf_verifier_env *env) if (err) return err; + regs = cur_regs(env); if (class == BPF_ALU || class == BPF_ALU64) { err = check_alu_op(env, insn); if (err) @@ -3818,8 +3951,10 @@ static int do_check(struct bpf_verifier_env *env) if (err) return err; process_bpf_exit: - insn_idx = pop_stack(env, &prev_insn_idx); - if (insn_idx < 0) { + err = pop_stack(env, &prev_insn_idx, &insn_idx); + if (err < 0) { + if (err != -ENOENT) + return err; break; } else { do_print_state = true; @@ -4359,9 +4494,11 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); ret = do_check(env); + free_verifier_state(env->cur_state); + env->cur_state = NULL; skip_full_check: - while (pop_stack(env, NULL) >= 0); + while (!pop_stack(env, NULL, NULL)); free_states(env); if (ret == 0) @@ -4464,9 +4601,11 @@ int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops, env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); ret = do_check(env); + free_verifier_state(env->cur_state); + env->cur_state = NULL; skip_full_check: - while (pop_stack(env, NULL) >= 0); + while (!pop_stack(env, NULL, NULL)); free_states(env); mutex_unlock(&bpf_verifier_lock); -- cgit v1.2.3 From 1f279233affe115dd3f65c89716a4f4315bb4cfe Mon Sep 17 00:00:00 2001 From: David Ahern Date: Fri, 27 Oct 2017 17:37:14 -0700 Subject: mlxsw: spectrum_router: Return extack message on abort due to fib rules Adding a FIB rule on a spectrum platform silently aborts FIB offload: $ ip ru add pref 99 from all to 192.168.1.1 table 10 $ dmesg -c [ 623.144736] mlxsw_spectrum 0000:03:00.0: FIB abort triggered. Note that FIB entries are no longer being offloaded to this device. This patch reworks FIB rule handling to return a message to the user: $ ip ru add pref 99 from all to 8.8.8.8 table 11 Error: spectrum: FIB rules not supported. Aborting offload. spectrum currently only checks whether the fib rule is a default rule or an l3mdev rule, both of which it knows how to handle. For any other rule, it aborts FIB offload. Move the processing to check the rule type inline with the user request. If the rule is an unsupported one, then a work queue entry is used to abort the offload. Change the rule delete handling to just return since it does nothing at the moment. Signed-off-by: David Ahern Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller
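Condensed, the pattern the diff below implements looks roughly like this (a sketch only, not the driver code; rule_is_supported() is a hypothetical stand-in for the per-family default/l3mdev checks):

static int example_rule_event(unsigned long event,
			      struct fib_notifier_info *info)
{
	struct netlink_ext_ack *extack = info->extack;

	if (event == FIB_EVENT_RULE_DEL)
		return 0;	/* delete needs no action at the moment */

	if (!rule_is_supported(info)) {	/* hypothetical helper */
		/* the message rides back on the user's netlink request */
		NL_SET_ERR_MSG(extack, "spectrum: FIB rules not supported. Aborting offload");
		return -1;
	}
	return 0;
}

Because the check now runs synchronously in the notifier call chain rather than in deferred work, the extack message can reach the process that issued the request.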
--- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 108 +++++++++++++-------- 1 file changed, 66 insertions(+), 42 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 27b632cac991..9fe4cdb23189 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -5234,7 +5234,6 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work) struct mlxsw_sp_fib_event_work *fib_work = container_of(work, struct mlxsw_sp_fib_event_work, work); struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; - struct fib_rule *rule; bool replace, append; int err; @@ -5256,12 +5255,11 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work) mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info); fib_info_put(fib_work->fen_info.fi); break; - case FIB_EVENT_RULE_ADD: /* fall through */ - case FIB_EVENT_RULE_DEL: - rule = fib_work->fr_info.rule; - if (!fib4_rule_default(rule) && !rule->l3mdev) - mlxsw_sp_router_fib_abort(mlxsw_sp); - fib_rule_put(rule); + case FIB_EVENT_RULE_ADD: + /* if we get here, a rule was added that we do not support. + * just do the fib_abort + */ + mlxsw_sp_router_fib_abort(mlxsw_sp); break; case FIB_EVENT_NH_ADD: /* fall through */ case FIB_EVENT_NH_DEL: @@ -5279,7 +5277,6 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work) struct mlxsw_sp_fib_event_work *fib_work = container_of(work, struct mlxsw_sp_fib_event_work, work); struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; - struct fib_rule *rule; bool replace; int err; @@ -5298,12 +5295,11 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work) mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt); mlxsw_sp_rt6_release(fib_work->fen6_info.rt); break; - case FIB_EVENT_RULE_ADD: /* fall through */ - case FIB_EVENT_RULE_DEL: - rule = fib_work->fr_info.rule; - if (!fib6_rule_default(rule) && !rule->l3mdev) - mlxsw_sp_router_fib_abort(mlxsw_sp); - fib_rule_put(rule); + case FIB_EVENT_RULE_ADD: + /* if we get here, a rule was added that we do not support. + * just do the fib_abort + */ + mlxsw_sp_router_fib_abort(mlxsw_sp); break; } rtnl_unlock(); @@ -5315,7 +5311,6 @@ static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work) struct mlxsw_sp_fib_event_work *fib_work = container_of(work, struct mlxsw_sp_fib_event_work, work); struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; - struct fib_rule *rule; bool replace; int err; @@ -5347,12 +5342,11 @@ static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work) &fib_work->ven_info); dev_put(fib_work->ven_info.dev); break; - case FIB_EVENT_RULE_ADD: /* fall through */ - case FIB_EVENT_RULE_DEL: - rule = fib_work->fr_info.rule; - if (!ipmr_rule_default(rule) && !rule->l3mdev) - mlxsw_sp_router_fib_abort(mlxsw_sp); - fib_rule_put(rule); + case FIB_EVENT_RULE_ADD: + /* if we get here, a rule was added that we do not support.
+ * just do the fib_abort + */ + mlxsw_sp_router_fib_abort(mlxsw_sp); break; } rtnl_unlock(); @@ -5363,7 +5357,6 @@ static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work, struct fib_notifier_info *info) { struct fib_entry_notifier_info *fen_info; - struct fib_rule_notifier_info *fr_info; struct fib_nh_notifier_info *fnh_info; switch (fib_work->event) { @@ -5379,13 +5372,6 @@ static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work, */ fib_info_hold(fib_work->fen_info.fi); break; - case FIB_EVENT_RULE_ADD: /* fall through */ - case FIB_EVENT_RULE_DEL: - fr_info = container_of(info, struct fib_rule_notifier_info, - info); - fib_work->fr_info = *fr_info; - fib_rule_get(fib_work->fr_info.rule); - break; case FIB_EVENT_NH_ADD: /* fall through */ case FIB_EVENT_NH_DEL: fnh_info = container_of(info, struct fib_nh_notifier_info, @@ -5400,7 +5386,6 @@ static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work, struct fib_notifier_info *info) { struct fib6_entry_notifier_info *fen6_info; - struct fib_rule_notifier_info *fr_info; switch (fib_work->event) { case FIB_EVENT_ENTRY_REPLACE: /* fall through */ @@ -5411,13 +5396,6 @@ static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work, fib_work->fen6_info = *fen6_info; rt6_hold(fib_work->fen6_info.rt); break; - case FIB_EVENT_RULE_ADD: /* fall through */ - case FIB_EVENT_RULE_DEL: - fr_info = container_of(info, struct fib_rule_notifier_info, - info); - fib_work->fr_info = *fr_info; - fib_rule_get(fib_work->fr_info.rule); - break; } } @@ -5437,12 +5415,47 @@ mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work, memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info)); dev_hold(fib_work->ven_info.dev); break; - case FIB_EVENT_RULE_ADD: /* fall through */ - case FIB_EVENT_RULE_DEL: - memcpy(&fib_work->fr_info, info, sizeof(fib_work->fr_info)); - fib_rule_get(fib_work->fr_info.rule); + } +} + +static int mlxsw_sp_router_fib_rule_event(unsigned long event, + struct fib_notifier_info *info, + struct mlxsw_sp *mlxsw_sp) +{ + struct netlink_ext_ack *extack = info->extack; + struct fib_rule_notifier_info *fr_info; + struct fib_rule *rule; + int err = 0; + + /* nothing to do at the moment */ + if (event == FIB_EVENT_RULE_DEL) + return 0; + + if (mlxsw_sp->router->aborted) + return 0; + + fr_info = container_of(info, struct fib_rule_notifier_info, info); + rule = fr_info->rule; + + switch (info->family) { + case AF_INET: + if (!fib4_rule_default(rule) && !rule->l3mdev) + err = -1; + break; + case AF_INET6: + if (!fib6_rule_default(rule) && !rule->l3mdev) + err = -1; + break; + case RTNL_FAMILY_IPMR: + if (!ipmr_rule_default(rule) && !rule->l3mdev) + err = -1; break; } + + if (err < 0) + NL_SET_ERR_MSG(extack, "spectrum: FIB rules not supported. 
Aborting offload"); + + return err; } /* Called with rcu_read_lock() */ @@ -5452,17 +5465,28 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, struct mlxsw_sp_fib_event_work *fib_work; struct fib_notifier_info *info = ptr; struct mlxsw_sp_router *router; + int err; if (!net_eq(info->net, &init_net) || (info->family != AF_INET && info->family != AF_INET6 && info->family != RTNL_FAMILY_IPMR)) return NOTIFY_DONE; + router = container_of(nb, struct mlxsw_sp_router, fib_nb); + + switch (event) { + case FIB_EVENT_RULE_ADD: /* fall through */ + case FIB_EVENT_RULE_DEL: + err = mlxsw_sp_router_fib_rule_event(event, info, + router->mlxsw_sp); + if (!err) + return NOTIFY_DONE; + } + fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); if (WARN_ON(!fib_work)) return NOTIFY_BAD; - router = container_of(nb, struct mlxsw_sp_router, fib_nb); fib_work->mlxsw_sp = router->mlxsw_sp; fib_work->event = event; -- cgit v1.2.3 From aa7365e19f8410659ec30503cd8ce866a176c9f4 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Sat, 28 Oct 2017 05:05:46 +0000 Subject: net: bcmgenet: Avoid calling platform_device_put() twice in bcmgenet_mii_exit() Remove platform_device_put() call after platform_device_unregister() from function bcmgenet_mii_exit(), otherwise, we will call platform_device_put() twice. Fixes: 9a4e79697009 ("net: bcmgenet: utilize generic Broadcom UniMAC MDIO controller driver") Signed-off-by: Wei Yongjun Acked-by: Doug Berger Acked-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/genet/bcmmii.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index ba3fcfdaa0bc..5333274a283c 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -571,5 +571,4 @@ void bcmgenet_mii_exit(struct net_device *dev) of_phy_deregister_fixed_link(dn); of_node_put(priv->phy_dn); platform_device_unregister(priv->mii_pdev); - platform_device_put(priv->mii_pdev); } -- cgit v1.2.3 From e8992e408273334375bfe8f03267dcdef3a8c2e6 Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Sat, 28 Oct 2017 08:25:30 -0400 Subject: forcedeth: replace pci_alloc_consistent with dma_alloc_coherent The functions pci_alloc_consistent is obsolete. So it is replaced with dma_alloc_coherent Signed-off-by: Zhu Yanjun Signed-off-by: David S. 
Miller --- drivers/net/ethernet/nvidia/forcedeth.c | 61 ++++++++++++++++++++++----------- 1 file changed, 41 insertions(+), 20 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 88128ce61471..31a943860f32 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -1024,12 +1024,18 @@ static void free_rings(struct net_device *dev) if (!nv_optimized(np)) { if (np->rx_ring.orig) - pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), - np->rx_ring.orig, np->ring_addr); + dma_free_coherent(&np->pci_dev->dev, + sizeof(struct ring_desc) * + (np->rx_ring_size + + np->tx_ring_size), + np->rx_ring.orig, np->ring_addr); } else { if (np->rx_ring.ex) - pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), - np->rx_ring.ex, np->ring_addr); + dma_free_coherent(&np->pci_dev->dev, + sizeof(struct ring_desc_ex) * + (np->rx_ring_size + + np->tx_ring_size), + np->rx_ring.ex, np->ring_addr); } kfree(np->rx_skb); kfree(np->tx_skb); @@ -4596,13 +4602,17 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri /* allocate new rings */ if (!nv_optimized(np)) { - rxtx_ring = pci_alloc_consistent(np->pci_dev, - sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), - &ring_addr); + rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev, + sizeof(struct ring_desc) * + (ring->rx_pending + + ring->tx_pending), + &ring_addr, GFP_ATOMIC); } else { - rxtx_ring = pci_alloc_consistent(np->pci_dev, - sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), - &ring_addr); + rxtx_ring = dma_alloc_coherent(&np->pci_dev->dev, + sizeof(struct ring_desc_ex) * + (ring->rx_pending + + ring->tx_pending), + &ring_addr, GFP_ATOMIC); } rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL); tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL); @@ -4610,12 +4620,18 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri /* fall back to old rings */ if (!nv_optimized(np)) { if (rxtx_ring) - pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), - rxtx_ring, ring_addr); + dma_free_coherent(&np->pci_dev->dev, + sizeof(struct ring_desc) * + (ring->rx_pending + + ring->tx_pending), + rxtx_ring, ring_addr); } else { if (rxtx_ring) - pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), - rxtx_ring, ring_addr); + dma_free_coherent(&np->pci_dev->dev, + sizeof(struct ring_desc_ex) * + (ring->rx_pending + + ring->tx_pending), + rxtx_ring, ring_addr); } kfree(rx_skbuff); @@ -5740,16 +5756,21 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) np->tx_ring_size = TX_RING_DEFAULT; if (!nv_optimized(np)) { - np->rx_ring.orig = pci_alloc_consistent(pci_dev, - sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), - &np->ring_addr); + np->rx_ring.orig = dma_alloc_coherent(&pci_dev->dev, + sizeof(struct ring_desc) * + (np->rx_ring_size + + np->tx_ring_size), + &np->ring_addr, + GFP_ATOMIC); if (!np->rx_ring.orig) goto out_unmap; np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; } else { - np->rx_ring.ex = pci_alloc_consistent(pci_dev, - sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), - &np->ring_addr); + np->rx_ring.ex = 
dma_alloc_coherent(&pci_dev->dev, + sizeof(struct ring_desc_ex) * + (np->rx_ring_size + + np->tx_ring_size), + &np->ring_addr, GFP_ATOMIC); if (!np->rx_ring.ex) goto out_unmap; np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; -- cgit v1.2.3 From 2d1d7df8a3652697da7f7929791d555e6c5981c2 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Mon, 30 Oct 2017 11:23:28 +0100 Subject: net: mvpp2: set the Rx FIFO size depending on the port speeds for PPv2.2 The Rx FIFO size was set to the same value for all ports. This patch sets it depending on the maximum speed a given port can handle. This only works on PPv2.2. Signed-off-by: Antoine Tenart Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 52 +++++++++++++++++++++++++++++++----- 1 file changed, 46 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 794a3b6aa573..2b0ae35d2168 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -504,9 +504,13 @@ #define MVPP2_TX_DESC_ALIGN (MVPP2_DESC_ALIGNED_SIZE - 1) /* RX FIFO constants */ -#define MVPP2_RX_FIFO_PORT_DATA_SIZE 0x2000 -#define MVPP2_RX_FIFO_PORT_ATTR_SIZE 0x80 -#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80 +#define MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB 0x8000 +#define MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB 0x2000 +#define MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB 0x1000 +#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB 0x200 +#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB 0x80 +#define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB 0x40 +#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80 /* RX buffer constants */ #define MVPP2_SKB_SHINFO_SIZE \ @@ -7764,9 +7768,42 @@ static void mvpp2_rx_fifo_init(struct mvpp2 *priv) for (port = 0; port < MVPP2_MAX_PORTS; port++) { mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), - MVPP2_RX_FIFO_PORT_DATA_SIZE); + MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), - MVPP2_RX_FIFO_PORT_ATTR_SIZE); + MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB); + } + + mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, + MVPP2_RX_FIFO_PORT_MIN_PKT); + mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); +} + +static void mvpp22_rx_fifo_init(struct mvpp2 *priv) +{ + int port; + + /* The FIFO size parameters are set depending on the maximum speed a + * given port can handle: + * - Port 0: 10Gbps + * - Port 1: 2.5Gbps + * - Ports 2 and 3: 1Gbps + */ + + mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0), + MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB); + mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0), + MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB); + + mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1), + MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB); + mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1), + MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB); + + for (port = 2; port < MVPP2_MAX_PORTS; port++) { + mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), + MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB); + mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), + MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB); } mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, @@ -7870,7 +7907,10 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) } /* Rx Fifo Init */ - mvpp2_rx_fifo_init(priv); + if (priv->hw_version == MVPP21) + mvpp2_rx_fifo_init(priv); + else + mvpp22_rx_fifo_init(priv); if (priv->hw_version == MVPP21) writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, -- cgit v1.2.3 From 7c10f9742d76ec18bed5de14f5f4ed08859f7b7a Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Mon, 30 Oct 2017 11:23:29 +0100 Subject:
net: mvpp2: initialize the Tx FIFO size So far only the Rx FIFO size was initialized. For PPv2.2 the Tx FIFO size can be set as well. This patch initializes the Tx FIFO size for PPv2.2 controllers to 3K. Signed-off-by: Antoine Tenart Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 2b0ae35d2168..7a6e6ae0a074 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -38,11 +38,12 @@ #include #include -/* RX Fifo Registers */ +/* Fifo Registers */ #define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port)) #define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port)) #define MVPP2_RX_MIN_PKT_SIZE_REG 0x60 #define MVPP2_RX_FIFO_INIT_REG 0x64 +#define MVPP22_TX_FIFO_SIZE_REG(port) (0x8860 + 4 * (port)) /* RX DMA Top Registers */ #define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port)) @@ -512,6 +513,10 @@ #define MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB 0x40 #define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80 +/* TX FIFO constants */ +#define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa +#define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3 + /* RX buffer constants */ #define MVPP2_SKB_SHINFO_SIZE \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) @@ -7811,6 +7816,16 @@ static void mvpp22_rx_fifo_init(struct mvpp2 *priv) mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); } +/* Initialize Tx FIFO's */ +static void mvpp22_tx_fifo_init(struct mvpp2 *priv) +{ + int port; + + for (port = 0; port < MVPP2_MAX_PORTS; port++) + mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), + MVPP22_TX_FIFO_DATA_SIZE_3KB); +} + static void mvpp2_axi_init(struct mvpp2 *priv) { u32 val, rdval, wrval; @@ -7906,11 +7921,13 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) return err; } - /* Rx Fifo Init */ - if (priv->hw_version == MVPP21) + /* Fifo Init */ + if (priv->hw_version == MVPP21) { mvpp2_rx_fifo_init(priv); - else + } else { mvpp22_rx_fifo_init(priv); + mvpp22_tx_fifo_init(priv); + } if (priv->hw_version == MVPP21) writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, -- cgit v1.2.3 From 1d7d15d79fb4660bec3a86e748c50aac7c5d2121 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Mon, 30 Oct 2017 11:23:30 +0100 Subject: net: mvpp2: initialize the RSS tables This patch initializes the RSS tables to evenly distribute packets (based on their RSS hashes) across the port's Rx queues. This helps to handle packets on different CPUs and improves performance, as more queues are used in parallel. Signed-off-by: Antoine Tenart Signed-off-by: David S. Miller
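The even spread described above is easy to model outside the driver; a minimal, self-contained sketch (the queue count is an assumed example value, not read from hardware):

#include <stdio.h>

#define RSS_TABLE_ENTRIES 32	/* mirrors MVPP22_RSS_TABLE_ENTRIES below */

int main(void)
{
	unsigned int table[RSS_TABLE_ENTRIES];
	unsigned int nrxqs = 4;			/* assumed port Rx queue count */
	unsigned int i, hash = 0x12345678;	/* arbitrary flow hash */

	/* entry i points at Rx queue i % nrxqs, as in mvpp22_init_rss() */
	for (i = 0; i < RSS_TABLE_ENTRIES; i++)
		table[i] = i % nrxqs;

	printf("hash 0x%x -> rxq %u\n", hash, table[hash % RSS_TABLE_ENTRIES]);
	return 0;
}

A packet's hash indexes the 32-entry table and the stored value selects the Rx queue, so flows are spread evenly across queues and hence across CPUs.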
--- drivers/net/ethernet/marvell/mvpp2.c | 50 ++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 7a6e6ae0a074..54d80df1c1ac 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -83,6 +83,16 @@ #define MVPP2_PRS_TCAM_CTRL_REG 0x1230 #define MVPP2_PRS_TCAM_EN_MASK BIT(0) +/* RSS Registers */ +#define MVPP22_RSS_INDEX 0x1500 +#define MVPP22_RSS_INDEX_TABLE_ENTRY(idx) ((idx) << 8) +#define MVPP22_RSS_INDEX_TABLE(idx) ((idx) << 8) +#define MVPP22_RSS_INDEX_QUEUE(idx) ((idx) << 16) +#define MVPP22_RSS_TABLE_ENTRY 0x1508 +#define MVPP22_RSS_TABLE 0x1510 +#define MVPP22_RSS_TABLE_POINTER(p) (p) +#define MVPP22_RSS_WIDTH 0x150c + /* Classifier Registers */ #define MVPP2_CLS_MODE_REG 0x1800 #define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0) @@ -746,6 +756,10 @@ enum mvpp2_prs_l3_cast { #define MVPP2_CLS_FLOWS_TBL_SIZE 512 #define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3 #define MVPP2_CLS_LKP_TBL_SIZE 64 +#define MVPP2_CLS_RX_QUEUES 256 + +/* RSS constants */ +#define MVPP22_RSS_TABLE_ENTRIES 32 /* BM constants */ #define MVPP2_BM_POOLS_NUM 8 @@ -6788,6 +6802,39 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port) } } +static void mvpp22_init_rss(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + int i; + + /* Set the table width: replace the whole classifier Rx queue number + * with the ones configured in RSS table entries. + */ + mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(0)); + mvpp2_write(priv, MVPP22_RSS_WIDTH, 8); + + /* Loop through the classifier Rx Queues and map them to a RSS table. + * Map them all to the first table (0) by default. + */ + for (i = 0; i < MVPP2_CLS_RX_QUEUES; i++) { + mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(i)); + mvpp2_write(priv, MVPP22_RSS_TABLE, + MVPP22_RSS_TABLE_POINTER(0)); + } + + /* Configure the first table to evenly distribute the packets across + * real Rx Queues. The table entries map a hash to an port Rx Queue. + */ + for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) { + u32 sel = MVPP22_RSS_INDEX_TABLE(0) | + MVPP22_RSS_INDEX_TABLE_ENTRY(i); + mvpp2_write(priv, MVPP22_RSS_INDEX, sel); + + mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY, i % port->nrxqs); + } + +} + static int mvpp2_open(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); @@ -6862,6 +6909,9 @@ static int mvpp2_open(struct net_device *dev) mvpp2_start_dev(port); + if (priv->hw_version == MVPP22) + mvpp22_init_rss(port); + return 0; err_free_link_irq: -- cgit v1.2.3 From 1d17db08c056c1f7f4abbff6aff8711b7c3a3b7f Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Mon, 30 Oct 2017 11:23:31 +0100 Subject: net: mvpp2: limit TSO segments and use stop/wake thresholds Too many TSO descriptors can be required for the default queue size, when using small MSS values for example. Prevent this by adding a maximum number of allowed TSO segments (300). In addition, set stop and wake thresholds: stop the queue when there is no room for a single worst-case skb, and wake it up again when the number of used descriptors is low enough. Signed-off-by: Antoine Tenart Signed-off-by: David S. Miller
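The threshold arithmetic is worth working through once; a minimal sketch with assumed values (MAX_SKB_FRAGS is kernel-configuration dependent, 17 being a common default, and the queue size here is hypothetical):

#include <stdio.h>

#define MAX_SKB_FRAGS		17	/* assumed; config dependent */
#define MVPP2_MAX_TSO_SEGS	300
#define MVPP2_MAX_SKB_DESCS	(MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

int main(void)
{
	int size = 1024;			/* hypothetical Tx queue size */
	int stop = size - MVPP2_MAX_SKB_DESCS;	/* stop when count >= stop */
	int wake = stop / 2;			/* wake when count <= wake */

	printf("worst-case skb needs %d descs\n", MVPP2_MAX_SKB_DESCS);	/* 617 */
	printf("stop=%d wake=%d\n", stop, wake);	/* stop=407, wake=203 */
	return 0;
}

Stopping while MVPP2_MAX_SKB_DESCS descriptors are still free guarantees the queue never has to accept a worst-case GSO skb it cannot hold, and waking at half that point adds hysteresis so the queue does not thrash between the two states.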
--- drivers/net/ethernet/marvell/mvpp2.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 54d80df1c1ac..340b4d682951 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -493,6 +493,13 @@ /* Maximum number of TXQs used by single port */ #define MVPP2_MAX_TXQ 8 +/* MVPP2_MAX_TSO_SEGS is the maximum number of fragments to allow in the GSO + * skb. As we need a maximum of two descriptors per fragment (1 header, 1 data), + * multiply this value by two to count the maximum number of skb descs needed. + */ +#define MVPP2_MAX_TSO_SEGS 300 +#define MVPP2_MAX_SKB_DESCS (MVPP2_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) + /* Dfault number of RXQs in use */ #define MVPP2_DEFAULT_RXQ 4 @@ -1045,6 +1052,9 @@ struct mvpp2_txq_pcpu { */ int count; + int wake_threshold; + int stop_threshold; + /* Number of Tx DMA descriptors reserved for each CPU */ int reserved_num; @@ -5393,7 +5403,7 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, txq_pcpu->count -= tx_done; if (netif_tx_queue_stopped(nq)) - if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1) + if (txq_pcpu->count <= txq_pcpu->wake_threshold) netif_tx_wake_queue(nq); } @@ -5636,6 +5646,9 @@ static int mvpp2_txq_init(struct mvpp2_port *port, txq_pcpu->txq_put_index = 0; txq_pcpu->txq_get_index = 0; + txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS; + txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2; + txq_pcpu->tso_headers = dma_alloc_coherent(port->dev->dev.parent, txq_pcpu->size * TSO_HEADER_SIZE, @@ -6508,7 +6521,7 @@ out: wmb(); mvpp2_aggr_txq_pend_desc_add(port, frags); - if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) + if (txq_pcpu->count >= txq_pcpu->stop_threshold) netif_tx_stop_queue(nq); u64_stats_update_begin(&stats->syncp); @@ -7732,6 +7745,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, dev->features = features | NETIF_F_RXCSUM; dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO; dev->vlan_features |= features; + dev->gso_max_segs = MVPP2_MAX_TSO_SEGS; /* MTU range: 68 - 9676 */ dev->min_mtu = ETH_MIN_MTU; -- cgit v1.2.3 From 02856a3ba6333c536f13d27cc847fcb442a23d9b Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Mon, 30 Oct 2017 11:23:32 +0100 Subject: net: mvpp2: use the aggr txq size define everywhere Cosmetic patch using the MVPP2_AGGR_TXQ_SIZE everywhere instead of the size field of aggr_txq, as the size never changes and is always equal to the MVPP2_AGGR_TXQ_SIZE define. Signed-off-by: Antoine Tenart Signed-off-by: David S.
Miller --- drivers/net/ethernet/marvell/mvpp2.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 340b4d682951..981fedeef67b 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -5055,7 +5055,7 @@ static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending) static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv, struct mvpp2_tx_queue *aggr_txq, int num) { - if ((aggr_txq->count + num) > aggr_txq->size) { + if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) { /* Update number of occupied aggregated Tx descriptors */ int cpu = smp_processor_id(); u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu)); @@ -5063,7 +5063,7 @@ static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv, aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK; } - if ((aggr_txq->count + num) > aggr_txq->size) + if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) return -ENOMEM; return 0; @@ -5447,7 +5447,7 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev, if (!aggr_txq->descs) return -ENOMEM; - aggr_txq->last_desc = aggr_txq->size - 1; + aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1; /* Aggr TXQ no reset WA */ aggr_txq->next_desc_to_proc = mvpp2_read(priv, -- cgit v1.2.3 From 6eb5d375cefcbd60ebb4251b150ea95d47140fe0 Mon Sep 17 00:00:00 2001 From: Antoine Tenart Date: Mon, 30 Oct 2017 11:23:33 +0100 Subject: net: mvpp2: simplify the Tx desc set DMA logic Two functions were always used to set the DMA addresses in Tx descriptors, because this address is split into a base+offset in the descriptors. A mask was used to come up with the base and offset addresses and two functions were called, mvpp2_txdesc_dma_addr_set() and mvpp2_txdesc_offset_set(). This patch moves the base+offset calculation logic to mvpp2_txdesc_dma_addr_set(), and removes mvpp2_txdesc_offset_set() to simplify things. Signed-off-by: Antoine Tenart Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2.c | 39 ++++++++++++------------------------ 1 file changed, 13 insertions(+), 26 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 981fedeef67b..965b6a829a5d 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -1290,13 +1290,20 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, struct mvpp2_tx_desc *tx_desc, dma_addr_t dma_addr) { + dma_addr_t addr, offset; + + addr = dma_addr & ~MVPP2_TX_DESC_ALIGN; + offset = dma_addr & MVPP2_TX_DESC_ALIGN; + if (port->priv->hw_version == MVPP21) { - tx_desc->pp21.buf_dma_addr = dma_addr; + tx_desc->pp21.buf_dma_addr = addr; + tx_desc->pp21.packet_offset = offset; } else { - u64 val = (u64)dma_addr; + u64 val = (u64)addr; tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0); tx_desc->pp22.buf_dma_addr_ptp |= val; + tx_desc->pp22.packet_offset = offset; } } @@ -1339,16 +1346,6 @@ static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port, tx_desc->pp22.command = command; } -static void mvpp2_txdesc_offset_set(struct mvpp2_port *port, - struct mvpp2_tx_desc *tx_desc, - unsigned int offset) -{ - if (port->priv->hw_version == MVPP21) - tx_desc->pp21.packet_offset = offset; - else - tx_desc->pp22.packet_offset = offset; -} - static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port, struct mvpp2_tx_desc *tx_desc) { @@ -6292,10 +6289,7 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb, goto cleanup; } - mvpp2_txdesc_offset_set(port, tx_desc, - buf_dma_addr & MVPP2_TX_DESC_ALIGN); - mvpp2_txdesc_dma_addr_set(port, tx_desc, - buf_dma_addr & ~MVPP2_TX_DESC_ALIGN); + mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); if (i == (skb_shinfo(skb)->nr_frags - 1)) { /* Last descriptor */ @@ -6338,8 +6332,7 @@ static inline void mvpp2_tso_put_hdr(struct sk_buff *skb, addr = txq_pcpu->tso_headers_dma + txq_pcpu->txq_put_index * TSO_HEADER_SIZE; - mvpp2_txdesc_offset_set(port, tx_desc, addr & MVPP2_TX_DESC_ALIGN); - mvpp2_txdesc_dma_addr_set(port, tx_desc, addr & ~MVPP2_TX_DESC_ALIGN); + mvpp2_txdesc_dma_addr_set(port, tx_desc, addr); mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) | MVPP2_TXD_F_DESC | @@ -6368,10 +6361,7 @@ static inline int mvpp2_tso_put_data(struct sk_buff *skb, return -ENOMEM; } - mvpp2_txdesc_offset_set(port, tx_desc, - buf_dma_addr & MVPP2_TX_DESC_ALIGN); - mvpp2_txdesc_dma_addr_set(port, tx_desc, - buf_dma_addr & ~MVPP2_TX_DESC_ALIGN); + mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); if (!left) { mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC); @@ -6483,10 +6473,7 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) goto out; } - mvpp2_txdesc_offset_set(port, tx_desc, - buf_dma_addr & MVPP2_TX_DESC_ALIGN); - mvpp2_txdesc_dma_addr_set(port, tx_desc, - buf_dma_addr & ~MVPP2_TX_DESC_ALIGN); + mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr); tx_cmd = mvpp2_skb_tx_csum(port, skb); -- cgit v1.2.3 From 1556770a1a071435ba7e67c1bc809099dc1de849 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 30 Oct 2017 14:05:41 -0700 Subject: drivers/net: tundra: Convert timers to use timer_setup() In preparation for unconditionally passing the struct timer_list pointer to all timer callbacks, switch to using the new timer_setup() and from_timer() to pass the timer pointer explicitly. Cc: "David S. Miller" Cc: Philippe Reynes Cc: "yuval.shaia@oracle.com" Cc: Eric Dumazet Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook Signed-off-by: David S. Miller
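The conversion pattern is the same in every driver: the callback now takes a struct timer_list * instead of an opaque unsigned long, and recovers its enclosing structure with from_timer(), a container_of() wrapper. A minimal before/after sketch with a hypothetical private struct (the real tsi108 hunks follow):

	struct my_priv {
		struct net_device *dev;
		struct timer_list timer;
	};

	/* Old style: an opaque unsigned long, armed with
	 * setup_timer(&priv->timer, my_checker_old, (unsigned long)dev);
	 */
	static void my_checker_old(unsigned long dev_ptr)
	{
		struct net_device *dev = (struct net_device *)dev_ptr;
		/* ... use dev ... */
	}

	/* New style: armed with timer_setup(&priv->timer, my_checker, 0);
	 * from_timer(var, t, member) maps the timer_list pointer back to
	 * the structure embedding it.
	 */
	static void my_checker(struct timer_list *t)
	{
		struct my_priv *priv = from_timer(priv, t, timer);
		struct net_device *dev = priv->dev;
		/* ... use dev ... */
	}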
--- drivers/net/ethernet/tundra/tsi108_eth.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index c2d15d9c0c33..0624b71ab5d4 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -164,7 +164,7 @@ static struct platform_driver tsi_eth_driver = { }, }; -static void tsi108_timed_checker(unsigned long dev_ptr); +static void tsi108_timed_checker(struct timer_list *t); #ifdef DEBUG static void dump_eth_one(struct net_device *dev) @@ -1370,7 +1370,7 @@ static int tsi108_open(struct net_device *dev) napi_enable(&data->napi); - setup_timer(&data->timer, tsi108_timed_checker, (unsigned long)dev); + timer_setup(&data->timer, tsi108_timed_checker, 0); mod_timer(&data->timer, jiffies + 1); tsi108_restart_rx(data, dev); @@ -1666,10 +1666,10 @@ regs_fail: * Thus, we have to do it using a timer. */ -static void tsi108_timed_checker(unsigned long dev_ptr) +static void tsi108_timed_checker(struct timer_list *t) { - struct net_device *dev = (struct net_device *)dev_ptr; - struct tsi108_prv_data *data = netdev_priv(dev); + struct tsi108_prv_data *data = from_timer(data, t, timer); + struct net_device *dev = data->dev; tsi108_check_phy(dev); tsi108_check_rxring(dev); -- cgit v1.2.3 From 687d4f2bea322077cc70be0555ad2a1cd48b812a Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 31 Oct 2017 10:08:23 +0000 Subject: net: ethernet: slicoss: remove redundant initialization of idx Variable idx is being initialized and later overwritten by a new value in a do-loop without the initial value ever being read. Hence the initialization is redundant and can be removed. Cleans up clang warning: drivers/net/ethernet/alacritech/slicoss.c:358:15: warning: Value stored to 'idx' during its initialization is never read Signed-off-by: Colin Ian King Acked-by: Lino Sanfilippo Signed-off-by: David S. Miller --- drivers/net/ethernet/alacritech/slicoss.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c index 15a8096c60df..0b60921c392f 100644 --- a/drivers/net/ethernet/alacritech/slicoss.c +++ b/drivers/net/ethernet/alacritech/slicoss.c @@ -355,10 +355,10 @@ static void slic_xmit_complete(struct slic_device *sdev) { struct slic_tx_queue *txq = &sdev->txq; struct net_device *dev = sdev->netdev; - unsigned int idx = txq->done_idx; struct slic_tx_buffer *buff; unsigned int frames = 0; unsigned int bytes = 0; + unsigned int idx; /* Limit processing to SLIC_MAX_TX_COMPLETIONS frames to avoid that new * completions during processing keeps the loop running endlessly. -- cgit v1.2.3 From e0337f92f6f36040ba91215bbe47203c410f472f Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 31 Oct 2017 14:23:24 +0000 Subject: net: macb: remove redundant assignment to variable work_done Variable work_done is set to zero and this value is never read, instead it is set to another value a few statements later. Remove the redundant assignment. Cleans up clang warning: drivers/net/ethernet/cadence/macb_main.c:1221:2: warning: Value stored to 'work_done' is never read Signed-off-by: Colin Ian King Tested-by: Alexander Dahl Signed-off-by: David S.
Miller --- drivers/net/ethernet/cadence/macb_main.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 6df2cad61647..5dafcde67e45 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1218,8 +1218,6 @@ static int macb_poll(struct napi_struct *napi, int budget) status = macb_readl(bp, RSR); macb_writel(bp, RSR, status); - work_done = 0; - netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", (unsigned long)status, budget); -- cgit v1.2.3 From 6978729fbd02ced7060f15b9f7e1ba5b39d9bbf3 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Tue, 31 Oct 2017 14:29:47 +0000 Subject: sfc: support rx-fcs and rx-all Ethernet FCS inclusion (rx-fcs) is supported on EF10 NICs, conditional on a firmware capability bit (MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS). To receive frames with bad FCS (rx-all) we just don't return the discard flag EFX_RX_PKT_DISCARD from efx_ef10_handle_rx_event_errors() or efx_farch_handle_rx_not_ok(). Signed-off-by: Edward Cree Signed-off-by: David S. Miller
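The change is the same pattern in both RX-error paths: a CRC error only forces a discard while NETIF_F_RXALL is off. A condensed sketch (hypothetical helper, loosely following efx_ef10_handle_rx_event_errors() in the diff below):

	/* Keep a bad-FCS frame when the user enabled rx-all; otherwise
	 * count the CRC error and discard the frame as before.
	 */
	static u16 handle_crc_error(struct efx_nic *efx,
				    struct efx_channel *channel,
				    unsigned int n_packets)
	{
		if (!(efx->net_dev->features & NETIF_F_RXALL)) {
			channel->n_rx_eth_crc_err += n_packets;
			return EFX_RX_PKT_DISCARD;
		}
		return 0;	/* rx-all: deliver the frame anyway */
	}

From user space both features are toggled with the standard ethtool feature flags, e.g. "ethtool -K eth0 rx-fcs on" and "ethtool -K eth0 rx-all on"; both default to off.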
--- drivers/net/ethernet/sfc/ef10.c | 16 ++++++++++++---- drivers/net/ethernet/sfc/efx.c | 14 ++++++++++---- drivers/net/ethernet/sfc/farch.c | 4 ++++ drivers/net/ethernet/sfc/mcdi_port.c | 4 ++++ 4 files changed, 30 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 13f72f5b18d2..19a91881fbf9 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -674,6 +674,10 @@ static int efx_ef10_probe(struct efx_nic *efx) efx->rx_packet_len_offset = ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE; + if (nic_data->datapath_caps & + (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN)) + efx->net_dev->hw_features |= NETIF_F_RXFCS; + rc = efx_mcdi_port_get_number(efx); if (rc < 0) goto fail5; @@ -3199,11 +3203,15 @@ static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel, const efx_qword_t *event) { struct efx_nic *efx = channel->efx; + bool handled = false; if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)) { - if (!efx->loopback_selftest) - channel->n_rx_eth_crc_err += n_packets; - return EFX_RX_PKT_DISCARD; + if (!(efx->net_dev->features & NETIF_F_RXALL)) { + if (!efx->loopback_selftest) + channel->n_rx_eth_crc_err += n_packets; + return EFX_RX_PKT_DISCARD; + } + handled = true; } if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR)) { if (unlikely(rx_encap_hdr != ESE_EZ_ENCAP_HDR_VXLAN && @@ -3274,7 +3282,7 @@ static u16 efx_ef10_handle_rx_event_errors(struct efx_channel *channel, return 0; } - WARN_ON(1); /* No error bits were recognised */ + WARN_ON(!handled); /* No error bits were recognised */ return 0; } diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 8fdcf7aaf997..6668e371405c 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2315,8 +2315,11 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data) return rc; } - /* If Rx VLAN filter is changed, update filters via mac_reconfigure */ - if ((net_dev->features ^ data) & NETIF_F_HW_VLAN_CTAG_FILTER) { + /* If Rx VLAN filter is changed, update filters via mac_reconfigure. + * If rx-fcs is changed, mac_reconfigure updates that too. + */ + if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_RXFCS)) { /* efx_set_rx_mode() will schedule MAC work to update filters * when a new features are finally set in net_dev. */ @@ -3242,7 +3245,7 @@ static int efx_pci_probe_post_io(struct efx_nic *efx) /* Determine netdevice features */ net_dev->features |= (efx->type->offload_features | NETIF_F_SG | - NETIF_F_TSO | NETIF_F_RXCSUM); + NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL); if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) net_dev->features |= NETIF_F_TSO6; /* Check whether device supports TSO */ @@ -3253,7 +3256,10 @@ static int efx_pci_probe_post_io(struct efx_nic *efx) NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | NETIF_F_RXCSUM); - net_dev->hw_features = net_dev->features & ~efx->fixed_features; + net_dev->hw_features |= net_dev->features & ~efx->fixed_features; + + /* Disable receiving frames with bad FCS, by default. */ + net_dev->features &= ~NETIF_F_RXALL; /* Disable VLAN filtering by default. It may be enforced if * the feature is fixed (i.e. VLAN filters are required to diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index ba45150f53c7..6608dfe455b1 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -927,6 +927,10 @@ static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue, } #endif + if (efx->net_dev->features & NETIF_F_RXALL) + /* don't discard frame for CRC error */ + rx_ev_eth_crc_err = false; + /* The frame must be discarded if any of these are true. */ return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_tobe_disc | rx_ev_pause_frm) ? diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index c7407d129c7d..6e1f282b2976 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -1029,6 +1029,10 @@ int efx_mcdi_set_mac(struct efx_nic *efx) MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT, SET_MAC_IN_REJECT_UNCST, efx->unicast_filter); + MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_FLAGS, + SET_MAC_IN_FLAG_INCLUDE_FCS, + !!(efx->net_dev->features & NETIF_F_RXFCS)); + switch (efx->wanted_fc) { case EFX_FC_RX | EFX_FC_TX: fcntl = MC_CMD_FCNTL_BIDIR; -- cgit v1.2.3 From a95157d72d638913422773ea6faa384ebe4d366d Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 31 Oct 2017 14:37:55 +0000 Subject: net: thunderx: remove a couple of redundant assignments The assignment to pointer msg is redundant as it is never read, so remove msg. Also remove the first assignment to qset as this is not read before the next re-assignment of a new value to qset in the for-loop. Cleans up two clang warnings: drivers/net/ethernet/cavium/thunder/nic_main.c:589:2: warning: Value stored to 'msg' is never read drivers/net/ethernet/cavium/thunder/nic_main.c:611:2: warning: Value stored to 'qset' is never read Signed-off-by: Colin Ian King Signed-off-by: David S.
Miller --- drivers/net/ethernet/cavium/thunder/nic_main.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index fb770b0182d3..988c06a28e5e 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -584,9 +584,6 @@ static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg) static void nic_send_rss_size(struct nicpf *nic, int vf) { union nic_mbx mbx = {}; - u64 *msg; - - msg = (u64 *)&mbx; mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE; mbx.rss_size.ind_tbl_size = nic->hw->rss_ind_tbl_size; @@ -608,7 +605,6 @@ static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg) rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset; rssi = rssi_base; - qset = cfg->vf_id; for (; rssi < (rssi_base + cfg->tbl_len); rssi++) { u8 svf = cfg->ind_tbl[idx] >> 3; -- cgit v1.2.3 From 0ba9a3b65c794982f4dc7fcdc8110c327359916b Mon Sep 17 00:00:00 2001 From: Kumar Sanghvi Date: Wed, 1 Nov 2017 08:52:59 +0530 Subject: cxgb4: save additional filter tuple field shifts in tp_params Save additional filter tuple field shifts in tp_params based on configured filter tuple fields. Also, save the combined filter tuple mask based on configured filter tuple fields. Signed-off-by: Kumar Sanghvi Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 12 ++++++++++-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 18 ++++++++++++++++-- drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 3 +++ 3 files changed, 29 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index a57761b28edc..e2c75b73595f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -287,10 +287,18 @@ struct tp_params { * places we store their offsets here, or a -1 if the field isn't * present. */ - int vlan_shift; - int vnic_shift; + int fcoe_shift; int port_shift; + int vnic_shift; + int vlan_shift; + int tos_shift; int protocol_shift; + int ethertype_shift; + int macmatch_shift; + int matchtype_shift; + int frag_shift; + + u64 hash_filter_mask; }; struct vpd_params { diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index c289ca1efc1b..efe9d3a20135 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -8816,11 +8816,21 @@ int t4_init_tp_params(struct adapter *adap, bool sleep_ok) * shift positions of several elements of the Compressed Filter Tuple * for this adapter which we need frequently ... 
*/ - adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F); - adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F); + adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, FCOE_F); adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F); + adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F); + adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F); + adap->params.tp.tos_shift = t4_filter_field_shift(adap, TOS_F); adap->params.tp.protocol_shift = t4_filter_field_shift(adap, PROTOCOL_F); + adap->params.tp.ethertype_shift = t4_filter_field_shift(adap, + ETHERTYPE_F); + adap->params.tp.macmatch_shift = t4_filter_field_shift(adap, + MACMATCH_F); + adap->params.tp.matchtype_shift = t4_filter_field_shift(adap, + MPSHITTYPE_F); + adap->params.tp.frag_shift = t4_filter_field_shift(adap, + FRAGMENTATION_F); /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID * represents the presence of an Outer VLAN instead of a VNIC ID. @@ -8828,6 +8838,10 @@ int t4_init_tp_params(struct adapter *adap, bool sleep_ok) if ((adap->params.tp.ingress_config & VNIC_F) == 0) adap->params.tp.vnic_shift = -1; + v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A); + adap->params.tp.hash_filter_mask = v; + v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A); + adap->params.tp.hash_filter_mask |= ((u64)v << 32); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 483fb7644355..44713bad0045 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -2933,6 +2933,9 @@ #define SSRAMINTPERR_V(x) ((x) << SSRAMINTPERR_S) #define SSRAMINTPERR_F SSRAMINTPERR_V(1U) +#define LE_3_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eac +#define LE_4_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eb0 + #define NCSI_INT_CAUSE_A 0x1a0d8 #define CIM_DM_PRTY_ERR_S 8 -- cgit v1.2.3 From 5c31254e35a8a5767c3b23377c34018d8bdd0567 Mon Sep 17 00:00:00 2001 From: Kumar Sanghvi Date: Wed, 1 Nov 2017 08:53:00 +0530 Subject: cxgb4: initialize hash-filter configuration Add support for hash-filter configuration on T6. Also, do basic checks for the related initialization. Signed-off-by: Kumar Sanghvi Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 6 ++++++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 22 ++++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h | 1 + drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 14 ++++++++++---- drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 14 ++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 1 + 6 files changed, 54 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index e2c75b73595f..5f021e6062b0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -366,6 +366,7 @@ struct adapter_params { unsigned char crypto; /* HW capability for crypto */ unsigned char bypass; + unsigned char hash_filter; unsigned int ofldq_wr_cred; bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */ @@ -1140,6 +1141,11 @@ static inline int is_offload(const struct adapter *adap) return adap->params.offload; } +static inline int is_hashfilter(const struct adapter *adap) +{ + return adap->params.hash_filter; +} + static inline int is_pci_uld(const struct adapter *adap) { return adap->params.crypto; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 89272f29f807..566bd2d3737c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -915,3 +915,25 @@ void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl) complete(&ctx->completion); } } + +int init_hash_filter(struct adapter *adap) +{ + /* On T6, verify the necessary register configs and warn the user in + * case of improper config + */ + if (is_t6(adap->params.chip)) { + if (TCAM_ACTV_HIT_G(t4_read_reg(adap, LE_DB_RSP_CODE_0_A)) != 4) + goto err; + + if (HASH_ACTV_HIT_G(t4_read_reg(adap, LE_DB_RSP_CODE_1_A)) != 4) + goto err; + } else { + dev_err(adap->pdev_dev, "Hash filter supported only on T6\n"); + return -EINVAL; + } + adap->params.hash_filter = 1; + return 0; +err: + dev_warn(adap->pdev_dev, "Invalid hash filter config!\n"); + return -EINVAL; +} diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h index 23742cb1c69f..d3c1a8fafd32 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h @@ -45,4 +45,5 @@ int delete_filter(struct adapter *adapter, unsigned int fidx); int writable_filter(struct filter_entry *f); void clear_all_filters(struct adapter *adapter); +int init_hash_filter(struct adapter *adap); #endif /* __CXGB4_FILTER_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index e16078ddb39f..4b07cfe8c66c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3963,7 +3963,8 @@ static int adap_init0(struct adapter *adap) if (ret < 0) goto bye; - if (caps_cmd.ofldcaps) { + if (caps_cmd.ofldcaps || + (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER))) { /* query offload-related parameters */ params[0] = FW_PARAM_DEV(NTID); params[1] = FW_PARAM_PFVF(SERVER_START); @@ -4000,8 +4001,13 @@ static int adap_init0(struct adapter *adap) adap->vres.ddp.size = val[4] - val[3] + 1; adap->params.ofldq_wr_cred = val[5]; - adap->params.offload = 1; - adap->num_ofld_uld += 1; + if (caps_cmd.niccaps & 
htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) { + if (init_hash_filter(adap) < 0) + goto bye; + } else { + adap->params.offload = 1; + adap->num_ofld_uld += 1; + } } if (caps_cmd.rdmacaps) { params[0] = FW_PARAM_PFVF(STAG_START); @@ -5171,7 +5177,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) cxgb4_init_tc_flower(adapter); } - if (is_offload(adapter)) { + if (is_offload(adapter) || is_hashfilter(adapter)) { if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) { u32 hash_base, hash_reg; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 44713bad0045..623f453bd327 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -2933,6 +2933,20 @@ #define SSRAMINTPERR_V(x) ((x) << SSRAMINTPERR_S) #define SSRAMINTPERR_F SSRAMINTPERR_V(1U) +#define LE_DB_RSP_CODE_0_A 0x19c74 + +#define TCAM_ACTV_HIT_S 0 +#define TCAM_ACTV_HIT_M 0x1fU +#define TCAM_ACTV_HIT_V(x) ((x) << TCAM_ACTV_HIT_S) +#define TCAM_ACTV_HIT_G(x) (((x) >> TCAM_ACTV_HIT_S) & TCAM_ACTV_HIT_M) + +#define LE_DB_RSP_CODE_1_A 0x19c78 + +#define HASH_ACTV_HIT_S 25 +#define HASH_ACTV_HIT_M 0x1fU +#define HASH_ACTV_HIT_V(x) ((x) << HASH_ACTV_HIT_S) +#define HASH_ACTV_HIT_G(x) (((x) >> HASH_ACTV_HIT_S) & HASH_ACTV_HIT_M) + #define LE_3_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eac #define LE_4_DB_HASH_MASK_GEN_IPV4_T6_A 0x19eb0 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 2ba890926c73..57eb4ad3485d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -1092,6 +1092,7 @@ enum fw_caps_config_switch { enum fw_caps_config_nic { FW_CAPS_CONFIG_NIC = 0x00000001, FW_CAPS_CONFIG_NIC_VM = 0x00000002, + FW_CAPS_CONFIG_NIC_HASHFILTER = 0x00000020, }; enum fw_caps_config_ofld { -- cgit v1.2.3 From 12b276fbf6e092adca08a8125afcc4e7f530a0b6 Mon Sep 17 00:00:00 2001 From: Kumar Sanghvi Date: Wed, 1 Nov 2017 08:53:01 +0530 Subject: cxgb4: add support to create hash filters Add support to create hash (exact-match) filters based on the value of the 'hash' field in ch_filter_specification. Allocate SMT/L2T entries if DMAC-rewrite/SMAC-rewrite is requested. Allocate a CLIP entry in case of an IPv6 filter. Use cpl_act_open_req[6] to send the hash filter create request to hw. Also, the filter tuple is calculated as part of sending this request. The hash-filter reply is processed on getting cpl_act_open_rpl. In case of success, various bits/fields in the filter-tcb are set per filter requirement, such as enabling filter hit counters and the requested header rewrite operations (VLAN-rewrite, NAT or (L3/L4)-rewrite, and SMAC/DMAC-rewrite). In case of failure, clear the filter entry and release any hw resources occupied by it. The patch also moves the functions set_tcb_field, set_tcb_tflag and configure_filter_smac towards the beginning of the file. Signed-off-by: Kumar Sanghvi Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller
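The filter tuple sent in the ACT_OPEN request is simply the configured fields OR'd together at the shift positions saved earlier in tp_params. A simplified sketch covering just two fields (hypothetical helper; hash_filter_ntuple() in the diff below handles the full set):

	static u64 example_ntuple(const struct tp_params *tp,
				  const struct ch_filter_specification *fs)
	{
		u64 ntuple = 0;

		/* Each field present in the compressed tuple lands at its
		 * adapter-specific shift; a shift of -1 means the field is
		 * not part of the configured tuple.
		 */
		if (tp->port_shift >= 0 && fs->mask.iport)
			ntuple |= (u64)fs->val.iport << tp->port_shift;

		if (tp->protocol_shift >= 0)
			ntuple |= (u64)(fs->val.proto ? fs->val.proto :
					IPPROTO_TCP) << tp->protocol_shift;

		return ntuple;
	}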
--- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 10 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 573 ++++++++++++++++++++-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h | 3 + drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 4 + drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 5 + drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h | 17 + 6 files changed, 562 insertions(+), 50 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 5f021e6062b0..bb7f0e4c9a81 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1050,6 +1050,7 @@ struct ch_filter_specification { * matching that doesn't exist as a (value, mask) tuple. */ uint32_t type:1; /* 0 => IPv4, 1 => IPv6 */ + u32 hash:1; /* 0 => wild-card, 1 => exact-match */ /* Packet dispatch information. Ingress packets which match the * filter rules will be dropped, passed to the host or switched back @@ -1107,7 +1108,14 @@ enum { }; enum { - NAT_MODE_ALL = 7, /* NAT on entire 4-tuple */ + NAT_MODE_NONE = 0, /* No NAT performed */ + NAT_MODE_DIP, /* NAT on Dst IP */ + NAT_MODE_DIP_DP, /* NAT on Dst IP, Dst Port */ + NAT_MODE_DIP_DP_SIP, /* NAT on Dst IP, Dst Port and Src IP */ + NAT_MODE_DIP_DP_SP, /* NAT on Dst IP, Dst Port and Src Port */ + NAT_MODE_SIP_SP, /* NAT on Src IP and Src Port */ + NAT_MODE_DIP_SIP_SP, /* NAT on Dst IP, Src IP and Src Port */ + NAT_MODE_ALL /* NAT on entire 4-tuple */ }; /* Host shadow copy of ingress filter entry. This is in host native format diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 566bd2d3737c..cf8ca695f27d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -35,6 +35,8 @@ #include "cxgb4.h" #include "t4_regs.h" #include "t4_tcb.h" +#include "t4_values.h" +#include "clip_tbl.h" #include "l2t.h" #include "smt.h" #include "t4fw_api.h" @@ -50,6 +52,141 @@ static inline bool unsupported(u32 conf, u32 conf_mask, u32 val, u32 mask) return !(conf & conf_mask) && is_field_set(val, mask); } +static int set_tcb_field(struct adapter *adap, struct filter_entry *f, + unsigned int ftid, u16 word, u64 mask, u64 val, + int no_reply) +{ + struct cpl_set_tcb_field *req; + struct sk_buff *skb; + + skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + req = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*req)); + memset(req, 0, sizeof(*req)); + INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, ftid); + req->reply_ctrl = htons(REPLY_CHAN_V(0) | + QUEUENO_V(adap->sge.fw_evtq.abs_id) | + NO_REPLY_V(no_reply)); + req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(ftid)); + req->mask = cpu_to_be64(mask); + req->val = cpu_to_be64(val); + set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3); + t4_ofld_send(adap, skb); + return 0; +} + +/* Set one of the t_flags bits in the TCB. + */ +static int set_tcb_tflag(struct adapter *adap, struct filter_entry *f, + unsigned int ftid, unsigned int bit_pos, + unsigned int val, int no_reply) +{ + return set_tcb_field(adap, f, ftid, TCB_T_FLAGS_W, 1ULL << bit_pos, + (unsigned long long)val << bit_pos, no_reply); +} + +static int configure_filter_smac(struct adapter *adap, struct filter_entry *f) +{ + int err; + + /* do a set-tcb for smac-sel and CWR bit..
*/ + err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1); + if (err) + goto smac_err; + + err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W, + TCB_SMAC_SEL_V(TCB_SMAC_SEL_M), + TCB_SMAC_SEL_V(f->smt->idx), 1); + if (!err) + return 0; + +smac_err: + dev_err(adap->pdev_dev, "filter %u smac config failed with error %u\n", + f->tid, err); + return err; +} + +static void set_nat_params(struct adapter *adap, struct filter_entry *f, + unsigned int tid, bool dip, bool sip, bool dp, + bool sp) +{ + if (dip) { + if (f->fs.type) { + set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W, + WORD_MASK, f->fs.nat_lip[15] | + f->fs.nat_lip[14] << 8 | + f->fs.nat_lip[13] << 16 | + f->fs.nat_lip[12] << 24, 1); + + set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 1, + WORD_MASK, f->fs.nat_lip[11] | + f->fs.nat_lip[10] << 8 | + f->fs.nat_lip[9] << 16 | + f->fs.nat_lip[8] << 24, 1); + + set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 2, + WORD_MASK, f->fs.nat_lip[7] | + f->fs.nat_lip[6] << 8 | + f->fs.nat_lip[5] << 16 | + f->fs.nat_lip[4] << 24, 1); + + set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 3, + WORD_MASK, f->fs.nat_lip[3] | + f->fs.nat_lip[2] << 8 | + f->fs.nat_lip[1] << 16 | + f->fs.nat_lip[0] << 24, 1); + } else { + set_tcb_field(adap, f, tid, TCB_RX_FRAG3_LEN_RAW_W, + WORD_MASK, f->fs.nat_lip[3] | + f->fs.nat_lip[2] << 8 | + f->fs.nat_lip[1] << 16 | + f->fs.nat_lip[0] << 24, 1); + } + } + + if (sip) { + if (f->fs.type) { + set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W, + WORD_MASK, f->fs.nat_fip[15] | + f->fs.nat_fip[14] << 8 | + f->fs.nat_fip[13] << 16 | + f->fs.nat_fip[12] << 24, 1); + + set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 1, + WORD_MASK, f->fs.nat_fip[11] | + f->fs.nat_fip[10] << 8 | + f->fs.nat_fip[9] << 16 | + f->fs.nat_fip[8] << 24, 1); + + set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 2, + WORD_MASK, f->fs.nat_fip[7] | + f->fs.nat_fip[6] << 8 | + f->fs.nat_fip[5] << 16 | + f->fs.nat_fip[4] << 24, 1); + + set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 3, + WORD_MASK, f->fs.nat_fip[3] | + f->fs.nat_fip[2] << 8 | + f->fs.nat_fip[1] << 16 | + f->fs.nat_fip[0] << 24, 1); + + } else { + set_tcb_field(adap, f, tid, + TCB_RX_FRAG3_START_IDX_OFFSET_RAW_W, + WORD_MASK, f->fs.nat_fip[3] | + f->fs.nat_fip[2] << 8 | + f->fs.nat_fip[1] << 16 | + f->fs.nat_fip[0] << 24, 1); + } + } + + set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK, + (dp ? f->fs.nat_lport : 0) | + (sp ? f->fs.nat_fport << 16 : 0), 1); +} + /* Validate filter spec against configuration done on the card. */ static int validate_filter(struct net_device *dev, struct ch_filter_specification *fs) @@ -484,10 +621,8 @@ int delete_filter(struct adapter *adapter, unsigned int fidx) void clear_filter(struct adapter *adap, struct filter_entry *f) { /* If the new or old filter have loopback rewriteing rules then we'll - * need to free any existing Layer Two Table (L2T) entries of the old - * filter rule. The firmware will handle freeing up any Source MAC - * Table (SMT) entries used for rewriting Source MAC Addresses in - * loopback rules. + * need to free any existing L2T, SMT, CLIP entries of filter + * rule. */ if (f->l2t) cxgb4_l2t_release(f->l2t); @@ -495,6 +630,9 @@ void clear_filter(struct adapter *adap, struct filter_entry *f) if (f->smt) cxgb4_smt_release(f->smt); + if (f->fs.hash && f->fs.type) + cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1); + /* The zeroing of the filter rule below clears the filter valid, * pending, locked flags, l2t pointer, etc. 
so it's all we need for * this operation. @@ -564,6 +702,269 @@ static void fill_default_mask(struct ch_filter_specification *fs) fs->mask.fport = ~0; } +static u64 hash_filter_ntuple(struct ch_filter_specification *fs, + struct net_device *dev) +{ + struct adapter *adap = netdev2adap(dev); + struct tp_params *tp = &adap->params.tp; + u64 ntuple = 0; + + /* Initialize each of the fields which we care about which are present + * in the Compressed Filter Tuple. + */ + if (tp->vlan_shift >= 0 && fs->mask.ivlan) + ntuple |= (FT_VLAN_VLD_F | fs->val.ivlan) << tp->vlan_shift; + + if (tp->port_shift >= 0 && fs->mask.iport) + ntuple |= (u64)fs->val.iport << tp->port_shift; + + if (tp->protocol_shift >= 0) { + if (!fs->val.proto) + ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift; + else + ntuple |= (u64)fs->val.proto << tp->protocol_shift; + } + + if (tp->tos_shift >= 0 && fs->mask.tos) + ntuple |= (u64)(fs->val.tos) << tp->tos_shift; + + if (tp->vnic_shift >= 0) { + if ((adap->params.tp.ingress_config & VNIC_F) && + fs->mask.pfvf_vld) + ntuple |= (u64)((fs->val.pfvf_vld << 16) | + (fs->val.pf << 13) | + (fs->val.vf)) << tp->vnic_shift; + else + ntuple |= (u64)((fs->val.ovlan_vld << 16) | + (fs->val.ovlan)) << tp->vnic_shift; + } + + if (tp->macmatch_shift >= 0 && fs->mask.macidx) + ntuple |= (u64)(fs->val.macidx) << tp->macmatch_shift; + + if (tp->ethertype_shift >= 0 && fs->mask.ethtype) + ntuple |= (u64)(fs->val.ethtype) << tp->ethertype_shift; + + if (tp->matchtype_shift >= 0 && fs->mask.matchtype) + ntuple |= (u64)(fs->val.matchtype) << tp->matchtype_shift; + + if (tp->frag_shift >= 0 && fs->mask.frag) + ntuple |= (u64)(fs->val.frag) << tp->frag_shift; + + if (tp->fcoe_shift >= 0 && fs->mask.fcoe) + ntuple |= (u64)(fs->val.fcoe) << tp->fcoe_shift; + return ntuple; +} + +static void mk_act_open_req6(struct filter_entry *f, struct sk_buff *skb, + unsigned int qid_filterid, struct adapter *adap) +{ + struct cpl_t6_act_open_req6 *t6req = NULL; + struct cpl_act_open_req6 *req = NULL; + + t6req = (struct cpl_t6_act_open_req6 *)__skb_put(skb, sizeof(*t6req)); + INIT_TP_WR(t6req, 0); + req = (struct cpl_act_open_req6 *)t6req; + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_filterid)); + req->local_port = cpu_to_be16(f->fs.val.lport); + req->peer_port = cpu_to_be16(f->fs.val.fport); + req->local_ip_hi = *(__be64 *)(&f->fs.val.lip); + req->local_ip_lo = *(((__be64 *)&f->fs.val.lip) + 1); + req->peer_ip_hi = *(__be64 *)(&f->fs.val.fip); + req->peer_ip_lo = *(((__be64 *)&f->fs.val.fip) + 1); + req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE || + f->fs.newvlan == VLAN_REWRITE) | + DELACK_V(f->fs.hitcnts) | + L2T_IDX_V(f->l2t ? f->l2t->idx : 0) | + SMAC_SEL_V((cxgb4_port_viid(f->dev) & + 0x7F) << 1) | + TX_CHAN_V(f->fs.eport) | + NO_CONG_V(f->fs.rpttid) | + ULP_MODE_V(f->fs.nat_mode ? 
+ ULP_MODE_TCPDDP : ULP_MODE_NONE) | + TCAM_BYPASS_F | NON_OFFLOAD_F); + t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs, + f->dev))); + t6req->opt2 = htonl(RSS_QUEUE_VALID_F | + RSS_QUEUE_V(f->fs.iq) | + TX_QUEUE_V(f->fs.nat_mode) | + T5_OPT_2_VALID_F | + RX_CHANNEL_F | + CONG_CNTRL_V((f->fs.action == FILTER_DROP) | + (f->fs.dirsteer << 1)) | + PACE_V((f->fs.maskhash) | + ((f->fs.dirsteerhash) << 1)) | + CCTRL_ECN_V(f->fs.action == FILTER_SWITCH)); +} + +static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb, + unsigned int qid_filterid, struct adapter *adap) +{ + struct cpl_t6_act_open_req *t6req = NULL; + struct cpl_act_open_req *req = NULL; + + t6req = (struct cpl_t6_act_open_req *)__skb_put(skb, sizeof(*t6req)); + INIT_TP_WR(t6req, 0); + req = (struct cpl_act_open_req *)t6req; + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid)); + req->local_port = cpu_to_be16(f->fs.val.lport); + req->peer_port = cpu_to_be16(f->fs.val.fport); + req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 | + f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24; + req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 | + f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24; + req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE || + f->fs.newvlan == VLAN_REWRITE) | + DELACK_V(f->fs.hitcnts) | + L2T_IDX_V(f->l2t ? f->l2t->idx : 0) | + SMAC_SEL_V((cxgb4_port_viid(f->dev) & + 0x7F) << 1) | + TX_CHAN_V(f->fs.eport) | + NO_CONG_V(f->fs.rpttid) | + ULP_MODE_V(f->fs.nat_mode ? + ULP_MODE_TCPDDP : ULP_MODE_NONE) | + TCAM_BYPASS_F | NON_OFFLOAD_F); + + t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs, + f->dev))); + t6req->opt2 = htonl(RSS_QUEUE_VALID_F | + RSS_QUEUE_V(f->fs.iq) | + TX_QUEUE_V(f->fs.nat_mode) | + T5_OPT_2_VALID_F | + RX_CHANNEL_F | + CONG_CNTRL_V((f->fs.action == FILTER_DROP) | + (f->fs.dirsteer << 1)) | + PACE_V((f->fs.maskhash) | + ((f->fs.dirsteerhash) << 1)) | + CCTRL_ECN_V(f->fs.action == FILTER_SWITCH)); +} + +static int cxgb4_set_hash_filter(struct net_device *dev, + struct ch_filter_specification *fs, + struct filter_ctx *ctx) +{ + struct adapter *adapter = netdev2adap(dev); + struct tid_info *t = &adapter->tids; + struct filter_entry *f; + struct sk_buff *skb; + int iq, atid, size; + int ret = 0; + u32 iconf; + + fill_default_mask(fs); + ret = validate_filter(dev, fs); + if (ret) + return ret; + + iq = get_filter_steerq(dev, fs); + if (iq < 0) + return iq; + + f = kzalloc(sizeof(*f), GFP_KERNEL); + if (!f) + return -ENOMEM; + + f->fs = *fs; + f->ctx = ctx; + f->dev = dev; + f->fs.iq = iq; + + /* If the new filter requires loopback Destination MAC and/or VLAN + * rewriting then we need to allocate a Layer 2 Table (L2T) entry for + * the filter. + */ + if (f->fs.newdmac || f->fs.newvlan) { + /* allocate L2T entry for new filter */ + f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan, + f->fs.eport, f->fs.dmac); + if (!f->l2t) { + ret = -ENOMEM; + goto out_err; + } + } + + /* If the new filter requires loopback Source MAC rewriting then + * we need to allocate a SMT entry for the filter. 
+ */ + if (f->fs.newsmac) { + f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac); + if (!f->smt) { + if (f->l2t) { + cxgb4_l2t_release(f->l2t); + f->l2t = NULL; + } + ret = -ENOMEM; + goto free_l2t; + } + } + + atid = cxgb4_alloc_atid(t, f); + if (atid < 0) + goto free_smt; + + iconf = adapter->params.tp.ingress_config; + if (iconf & VNIC_F) { + f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf; + f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf; + f->fs.val.ovlan_vld = fs->val.pfvf_vld; + f->fs.mask.ovlan_vld = fs->mask.pfvf_vld; + } + + size = sizeof(struct cpl_t6_act_open_req); + if (f->fs.type) { + ret = cxgb4_clip_get(f->dev, (const u32 *)&f->fs.val.lip, 1); + if (ret) + goto free_atid; + + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) { + ret = -ENOMEM; + goto free_clip; + } + + mk_act_open_req6(f, skb, + ((adapter->sge.fw_evtq.abs_id << 14) | atid), + adapter); + } else { + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) { + ret = -ENOMEM; + goto free_atid; + } + + mk_act_open_req(f, skb, + ((adapter->sge.fw_evtq.abs_id << 14) | atid), + adapter); + } + + f->pending = 1; + set_wr_txq(skb, CPL_PRIORITY_SETUP, f->fs.val.iport & 0x3); + t4_ofld_send(adapter, skb); + return 0; + +free_clip: + cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1); + +free_atid: + cxgb4_free_atid(t, atid); + +free_smt: + if (f->smt) { + cxgb4_smt_release(f->smt); + f->smt = NULL; + } + +free_l2t: + if (f->l2t) { + cxgb4_l2t_release(f->l2t); + f->l2t = NULL; + } + +out_err: + kfree(f); + return ret; +} + /* Check a Chelsio Filter Request for validity, convert it into our internal * format and send it to the hardware. Return 0 on success, an error number * otherwise. We attach any provided filter operation context to the internal @@ -580,6 +981,14 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id, u32 iconf; int iq, ret; + if (fs->hash) { + if (is_hashfilter(adapter)) + return cxgb4_set_hash_filter(dev, fs, ctx); + netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n", + __func__); + return -EINVAL; + } + max_fidx = adapter->tids.nftids; if (filter_id != (max_fidx + adapter->tids.nsftids - 1) && filter_id >= max_fidx) @@ -789,60 +1198,126 @@ out: return ret; } -static int set_tcb_field(struct adapter *adap, struct filter_entry *f, - unsigned int ftid, u16 word, u64 mask, u64 val, - int no_reply) +static int configure_filter_tcb(struct adapter *adap, unsigned int tid, + struct filter_entry *f) { - struct cpl_set_tcb_field *req; - struct sk_buff *skb; - - skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC); - if (!skb) - return -ENOMEM; - - req = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*req)); - memset(req, 0, sizeof(*req)); - INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, ftid); - req->reply_ctrl = htons(REPLY_CHAN_V(0) | - QUEUENO_V(adap->sge.fw_evtq.abs_id) | - NO_REPLY_V(no_reply)); - req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(ftid)); - req->mask = cpu_to_be64(mask); - req->val = cpu_to_be64(val); - set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3); - t4_ofld_send(adap, skb); + if (f->fs.hitcnts) + set_tcb_field(adap, f, tid, TCB_TIMESTAMP_W, + TCB_TIMESTAMP_V(TCB_TIMESTAMP_M) | + TCB_RTT_TS_RECENT_AGE_V(TCB_RTT_TS_RECENT_AGE_M), + TCB_TIMESTAMP_V(0ULL) | + TCB_RTT_TS_RECENT_AGE_V(0ULL), + 1); + + if (f->fs.newdmac) + set_tcb_tflag(adap, f, tid, TF_CCTRL_ECE_S, 1, + 1); + + if (f->fs.newvlan == VLAN_INSERT || + f->fs.newvlan == VLAN_REWRITE) + set_tcb_tflag(adap, f, tid, TF_CCTRL_RFR_S, 1, + 1); + if 
(f->fs.newsmac) + configure_filter_smac(adap, f); + + if (f->fs.nat_mode) { + switch (f->fs.nat_mode) { + case NAT_MODE_DIP: + set_nat_params(adap, f, tid, true, false, false, false); + break; + + case NAT_MODE_DIP_DP: + set_nat_params(adap, f, tid, true, false, true, false); + break; + + case NAT_MODE_DIP_DP_SIP: + set_nat_params(adap, f, tid, true, true, true, false); + break; + case NAT_MODE_DIP_DP_SP: + set_nat_params(adap, f, tid, true, false, true, true); + break; + + case NAT_MODE_SIP_SP: + set_nat_params(adap, f, tid, false, true, false, true); + break; + + case NAT_MODE_DIP_SIP_SP: + set_nat_params(adap, f, tid, true, true, false, true); + break; + + case NAT_MODE_ALL: + set_nat_params(adap, f, tid, true, true, true, true); + break; + + default: + pr_err("%s: Invalid NAT mode: %d\n", + __func__, f->fs.nat_mode); + return -EINVAL; + } + } return 0; } -/* Set one of the t_flags bits in the TCB. - */ -static int set_tcb_tflag(struct adapter *adap, struct filter_entry *f, - unsigned int ftid, unsigned int bit_pos, - unsigned int val, int no_reply) +void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl) { - return set_tcb_field(adap, f, ftid, TCB_T_FLAGS_W, 1ULL << bit_pos, - (unsigned long long)val << bit_pos, no_reply); -} + unsigned int ftid = TID_TID_G(AOPEN_ATID_G(ntohl(rpl->atid_status))); + unsigned int status = AOPEN_STATUS_G(ntohl(rpl->atid_status)); + struct tid_info *t = &adap->tids; + unsigned int tid = GET_TID(rpl); + struct filter_ctx *ctx = NULL; + struct filter_entry *f; -static int configure_filter_smac(struct adapter *adap, struct filter_entry *f) -{ - int err; + dev_dbg(adap->pdev_dev, "%s: tid = %u; atid = %u; status = %u\n", + __func__, tid, ftid, status); - /* do a set-tcb for smac-sel and CWR bit.. */ - err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1); - if (err) - goto smac_err; + f = lookup_atid(t, ftid); + if (!f) { + dev_err(adap->pdev_dev, "%s:could not find filter entry", + __func__); + return; + } + ctx = f->ctx; + f->ctx = NULL; + + switch (status) { + case CPL_ERR_NONE: + f->tid = tid; + f->pending = 0; + f->valid = 1; + cxgb4_insert_tid(t, f, f->tid, 0); + cxgb4_free_atid(t, ftid); + if (ctx) { + ctx->tid = f->tid; + ctx->result = 0; + } + if (configure_filter_tcb(adap, tid, f)) { + clear_filter(adap, f); + cxgb4_remove_tid(t, 0, tid, 0); + kfree(f); + if (ctx) { + ctx->result = -EINVAL; + complete(&ctx->completion); + } + return; + } + break; - err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W, - TCB_SMAC_SEL_V(TCB_SMAC_SEL_M), - TCB_SMAC_SEL_V(f->smt->idx), 1); - if (!err) - return 0; + default: + dev_err(adap->pdev_dev, "%s: filter creation PROBLEM; status = %u\n", + __func__, status); -smac_err: - dev_err(adap->pdev_dev, "filter %u smac config failed with error %u\n", - f->tid, err); - return err; + if (ctx) { + if (status == CPL_ERR_TCAM_FULL) + ctx->result = -EAGAIN; + else + ctx->result = -EINVAL; + } + clear_filter(adap, f); + cxgb4_free_atid(t, ftid); + kfree(f); + } + if (ctx) + complete(&ctx->completion); } /* Handle a filter write/deletion reply. 
*/ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h index d3c1a8fafd32..7480d65550a8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h @@ -37,7 +37,10 @@ #include "t4_msg.h" +#define WORD_MASK 0xffffffff + void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl); +void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl); void clear_filter(struct adapter *adap, struct filter_entry *f); int set_filter_wr(struct adapter *adapter, int fidx); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 4b07cfe8c66c..77b4bd958748 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -572,6 +572,10 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, const struct cpl_set_tcb_rpl *p = (void *)rsp; filter_rpl(q->adap, p); + } else if (opcode == CPL_ACT_OPEN_RPL) { + const struct cpl_act_open_rpl *p = (void *)rsp; + + hash_filter_rpl(q->adap, p); } else dev_err(q->adap->pdev_dev, "unexpected CPL %#x on FW event queue\n", opcode); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index ce4838d907da..7e12f241145b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -286,6 +286,7 @@ struct work_request_hdr { #define RX_CHANNEL_S 26 #define RX_CHANNEL_V(x) ((x) << RX_CHANNEL_S) +#define RX_CHANNEL_F RX_CHANNEL_V(1U) #define WND_SCALE_EN_S 28 #define WND_SCALE_EN_V(x) ((x) << WND_SCALE_EN_S) @@ -315,6 +316,10 @@ struct cpl_pass_open_req { #define DELACK_V(x) ((x) << DELACK_S) #define DELACK_F DELACK_V(1U) +#define NON_OFFLOAD_S 7 +#define NON_OFFLOAD_V(x) ((x) << NON_OFFLOAD_S) +#define NON_OFFLOAD_F NON_OFFLOAD_V(1U) + #define DSCP_S 22 #define DSCP_M 0x3F #define DSCP_V(x) ((x) << DSCP_S) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h index c1c76663034d..c7201eb7b14c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h @@ -42,6 +42,23 @@ #define TCB_T_FLAGS_W 1 +#define TF_CCTRL_ECE_S 60 #define TF_CCTRL_CWR_S 61 +#define TF_CCTRL_RFR_S 62 +#define TCB_TIMESTAMP_W 5 +#define TCB_TIMESTAMP_S 0 +#define TCB_TIMESTAMP_M 0xffffffffULL +#define TCB_TIMESTAMP_V(x) ((x) << TCB_TIMESTAMP_S) + +#define TCB_RTT_TS_RECENT_AGE_W 6 +#define TCB_RTT_TS_RECENT_AGE_S 0 +#define TCB_RTT_TS_RECENT_AGE_M 0xffffffffULL +#define TCB_RTT_TS_RECENT_AGE_V(x) ((x) << TCB_RTT_TS_RECENT_AGE_S) + +#define TCB_SND_UNA_RAW_W 10 +#define TCB_RX_FRAG2_PTR_RAW_W 27 +#define TCB_RX_FRAG3_LEN_RAW_W 29 +#define TCB_RX_FRAG3_START_IDX_OFFSET_RAW_W 30 +#define TCB_PDU_HDR_LEN_W 31 #endif /* __T4_TCB_H */ -- cgit v1.2.3 From 3b0b3bee56dd4e5cd1976a046f391a1435d727b2 Mon Sep 17 00:00:00 2001 From: Kumar Sanghvi Date: Wed, 1 Nov 2017 08:53:02 +0530 Subject: cxgb4: add support to delete hash filter Use a combined ulptx work-request to send hash filter deletion request to hw. Hash filter deletion reply is processed on getting cpl_abort_rpl_rss. Release any L2T/SMT/CLIP entries on filter deletion. Also, free up the corresponding filter entry. Signed-off-by: Kumar Sanghvi Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 159 ++++++++++++++++++++- drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h | 2 + drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 4 + .../net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 2 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | 4 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 4 +- drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h | 5 + 7 files changed, 173 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index cf8ca695f27d..eb6ba9824501 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -87,6 +87,59 @@ static int set_tcb_tflag(struct adapter *adap, struct filter_entry *f, (unsigned long long)val << bit_pos, no_reply); } +static void mk_abort_req_ulp(struct cpl_abort_req *abort_req, unsigned int tid) +{ + struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req; + struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1); + + txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0)); + txpkt->len = htonl(DIV_ROUND_UP(sizeof(*abort_req), 16)); + sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM)); + sc->len = htonl(sizeof(*abort_req) - sizeof(struct work_request_hdr)); + OPCODE_TID(abort_req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, tid)); + abort_req->rsvd0 = htonl(0); + abort_req->rsvd1 = 0; + abort_req->cmd = CPL_ABORT_NO_RST; +} + +static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl, unsigned int tid) +{ + struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl; + struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1); + + txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0)); + txpkt->len = htonl(DIV_ROUND_UP(sizeof(*abort_rpl), 16)); + sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM)); + sc->len = htonl(sizeof(*abort_rpl) - sizeof(struct work_request_hdr)); + OPCODE_TID(abort_rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid)); + abort_rpl->rsvd0 = htonl(0); + abort_rpl->rsvd1 = 0; + abort_rpl->cmd = CPL_ABORT_NO_RST; +} + +static void mk_set_tcb_ulp(struct filter_entry *f, + struct cpl_set_tcb_field *req, + unsigned int word, u64 mask, u64 val, + u8 cookie, int no_reply) +{ + struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req; + struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1); + + txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0)); + txpkt->len = htonl(DIV_ROUND_UP(sizeof(*req), 16)); + sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM)); + sc->len = htonl(sizeof(*req) - sizeof(struct work_request_hdr)); + OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid)); + req->reply_ctrl = htons(NO_REPLY_V(no_reply) | REPLY_CHAN_V(0) | + QUEUENO_V(0)); + req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(cookie)); + req->mask = cpu_to_be64(mask); + req->val = cpu_to_be64(val); + sc = (struct ulptx_idata *)(req + 1); + sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP)); + sc->len = htonl(0); +} + static int configure_filter_smac(struct adapter *adap, struct filter_entry *f) { int err; @@ -1110,12 +1163,74 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id, return ret; } +static int cxgb4_del_hash_filter(struct net_device *dev, int filter_id, + struct filter_ctx *ctx) +{ + struct adapter *adapter = netdev2adap(dev); + struct tid_info *t = &adapter->tids; + struct cpl_abort_req *abort_req; + struct cpl_abort_rpl *abort_rpl; + struct 
cpl_set_tcb_field *req; + struct ulptx_idata *aligner; + struct work_request_hdr *wr; + struct filter_entry *f; + struct sk_buff *skb; + unsigned int wrlen; + int ret; + + netdev_dbg(dev, "%s: filter_id = %d ; nftids = %d\n", + __func__, filter_id, adapter->tids.nftids); + + if (filter_id > adapter->tids.ntids) + return -E2BIG; + + f = lookup_tid(t, filter_id); + if (!f) { + netdev_err(dev, "%s: no filter entry for filter_id = %d", + __func__, filter_id); + return -EINVAL; + } + + ret = writable_filter(f); + if (ret) + return ret; + + if (!f->valid) + return -EINVAL; + + f->ctx = ctx; + f->pending = 1; + wrlen = roundup(sizeof(*wr) + (sizeof(*req) + sizeof(*aligner)) + + sizeof(*abort_req) + sizeof(*abort_rpl), 16); + skb = alloc_skb(wrlen, GFP_KERNEL); + if (!skb) { + netdev_err(dev, "%s: could not allocate skb ..\n", __func__); + return -ENOMEM; + } + set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3); + req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen); + INIT_ULPTX_WR(req, wrlen, 0, 0); + wr = (struct work_request_hdr *)req; + wr++; + req = (struct cpl_set_tcb_field *)wr; + mk_set_tcb_ulp(f, req, TCB_RSS_INFO_W, TCB_RSS_INFO_V(TCB_RSS_INFO_M), + TCB_RSS_INFO_V(adapter->sge.fw_evtq.abs_id), 0, 1); + aligner = (struct ulptx_idata *)(req + 1); + abort_req = (struct cpl_abort_req *)(aligner + 1); + mk_abort_req_ulp(abort_req, f->tid); + abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1); + mk_abort_rpl_ulp(abort_rpl, f->tid); + t4_ofld_send(adapter, skb); + return 0; +} + /* Check a delete filter request for validity and send it to the hardware. * Return 0 on success, an error number otherwise. We attach any provided * filter operation context to the internal filter specification in order to * facilitate signaling completion of the operation. 
*/ int __cxgb4_del_filter(struct net_device *dev, int filter_id, + struct ch_filter_specification *fs, struct filter_ctx *ctx) { struct adapter *adapter = netdev2adap(dev); @@ -1123,6 +1238,14 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id, unsigned int max_fidx; int ret; + if (fs && fs->hash) { + if (is_hashfilter(adapter)) + return cxgb4_del_hash_filter(dev, filter_id, ctx); + netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n", + __func__); + return -EINVAL; + } + max_fidx = adapter->tids.nftids; if (filter_id != (max_fidx + adapter->tids.nsftids - 1) && filter_id >= max_fidx) @@ -1173,18 +1296,19 @@ out: return ret; } -int cxgb4_del_filter(struct net_device *dev, int filter_id) +int cxgb4_del_filter(struct net_device *dev, int filter_id, + struct ch_filter_specification *fs) { struct filter_ctx ctx; int ret; /* If we are shutting down the adapter do not wait for completion */ if (netdev2adap(dev)->flags & SHUTTING_DOWN) - return __cxgb4_del_filter(dev, filter_id, NULL); + return __cxgb4_del_filter(dev, filter_id, fs, NULL); init_completion(&ctx.completion); - ret = __cxgb4_del_filter(dev, filter_id, &ctx); + ret = __cxgb4_del_filter(dev, filter_id, fs, &ctx); if (ret) goto out; @@ -1258,6 +1382,35 @@ static int configure_filter_tcb(struct adapter *adap, unsigned int tid, return 0; } +void hash_del_filter_rpl(struct adapter *adap, + const struct cpl_abort_rpl_rss *rpl) +{ + unsigned int status = rpl->status; + struct tid_info *t = &adap->tids; + unsigned int tid = GET_TID(rpl); + struct filter_ctx *ctx = NULL; + struct filter_entry *f; + + dev_dbg(adap->pdev_dev, "%s: status = %u; tid = %u\n", + __func__, status, tid); + + f = lookup_tid(t, tid); + if (!f) { + dev_err(adap->pdev_dev, "%s:could not find filter entry", + __func__); + return; + } + ctx = f->ctx; + f->ctx = NULL; + clear_filter(adap, f); + cxgb4_remove_tid(t, 0, tid, 0); + kfree(f); + if (ctx) { + ctx->result = 0; + complete(&ctx->completion); + } +} + void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl) { unsigned int ftid = TID_TID_G(AOPEN_ATID_G(ntohl(rpl->atid_status))); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h index 7480d65550a8..9475abd3384e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h @@ -41,6 +41,8 @@ void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl); void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl); +void hash_del_filter_rpl(struct adapter *adap, + const struct cpl_abort_rpl_rss *rpl); void clear_filter(struct adapter *adap, struct filter_entry *f); int set_filter_wr(struct adapter *adapter, int fidx); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 77b4bd958748..35709c7f7c5b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -576,6 +576,10 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, const struct cpl_act_open_rpl *p = (void *)rsp; hash_filter_rpl(q->adap, p); + } else if (opcode == CPL_ABORT_RPL_RSS) { + const struct cpl_abort_rpl_rss *p = (void *)rsp; + + hash_del_filter_rpl(q->adap, p); } else dev_err(q->adap->pdev_dev, "unexpected CPL %#x on FW event queue\n", opcode); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c 
b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 614db014ef18..ed377e2e9f8a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -730,7 +730,7 @@ int cxgb4_tc_flower_destroy(struct net_device *dev, if (!ch_flower) return -ENOENT; - ret = cxgb4_del_filter(dev, ch_flower->filter_id); + ret = cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs); if (ret) goto err; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c index 48970ba08bdc..cd0cd13a964d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c @@ -380,7 +380,7 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) return -EINVAL; } - ret = cxgb4_del_filter(dev, filter_id); + ret = cxgb4_del_filter(dev, filter_id, NULL); if (ret) goto out; @@ -399,7 +399,7 @@ int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) if (!test_bit(j, link->tid_map)) continue; - ret = __cxgb4_del_filter(dev, j, NULL); + ret = __cxgb4_del_filter(dev, j, NULL, NULL); if (ret) goto out; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 52324c77a4fe..a1c850861cbf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -217,10 +217,12 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id, struct ch_filter_specification *fs, struct filter_ctx *ctx); int __cxgb4_del_filter(struct net_device *dev, int filter_id, + struct ch_filter_specification *fs, struct filter_ctx *ctx); int cxgb4_set_filter(struct net_device *dev, int filter_id, struct ch_filter_specification *fs); -int cxgb4_del_filter(struct net_device *dev, int filter_id); +int cxgb4_del_filter(struct net_device *dev, int filter_id, + struct ch_filter_specification *fs); int cxgb4_get_filter_counters(struct net_device *dev, unsigned int fidx, u64 *hitcnt, u64 *bytecnt); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h index c7201eb7b14c..3297ce025e8b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h @@ -46,6 +46,11 @@ #define TF_CCTRL_CWR_S 61 #define TF_CCTRL_RFR_S 62 +#define TCB_RSS_INFO_W 3 +#define TCB_RSS_INFO_S 0 +#define TCB_RSS_INFO_M 0x3ffULL +#define TCB_RSS_INFO_V(x) ((x) << TCB_RSS_INFO_S) + #define TCB_TIMESTAMP_W 5 #define TCB_TIMESTAMP_S 0 #define TCB_TIMESTAMP_M 0xffffffffULL -- cgit v1.2.3 From 9d922d4b016d3d7908dd70112aaf46a38313d866 Mon Sep 17 00:00:00 2001 From: Kumar Sanghvi Date: Wed, 1 Nov 2017 08:53:03 +0530 Subject: cxgb4: add support to retrieve stats for hash filters Add support to retrieve the packet count and byte count for hash filters, by retrieving the filter entry appropriately based on whether the request is for a hash filter or not. Signed-off-by: Kumar Sanghvi Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S.
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 30 ++++++++++++++-------- .../net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 6 +++-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 2 +- 3 files changed, 25 insertions(+), 13 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index eb6ba9824501..9b3ff6209eb5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -341,7 +341,7 @@ static int get_filter_steerq(struct net_device *dev, } static int get_filter_count(struct adapter *adapter, unsigned int fidx, - u64 *pkts, u64 *bytes) + u64 *pkts, u64 *bytes, bool hash) { unsigned int tcb_base, tcbaddr; unsigned int word_offset; @@ -350,14 +350,24 @@ static int get_filter_count(struct adapter *adapter, unsigned int fidx, int ret; tcb_base = t4_read_reg(adapter, TP_CMM_TCB_BASE_A); - if ((fidx != (adapter->tids.nftids + adapter->tids.nsftids - 1)) && - fidx >= adapter->tids.nftids) - return -E2BIG; - - f = &adapter->tids.ftid_tab[fidx]; - if (!f->valid) - return -EINVAL; + if (is_hashfilter(adapter) && hash) { + if (fidx < adapter->tids.ntids) { + f = adapter->tids.tid_tab[fidx]; + if (!f) + return -EINVAL; + } else { + return -E2BIG; + } + } else { + if ((fidx != (adapter->tids.nftids + + adapter->tids.nsftids - 1)) && + fidx >= adapter->tids.nftids) + return -E2BIG; + f = &adapter->tids.ftid_tab[fidx]; + if (!f->valid) + return -EINVAL; + } tcbaddr = tcb_base + f->tid * TCB_SIZE; spin_lock(&adapter->win0_lock); @@ -409,11 +419,11 @@ out: } int cxgb4_get_filter_counters(struct net_device *dev, unsigned int fidx, - u64 *hitcnt, u64 *bytecnt) + u64 *hitcnt, u64 *bytecnt, bool hash) { struct adapter *adapter = netdev2adap(dev); - return get_filter_count(adapter, fidx, hitcnt, bytecnt); + return get_filter_count(adapter, fidx, hitcnt, bytecnt, hash); } int cxgb4_get_free_ftid(struct net_device *dev, int family) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index ed377e2e9f8a..a26acd183eef 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -755,7 +755,8 @@ static void ch_flower_stats_cb(struct timer_list *t) hash_for_each_rcu(adap->flower_anymatch_tbl, i, flower_entry, link) { ret = cxgb4_get_filter_counters(adap->port[0], flower_entry->filter_id, - &packets, &bytes); + &packets, &bytes, + flower_entry->fs.hash); if (!ret) { spin_lock(&flower_entry->lock); ofld_stats = &flower_entry->stats; @@ -788,7 +789,8 @@ int cxgb4_tc_flower_stats(struct net_device *dev, } ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id, - &packets, &bytes); + &packets, &bytes, + ch_flower->fs.hash); if (ret < 0) goto err; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index a1c850861cbf..08e709ab6dd4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -224,7 +224,7 @@ int cxgb4_set_filter(struct net_device *dev, int filter_id, int cxgb4_del_filter(struct net_device *dev, int filter_id, struct ch_filter_specification *fs); int cxgb4_get_filter_counters(struct net_device *dev, unsigned int fidx, - u64 *hitcnt, u64 *bytecnt); + u64 *hitcnt, u64 *bytecnt, bool hash); static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue) { -- cgit v1.2.3 From 
79e6d46a65abfd721de378bf496833a04ea10afe Mon Sep 17 00:00:00 2001 From: Kumar Sanghvi Date: Wed, 1 Nov 2017 08:53:04 +0530 Subject: cxgb4: convert flower table to use rhashtable T6 supports ~500K hash filters and can theoretically climb up to ~1 million hash filters. Preallocated hash table is not efficient in terms of memory usage. So, use rhashtable instead which gives the flexibility to grow based on usage. Signed-off-by: Kumar Sanghvi Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 4 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 4 +- .../net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 106 ++++++++++++++------- .../net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h | 4 +- 4 files changed, 82 insertions(+), 36 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index bb7f0e4c9a81..0c83ceb5a1a6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -918,8 +918,10 @@ struct adapter { struct chcr_stats_debug chcr_stats; /* TC flower offload */ - DECLARE_HASHTABLE(flower_anymatch_tbl, 9); + struct rhashtable flower_tbl; + struct rhashtable_params flower_ht_params; struct timer_list flower_stats_timer; + struct work_struct flower_stats_work; /* Ethtool Dump */ struct ethtool_dump eth_dump; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 35709c7f7c5b..8fd41917c07a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -5182,7 +5182,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev_warn(&pdev->dev, "could not offload tc u32, continuing\n"); - cxgb4_init_tc_flower(adapter); + if (cxgb4_init_tc_flower(adapter)) + dev_warn(&pdev->dev, + "could not offload tc flower, continuing\n"); } if (is_offload(adapter) || is_hashfilter(adapter)) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index a26acd183eef..3953bc1fdc20 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -74,13 +74,8 @@ static struct ch_tc_flower_entry *allocate_flower_entry(void) static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap, unsigned long flower_cookie) { - struct ch_tc_flower_entry *flower_entry; - - hash_for_each_possible_rcu(adap->flower_anymatch_tbl, flower_entry, - link, flower_cookie) - if (flower_entry->tc_flower_cookie == flower_cookie) - return flower_entry; - return NULL; + return rhashtable_lookup_fast(&adap->flower_tbl, &flower_cookie, + adap->flower_ht_params); } static void cxgb4_process_flow_match(struct net_device *dev, @@ -707,12 +702,17 @@ int cxgb4_tc_flower_replace(struct net_device *dev, goto free_entry; } - INIT_HLIST_NODE(&ch_flower->link); ch_flower->tc_flower_cookie = cls->cookie; ch_flower->filter_id = ctx.tid; - hash_add_rcu(adap->flower_anymatch_tbl, &ch_flower->link, cls->cookie); + ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node, + adap->flower_ht_params); + if (ret) + goto del_filter; - return ret; + return 0; + +del_filter: + cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs); free_entry: kfree(ch_flower); @@ -734,44 +734,66 @@ int cxgb4_tc_flower_destroy(struct net_device *dev, if (ret) goto err; - 
hash_del_rcu(&ch_flower->link); + ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node, + adap->flower_ht_params); + if (ret) { + netdev_err(dev, "Flow remove from rhashtable failed"); + goto err; + } kfree_rcu(ch_flower, rcu); err: return ret; } -static void ch_flower_stats_cb(struct timer_list *t) +static void ch_flower_stats_handler(struct work_struct *work) { - struct adapter *adap = from_timer(adap, t, flower_stats_timer); + struct adapter *adap = container_of(work, struct adapter, + flower_stats_work); struct ch_tc_flower_entry *flower_entry; struct ch_tc_flower_stats *ofld_stats; - unsigned int i; + struct rhashtable_iter iter; u64 packets; u64 bytes; int ret; - rcu_read_lock(); - hash_for_each_rcu(adap->flower_anymatch_tbl, i, flower_entry, link) { - ret = cxgb4_get_filter_counters(adap->port[0], - flower_entry->filter_id, - &packets, &bytes, - flower_entry->fs.hash); - if (!ret) { - spin_lock(&flower_entry->lock); - ofld_stats = &flower_entry->stats; - - if (ofld_stats->prev_packet_count != packets) { - ofld_stats->prev_packet_count = packets; - ofld_stats->last_used = jiffies; + rhashtable_walk_enter(&adap->flower_tbl, &iter); + do { + flower_entry = ERR_PTR(rhashtable_walk_start(&iter)); + if (IS_ERR(flower_entry)) + goto walk_stop; + + while ((flower_entry = rhashtable_walk_next(&iter)) && + !IS_ERR(flower_entry)) { + ret = cxgb4_get_filter_counters(adap->port[0], + flower_entry->filter_id, + &packets, &bytes, + flower_entry->fs.hash); + if (!ret) { + spin_lock(&flower_entry->lock); + ofld_stats = &flower_entry->stats; + + if (ofld_stats->prev_packet_count != packets) { + ofld_stats->prev_packet_count = packets; + ofld_stats->last_used = jiffies; + } + spin_unlock(&flower_entry->lock); } - spin_unlock(&flower_entry->lock); } - } - rcu_read_unlock(); +walk_stop: + rhashtable_walk_stop(&iter); + } while (flower_entry == ERR_PTR(-EAGAIN)); + rhashtable_walk_exit(&iter); mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD); } +static void ch_flower_stats_cb(struct timer_list *t) +{ + struct adapter *adap = from_timer(adap, t, flower_stats_timer); + + schedule_work(&adap->flower_stats_work); +} + int cxgb4_tc_flower_stats(struct net_device *dev, struct tc_cls_flower_offload *cls) { @@ -814,15 +836,35 @@ err: return ret; } -void cxgb4_init_tc_flower(struct adapter *adap) +static const struct rhashtable_params cxgb4_tc_flower_ht_params = { + .nelem_hint = 384, + .head_offset = offsetof(struct ch_tc_flower_entry, node), + .key_offset = offsetof(struct ch_tc_flower_entry, tc_flower_cookie), + .key_len = sizeof(((struct ch_tc_flower_entry *)0)->tc_flower_cookie), + .max_size = 524288, + .min_size = 512, + .automatic_shrinking = true +}; + +int cxgb4_init_tc_flower(struct adapter *adap) { - hash_init(adap->flower_anymatch_tbl); + int ret; + + adap->flower_ht_params = cxgb4_tc_flower_ht_params; + ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params); + if (ret) + return ret; + + INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler); timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0); mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD); + return 0; } void cxgb4_cleanup_tc_flower(struct adapter *adap) { if (adap->flower_stats_timer.function) del_timer_sync(&adap->flower_stats_timer); + cancel_work_sync(&adap->flower_stats_work); + rhashtable_destroy(&adap->flower_tbl); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h index 202d5c9ec303..050c8a50ae41 
100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h @@ -48,7 +48,7 @@ struct ch_tc_flower_entry { struct ch_filter_specification fs; struct ch_tc_flower_stats stats; unsigned long tc_flower_cookie; - struct hlist_node link; + struct rhash_head node; struct rcu_head rcu; spinlock_t lock; /* lock for stats */ u32 filter_id; @@ -115,6 +115,6 @@ int cxgb4_tc_flower_destroy(struct net_device *dev, int cxgb4_tc_flower_stats(struct net_device *dev, struct tc_cls_flower_offload *cls); -void cxgb4_init_tc_flower(struct adapter *adap); +int cxgb4_init_tc_flower(struct adapter *adap); void cxgb4_cleanup_tc_flower(struct adapter *adap); #endif /* __CXGB4_TC_FLOWER_H */ -- cgit v1.2.3 From 3eb8b62d5a260fcd9683b0ce89beb3b28b12a304 Mon Sep 17 00:00:00 2001 From: Kumar Sanghvi Date: Wed, 1 Nov 2017 08:53:05 +0530 Subject: cxgb4: add support to create hash-filters via tc-flower offload Determine whether the flow classifies as exact-match with respect to 4-tuple and configured tuple mask in hw. If successfully classified as exact-match, offload the flow as hash-filter in hw. Signed-off-by: Kumar Sanghvi Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 148 +++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h | 2 + .../net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 16 ++- 3 files changed, 161 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 9b3ff6209eb5..abab67d52edb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -31,6 +31,7 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ +#include #include "cxgb4.h" #include "t4_regs.h" @@ -765,6 +766,153 @@ static void fill_default_mask(struct ch_filter_specification *fs) fs->mask.fport = ~0; } +static bool is_addr_all_mask(u8 *ipmask, int family) +{ + if (family == AF_INET) { + struct in_addr *addr; + + addr = (struct in_addr *)ipmask; + if (addr->s_addr == 0xffffffff) + return true; + } else if (family == AF_INET6) { + struct in6_addr *addr6; + + addr6 = (struct in6_addr *)ipmask; + if (addr6->s6_addr32[0] == 0xffffffff && + addr6->s6_addr32[1] == 0xffffffff && + addr6->s6_addr32[2] == 0xffffffff && + addr6->s6_addr32[3] == 0xffffffff) + return true; + } + return false; +} + +static bool is_inaddr_any(u8 *ip, int family) +{ + int addr_type; + + if (family == AF_INET) { + struct in_addr *addr; + + addr = (struct in_addr *)ip; + if (addr->s_addr == htonl(INADDR_ANY)) + return true; + } else if (family == AF_INET6) { + struct in6_addr *addr6; + + addr6 = (struct in6_addr *)ip; + addr_type = ipv6_addr_type((const struct in6_addr *) + &addr6); + if (addr_type == IPV6_ADDR_ANY) + return true; + } + return false; +} + +bool is_filter_exact_match(struct adapter *adap, + struct ch_filter_specification *fs) +{ + struct tp_params *tp = &adap->params.tp; + u64 hash_filter_mask = tp->hash_filter_mask; + u32 mask; + + if (!is_hashfilter(adap)) + return false; + + if (fs->type) { + if (is_inaddr_any(fs->val.fip, AF_INET6) || + !is_addr_all_mask(fs->mask.fip, AF_INET6)) + return false; + + if (is_inaddr_any(fs->val.lip, AF_INET6) || + !is_addr_all_mask(fs->mask.lip, AF_INET6)) + return false; + } else { + if (is_inaddr_any(fs->val.fip, AF_INET) || + !is_addr_all_mask(fs->mask.fip, AF_INET)) + return false; + + if (is_inaddr_any(fs->val.lip, AF_INET) || + !is_addr_all_mask(fs->mask.lip, AF_INET)) + return false; + } + + if (!fs->val.lport || fs->mask.lport != 0xffff) + return false; + + if (!fs->val.fport || fs->mask.fport != 0xffff) + return false; + + if (tp->fcoe_shift >= 0) { + mask = (hash_filter_mask >> tp->fcoe_shift) & FT_FCOE_W; + if (mask && !fs->mask.fcoe) + return false; + } + + if (tp->port_shift >= 0) { + mask = (hash_filter_mask >> tp->port_shift) & FT_PORT_W; + if (mask && !fs->mask.iport) + return false; + } + + if (tp->vnic_shift >= 0) { + mask = (hash_filter_mask >> tp->vnic_shift) & FT_VNIC_ID_W; + + if ((adap->params.tp.ingress_config & VNIC_F)) { + if (mask && !fs->mask.pfvf_vld) + return false; + } else { + if (mask && !fs->mask.ovlan_vld) + return false; + } + } + + if (tp->vlan_shift >= 0) { + mask = (hash_filter_mask >> tp->vlan_shift) & FT_VLAN_W; + if (mask && !fs->mask.ivlan) + return false; + } + + if (tp->tos_shift >= 0) { + mask = (hash_filter_mask >> tp->tos_shift) & FT_TOS_W; + if (mask && !fs->mask.tos) + return false; + } + + if (tp->protocol_shift >= 0) { + mask = (hash_filter_mask >> tp->protocol_shift) & FT_PROTOCOL_W; + if (mask && !fs->mask.proto) + return false; + } + + if (tp->ethertype_shift >= 0) { + mask = (hash_filter_mask >> tp->ethertype_shift) & + FT_ETHERTYPE_W; + if (mask && !fs->mask.ethtype) + return false; + } + + if (tp->macmatch_shift >= 0) { + mask = (hash_filter_mask >> tp->macmatch_shift) & FT_MACMATCH_W; + if (mask && !fs->mask.macidx) + return false; + } + + if (tp->matchtype_shift >= 0) { + mask = (hash_filter_mask >> tp->matchtype_shift) & + FT_MPSHITTYPE_W; + if (mask && !fs->mask.matchtype) + return false; + } + if (tp->frag_shift >= 0) { + mask = (hash_filter_mask >> tp->frag_shift) & + FT_FRAGMENTATION_W; + if (mask && !fs->mask.frag) + return false; + } + return 
true; +} + static u64 hash_filter_ntuple(struct ch_filter_specification *fs, struct net_device *dev) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h index 9475abd3384e..8db5fca6dcc9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h @@ -51,4 +51,6 @@ int delete_filter(struct adapter *adapter, unsigned int fidx); int writable_filter(struct filter_entry *f); void clear_all_filters(struct adapter *adapter); int init_hash_filter(struct adapter *adap); +bool is_filter_exact_match(struct adapter *adap, + struct ch_filter_specification *fs); #endif /* __CXGB4_FILTER_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c index 3953bc1fdc20..d4a548a6a55c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c @@ -38,6 +38,7 @@ #include #include "cxgb4.h" +#include "cxgb4_filter.h" #include "cxgb4_tc_flower.h" #define STATS_CHECK_PERIOD (HZ / 2) @@ -672,11 +673,16 @@ int cxgb4_tc_flower_replace(struct net_device *dev, cxgb4_process_flow_match(dev, cls, fs); cxgb4_process_flow_actions(dev, cls, fs); - fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET); - if (fidx < 0) { - netdev_err(dev, "%s: No fidx for offload.\n", __func__); - ret = -ENOMEM; - goto free_entry; + fs->hash = is_filter_exact_match(adap, fs); + if (fs->hash) { + fidx = 0; + } else { + fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET); + if (fidx < 0) { + netdev_err(dev, "%s: No fidx for offload.\n", __func__); + ret = -ENOMEM; + goto free_entry; + } } init_completion(&ctx.completion); -- cgit v1.2.3 From e6cdfcc581866625980a89391be4e6a8b379d0c5 Mon Sep 17 00:00:00 2001 From: Parvi Kaustubhi Date: Wed, 1 Nov 2017 08:44:46 -0700 Subject: enic: reset fetch index Since we are allowing rx ring size modification, reset the fetch index every time. Otherwise it could have a stale value that can lead to a NULL pointer dereference. Signed-off-by: Govindarajulu Varadarajan Signed-off-by: Parvi Kaustubhi Signed-off-by: David S.
Miller --- drivers/net/ethernet/cisco/enic/vnic_rq.c | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c index 36bc2c71fba9..f8aa326d1d58 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c @@ -139,20 +139,8 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, unsigned int error_interrupt_enable, unsigned int error_interrupt_offset) { - u32 fetch_index = 0; - - /* Use current fetch_index as the ring starting point */ - fetch_index = ioread32(&rq->ctrl->fetch_index); - - if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ - /* Hardware surprise removal: reset fetch_index */ - fetch_index = 0; - } - - vnic_rq_init_start(rq, cq_index, - fetch_index, fetch_index, - error_interrupt_enable, - error_interrupt_offset); + vnic_rq_init_start(rq, cq_index, 0, 0, error_interrupt_enable, + error_interrupt_offset); } unsigned int vnic_rq_error_status(struct vnic_rq *rq) -- cgit v1.2.3 From ed519b7488a42ce549ef7eae8dd13e043dde10a4 Mon Sep 17 00:00:00 2001 From: Parvi Kaustubhi Date: Wed, 1 Nov 2017 08:44:47 -0700 Subject: enic: Add support for 'ethtool -g/-G' Add support for displaying and modifying rx and tx ring sizes using ethtool. Also, increase the driver version to 2.3.0.45. Signed-off-by: Parvi Kaustubhi Signed-off-by: David S. Miller --- drivers/net/ethernet/cisco/enic/enic.h | 2 +- drivers/net/ethernet/cisco/enic/enic_ethtool.c | 77 ++++++++++++++++++++++++++ 2 files changed, 78 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index ba032ac9ae86..6a9527004cb1 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -33,7 +33,7 @@ #define DRV_NAME "enic" #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" -#define DRV_VERSION "2.3.0.42" +#define DRV_VERSION "2.3.0.45" #define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc" #define ENIC_BARS_MAX 6 diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index fd3980cc1e34..462d0ce51240 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -176,6 +176,81 @@ static void enic_get_strings(struct net_device *netdev, u32 stringset, } } +static void enic_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct enic *enic = netdev_priv(netdev); + struct vnic_enet_config *c = &enic->config; + + ring->rx_max_pending = ENIC_MAX_RQ_DESCS; + ring->rx_pending = c->rq_desc_count; + ring->tx_max_pending = ENIC_MAX_WQ_DESCS; + ring->tx_pending = c->wq_desc_count; +} + +static int enic_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct enic *enic = netdev_priv(netdev); + struct vnic_enet_config *c = &enic->config; + int running = netif_running(netdev); + unsigned int rx_pending; + unsigned int tx_pending; + int err = 0; + + if (ring->rx_mini_max_pending || ring->rx_mini_pending) { + netdev_info(netdev, + "modifying mini ring params is not supported"); + return -EINVAL; + } + if (ring->rx_jumbo_max_pending || ring->rx_jumbo_pending) { + netdev_info(netdev, + "modifying jumbo ring params is not supported"); + return -EINVAL; + } + rx_pending = c->rq_desc_count; + tx_pending = c->wq_desc_count; + if (ring->rx_pending >
ENIC_MAX_RQ_DESCS || + ring->rx_pending < ENIC_MIN_RQ_DESCS) { + netdev_info(netdev, "rx pending (%u) not in range [%u,%u]", + ring->rx_pending, ENIC_MIN_RQ_DESCS, + ENIC_MAX_RQ_DESCS); + return -EINVAL; + } + if (ring->tx_pending > ENIC_MAX_WQ_DESCS || + ring->tx_pending < ENIC_MIN_WQ_DESCS) { + netdev_info(netdev, "tx pending (%u) not in range [%u,%u]", + ring->tx_pending, ENIC_MIN_WQ_DESCS, + ENIC_MAX_WQ_DESCS); + return -EINVAL; + } + if (running) + dev_close(netdev); + c->rq_desc_count = + ring->rx_pending & 0xffffffe0; /* must be aligned to groups of 32 */ + c->wq_desc_count = + ring->tx_pending & 0xffffffe0; /* must be aligned to groups of 32 */ + enic_free_vnic_resources(enic); + err = enic_alloc_vnic_resources(enic); + if (err) { + netdev_err(netdev, + "Failed to alloc vNIC resources, aborting\n"); + enic_free_vnic_resources(enic); + goto err_out; + } + enic_init_vnic_resources(enic); + if (running) { + err = dev_open(netdev); + if (err) + goto err_out; + } + return 0; +err_out: + c->rq_desc_count = rx_pending; + c->wq_desc_count = tx_pending; + return err; +} + static int enic_get_sset_count(struct net_device *netdev, int sset) { switch (sset) { @@ -509,6 +584,8 @@ static const struct ethtool_ops enic_ethtool_ops = { .set_msglevel = enic_set_msglevel, .get_link = ethtool_op_get_link, .get_strings = enic_get_strings, + .get_ringparam = enic_get_ringparam, + .set_ringparam = enic_set_ringparam, .get_sset_count = enic_get_sset_count, .get_ethtool_stats = enic_get_ethtool_stats, .get_coalesce = enic_get_coalesce, -- cgit v1.2.3 From 5618c8e24ad6ab09282f6583a228d80c1fd14c65 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 1 Nov 2017 08:57:37 +0000 Subject: net: dl2k: remove redundant re-assignment to np The pointer np is initialized and then re-assigned the same value a few lines later. Remove the redundant duplicated assignment. Cleans up clang warning: drivers/net/ethernet/dlink/dl2k.c:314:25: warning: Value stored to 'np' during its initialization is never read Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/dlink/dl2k.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index a2f6758d38dd..f0536b16b3c3 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -313,7 +313,7 @@ find_miiphy (struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); int i, phy_found = 0; - np = netdev_priv(dev); + np->phy_addr = 1; for (i = 31; i >= 0; i--) { -- cgit v1.2.3 From a666960d182cfb7074640bdb004633ffb2e58f26 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 1 Nov 2017 09:09:13 +0000 Subject: liquidio: remove redundant setting of inst_processed to zero The zero value assigned to inst_processed at the end of each iteration of the do-while loop is overwritten on the next iteration and hence it is a redundant assignment and can be removed. Cleans up clang warning: drivers/net/ethernet/cavium/liquidio/request_manager.c:480:3: warning: Value stored to 'inst_processed' is never read Signed-off-by: Colin Ian King Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/request_manager.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index a10459742ae4..e07d2093b971 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -489,8 +489,6 @@ octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, } tot_inst_processed += inst_processed; - inst_processed = 0; - } while (tot_inst_processed < napi_budget); if (napi_budget && (tot_inst_processed >= napi_budget)) -- cgit v1.2.3 From ad88d35a6216c54a005480d2693ed0a888ac1b7c Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 1 Nov 2017 10:17:15 +0000 Subject: net: hns3: remove a couple of redundant assignments The assignment to kinfo is redundant as this is a duplicate of the initialization of kinfo a few lines earlier, so it can be removed. The assignment to v_tc_info is never read, so this variable is redundant and can be removed completely. Cleans up two clang warnings: drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c:433:34: warning: Value stored to 'kinfo' during its initialization is never read drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c:775:3: warning: Value stored to 'v_tc_info' is never read Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 1ae6eae82eb3..7bfa2e5497cb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -434,7 +434,6 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) struct hclge_dev *hdev = vport->back; u8 i; - kinfo = &vport->nic.kinfo; vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit; kinfo->num_tc = min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc); @@ -766,13 +765,11 @@ static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport) { struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; struct hclge_dev *hdev = vport->back; - struct hnae3_tc_info *v_tc_info; u8 ir_u, ir_b, ir_s; u32 i; int ret; for (i = 0; i < kinfo->num_tc; i++) { - v_tc_info = &kinfo->tc_info[i]; ret = hclge_shaper_para_calc( hdev->tm_info.tc_info[i].bw_limit, HCLGE_SHAPER_LVL_QSET, -- cgit v1.2.3 From 44ae12a768b7212976a362c590075716a77e8f28 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 1 Nov 2017 11:47:39 +0100 Subject: net: sched: move the can_offload check from binding phase to rule insertion phase This restores the original behaviour before the block callbacks were introduced. Allow the drivers to always bind the block, no matter whether the NETIF_F_HW_TC feature is on or off. Move the check to the block callback which is called for rule insertion. Reported-by: Alexander Duyck Signed-off-by: Jiri Pirko Signed-off-by: David S.
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 2 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 3 +++ drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 3 +++ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 3 +++ drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 3 +++ drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 3 +++ drivers/net/ethernet/netronome/nfp/bpf/main.c | 3 +++ drivers/net/ethernet/netronome/nfp/flower/offload.c | 3 +++ net/dsa/slave.c | 3 +++ net/sched/cls_api.c | 2 +- 11 files changed, 27 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 5ce950629ce9..c3dfaa5151aa 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7347,7 +7347,7 @@ static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data, { struct bnxt *bp = cb_priv; - if (!bnxt_tc_flower_enabled(bp)) + if (!bnxt_tc_flower_enabled(bp) || !tc_can_offload(bp->dev)) return -EOPNOTSUPP; switch (type) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index c1761ed5785e..b6aa7db99705 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -124,7 +124,7 @@ static int bnxt_vf_rep_setup_tc_block_cb(enum tc_setup_type type, struct bnxt *bp = vf_rep->bp; int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid; - if (!bnxt_tc_flower_enabled(vf_rep->bp)) + if (!bnxt_tc_flower_enabled(vf_rep->bp) || !tc_can_offload(bp->dev)) return -EOPNOTSUPP; switch (type) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 8fd41917c07a..6f900ffe25cc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2943,6 +2943,9 @@ static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data, return -EINVAL; } + if (!tc_can_offload(dev)) + return -EOPNOTSUPP; + switch (type) { case TC_SETUP_CLSU32: return cxgb_setup_tc_cls_u32(dev, type_data); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 38bd2e339e48..507977994a03 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9386,6 +9386,9 @@ static int ixgbe_setup_tc_block_cb(enum tc_setup_type type, void *type_data, { struct ixgbe_adapter *adapter = cb_priv; + if (!tc_can_offload(adapter->netdev)) + return -EOPNOTSUPP; + switch (type) { case TC_SETUP_CLSU32: return ixgbe_setup_tc_cls_u32(adapter, type_data); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 560b208c0483..28ae00b3eb88 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3106,6 +3106,9 @@ int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, { struct mlx5e_priv *priv = cb_priv; + if (!tc_can_offload(priv->netdev)) + return -EOPNOTSUPP; + switch (type) { case TC_SETUP_CLSFLOWER: return mlx5e_setup_tc_cls_flower(priv, type_data); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 0edb7065d811..2c43606c26b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -682,6 +682,9 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data, { struct mlx5e_priv *priv = cb_priv; + if (!tc_can_offload(priv->netdev)) + return -EOPNOTSUPP; + switch (type) { case TC_SETUP_CLSFLOWER: return mlx5e_rep_setup_tc_cls_flower(priv, type_data); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 021926974da6..3f4be9556e56 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1738,6 +1738,9 @@ static int mlxsw_sp_setup_tc_block_cb(enum tc_setup_type type, void *type_data, { struct mlxsw_sp_port *mlxsw_sp_port = cb_priv; + if (!tc_can_offload(mlxsw_sp_port->dev)) + return -EOPNOTSUPP; + switch (type) { case TC_SETUP_CLSMATCHALL: return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index f15a186f6c87..04424db24b80 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -121,6 +121,9 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type, struct tc_cls_bpf_offload *cls_bpf = type_data; struct nfp_net *nn = cb_priv; + if (!tc_can_offload(nn->dp.netdev)) + return -EOPNOTSUPP; + switch (type) { case TC_SETUP_CLSBPF: if (!nfp_net_ebpf_capable(nn) || diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index c47753fdb55b..7c6cab176293 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -470,6 +470,9 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type, { struct nfp_net *nn = cb_priv; + if (!tc_can_offload(nn->dp.netdev)) + return -EOPNOTSUPP; + switch (type) { case TC_SETUP_CLSFLOWER: return nfp_flower_repr_offload(nn->app, nn->port->netdev, diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 48b954a76b0d..9b75d0ac4092 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -799,6 +799,9 @@ static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data, { struct net_device *dev = cb_priv; + if (!tc_can_offload(dev)) + return -EOPNOTSUPP; + switch (type) { case TC_SETUP_CLSMATCHALL: return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress); diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 2c03fcbc7188..15e3216ef25d 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -256,7 +256,7 @@ static void tcf_block_offload_cmd(struct tcf_block *block, struct Qdisc *q, struct net_device *dev = q->dev_queue->dev; struct tc_block_offload bo = {}; - if (!tc_can_offload(dev)) + if (!dev->netdev_ops->ndo_setup_tc) return; bo.command = command; bo.binder_type = ei->binder_type; -- cgit v1.2.3 From a35c52b71580a1fb29df11270b1461f6d17f5670 Mon Sep 17 00:00:00 2001 From: yuan linyu Date: Wed, 1 Nov 2017 21:10:32 +0800 Subject: net: dpaa: fix maybe uninitialized var in dpaa_open() Signed-off-by: yuan linyu Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index a8d0be824149..68f0ac129ba4 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2468,7 +2468,8 @@ static int dpaa_open(struct net_device *net_dev) mac_dev = priv->mac_dev; dpaa_eth_napi_enable(priv); - if (dpaa_phy_init(net_dev)) + err = dpaa_phy_init(net_dev); + if (err) goto phy_init_failed; for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { -- cgit v1.2.3 From d7bcde417b6b2e6f99ed9c2c38a0771b66efb060 Mon Sep 17 00:00:00 2001 From: yuan linyu Date: Wed, 1 Nov 2017 21:11:11 +0800 Subject: net: dpaa: remove init which is already done in per-cpu allocation Signed-off-by: yuan linyu Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 68f0ac129ba4..969f6b12952e 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2669,7 +2669,6 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl) static int dpaa_eth_probe(struct platform_device *pdev) { struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL}; - struct dpaa_percpu_priv *percpu_priv; struct net_device *net_dev = NULL; struct dpaa_fq *dpaa_fq, *tmp; struct dpaa_priv *priv = NULL; @@ -2815,10 +2814,6 @@ static int dpaa_eth_probe(struct platform_device *pdev) err = -ENOMEM; goto free_dpaa_fqs; } - for_each_possible_cpu(i) { - percpu_priv = per_cpu_ptr(priv->percpu_priv, i); - memset(percpu_priv, 0, sizeof(*percpu_priv)); - } priv->num_tc = 1; netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM); -- cgit v1.2.3 From 5d42ced1950c7a7b5f5aa5c1c7e2c78dde9f8ca4 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Wed, 1 Nov 2017 10:38:24 -0700 Subject: nfp: bpf: rename ALU_OP_NEG to ALU_OP_NOT The current ALU_OP_NEG is Op encoding 0x4 for the NFP ALU instruction. It actually performs the "~B" operation, which is bitwise NOT. The name ALU_OP_NEG is misleading, as NEG is -B, which is not the same as ~B. Signed-off-by: Jiong Wang Reviewed-by: Jakub Kicinski Signed-off-by: David S.
Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 2 +- drivers/net/ethernet/netronome/nfp/nfp_asm.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index e7eeb7a07f81..369173100fcf 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -944,7 +944,7 @@ wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm) if (alu_op == ALU_OP_XOR) { if (!~imm) emit_alu(nfp_prog, reg_both(dst), reg_none(), - ALU_OP_NEG, reg_b(dst)); + ALU_OP_NOT, reg_b(dst)); if (!imm || !~imm) return; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h index f4d1df3a1925..74d0c11ab2f9 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h @@ -174,7 +174,7 @@ enum shf_sc { enum alu_op { ALU_OP_NONE = 0x00, ALU_OP_ADD = 0x01, - ALU_OP_NEG = 0x04, + ALU_OP_NOT = 0x04, ALU_OP_AND = 0x08, ALU_OP_SUB_C = 0x0d, ALU_OP_ADD_C = 0x11, -- cgit v1.2.3 From 254ef4d746878162bb095484fc4b53d713620c33 Mon Sep 17 00:00:00 2001 From: Jiong Wang Date: Wed, 1 Nov 2017 10:38:25 -0700 Subject: nfp: bpf: support [BPF_ALU | BPF_ALU64] | BPF_NEG This patch supports BPF_NEG under both BPF_ALU64 and BPF_ALU. LLVM has recently started generating it. NOTE: BPF_NEG takes a single operand, which is a register that serves as both input and output.
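For illustration only (a sketch, not code from this patch): on the 32-bit datapath, 64-bit negation is computed as 0 - x, i.e. a subtract on the low word followed by a subtract-with-borrow on the high word, which is what the neg_reg64() callback below emits via ALU_OP_SUB and ALU_OP_SUB_C. In plain C, with a hypothetical helper:

#include <stdint.h>

static void neg64(uint32_t *lo, uint32_t *hi)
{
	uint32_t borrow = (*lo != 0);	/* borrow out of 0 - lo */

	*lo = 0U - *lo;			/* ALU_OP_SUB on the low word */
	*hi = 0U - *hi - borrow;	/* ALU_OP_SUB_C on the high word */
}

For the 32-bit BPF_ALU variant, only the low word is negated and the high word is cleared to zero, matching neg_reg() below.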
Signed-off-by: Jiong Wang Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 369173100fcf..2609a2487100 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1218,6 +1218,18 @@ static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return 0; } +static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + const struct bpf_insn *insn = &meta->insn; + + emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), reg_imm(0), + ALU_OP_SUB, reg_b(insn->dst_reg * 2)); + emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), reg_imm(0), + ALU_OP_SUB_C, reg_b(insn->dst_reg * 2 + 1)); + + return 0; +} + static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; @@ -1338,6 +1350,16 @@ static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm); } +static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + u8 dst = meta->insn.dst_reg * 2; + + emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst)); + wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); + + return 0; +} + static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; @@ -1847,6 +1869,7 @@ static const instr_cb_t instr_cb[256] = { [BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64, [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64, [BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64, + [BPF_ALU64 | BPF_NEG] = neg_reg64, [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64, [BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64, [BPF_ALU | BPF_MOV | BPF_X] = mov_reg, @@ -1861,6 +1884,7 @@ static const instr_cb_t instr_cb[256] = { [BPF_ALU | BPF_ADD | BPF_K] = add_imm, [BPF_ALU | BPF_SUB | BPF_X] = sub_reg, [BPF_ALU | BPF_SUB | BPF_K] = sub_imm, + [BPF_ALU | BPF_NEG] = neg_reg, [BPF_ALU | BPF_LSH | BPF_K] = shl_imm, [BPF_ALU | BPF_END | BPF_X] = end_reg32, [BPF_LD | BPF_IMM | BPF_DW] = imm_ld8, -- cgit v1.2.3 From 3ded76a8ff53fd2a9b011e86c6f3588a984d432d Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 1 Nov 2017 11:29:47 -0700 Subject: net: systemport: Only inspect valid switch port & queues Esoteric board configurations where port 0 is not available would still make SYSTEMPORT inspect the switch port 0, queue 0, which, not being enabled, would cause transmit timeouts over time. Just ignore those unconfigured rings instead. Fixes: 84ff33eeb23d ("net: systemport: Establish DSA network device queue mapping") Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bcmsysport.c | 9 +++++++-- drivers/net/ethernet/broadcom/bcmsysport.h | 1 + 2 files changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index dcee843d05d7..e6da9b165bbe 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1420,8 +1420,12 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, /* Configure QID and port mapping */ reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index)); reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT); - reg |= ring->switch_queue & RING_QID_MASK; - reg |= ring->switch_port << RING_PORT_ID_SHIFT; + if (ring->inspect) { + reg |= ring->switch_queue & RING_QID_MASK; + reg |= ring->switch_port << RING_PORT_ID_SHIFT; + } else { + reg |= RING_IGNORE_STATUS; + } tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index)); tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index)); @@ -2108,6 +2112,7 @@ static int bcm_sysport_map_queues(struct net_device *dev, */ ring->switch_queue = q; ring->switch_port = port; + ring->inspect = true; priv->ring_map[q + port * num_tx_queues] = ring; /* Set all queues as being used now */ diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 82f70a6783cb..f5a984c1c986 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -714,6 +714,7 @@ struct bcm_sysport_tx_ring { unsigned long bytes; /* bytes statistics */ unsigned int switch_queue; /* switch port queue number */ unsigned int switch_port; /* switch port queue number */ + bool inspect; /* inspect switch port and queue */ }; /* Driver private structure */ -- cgit v1.2.3 From e20f469660ad526fcd9bf865e30ee491ae4e4a86 Mon Sep 17 00:00:00 2001 From: Vijaya Mohan Guvva Date: Wed, 1 Nov 2017 16:19:49 -0700 Subject: liquidio: synchronize VF representor names with NIC firmware LiquidIO firmware supports a vswitch that needs to know the names of the VF representors in the host to maintain compatibility for direct programming using external Openflow agents. So, for each VF representor, send its name to the firmware when it gets registered and when its name changes. Signed-off-by: Vijaya Mohan Guvva Signed-off-by: Raghu Vatsavayi Signed-off-by: Felix Manlunas Signed-off-by: David S.
Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 15 +++++ drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c | 68 ++++++++++++++++++++++ drivers/net/ethernet/cavium/liquidio/lio_vf_rep.h | 2 + .../net/ethernet/cavium/liquidio/liquidio_common.h | 8 ++- 4 files changed, 92 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index f27f0afd0ecf..f05045a69dcc 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1639,6 +1639,10 @@ static void liquidio_remove(struct pci_dev *pdev) if (oct_dev->watchdog_task) kthread_stop(oct_dev->watchdog_task); + if (!oct_dev->octeon_id && + oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) + lio_vf_rep_modexit(); + if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP)) liquidio_stop_nic_module(oct_dev); @@ -4029,6 +4033,17 @@ static int liquidio_init_nic_module(struct octeon_device *oct) goto octnet_init_failure; } + /* Call vf_rep_modinit if the firmware is switchdev capable + * and do it from the first liquidio function probed. + */ + if (!oct->octeon_id && + oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) { + if (lio_vf_rep_modinit()) { + liquidio_stop_nic_module(oct); + goto octnet_init_failure; + } + } + liquidio_ptp_init(oct); dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n"); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c index de0c80d150f3..2adafa366d3f 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c @@ -625,3 +625,71 @@ lio_vf_rep_destroy(struct octeon_device *oct) oct->vf_rep_list.num_vfs = 0; } + +static int +lio_vf_rep_netdev_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + struct lio_vf_rep_desc *vf_rep; + struct lio_vf_rep_req rep_cfg; + struct octeon_device *oct; + int ret; + + switch (event) { + case NETDEV_REGISTER: + case NETDEV_CHANGENAME: + break; + + default: + return NOTIFY_DONE; + } + + if (ndev->netdev_ops != &lio_vf_rep_ndev_ops) + return NOTIFY_DONE; + + vf_rep = netdev_priv(ndev); + oct = vf_rep->oct; + + if (strlen(ndev->name) > LIO_IF_NAME_SIZE) { + dev_err(&oct->pci_dev->dev, + "Device name change sync failed as the size is > %d\n", + LIO_IF_NAME_SIZE); + return NOTIFY_DONE; + } + + memset(&rep_cfg, 0, sizeof(rep_cfg)); + rep_cfg.req_type = LIO_VF_REP_REQ_DEVNAME; + rep_cfg.ifidx = vf_rep->ifidx; + strncpy(rep_cfg.rep_name.name, ndev->name, LIO_IF_NAME_SIZE); + + ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, + sizeof(rep_cfg), NULL, 0); + if (ret) + dev_err(&oct->pci_dev->dev, + "vf_rep netdev name change failed with err %d\n", ret); + + return NOTIFY_DONE; +} + +static struct notifier_block lio_vf_rep_netdev_notifier = { + .notifier_call = lio_vf_rep_netdev_event, +}; + +int +lio_vf_rep_modinit(void) +{ + if (register_netdevice_notifier(&lio_vf_rep_netdev_notifier)) { + pr_err("netdev notifier registration failed\n"); + return -EFAULT; + } + + return 0; +} + +void +lio_vf_rep_modexit(void) +{ + if (unregister_netdevice_notifier(&lio_vf_rep_netdev_notifier)) + pr_err("netdev notifier unregister failed\n"); +} diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.h b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.h index 5a9ec9851426..bb3cedc63c63 100644 --- 
a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.h +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.h @@ -44,4 +44,6 @@ struct lio_vf_rep_sc_ctx { int lio_vf_rep_create(struct octeon_device *oct); void lio_vf_rep_destroy(struct octeon_device *oct); +int lio_vf_rep_modinit(void); +void lio_vf_rep_modexit(void); #endif diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 441cc78faff1..3bcdda85e360 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -928,7 +928,8 @@ enum lio_vf_rep_req_type { LIO_VF_REP_REQ_NONE, LIO_VF_REP_REQ_STATE, LIO_VF_REP_REQ_MTU, - LIO_VF_REP_REQ_STATS + LIO_VF_REP_REQ_STATS, + LIO_VF_REP_REQ_DEVNAME }; enum { @@ -936,12 +937,17 @@ enum { LIO_VF_REP_STATE_UP }; +#define LIO_IF_NAME_SIZE 16 struct lio_vf_rep_req { u8 req_type; u8 ifidx; u8 rsvd[6]; union { + struct lio_vf_rep_name { + char name[LIO_IF_NAME_SIZE]; + } rep_name; + struct lio_vf_rep_mtu { u32 mtu; u32 rsvd; -- cgit v1.2.3 From 2df7b2d20622f98bca2f001bb0e8d8bee01782ac Mon Sep 17 00:00:00 2001 From: John Hurley Date: Thu, 2 Nov 2017 01:31:29 -0700 Subject: nfp: flower: app should use struct nfp_repr Ensure priv netdev data in flower app is cast to nfp_repr and not nfp_net as in other apps. Fixes: 363fc53b8b58 ("nfp: flower: Convert ndo_setup_tc offloads to block callbacks") Signed-off-by: John Hurley Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/offload.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 7c6cab176293..95c2b9284857 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -468,14 +468,14 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) { - struct nfp_net *nn = cb_priv; + struct nfp_repr *repr = cb_priv; - if (!tc_can_offload(nn->dp.netdev)) + if (!tc_can_offload(repr->netdev)) return -EOPNOTSUPP; switch (type) { case TC_SETUP_CLSFLOWER: - return nfp_flower_repr_offload(nn->app, nn->port->netdev, + return nfp_flower_repr_offload(repr->app, repr->netdev, type_data); default: return -EOPNOTSUPP; @@ -485,7 +485,7 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type, static int nfp_flower_setup_tc_block(struct net_device *netdev, struct tc_block_offload *f) { - struct nfp_net *nn = netdev_priv(netdev); + struct nfp_repr *repr = netdev_priv(netdev); if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) return -EOPNOTSUPP; @@ -494,11 +494,11 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev, case TC_BLOCK_BIND: return tcf_block_cb_register(f->block, nfp_flower_setup_tc_block_cb, - nn, nn); + repr, repr); case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, nfp_flower_setup_tc_block_cb, - nn); + repr); return 0; default: return -EOPNOTSUPP; -- cgit v1.2.3 From dc4646a950153242313ed340f0a404de38b21d5c Mon Sep 17 00:00:00 2001 From: John Hurley Date: Thu, 2 Nov 2017 01:31:30 -0700 Subject: nfp: flower: vxlan - ensure no sleep in atomic context Functions called by the netevent notifier must be in atomic context. 
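As a brief illustration (hypothetical names, not code from this patch), the atomic-safe pattern relies on spin_lock_bh() and GFP_ATOMIC, neither of which can sleep, unlike mutex_lock() and GFP_KERNEL:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_route_entry {
	__be32 ipv4_addr;
	struct list_head list;
};

static DEFINE_SPINLOCK(demo_off_lock);	/* was a mutex */
static LIST_HEAD(demo_off_list);

static void demo_add_route(__be32 ipv4_addr)
{
	struct demo_route_entry *entry;

	/* GFP_ATOMIC: this may run from a netevent notifier */
	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return;
	entry->ipv4_addr = ipv4_addr;

	spin_lock_bh(&demo_off_lock);
	list_add_tail(&entry->list, &demo_off_list);
	spin_unlock_bh(&demo_off_lock);
}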
Change the mutex to spinlock and ensure mem allocations are done with the atomic flag. Also, remove unnecessary locking after notifiers are unregistered. Signed-off-by: John Hurley Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/cmsg.c | 9 +++-- drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 2 +- drivers/net/ethernet/netronome/nfp/flower/main.h | 2 +- .../net/ethernet/netronome/nfp/flower/offload.c | 2 +- .../ethernet/netronome/nfp/flower/tunnel_conf.c | 47 +++++++++------------- 5 files changed, 28 insertions(+), 34 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index 6b71c719deba..e98bb9cdb6a3 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -50,14 +50,14 @@ nfp_flower_cmsg_get_hdr(struct sk_buff *skb) struct sk_buff * nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size, - enum nfp_flower_cmsg_type_port type) + enum nfp_flower_cmsg_type_port type, gfp_t flag) { struct nfp_flower_cmsg_hdr *ch; struct sk_buff *skb; size += NFP_FLOWER_CMSG_HLEN; - skb = nfp_app_ctrl_msg_alloc(app, size, GFP_KERNEL); + skb = nfp_app_ctrl_msg_alloc(app, size, flag); if (!skb) return NULL; @@ -78,7 +78,8 @@ nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports) unsigned int size; size = sizeof(*msg) + num_ports * sizeof(msg->ports[0]); - skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR); + skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR, + GFP_KERNEL); if (!skb) return NULL; @@ -109,7 +110,7 @@ int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok) struct sk_buff *skb; skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg), - NFP_FLOWER_CMSG_TYPE_PORT_MOD); + NFP_FLOWER_CMSG_TYPE_PORT_MOD, GFP_KERNEL); if (!skb) return -ENOMEM; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 64e87f8e7089..66070741d55f 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -458,6 +458,6 @@ void nfp_flower_cmsg_process_rx(struct work_struct *work); void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb); struct sk_buff * nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size, - enum nfp_flower_cmsg_type_port type); + enum nfp_flower_cmsg_type_port type, gfp_t flag); #endif diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 12c319a219d8..c90e72b7ff5a 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -115,7 +115,7 @@ struct nfp_flower_priv { struct mutex nfp_mac_off_lock; struct mutex nfp_mac_index_lock; struct mutex nfp_ipv4_off_lock; - struct mutex nfp_neigh_off_lock; + spinlock_t nfp_neigh_off_lock; struct ida nfp_mac_off_ids; int nfp_mac_off_count; struct notifier_block nfp_tun_mac_nb; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 95c2b9284857..cdbb5464b790 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -95,7 +95,7 @@ nfp_flower_xmit_flow(struct net_device *netdev, nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ; nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ; - skb = 
nfp_flower_cmsg_alloc(priv->app, tot_len, mtype); + skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL); if (!skb) return -ENOMEM; diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index c495f8f38506..b03f22f29612 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -224,12 +224,13 @@ static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev) } static int -nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata) +nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata, + gfp_t flag) { struct sk_buff *skb; unsigned char *msg; - skb = nfp_flower_cmsg_alloc(app, plen, mtype); + skb = nfp_flower_cmsg_alloc(app, plen, mtype, flag); if (!skb) return -ENOMEM; @@ -246,15 +247,15 @@ static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr) struct nfp_ipv4_route_entry *entry; struct list_head *ptr, *storage; - mutex_lock(&priv->nfp_neigh_off_lock); + spin_lock_bh(&priv->nfp_neigh_off_lock); list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) { entry = list_entry(ptr, struct nfp_ipv4_route_entry, list); if (entry->ipv4_addr == ipv4_addr) { - mutex_unlock(&priv->nfp_neigh_off_lock); + spin_unlock_bh(&priv->nfp_neigh_off_lock); return true; } } - mutex_unlock(&priv->nfp_neigh_off_lock); + spin_unlock_bh(&priv->nfp_neigh_off_lock); return false; } @@ -264,24 +265,24 @@ static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr) struct nfp_ipv4_route_entry *entry; struct list_head *ptr, *storage; - mutex_lock(&priv->nfp_neigh_off_lock); + spin_lock_bh(&priv->nfp_neigh_off_lock); list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) { entry = list_entry(ptr, struct nfp_ipv4_route_entry, list); if (entry->ipv4_addr == ipv4_addr) { - mutex_unlock(&priv->nfp_neigh_off_lock); + spin_unlock_bh(&priv->nfp_neigh_off_lock); return; } } - entry = kmalloc(sizeof(*entry), GFP_KERNEL); + entry = kmalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) { - mutex_unlock(&priv->nfp_neigh_off_lock); + spin_unlock_bh(&priv->nfp_neigh_off_lock); nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n"); return; } entry->ipv4_addr = ipv4_addr; list_add_tail(&entry->list, &priv->nfp_neigh_off_list); - mutex_unlock(&priv->nfp_neigh_off_lock); + spin_unlock_bh(&priv->nfp_neigh_off_lock); } static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr) @@ -290,7 +291,7 @@ static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr) struct nfp_ipv4_route_entry *entry; struct list_head *ptr, *storage; - mutex_lock(&priv->nfp_neigh_off_lock); + spin_lock_bh(&priv->nfp_neigh_off_lock); list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) { entry = list_entry(ptr, struct nfp_ipv4_route_entry, list); if (entry->ipv4_addr == ipv4_addr) { @@ -299,12 +300,12 @@ static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr) break; } } - mutex_unlock(&priv->nfp_neigh_off_lock); + spin_unlock_bh(&priv->nfp_neigh_off_lock); } static void nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, - struct flowi4 *flow, struct neighbour *neigh) + struct flowi4 *flow, struct neighbour *neigh, gfp_t flag) { struct nfp_tun_neigh payload; @@ -334,7 +335,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, send_msg: nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_NEIGH, sizeof(struct 
nfp_tun_neigh), - (unsigned char *)&payload); + (unsigned char *)&payload, flag); } static int @@ -385,7 +386,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event, #endif flow.flowi4_proto = IPPROTO_UDP; - nfp_tun_write_neigh(n->dev, app, &flow, n); + nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC); return NOTIFY_OK; } @@ -423,7 +424,7 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb) ip_rt_put(rt); if (!n) goto route_fail_warning; - nfp_tun_write_neigh(n->dev, app, &flow, n); + nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_KERNEL); neigh_release(n); return; @@ -456,7 +457,7 @@ static void nfp_tun_write_ipv4_list(struct nfp_app *app) nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS, sizeof(struct nfp_tun_ipv4_addr), - &payload); + &payload, GFP_KERNEL); } void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4) @@ -548,7 +549,7 @@ void nfp_tunnel_write_macs(struct nfp_app *app) } err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC, - pay_size, payload); + pay_size, payload, GFP_KERNEL); kfree(payload); @@ -729,7 +730,7 @@ int nfp_tunnel_config_start(struct nfp_app *app) INIT_LIST_HEAD(&priv->nfp_ipv4_off_list); /* Initialise priv data for neighbour offloading. */ - mutex_init(&priv->nfp_neigh_off_lock); + spin_lock_init(&priv->nfp_neigh_off_lock); INIT_LIST_HEAD(&priv->nfp_neigh_off_list); priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler; @@ -769,43 +770,35 @@ void nfp_tunnel_config_stop(struct nfp_app *app) unregister_netevent_notifier(&priv->nfp_tun_neigh_nb); /* Free any memory that may be occupied by MAC list. */ - mutex_lock(&priv->nfp_mac_off_lock); list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) { mac_entry = list_entry(ptr, struct nfp_tun_mac_offload_entry, list); list_del(&mac_entry->list); kfree(mac_entry); } - mutex_unlock(&priv->nfp_mac_off_lock); /* Free any memory that may be occupied by MAC index list. */ - mutex_lock(&priv->nfp_mac_index_lock); list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) { mac_idx = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list); list_del(&mac_idx->list); kfree(mac_idx); } - mutex_unlock(&priv->nfp_mac_index_lock); ida_destroy(&priv->nfp_mac_off_ids); /* Free any memory that may be occupied by ipv4 list. */ - mutex_lock(&priv->nfp_ipv4_off_lock); list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) { ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list); list_del(&ip_entry->list); kfree(ip_entry); } - mutex_unlock(&priv->nfp_ipv4_off_lock); /* Free any memory that may be occupied by the route list. */ - mutex_lock(&priv->nfp_neigh_off_lock); list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) { route_entry = list_entry(ptr, struct nfp_ipv4_route_entry, list); list_del(&route_entry->list); kfree(route_entry); } - mutex_unlock(&priv->nfp_neigh_off_lock); } -- cgit v1.2.3 From f449657f83532807f388b9b99cf0c3f7be65eda9 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 2 Nov 2017 01:31:31 -0700 Subject: nfp: bpf: reject TC offload if XDP loaded Recent TC changes dropped the check protecting us from trying to offload a TC program if XDP programs are already loaded. Fixes: 90d97315b3e7 ("nfp: bpf: Convert ndo_setup_tc offloads to block callbacks") Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
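A side note on the tunnel_conf.c conversion just above: code reachable from an atomic notifier chain (netevent here) must not sleep, which is why the mutex becomes a spinlock and why allocations now take a caller-supplied gfp_t. A minimal self-contained sketch of that pattern follows; every name in it is illustrative, not part of the nfp driver.

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>

struct route_entry {
	struct list_head list;
	__be32 addr;
};

static LIST_HEAD(route_cache);
static DEFINE_SPINLOCK(route_lock);	/* spinlock, not mutex: callers may be atomic */

/* flag is GFP_ATOMIC when called from the notifier, GFP_KERNEL otherwise */
static void route_cache_add(__be32 addr, gfp_t flag)
{
	struct route_entry *e;

	e = kmalloc(sizeof(*e), flag);
	if (!e)
		return;

	e->addr = addr;
	spin_lock_bh(&route_lock);
	list_add_tail(&e->list, &route_cache);
	spin_unlock_bh(&route_lock);
}

The _bh lock variant matches the spin_lock_bh() calls in the patch above, which is sufficient when the atomic callers arrive from softirq rather than hard IRQ context.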
Miller --- drivers/net/ethernet/netronome/nfp/bpf/main.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 04424db24b80..8e3e89cace8d 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -130,6 +130,9 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type, cls_bpf->common.protocol != htons(ETH_P_ALL) || cls_bpf->common.chain_index) return -EOPNOTSUPP; + if (nn->dp.bpf_offload_xdp) + return -EBUSY; + return nfp_net_bpf_offload(nn, cls_bpf); default: return -EOPNOTSUPP; -- cgit v1.2.3 From 2c4197a041dfbb5101aaa8be7b378ba69b91e765 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 2 Nov 2017 01:31:32 -0700 Subject: nfp: reorganize the app table The app table is an unordered array right now. We have to search apps by ID. It also makes it harder to fall back to core NIC if advanced functions are not compiled into the kernel (e.g. eBPF). Make the table keyed by app id. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_app.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c index 5d9e2eba5b49..085c5151c601 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c @@ -43,10 +43,10 @@ #include "nfp_net_repr.h" static const struct nfp_app_type *apps[] = { - &app_nic, - &app_bpf, + [NFP_APP_CORE_NIC] = &app_nic, + [NFP_APP_BPF_NIC] = &app_bpf, #ifdef CONFIG_NFP_APP_FLOWER - &app_flower, + [NFP_APP_FLOWER_NIC] = &app_flower, #endif }; @@ -116,17 +116,13 @@ exit_unlock: struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id) { struct nfp_app *app; - unsigned int i; - for (i = 0; i < ARRAY_SIZE(apps); i++) - if (apps[i]->id == id) - break; - if (i == ARRAY_SIZE(apps)) { + if (id >= ARRAY_SIZE(apps) || !apps[id]) { nfp_err(pf->cpp, "failed to find app with ID 0x%02hhx\n", id); return ERR_PTR(-EINVAL); } - if (WARN_ON(!apps[i]->name || !apps[i]->vnic_alloc)) + if (WARN_ON(!apps[id]->name || !apps[id]->vnic_alloc)) return ERR_PTR(-EINVAL); app = kzalloc(sizeof(*app), GFP_KERNEL); @@ -136,7 +132,7 @@ struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id) app->pf = pf; app->cpp = pf->cpp; app->pdev = pf->pdev; - app->type = apps[i]; + app->type = apps[id]; return app; } -- cgit v1.2.3 From 43b45245e5a6c274f374ecb49e5bca39f28dbfad Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 2 Nov 2017 01:31:33 -0700 Subject: nfp: bpf: fall back to core NIC app if BPF not selected If kernel config does not include BPF just replace the BPF app handler with the handler for basic NIC. The BPF app will now be built only if BPF infrastructure is selected in kernel config. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
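The app-table reorganization above leans on C designated array initializers: indexing apps[] by NFP_APP_* ID turns the linear search into a direct lookup and leaves missing entries NULL. A generic sketch of the idiom, with hypothetical names:

#include <linux/kernel.h>	/* ARRAY_SIZE */

struct handler_ops;		/* opaque: only pointers are stored */

enum handler_id {
	HANDLER_BASIC,
	HANDLER_ADVANCED,
};

extern const struct handler_ops basic_ops;
extern const struct handler_ops advanced_ops;

static const struct handler_ops *handlers[] = {
	[HANDLER_BASIC]		= &basic_ops,
#ifdef CONFIG_ADVANCED		/* hypothetical option; slot stays NULL if disabled */
	[HANDLER_ADVANCED]	= &advanced_ops,
#endif
};

static const struct handler_ops *handler_lookup(unsigned int id)
{
	if (id >= ARRAY_SIZE(handlers) || !handlers[id])
		return NULL;	/* unknown or compiled out */
	return handlers[id];
}

This shape is what makes the fallback in the follow-up patch a small #ifdef: the BPF slot can simply be pointed at the core NIC ops instead of being removed.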
Miller --- drivers/net/ethernet/netronome/nfp/Makefile | 4 ++-- drivers/net/ethernet/netronome/nfp/bpf/offload.c | 3 --- drivers/net/ethernet/netronome/nfp/nfp_app.c | 4 ++++ 3 files changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index bd3b2bd408bc..9e8d30cb1517 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -27,8 +27,6 @@ nfp-objs := \ nfp_net_sriov.o \ nfp_netvf_main.o \ nfp_port.o \ - bpf/main.o \ - bpf/offload.o \ nic/main.o ifeq ($(CONFIG_NFP_APP_FLOWER),y) @@ -44,6 +42,8 @@ endif ifeq ($(CONFIG_BPF_SYSCALL),y) nfp-objs += \ + bpf/main.o \ + bpf/offload.o \ bpf/verifier.o \ bpf/jit.o endif diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 63c8f7847054..6d576f631392 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -150,9 +150,6 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn, unsigned int max_mtu; int ret; - if (!IS_ENABLED(CONFIG_BPF_SYSCALL)) - return -EOPNOTSUPP; - ret = nfp_net_bpf_get_act(nn, cls_bpf); if (ret < 0) return ret; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c index 085c5151c601..3644d74fe304 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c @@ -44,7 +44,11 @@ static const struct nfp_app_type *apps[] = { [NFP_APP_CORE_NIC] = &app_nic, +#ifdef CONFIG_BPF_SYSCALL [NFP_APP_BPF_NIC] = &app_bpf, +#else + [NFP_APP_BPF_NIC] = &app_nic, +#endif #ifdef CONFIG_NFP_APP_FLOWER [NFP_APP_FLOWER_NIC] = &app_flower, #endif -- cgit v1.2.3 From 790a399171831d31c8016a27294ef69130d3e7cc Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 2 Nov 2017 01:31:34 -0700 Subject: nfp: switch to dev_alloc_page() Use the dev_alloc_page() networking helper to allocate pages for RX packets. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index eddf850a6a7f..7147335a8b36 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1212,7 +1212,7 @@ static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr) } else { struct page *page; - page = alloc_page(GFP_ATOMIC | __GFP_COLD); + page = dev_alloc_page(); frag = page ? page_address(page) : NULL; } if (!frag) { -- cgit v1.2.3 From 16f50cda06ae023cb7beb15c88233fc516c03a2a Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 2 Nov 2017 01:31:35 -0700 Subject: nfp: use a counter instead of log message for allocation failures Add a counter incremented when allocation of replacement RX page fails. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
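The new counter will live under the ring vectors' u64_stats_sync sections, the standard way to keep 64-bit counters tear-free on 32-bit kernels without atomics in the fast path. A standalone sketch of the mechanism (field and function names invented for illustration):

#include <linux/u64_stats_sync.h>

struct rx_sw_stats {
	struct u64_stats_sync syncp;	/* u64_stats_init() once at setup */
	u64 replace_buf_alloc_fail;
};

static void rx_count_alloc_fail(struct rx_sw_stats *s)
{
	u64_stats_update_begin(&s->syncp);	/* writer: cheap seqcount bump */
	s->replace_buf_alloc_fail++;
	u64_stats_update_end(&s->syncp);
}

static u64 rx_read_alloc_fail(struct rx_sw_stats *s)
{
	unsigned int start;
	u64 val;

	do {	/* reader retries if a writer raced with it */
		start = u64_stats_fetch_begin(&s->syncp);
		val = s->replace_buf_alloc_fail;
	} while (u64_stats_fetch_retry(&s->syncp, start));

	return val;
}

On 64-bit builds the begin/retry pair compiles away entirely, so the counter costs a single increment.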
Miller --- drivers/net/ethernet/netronome/nfp/nfp_net.h | 3 +++ drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 15 ++++++++++----- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 12 +++++++----- 3 files changed, 20 insertions(+), 10 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index d51d8237b984..3d411f0d15b6 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -394,6 +394,7 @@ struct nfp_net_rx_ring { * @tx_lso: Counter of LSO packets sent * @tx_errors: How many TX errors were encountered * @tx_busy: How often was TX busy (no space)? + * @rx_replace_buf_alloc_fail: Counter of RX buffer allocation failures * @irq_vector: Interrupt vector number (use for talking to the OS) * @handler: Interrupt handler for this ring vector * @name: Name of the interrupt vector @@ -437,6 +438,8 @@ struct nfp_net_r_vector { u64 hw_csum_tx_inner; u64 tx_gather; u64 tx_lso; + + u64 rx_replace_buf_alloc_fail; u64 tx_errors; u64 tx_busy; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 7147335a8b36..185a3dd35a3f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -1209,15 +1209,15 @@ static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr) if (!dp->xdp_prog) { frag = napi_alloc_frag(dp->fl_bufsz); + if (unlikely(!frag)) + return NULL; } else { struct page *page; page = dev_alloc_page(); - frag = page ? page_address(page) : NULL; - } - if (!frag) { - nn_dp_warn(dp, "Failed to alloc receive page frag\n"); - return NULL; + if (unlikely(!page)) + return NULL; + frag = page_address(page); } *dma_addr = nfp_net_dma_map_rx(dp, frag); @@ -1514,6 +1514,11 @@ nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, { u64_stats_update_begin(&r_vec->rx_sync); r_vec->rx_drops++; + /* If we have both skb and rxbuf the replacement buffer allocation + * must have failed, count this as an alloc failure. 
+ */ + if (skb && rxbuf) + r_vec->rx_replace_buf_alloc_fail++; u64_stats_update_end(&r_vec->rx_sync); /* skb is build based on the frag, free_skb() would free the frag diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index dc016dfec64d..6d5c376f0000 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -181,7 +181,7 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = { #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats) #define NN_ET_SWITCH_STATS_LEN 9 -#define NN_ET_RVEC_GATHER_STATS 7 +#define NN_ET_RVEC_GATHER_STATS 8 static void nfp_net_get_nspinfo(struct nfp_app *app, char *version) { @@ -444,6 +444,7 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data) data = nfp_pr_et(data, "hw_rx_csum_ok"); data = nfp_pr_et(data, "hw_rx_csum_inner_ok"); data = nfp_pr_et(data, "hw_rx_csum_err"); + data = nfp_pr_et(data, "rx_replace_buf_alloc_fail"); data = nfp_pr_et(data, "hw_tx_csum"); data = nfp_pr_et(data, "hw_tx_inner_csum"); data = nfp_pr_et(data, "tx_gather"); @@ -468,16 +469,17 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) tmp[0] = nn->r_vecs[i].hw_csum_rx_ok; tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok; tmp[2] = nn->r_vecs[i].hw_csum_rx_error; + tmp[3] = nn->r_vecs[i].rx_replace_buf_alloc_fail; } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start)); do { start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync); data[1] = nn->r_vecs[i].tx_pkts; data[2] = nn->r_vecs[i].tx_busy; - tmp[3] = nn->r_vecs[i].hw_csum_tx; - tmp[4] = nn->r_vecs[i].hw_csum_tx_inner; - tmp[5] = nn->r_vecs[i].tx_gather; - tmp[6] = nn->r_vecs[i].tx_lso; + tmp[4] = nn->r_vecs[i].hw_csum_tx; + tmp[5] = nn->r_vecs[i].hw_csum_tx_inner; + tmp[6] = nn->r_vecs[i].tx_gather; + tmp[7] = nn->r_vecs[i].tx_lso; } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); data += 3; -- cgit v1.2.3 From 18f76191796ad478e42528cfafcae0b11d4c8db4 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 2 Nov 2017 01:31:36 -0700 Subject: nfp: improve defines for constants in ethtool We split rvector stats into two categories - per queue and stats which are added up into one total counter. Improve the defines denoting their number. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
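Notice that the hunk above had to renumber tmp[3]..tmp[7] by hand when the new statistic was inserted; the named defines in the next patch reduce that risk, and an enum would remove it altogether. A sketch of that alternative (illustrative only, not what the driver does):

enum rvec_gather_stat {
	RVEC_HW_CSUM_RX_OK,
	RVEC_HW_CSUM_RX_INNER_OK,
	RVEC_HW_CSUM_RX_ERROR,
	RVEC_RX_REPLACE_BUF_ALLOC_FAIL,	/* inserting here shifts the rest automatically */
	RVEC_HW_CSUM_TX,
	RVEC_HW_CSUM_TX_INNER,
	RVEC_TX_GATHER,
	RVEC_TX_LSO,
	RVEC_GATHER_STATS_MAX,		/* array bound stays in sync by construction */
};

/* usage: u64 tmp[RVEC_GATHER_STATS_MAX]; ... tmp[RVEC_TX_LSO] = r_vec->tx_lso; */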
Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 6d5c376f0000..c67b90c8d8b7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -181,7 +181,8 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = { #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats) #define NN_ET_SWITCH_STATS_LEN 9 -#define NN_ET_RVEC_GATHER_STATS 8 +#define NN_RVEC_GATHER_STATS 8 +#define NN_RVEC_PER_Q_STATS 3 static void nfp_net_get_nspinfo(struct nfp_app *app, char *version) { @@ -427,7 +428,7 @@ static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); - return NN_ET_RVEC_GATHER_STATS + nn->dp.num_r_vecs * 3; + return NN_RVEC_GATHER_STATS + nn->dp.num_r_vecs * NN_RVEC_PER_Q_STATS; } static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data) @@ -455,9 +456,9 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data) static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) { - u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {}; + u64 gathered_stats[NN_RVEC_GATHER_STATS] = {}; struct nfp_net *nn = netdev_priv(netdev); - u64 tmp[NN_ET_RVEC_GATHER_STATS]; + u64 tmp[NN_RVEC_GATHER_STATS]; unsigned int i, j; for (i = 0; i < nn->dp.num_r_vecs; i++) { @@ -482,13 +483,13 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) tmp[7] = nn->r_vecs[i].tx_lso; } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); - data += 3; + data += NN_RVEC_PER_Q_STATS; - for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) + for (j = 0; j < NN_RVEC_GATHER_STATS; j++) gathered_stats[j] += tmp[j]; } - for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) + for (j = 0; j < NN_RVEC_GATHER_STATS; j++) *data++ = gathered_stats[j]; return data; -- cgit v1.2.3 From 9691cea91c9d35f09d6b31a5e8dbfc631d40903a Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 2 Nov 2017 12:13:36 +0100 Subject: net: seeq: fix timer conversion One of the timer conversion patches evidently escaped build testing until I ran into it on ARM randconfig builds: drivers/net/ethernet/seeq/ether3.c: In function 'ether3_ledoff': drivers/net/ethernet/seeq/ether3.c:175:40: error: 'priv' undeclared (first use in this function); did you mean 'pid'? drivers/net/ethernet/seeq/ether3.c:176:27: error: initialization from incompatible pointer type [-Werror=incompatible-pointer-types] This fixes the two small typos that caused the problems. Fixes: 6fd9c53f7186 ("net: seeq: Convert timers to use timer_setup()") Signed-off-by: Arnd Bergmann Signed-off-by: David S. Miller
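For context on the bug being fixed: the timer_setup()/from_timer() API replaces setup_timer()'s unsigned long data argument; the callback receives the struct timer_list pointer and recovers its containing structure via container_of(). A minimal sketch of the correct pattern with generic names (not the ether3 code):

#include <linux/timer.h>
#include <linux/netdevice.h>

struct my_priv {
	struct timer_list timer;
	struct net_device *dev;
};

static void my_ledoff(struct timer_list *t)
{
	/* from_timer()'s first argument is the name of the variable being
	 * declared (the macro applies typeof to it), which is why
	 * from_timer(priv, ...) assigned to a variable named "private"
	 * failed to build in ether3.c. */
	struct my_priv *priv = from_timer(priv, t, timer);

	netdev_dbg(priv->dev, "LED off timer fired\n");
}

static void my_start(struct my_priv *priv)
{
	timer_setup(&priv->timer, my_ledoff, 0);
	mod_timer(&priv->timer, jiffies + HZ / 10);
}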
--- drivers/net/ethernet/seeq/ether3.c | 2 +- drivers/net/ethernet/seeq/ether3.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c index da4807723a06..c5bc124b41a9 100644 --- a/drivers/net/ethernet/seeq/ether3.c +++ b/drivers/net/ethernet/seeq/ether3.c @@ -172,7 +172,7 @@ ether3_setbuffer(struct net_device *dev, buffer_rw_t read, int start) */ static void ether3_ledoff(struct timer_list *t) { - struct dev_priv *private = from_timer(priv, t, timer); + struct dev_priv *private = from_timer(private, t, timer); struct net_device *dev = private->dev; ether3_outw(priv(dev)->regs.config2 |= CFG2_CTRLO, REG_CONFIG2); diff --git a/drivers/net/ethernet/seeq/ether3.h b/drivers/net/ethernet/seeq/ether3.h index ea2ba286e665..be19e5fa5cf2 100644 --- a/drivers/net/ethernet/seeq/ether3.h +++ b/drivers/net/ethernet/seeq/ether3.h @@ -165,7 +165,7 @@ struct dev_priv { unsigned char tx_tail; /* buffer nr of transmitting packet */ unsigned int rx_head; /* address to fetch next packet from */ struct timer_list timer; - net_device *dev; + struct net_device *dev; int broken; /* 0 = ok, 1 = something went wrong */ }; -- cgit v1.2.3 From 7df7dad633e2c6d43ee9b39c267ee0add9798384 Mon Sep 17 00:00:00 2001 From: Lipeng Date: Thu, 2 Nov 2017 20:45:15 +0800 Subject: net: hns3: Refactor the mapping of tqp to vport This patch refactors the mapping of tqp to vport so that the mapping function can be used in both the reset process and the initialization process. Signed-off-by: qumingguang Signed-off-by: Lipeng Signed-off-by: Yunsheng Lin Signed-off-by: David S. Miller --- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 61 ++++++++++++++++++---- 1 file changed, 50 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 0b95fbe63ac1..404757a6792f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1184,11 +1184,7 @@ static int hclge_assign_tqp(struct hclge_vport *vport, struct hnae3_queue **tqp, u16 num_tqps) { struct hclge_dev *hdev = vport->back; - int i, alloced, func_id, ret; - bool is_pf; - - func_id = vport->vport_id; - is_pf = (vport->vport_id == 0) ? 
true : false; + int i, alloced; for (i = 0, alloced = 0; i < hdev->num_tqps && alloced < num_tqps; i++) { @@ -1197,12 +1193,6 @@ static int hclge_assign_tqp(struct hclge_vport *vport, hdev->htqp[i].q.tqp_index = alloced; tqp[alloced] = &hdev->htqp[i].q; hdev->htqp[i].alloced = true; - ret = hclge_map_tqps_to_func(hdev, func_id, - hdev->htqp[i].index, - alloced, is_pf); - if (ret) - return ret; - alloced++; } } @@ -1254,6 +1244,49 @@ static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps) return 0; } +static int hclge_map_tqp_to_vport(struct hclge_dev *hdev, + struct hclge_vport *vport) +{ + struct hnae3_handle *nic = &vport->nic; + struct hnae3_knic_private_info *kinfo; + u16 i; + + kinfo = &nic->kinfo; + for (i = 0; i < kinfo->num_tqps; i++) { + struct hclge_tqp *q = + container_of(kinfo->tqp[i], struct hclge_tqp, q); + bool is_pf; + int ret; + + is_pf = !(vport->vport_id); + ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index, + i, is_pf); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_map_tqp(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + u16 i, num_vport; + + num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; + for (i = 0; i < num_vport; i++) { + int ret; + + ret = hclge_map_tqp_to_vport(hdev, vport); + if (ret) + return ret; + + vport++; + } + + return 0; +} + static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps) { /* this would be initialized later */ @@ -4459,6 +4492,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + ret = hclge_map_tqp(hdev); + if (ret) { + dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); + return ret; + } + ret = hclge_mac_init(hdev); if (ret) { dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); -- cgit v1.2.3 From cf9cca2dd903b78d04ea7ad4cde0231988944d0f Mon Sep 17 00:00:00 2001 From: qumingguang Date: Thu, 2 Nov 2017 20:45:16 +0800 Subject: net: hns3: Refactor mac_init function MDIO needs to be initialized in the initialization process, but the reset process does not reset MDIO, so MDIO should not be reinitialized in the reset process. This patch moves the MDIO configuration function out of mac_init, so mac_init can be used in both the reset process and the initialization process. Signed-off-by: qumingguang Signed-off-by: Lipeng Signed-off-by: Yunsheng Lin Signed-off-by: David S. Miller
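The refactorings in this part of the series all follow one rule for reset-capable hardware: resources the reset does not destroy (the MDIO bus here, command-queue memory in the next patch) are set up once at probe, while state the hardware forgets is redone in a path shared between probe and reset. Schematically, under hypothetical names:

struct drv_dev;

int drv_mdio_config(struct drv_dev *dev);
int drv_hw_reinit(struct drv_dev *dev);	/* shared with the reset path */

/* probe-time only: also covers things like IRQs and ring memory */
static int drv_probe_hw(struct drv_dev *dev)
{
	int ret;

	ret = drv_mdio_config(dev);	/* survives reset, do it once */
	if (ret)
		return ret;

	return drv_hw_reinit(dev);	/* then bring up volatile state */
}

After a reset, only drv_hw_reinit() runs again; this is exactly the split hclge_init_ae_dev() and hclge_reset_ae_dev() end up with later in the series.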
--- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 404757a6792f..5daa8c791010 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2228,13 +2228,6 @@ static int hclge_mac_init(struct hclge_dev *hdev) mac->link = 0; - ret = hclge_mac_mdio_config(hdev); - if (ret) { - dev_warn(&hdev->pdev->dev, - "mdio config fail ret=%d\n", ret); - return ret; - } - /* Initialize the MTA table work mode */ hdev->accept_mta_mc = true; hdev->enable_mta = true; @@ -4498,6 +4491,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + ret = hclge_mac_mdio_config(hdev); + if (ret) { + dev_warn(&hdev->pdev->dev, + "mdio config fail ret=%d\n", ret); + return ret; + } + ret = hclge_mac_init(hdev); if (ret) { dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); -- cgit v1.2.3 From 3efb960f056d855d4b1f07095df1f313c05765f4 Mon Sep 17 00:00:00 2001 From: Lipeng Date: Thu, 2 Nov 2017 20:45:17 +0800 Subject: net: hns3: Refactor the initialization of command queue There is no need to reallocate the descriptors and remap the descriptor memory in the reset process, but some other actions are needed in both the reset process and the initialization process. To reuse a common interface in both, this patch moves the descriptor allocation and memory mapping out of the cmdq_init interface. Signed-off-by: qumingguang Signed-off-by: Lipeng Signed-off-by: Yunsheng Lin Signed-off-by: David S. Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 39 +++++++++++++--------- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 1 + .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 9 ++++- 3 files changed, 33 insertions(+), 16 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 60960e588b5f..ff13d1876d9e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -62,7 +62,7 @@ static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring) ring->desc = NULL; } -static int hclge_init_cmd_queue(struct hclge_dev *hdev, int ring_type) +static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type) { struct hclge_hw *hw = &hdev->hw; struct hclge_cmq_ring *ring = @@ -79,9 +79,6 @@ static int hclge_init_cmd_queue(struct hclge_dev *hdev, int ring_type) return ret; } - ring->next_to_clean = 0; - ring->next_to_use = 0; - return 0; } @@ -302,37 +299,52 @@ static enum hclge_cmd_status hclge_cmd_query_firmware_version( return ret; } -int hclge_cmd_init(struct hclge_dev *hdev) +int hclge_cmd_queue_init(struct hclge_dev *hdev) { - u32 version; int ret; /* Setup the queue entries for use cmd queue */ hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM; hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM; - /* Setup the lock for command queue */ - spin_lock_init(&hdev->hw.cmq.csq.lock); - spin_lock_init(&hdev->hw.cmq.crq.lock); - /* Setup Tx write back timeout */ hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT; /* Setup queue rings */ - ret = hclge_init_cmd_queue(hdev, HCLGE_TYPE_CSQ); + ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CSQ); if (ret) { 
dev_err(&hdev->pdev->dev, "CSQ ring setup error %d\n", ret); return ret; } - ret = hclge_init_cmd_queue(hdev, HCLGE_TYPE_CRQ); + ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CRQ); if (ret) { dev_err(&hdev->pdev->dev, "CRQ ring setup error %d\n", ret); goto err_csq; } + return 0; +err_csq: + hclge_free_cmd_desc(&hdev->hw.cmq.csq); + return ret; +} + +int hclge_cmd_init(struct hclge_dev *hdev) +{ + u32 version; + int ret; + + hdev->hw.cmq.csq.next_to_clean = 0; + hdev->hw.cmq.csq.next_to_use = 0; + hdev->hw.cmq.crq.next_to_clean = 0; + hdev->hw.cmq.crq.next_to_use = 0; + + /* Setup the lock for command queue */ + spin_lock_init(&hdev->hw.cmq.csq.lock); + spin_lock_init(&hdev->hw.cmq.crq.lock); + hclge_cmd_init_regs(&hdev->hw); ret = hclge_cmd_query_firmware_version(&hdev->hw, &version); @@ -346,9 +358,6 @@ int hclge_cmd_init(struct hclge_dev *hdev) dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version); return 0; -err_csq: - hclge_free_cmd_desc(&hdev->hw.cmq.csq); - return ret; } static void hclge_destroy_queue(struct hclge_cmq_ring *ring) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index b4373345c2b4..6bdc2167084b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -750,4 +750,5 @@ enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw, struct hclge_desc *desc); void hclge_destroy_cmd_queue(struct hclge_hw *hw); +int hclge_cmd_queue_init(struct hclge_dev *hdev); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 5daa8c791010..cf0fafec7954 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -4446,7 +4446,14 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) goto err_pci_init; } - /* Command queue initialize */ + /* Firmware command queue initialize */ + ret = hclge_cmd_queue_init(hdev); + if (ret) { + dev_err(&pdev->dev, "Cmd queue init failed, ret = %d.\n", ret); + return ret; + } + + /* Firmware command initialize */ ret = hclge_cmd_init(hdev); if (ret) goto err_cmd_init; -- cgit v1.2.3 From 466b0c00391bf160d1355489e542ecbfc86f4d98 Mon Sep 17 00:00:00 2001 From: Lipeng Date: Thu, 2 Nov 2017 20:45:18 +0800 Subject: net: hns3: Add support for misc interrupt This patch adds initialization and deinitialization for misc interrupt. This interrupt will be used to handle reset message(IRQ). Signed-off-by: qumingguang Signed-off-by: Lipeng Signed-off-by: Yunsheng Lin Signed-off-by: David S. 
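The misc vector introduced below follows the classic mask-and-defer interrupt shape: the hard IRQ handler only masks the vector and schedules deferred work, and the worker unmasks it once the event has been serviced. Reduced to a skeleton with generic names (not the hclge symbols):

#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/io.h>

struct misc_ctx {
	struct work_struct service_task;
	void __iomem *vector_addr;
};

static void misc_vector_enable(struct misc_ctx *ctx, bool enable)
{
	writel(enable ? 1 : 0, ctx->vector_addr);
}

static irqreturn_t misc_irq_handle(int irq, void *data)
{
	struct misc_ctx *ctx = data;

	misc_vector_enable(ctx, false);		/* mask until serviced */
	schedule_work(&ctx->service_task);	/* defer the real work */
	return IRQ_HANDLED;
}

static void misc_service_task(struct work_struct *work)
{
	struct misc_ctx *ctx = container_of(work, struct misc_ctx,
					    service_task);

	/* ... handle the deferred event here ... */
	misc_vector_enable(ctx, true);		/* unmask when done */
}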
Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 5 ++ .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 74 ++++++++++++++++++++++ .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 2 + 3 files changed, 81 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 6bdc2167084b..db4d887fd748 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -63,6 +63,11 @@ enum hclge_cmd_status { HCLGE_ERR_CSQ_ERROR = -3, }; +struct hclge_misc_vector { + u8 __iomem *addr; + int vector_irq; +}; + struct hclge_cmq { struct hclge_cmq_ring csq; struct hclge_cmq_ring crq; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index cf0fafec7954..e45842e48865 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2392,11 +2392,71 @@ static void hclge_service_complete(struct hclge_dev *hdev) clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); } +static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable) +{ + writel(enable ? 1 : 0, vector->addr); +} + +static irqreturn_t hclge_misc_irq_handle(int irq, void *data) +{ + struct hclge_dev *hdev = data; + + hclge_enable_vector(&hdev->misc_vector, false); + if (!test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) + schedule_work(&hdev->service_task); + + return IRQ_HANDLED; +} + +static void hclge_free_vector(struct hclge_dev *hdev, int vector_id) +{ + hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT; + hdev->num_msi_left += 1; + hdev->num_msi_used -= 1; +} + +static void hclge_get_misc_vector(struct hclge_dev *hdev) +{ + struct hclge_misc_vector *vector = &hdev->misc_vector; + + vector->vector_irq = pci_irq_vector(hdev->pdev, 0); + + vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE; + hdev->vector_status[0] = 0; + + hdev->num_msi_left -= 1; + hdev->num_msi_used += 1; +} + +static int hclge_misc_irq_init(struct hclge_dev *hdev) +{ + int ret; + + hclge_get_misc_vector(hdev); + + ret = devm_request_irq(&hdev->pdev->dev, + hdev->misc_vector.vector_irq, + hclge_misc_irq_handle, 0, "hclge_misc", hdev); + if (ret) { + hclge_free_vector(hdev, 0); + dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n", + hdev->misc_vector.vector_irq); + } + + return ret; +} + +static void hclge_misc_irq_service_task(struct hclge_dev *hdev) +{ + hclge_enable_vector(&hdev->misc_vector, true); +} + static void hclge_service_task(struct work_struct *work) { struct hclge_dev *hdev = container_of(work, struct hclge_dev, service_task); + hclge_misc_irq_service_task(hdev); hclge_update_speed_duplex(hdev); hclge_update_link_status(hdev); hclge_update_stats_for_all(hdev); @@ -4480,6 +4540,14 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + ret = hclge_misc_irq_init(hdev); + if (ret) { + dev_err(&pdev->dev, + "Misc IRQ(vector0) init error, ret = %d.\n", + ret); + return ret; + } + ret = hclge_alloc_tqps(hdev); if (ret) { dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret); @@ -4545,6 +4613,9 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) timer_setup(&hdev->service_timer, hclge_service_timer, 0); INIT_WORK(&hdev->service_task, hclge_service_task); + /* Enable MISC vector(vector0) */ + hclge_enable_vector(&hdev->misc_vector, true); + 
set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); set_bit(HCLGE_STATE_DOWN, &hdev->state); @@ -4577,6 +4648,9 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) if (mac->phydev) mdiobus_unregister(mac->mdio_bus); + /* Disable MISC vector(vector0) */ + hclge_enable_vector(&hdev->misc_vector, false); + hclge_free_vector(hdev, 0); hclge_destroy_cmd_queue(&hdev->hw); hclge_pci_uninit(hdev); ae_dev->priv = NULL; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index bca4430bb7e7..2a1d4d6810bf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -27,6 +27,7 @@ (HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES) #define HCLGE_VECTOR_REG_BASE 0x20000 +#define HCLGE_MISC_VECTOR_REG_BASE 0x20400 #define HCLGE_VECTOR_REG_OFFSET 0x4 #define HCLGE_VECTOR_VF_OFFSET 0x100000 @@ -400,6 +401,7 @@ struct hclge_dev { struct pci_dev *pdev; struct hnae3_ae_dev *ae_dev; struct hclge_hw hw; + struct hclge_misc_vector misc_vector; struct hclge_hw_stats hw_stats; unsigned long state; -- cgit v1.2.3 From 4ed340ab8f49275a83337cb66e8f53e544f34674 Mon Sep 17 00:00:00 2001 From: Lipeng Date: Thu, 2 Nov 2017 20:45:19 +0800 Subject: net: hns3: Add reset process in hclge_main This patch adds reset support for the PF; it includes global reset, core reset, IMP reset and PF reset. The core reset resets all datapaths of all functions except the IMP, MAC and PCI interface. Global reset is equal to core reset plus reset of all MACs. IMP reset is caused by watchdog timer expiration and is handled the same as core reset in the reset flow. PF reset resets the whole physical function. Signed-off-by: qumingguang Signed-off-by: Lipeng Signed-off-by: Yunsheng Lin Signed-off-by: David S.
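hclge_reset_wait() in the diff that follows open-codes a 5 x 100 ms poll loop with msleep(); since that context may sleep, the same wait could also be written with the iopoll helpers. A sketch, assuming only that the status register and bit are as the patch defines them:

#include <linux/iopoll.h>

/* Illustrative: wait up to 500 ms for a reset bit to clear,
 * polling every 10 ms (all times in microseconds). */
static int wait_reset_done(void __iomem *reg, u32 bit)
{
	u32 val;

	return readl_poll_timeout(reg, val, !(val & BIT(bit)),
				  10000, 500000);
}

The open-coded loop in the patch behaves the same but also lets the driver clear the latched status register afterwards in one place.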
Miller --- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 19 ++ .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 7 + .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 285 +++++++++++++++++++++ .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 15 ++ 4 files changed, 326 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 3acd8db0a794..67c59e1039f2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -110,6 +110,21 @@ enum hnae3_media_type { HNAE3_MEDIA_TYPE_BACKPLANE, }; +enum hnae3_reset_notify_type { + HNAE3_UP_CLIENT, + HNAE3_DOWN_CLIENT, + HNAE3_INIT_CLIENT, + HNAE3_UNINIT_CLIENT, +}; + +enum hnae3_reset_type { + HNAE3_FUNC_RESET, + HNAE3_CORE_RESET, + HNAE3_GLOBAL_RESET, + HNAE3_IMP_RESET, + HNAE3_NONE_RESET, +}; + struct hnae3_vector_info { u8 __iomem *io_addr; int vector; @@ -133,6 +148,8 @@ struct hnae3_client_ops { void (*uninit_instance)(struct hnae3_handle *handle, bool reset); void (*link_status_change)(struct hnae3_handle *handle, bool state); int (*setup_tc)(struct hnae3_handle *handle, u8 tc); + int (*reset_notify)(struct hnae3_handle *handle, + enum hnae3_reset_notify_type type); }; #define HNAE3_CLIENT_NAME_LENGTH 16 @@ -367,6 +384,8 @@ struct hnae3_ae_ops { u16 vlan_id, bool is_kill); int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid, u16 vlan, u8 qos, __be16 proto); + void (*reset_event)(struct hnae3_handle *handle, + enum hnae3_reset_type reset); }; struct hnae3_dcb_ops { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index db4d887fd748..844c83ea549e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -697,6 +697,13 @@ struct hclge_reset_tqp_queue_cmd { u8 rsv[20]; }; +#define HCLGE_CFG_RESET_MAC_B 3 +#define HCLGE_CFG_RESET_FUNC_B 7 +struct hclge_reset_cmd { + u8 mac_func_reset; + u8 fun_reset_vfid; + u8 rsv[22]; +}; #define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */ #define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */ #define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index e45842e48865..699983a954b2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -35,6 +35,7 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, enum hclge_mta_dmac_sel_type mta_mac_sel, bool enable); static int hclge_init_vlan_config(struct hclge_dev *hdev); +static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); static struct hnae3_ae_algo ae_algo; @@ -2446,8 +2447,212 @@ static int hclge_misc_irq_init(struct hclge_dev *hdev) return ret; } +static int hclge_notify_client(struct hclge_dev *hdev, + enum hnae3_reset_notify_type type) +{ + struct hnae3_client *client = hdev->nic_client; + u16 i; + + if (!client->ops->reset_notify) + return -EOPNOTSUPP; + + for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { + struct hnae3_handle *handle = &hdev->vport[i].nic; + int ret; + + ret = client->ops->reset_notify(handle, type); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_reset_wait(struct hclge_dev *hdev) +{ +#define HCLGE_RESET_WATI_MS 100 +#define HCLGE_RESET_WAIT_CNT 5 + u32 val, reg, reg_bit; + u32 cnt = 0; + + switch 
(hdev->reset_type) { + case HNAE3_GLOBAL_RESET: + reg = HCLGE_GLOBAL_RESET_REG; + reg_bit = HCLGE_GLOBAL_RESET_BIT; + break; + case HNAE3_CORE_RESET: + reg = HCLGE_GLOBAL_RESET_REG; + reg_bit = HCLGE_CORE_RESET_BIT; + break; + case HNAE3_FUNC_RESET: + reg = HCLGE_FUN_RST_ING; + reg_bit = HCLGE_FUN_RST_ING_B; + break; + default: + dev_err(&hdev->pdev->dev, + "Wait for unsupported reset type: %d\n", + hdev->reset_type); + return -EINVAL; + } + + val = hclge_read_dev(&hdev->hw, reg); + while (hnae_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { + msleep(HCLGE_RESET_WATI_MS); + val = hclge_read_dev(&hdev->hw, reg); + cnt++; + } + + /* must clear reset status register to + * prevent driver detect reset interrupt again + */ + reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG); + hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, reg); + + if (cnt >= HCLGE_RESET_WAIT_CNT) { + dev_warn(&hdev->pdev->dev, + "Wait for reset timeout: %d\n", hdev->reset_type); + return -EBUSY; + } + + return 0; +} + +static int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) +{ + struct hclge_desc desc; + struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false); + hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_MAC_B, 0); + hnae_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1); + req->fun_reset_vfid = func_id; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "send function reset cmd fail, status =%d\n", ret); + + return ret; +} + +static void hclge_do_reset(struct hclge_dev *hdev, enum hnae3_reset_type type) +{ + struct pci_dev *pdev = hdev->pdev; + u32 val; + + switch (type) { + case HNAE3_GLOBAL_RESET: + val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); + hnae_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1); + hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); + dev_info(&pdev->dev, "Global Reset requested\n"); + break; + case HNAE3_CORE_RESET: + val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); + hnae_set_bit(val, HCLGE_CORE_RESET_BIT, 1); + hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val); + dev_info(&pdev->dev, "Core Reset requested\n"); + break; + case HNAE3_FUNC_RESET: + dev_info(&pdev->dev, "PF Reset requested\n"); + hclge_func_reset_cmd(hdev, 0); + break; + default: + dev_warn(&pdev->dev, + "Unsupported reset type: %d\n", type); + break; + } +} + +static enum hnae3_reset_type hclge_detected_reset_event(struct hclge_dev *hdev) +{ + enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; + u32 rst_reg_val; + + rst_reg_val = hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG); + if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_reg_val) + rst_level = HNAE3_GLOBAL_RESET; + else if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_reg_val) + rst_level = HNAE3_CORE_RESET; + else if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_reg_val) + rst_level = HNAE3_IMP_RESET; + + return rst_level; +} + +static void hclge_reset_event(struct hnae3_handle *handle, + enum hnae3_reset_type reset) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + dev_info(&hdev->pdev->dev, + "Receive reset event , reset_type is %d", reset); + + switch (reset) { + case HNAE3_FUNC_RESET: + case HNAE3_CORE_RESET: + case HNAE3_GLOBAL_RESET: + if (test_bit(HCLGE_STATE_RESET_INT, &hdev->state)) { + dev_err(&hdev->pdev->dev, "Already in reset state"); + return; + } + hdev->reset_type = reset; + set_bit(HCLGE_STATE_RESET_INT, &hdev->state); + 
set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); + schedule_work(&hdev->service_task); + break; + default: + dev_warn(&hdev->pdev->dev, "Unsupported reset event:%d", reset); + break; + } +} + +static void hclge_reset_subtask(struct hclge_dev *hdev) +{ + bool do_reset; + + do_reset = hdev->reset_type != HNAE3_NONE_RESET; + + /* Reset is detected by interrupt */ + if (hdev->reset_type == HNAE3_NONE_RESET) + hdev->reset_type = hclge_detected_reset_event(hdev); + + if (hdev->reset_type == HNAE3_NONE_RESET) + return; + + switch (hdev->reset_type) { + case HNAE3_FUNC_RESET: + case HNAE3_CORE_RESET: + case HNAE3_GLOBAL_RESET: + case HNAE3_IMP_RESET: + hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + + if (do_reset) + hclge_do_reset(hdev, hdev->reset_type); + else + set_bit(HCLGE_STATE_RESET_INT, &hdev->state); + + if (!hclge_reset_wait(hdev)) { + hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); + hclge_reset_ae_dev(hdev->ae_dev); + hclge_notify_client(hdev, HNAE3_INIT_CLIENT); + clear_bit(HCLGE_STATE_RESET_INT, &hdev->state); + } + hclge_notify_client(hdev, HNAE3_UP_CLIENT); + break; + default: + dev_err(&hdev->pdev->dev, "Unsupported reset type:%d\n", + hdev->reset_type); + break; + } + hdev->reset_type = HNAE3_NONE_RESET; +} + static void hclge_misc_irq_service_task(struct hclge_dev *hdev) { + hclge_reset_subtask(hdev); hclge_enable_vector(&hdev->misc_vector, true); } @@ -4498,6 +4703,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hdev->flag |= HCLGE_FLAG_USE_MSIX; hdev->pdev = pdev; hdev->ae_dev = ae_dev; + hdev->reset_type = HNAE3_NONE_RESET; ae_dev->priv = hdev; ret = hclge_pci_init(hdev); @@ -4630,6 +4836,84 @@ err_hclge_dev: return ret; } +static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct pci_dev *pdev = ae_dev->pdev; + int ret; + + set_bit(HCLGE_STATE_DOWN, &hdev->state); + + ret = hclge_cmd_init(hdev); + if (ret) { + dev_err(&pdev->dev, "Cmd queue init failed\n"); + return ret; + } + + ret = hclge_get_cap(hdev); + if (ret) { + dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", + ret); + return ret; + } + + ret = hclge_configure(hdev); + if (ret) { + dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); + return ret; + } + + ret = hclge_map_tqp(hdev); + if (ret) { + dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); + return ret; + } + + ret = hclge_mac_init(hdev); + if (ret) { + dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); + return ret; + } + + ret = hclge_buffer_alloc(hdev); + if (ret) { + dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); + if (ret) { + dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_init_vlan_config(hdev); + if (ret) { + dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_tm_schd_init(hdev); + if (ret) { + dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_rss_init_hw(hdev); + if (ret) { + dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); + return ret; + } + + /* Enable MISC vector(vector0) */ + hclge_enable_vector(&hdev->misc_vector, true); + + dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", + HCLGE_DRIVER_NAME); + + return 0; +} + static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) { struct hclge_dev *hdev = ae_dev->priv; @@ -4699,6 +4983,7 @@ static const struct hnae3_ae_ops hclge_ops = { .get_mdix_mode 
= hclge_get_mdix_mode, .set_vlan_filter = hclge_set_port_vlan_filter, .set_vf_vlan_filter = hclge_set_vf_vlan_filter, + .reset_event = hclge_reset_event, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 2a1d4d6810bf..742e6ee9efaf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -79,6 +79,19 @@ #define HCLGE_PHY_MDIX_STATUS_B (6) #define HCLGE_PHY_SPEED_DUP_RESOLVE_B (11) +/* Reset related Registers */ +#define HCLGE_MISC_RESET_STS_REG 0x20700 +#define HCLGE_GLOBAL_RESET_REG 0x20A00 +#define HCLGE_GLOBAL_RESET_BIT 0x0 +#define HCLGE_CORE_RESET_BIT 0x1 +#define HCLGE_FUN_RST_ING 0x20C00 +#define HCLGE_FUN_RST_ING_B 0 + +/* Vector0 register bits define */ +#define HCLGE_VECTOR0_GLOBALRESET_INT_B 5 +#define HCLGE_VECTOR0_CORERESET_INT_B 6 +#define HCLGE_VECTOR0_IMPRESET_INT_B 7 + enum HCLGE_DEV_STATE { HCLGE_STATE_REINITING, HCLGE_STATE_DOWN, @@ -88,6 +101,7 @@ enum HCLGE_DEV_STATE { HCLGE_STATE_SERVICE_SCHED, HCLGE_STATE_MBX_HANDLING, HCLGE_STATE_MBX_IRQ, + HCLGE_STATE_RESET_INT, HCLGE_STATE_MAX }; @@ -405,6 +419,7 @@ struct hclge_dev { struct hclge_hw_stats hw_stats; unsigned long state; + enum hnae3_reset_type reset_type; u32 fw_version; u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */ u16 num_tqps; /* Num task queue pairs of this PF */ -- cgit v1.2.3 From f8fa222ca57cccb066d18767010275e9e3a2b9fe Mon Sep 17 00:00:00 2001 From: Lipeng Date: Thu, 2 Nov 2017 20:45:20 +0800 Subject: net: hns3: Add timeout process in hns3_enet This patch adds a timeout handler in hns3_enet.c to handle TX side timeout events; when a TX timeout occurs, it triggers the NIC driver's reset process. Signed-off-by: qumingguang Signed-off-by: Lipeng Signed-off-by: Yunsheng Lin Signed-off-by: David S.
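Condensed, the timeout policy the patch below implements is: always count the event; if the last reset was long ago (20 s), restart from the mildest level; if a reset just ran, do nothing; otherwise request a reset and escalate the level for next time. Restated as a sketch (type and helper names are placeholders, not the hns3 symbols):

static void tx_timeout_policy(struct drv_priv *priv, struct net_device *ndev)
{
	priv->tx_timeout_count++;

	if (time_after(jiffies, priv->last_reset_time + 20 * HZ))
		priv->reset_level = DRV_FUNC_RESET;	/* quiet period: start small */
	else if (time_before(jiffies,
			     priv->last_reset_time + ndev->watchdog_timeo))
		return;					/* too soon after last reset */

	priv->last_reset_time = jiffies;
	drv_request_reset(priv, priv->reset_level);	/* hand off to the HW layer */

	if (++priv->reset_level > DRV_GLOBAL_RESET)	/* escalate for next time */
		priv->reset_level = DRV_GLOBAL_RESET;
}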
Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 86 ++++++++++++++++++++++ .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h | 2 + 2 files changed, 88 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index c6c5b2a96aaa..f0cb88a07850 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -258,6 +258,7 @@ out_start_err: static int hns3_nic_net_open(struct net_device *netdev) { + struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; netif_carrier_off(netdev); @@ -273,6 +274,7 @@ static int hns3_nic_net_open(struct net_device *netdev) return ret; } + priv->last_reset_time = jiffies; return 0; } @@ -1322,10 +1324,91 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) return ret; } +static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hns3_enet_ring *tx_ring = NULL; + int timeout_queue = 0; + int hw_head, hw_tail; + int i; + + /* Find the stopped queue the same way the stack does */ + for (i = 0; i < ndev->real_num_tx_queues; i++) { + struct netdev_queue *q; + unsigned long trans_start; + + q = netdev_get_tx_queue(ndev, i); + trans_start = q->trans_start; + if (netif_xmit_stopped(q) && + time_after(jiffies, + (trans_start + ndev->watchdog_timeo))) { + timeout_queue = i; + break; + } + } + + if (i == ndev->num_tx_queues) { + netdev_info(ndev, + "no netdev TX timeout queue found, timeout count: %llu\n", + priv->tx_timeout_count); + return false; + } + + tx_ring = priv->ring_data[timeout_queue].ring; + + hw_head = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_HEAD_REG); + hw_tail = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_TAIL_REG); + netdev_info(ndev, + "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n", + priv->tx_timeout_count, + timeout_queue, + tx_ring->next_to_use, + tx_ring->next_to_clean, + hw_head, + hw_tail, + readl(tx_ring->tqp_vector->mask_addr)); + + return true; +} + +static void hns3_nic_net_timeout(struct net_device *ndev) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + unsigned long last_reset_time = priv->last_reset_time; + struct hnae3_handle *h = priv->ae_handle; + + if (!hns3_get_tx_timeo_queue_info(ndev)) + return; + + priv->tx_timeout_count++; + + /* This timeout is far away enough from last timeout, + * if timeout again,set the reset type to PF reset + */ + if (time_after(jiffies, (last_reset_time + 20 * HZ))) + priv->reset_level = HNAE3_FUNC_RESET; + + /* Don't do any new action before the next timeout */ + else if (time_before(jiffies, (last_reset_time + ndev->watchdog_timeo))) + return; + + priv->last_reset_time = jiffies; + + if (h->ae_algo->ops->reset_event) + h->ae_algo->ops->reset_event(h, priv->reset_level); + + priv->reset_level++; + if (priv->reset_level > HNAE3_GLOBAL_RESET) + priv->reset_level = HNAE3_GLOBAL_RESET; +} + static const struct net_device_ops hns3_nic_netdev_ops = { .ndo_open = hns3_nic_net_open, .ndo_stop = hns3_nic_net_stop, .ndo_start_xmit = hns3_nic_net_xmit, + .ndo_tx_timeout = hns3_nic_net_timeout, .ndo_set_mac_address = hns3_nic_net_set_mac_address, .ndo_change_mtu = hns3_nic_change_mtu, .ndo_set_features = hns3_nic_set_features, @@ -2763,6 +2846,9 @@ static int hns3_client_init(struct hnae3_handle *handle) priv->dev = &pdev->dev; 
priv->netdev = netdev; priv->ae_handle = handle; + priv->last_reset_time = jiffies; + priv->reset_level = HNAE3_FUNC_RESET; + priv->tx_timeout_count = 0; handle->kinfo.netdev = netdev; handle->priv = (void *)priv; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h index 58dc30bf893c..8a9de759957b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h @@ -518,6 +518,8 @@ struct hns3_nic_priv { /* The most recently read link state */ int link; u64 tx_timeout_count; + enum hnae3_reset_type reset_level; + unsigned long last_reset_time; unsigned long state; -- cgit v1.2.3 From bb6b94a896d4dd4dcdeccca87c3fd22521c652c0 Mon Sep 17 00:00:00 2001 From: Lipeng Date: Thu, 2 Nov 2017 20:45:21 +0800 Subject: net: hns3: Add reset interface implementation in client This patch implements the reset notification interface in hns3_enet; it performs the client-side reset work, which includes shutting down the NIC device and freeing and reinitializing client-side resources. Signed-off-by: qumingguang Signed-off-by: Lipeng Signed-off-by: Yunsheng Lin Signed-off-by: David S. Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 153 +++++++++++++++++++++ 1 file changed, 153 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index f0cb88a07850..39679fdb83c7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -3009,11 +3009,164 @@ err_out: return ret; } +static void hns3_recover_hw_addr(struct net_device *ndev) +{ + struct netdev_hw_addr_list *list; + struct netdev_hw_addr *ha, *tmp; + + /* go through and sync uc_addr entries to the device */ + list = &ndev->uc; + list_for_each_entry_safe(ha, tmp, &list->list, list) + hns3_nic_uc_sync(ndev, ha->addr); + + /* go through and sync mc_addr entries to the device */ + list = &ndev->mc; + list_for_each_entry_safe(ha, tmp, &list->list, list) + hns3_nic_mc_sync(ndev, ha->addr); +} + +static void hns3_drop_skb_data(struct hns3_enet_ring *ring, struct sk_buff *skb) +{ + dev_kfree_skb_any(skb); +} + +static void hns3_clear_all_ring(struct hnae3_handle *h) +{ + struct net_device *ndev = h->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(ndev); + u32 i; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + struct netdev_queue *dev_queue; + struct hns3_enet_ring *ring; + + ring = priv->ring_data[i].ring; + hns3_clean_tx_ring(ring, ring->desc_num); + dev_queue = netdev_get_tx_queue(ndev, + priv->ring_data[i].queue_index); + netdev_tx_reset_queue(dev_queue); + + ring = priv->ring_data[i + h->kinfo.num_tqps].ring; + hns3_clean_rx_ring(ring, ring->desc_num, hns3_drop_skb_data); + } +} + +static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct net_device *ndev = kinfo->netdev; + + if (!netif_running(ndev)) + return -EIO; + + return hns3_nic_net_stop(ndev); +} + +static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); + int ret = 0; + + if (netif_running(kinfo->netdev)) { + ret = hns3_nic_net_up(kinfo->netdev); + if (ret) { + netdev_err(kinfo->netdev, + "hns net up fail, ret=%d!\n", ret); + return ret; + } + + priv->last_reset_time = jiffies; + } + 
+ return ret; +} + +static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) +{ + struct net_device *netdev = handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); + int ret; + + priv->reset_level = 1; + hns3_init_mac_addr(netdev); + hns3_nic_set_rx_mode(netdev); + hns3_recover_hw_addr(netdev); + + /* Carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + ret = hns3_get_ring_config(priv); + if (ret) + return ret; + + ret = hns3_nic_init_vector_data(priv); + if (ret) + return ret; + + ret = hns3_init_all_ring(priv); + if (ret) { + hns3_nic_uninit_vector_data(priv); + priv->ring_data = NULL; + } + + return ret; +} + +static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) +{ + struct net_device *netdev = handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); + int ret; + + hns3_clear_all_ring(handle); + + ret = hns3_nic_uninit_vector_data(priv); + if (ret) { + netdev_err(netdev, "uninit vector error\n"); + return ret; + } + + ret = hns3_uninit_all_ring(priv); + if (ret) + netdev_err(netdev, "uninit ring error\n"); + + priv->ring_data = NULL; + + return ret; +} + +static int hns3_reset_notify(struct hnae3_handle *handle, + enum hnae3_reset_notify_type type) +{ + int ret = 0; + + switch (type) { + case HNAE3_UP_CLIENT: + ret = hns3_reset_notify_up_enet(handle); + break; + case HNAE3_DOWN_CLIENT: + ret = hns3_reset_notify_down_enet(handle); + break; + case HNAE3_INIT_CLIENT: + ret = hns3_reset_notify_init_enet(handle); + break; + case HNAE3_UNINIT_CLIENT: + ret = hns3_reset_notify_uninit_enet(handle); + break; + default: + break; + } + + return ret; +} + static const struct hnae3_client_ops client_ops = { .init_instance = hns3_client_init, .uninit_instance = hns3_client_uninit, .link_status_change = hns3_link_status_change, .setup_tc = hns3_client_setup_tc, + .reset_notify = hns3_reset_notify, }; /* hns3_init_module - Driver registration routine -- cgit v1.2.3 From ae064e6123f89f90af7e4ea190cc0c612643ca93 Mon Sep 17 00:00:00 2001 From: qumingguang Date: Thu, 2 Nov 2017 20:45:22 +0800 Subject: net: hns3: Fix a misuse of devm_free_irq We should use free_irq() to free the NIC IRQ at unload time, because the IRQ is requested with request_irq() when the NIC is brought up. Otherwise, bringing the net device up again after a port reset will crash. This patch fixes the issue. Signed-off-by: qumingguang Signed-off-by: Lipeng Signed-off-by: Yunsheng Lin Signed-off-by: David S. Miller
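The rule this fix restores is that an IRQ must be released with the counterpart of the call that requested it: a plain request_irq() pairs with free_irq(), while devm_free_irq() is only for IRQs obtained through devm_request_irq(). A minimal sketch of the pairing, with invented handler and cookie names rather than the driver's actual ones:

	/* up path: plain, non-devres request */
	ret = request_irq(vector_irq, my_vector_handler, 0,
			  "my-nic-tqp", my_cookie);
	if (ret)
		return ret;

	/* down/uninit path: must mirror the request above */
	free_irq(vector_irq, my_cookie);

Mixing the two families is what the commit message above identifies as the cause of the crash on the next bring-up after a port reset.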
--- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 39679fdb83c7..2a0af11c9b59 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -2558,9 +2558,8 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) (void)irq_set_affinity_hint( priv->tqp_vector[i].vector_irq, NULL); - devm_free_irq(&pdev->dev, - priv->tqp_vector[i].vector_irq, - &priv->tqp_vector[i]); + free_irq(priv->tqp_vector[i].vector_irq, + &priv->tqp_vector[i]); } priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED; -- cgit v1.2.3 From c6dc52130536d6d3d750bf1097b3bc897f7ef054 Mon Sep 17 00:00:00 2001 From: qumingguang Date: Thu, 2 Nov 2017 20:45:23 +0800 Subject: net: hns3: fix a bug about statistic counters in the reset process All members of struct hdev->hw_stats are initialized to 0, since hdev is allocated with devm_kzalloc(). But in the reset process hdev is not allocated again, so hdev->hw_stats needs to be cleared explicitly, otherwise the statistics will be wrong after the reset. This patch sets all of the statistic counters to zero after reset. Signed-off-by: qumingguang Signed-off-by: Lipeng Signed-off-by: Yunsheng Lin Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 699983a954b2..c6ba89089ef3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -4836,6 +4836,11 @@ err_hclge_dev: return ret; } +static void hclge_stats_clear(struct hclge_dev *hdev) +{ + memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats)); +} + static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) { struct hclge_dev *hdev = ae_dev->priv; @@ -4844,6 +4849,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) set_bit(HCLGE_STATE_DOWN, &hdev->state); + hclge_stats_clear(hdev); + ret = hclge_cmd_init(hdev); if (ret) { dev_err(&pdev->dev, "Cmd queue init failed\n"); -- cgit v1.2.3 From 25c5f715381e0f52993972567fae653b700126fa Mon Sep 17 00:00:00 2001 From: Felix Manlunas Date: Wed, 1 Nov 2017 18:14:49 -0700 Subject: liquidio: bump up driver version to 1.7.0 to match newer NIC firmware Signed-off-by: Felix Manlunas Acked-by: Derek Chickles Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 3bcdda85e360..522dcc4dcff7 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -27,8 +27,8 @@ #define LIQUIDIO_PACKAGE "" #define LIQUIDIO_BASE_MAJOR_VERSION 1 -#define LIQUIDIO_BASE_MINOR_VERSION 6 -#define LIQUIDIO_BASE_MICRO_VERSION 1 +#define LIQUIDIO_BASE_MINOR_VERSION 7 +#define LIQUIDIO_BASE_MICRO_VERSION 0 #define LIQUIDIO_BASE_VERSION __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \ __stringify(LIQUIDIO_BASE_MINOR_VERSION) #define LIQUIDIO_MICRO_VERSION "." 
__stringify(LIQUIDIO_BASE_MICRO_VERSION) -- cgit v1.2.3 From a882d20cdb7775ff8b4aac880255eff6a2c1c85e Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Thu, 2 Nov 2017 11:15:07 +0000 Subject: cxgb4: fix error return code in cxgb4_set_hash_filter() Fix to return a negative error code from the cxgb4_alloc_atid() error handling case instead of 0. Fixes: 12b276fbf6e0 ("cxgb4: add support to create hash filters") Signed-off-by: Wei Yongjun Acked-By: Kumar Sanghvi Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index abab67d52edb..5980f308a253 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -1110,8 +1110,10 @@ static int cxgb4_set_hash_filter(struct net_device *dev, } atid = cxgb4_alloc_atid(t, f); - if (atid < 0) + if (atid < 0) { + ret = atid; goto free_smt; + } iconf = adapter->params.tp.ingress_config; if (iconf & VNIC_F) { -- cgit v1.2.3 From 48fac8852637f00abcb05b1af3489e3cd45cda58 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 2 Nov 2017 17:14:06 +0100 Subject: mlxsw: spectrum_router: Embed netevent notifier block in router struct We are going to need to respond to netevents notifying us about multipath hash updates by configuring the device's hash parameters. Embed the netevent notifier in the router struct so that we can retrieve it upon notifications and use it to configure the device. Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 7 ------- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 2 -- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 14 ++++++++++++-- 3 files changed, 12 insertions(+), 11 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 3f4be9556e56..52f38b480669 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -4574,10 +4574,6 @@ static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = { .notifier_call = mlxsw_sp_inet6addr_event, }; -static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = { - .notifier_call = mlxsw_sp_router_netevent_event, -}; - static const struct pci_device_id mlxsw_sp_pci_id_table[] = { {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, {0, }, @@ -4596,7 +4592,6 @@ static int __init mlxsw_sp_module_init(void) register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); - register_netevent_notifier(&mlxsw_sp_router_netevent_nb); err = mlxsw_core_driver_register(&mlxsw_sp_driver); if (err) @@ -4611,7 +4606,6 @@ static int __init mlxsw_sp_module_init(void) err_pci_driver_register: mlxsw_core_driver_unregister(&mlxsw_sp_driver); err_core_driver_register: - unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb); unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); @@ -4623,7 +4617,6 @@ static void __exit mlxsw_sp_module_exit(void) { 
mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver); mlxsw_core_driver_unregister(&mlxsw_sp_driver); - unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb); unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index aa0cefb25e18..b2393bb8cef9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -385,8 +385,6 @@ static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port) /* spectrum_router.c */ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp); -int mlxsw_sp_router_netevent_event(struct notifier_block *unused, - unsigned long event, void *ptr); int mlxsw_sp_netdevice_router_port_event(struct net_device *dev); int mlxsw_sp_inetaddr_event(struct notifier_block *unused, unsigned long event, void *ptr); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 9fe4cdb23189..d49c1c92a0fa 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -96,6 +96,7 @@ struct mlxsw_sp_router { struct list_head ipip_list; bool aborted; struct notifier_block fib_nb; + struct notifier_block netevent_nb; const struct mlxsw_sp_rif_ops **rif_ops_arr; const struct mlxsw_sp_ipip_ops **ipip_ops_arr; }; @@ -2076,8 +2077,8 @@ out: kfree(neigh_work); } -int mlxsw_sp_router_netevent_event(struct notifier_block *unused, - unsigned long event, void *ptr) +static int mlxsw_sp_router_netevent_event(struct notifier_block *unused, + unsigned long event, void *ptr) { struct mlxsw_sp_neigh_event_work *neigh_work; struct mlxsw_sp_port *mlxsw_sp_port; @@ -6720,6 +6721,12 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) if (err) goto err_neigh_init; + mlxsw_sp->router->netevent_nb.notifier_call = + mlxsw_sp_router_netevent_event; + err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb); + if (err) + goto err_register_netevent_notifier; + mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event; err = register_fib_notifier(&mlxsw_sp->router->fib_nb, mlxsw_sp_router_fib_dump_flush); @@ -6729,6 +6736,8 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) return 0; err_register_fib_notifier: + unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb); +err_register_netevent_notifier: mlxsw_sp_neigh_fini(mlxsw_sp); err_neigh_init: mlxsw_sp_vrs_fini(mlxsw_sp); @@ -6754,6 +6763,7 @@ err_router_init: void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) { unregister_fib_notifier(&mlxsw_sp->router->fib_nb); + unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb); mlxsw_sp_neigh_fini(mlxsw_sp); mlxsw_sp_vrs_fini(mlxsw_sp); mlxsw_sp_mr_fini(mlxsw_sp); -- cgit v1.2.3 From ceb8881ddf6d79c257c916763b0e558d053b2560 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 2 Nov 2017 17:14:07 +0100 Subject: mlxsw: spectrum_router: Properly name netevent work struct The struct containing the work item queued from the netevent handler is named after the only event it is currently used for, which is neighbour updates. Use a more appropriate name for the struct, as we are going to use it for more events. 
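For context, the surrounding mlxsw code follows the usual pattern for doing sleeping work from an atomic notifier callback: allocate a small work struct with GFP_ATOMIC, stash the context in it, and free it at the end of the work function. A condensed sketch of that shape, with invented names (assuming a private struct that embeds the notifier_block, as the earlier patch does for mlxsw_sp_router):

	struct my_router {
		struct notifier_block netevent_nb;
		/* ... device context ... */
	};

	struct my_netevent_work {
		struct work_struct work;
		struct my_router *router;
	};

	static void my_netevent_work_fn(struct work_struct *work)
	{
		struct my_netevent_work *w =
			container_of(work, struct my_netevent_work, work);

		/* sleeping context: reconfigure the device via w->router */
		kfree(w);
	}

	static int my_netevent_cb(struct notifier_block *nb,
				  unsigned long event, void *ptr)
	{
		struct my_router *router =
			container_of(nb, struct my_router, netevent_nb);
		struct my_netevent_work *w;

		w = kzalloc(sizeof(*w), GFP_ATOMIC);	/* atomic context */
		if (!w)
			return NOTIFY_BAD;

		INIT_WORK(&w->work, my_netevent_work_fn);
		w->router = router;
		schedule_work(&w->work);
		return NOTIFY_DONE;
	}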
Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 26 +++++++++++----------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index d49c1c92a0fa..d5094b81adbf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2026,7 +2026,7 @@ mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true); } -struct mlxsw_sp_neigh_event_work { +struct mlxsw_sp_netevent_work { struct work_struct work; struct mlxsw_sp *mlxsw_sp; struct neighbour *n; @@ -2034,11 +2034,11 @@ struct mlxsw_sp_neigh_event_work { static void mlxsw_sp_router_neigh_event_work(struct work_struct *work) { - struct mlxsw_sp_neigh_event_work *neigh_work = - container_of(work, struct mlxsw_sp_neigh_event_work, work); - struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp; + struct mlxsw_sp_netevent_work *net_work = + container_of(work, struct mlxsw_sp_netevent_work, work); + struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp; struct mlxsw_sp_neigh_entry *neigh_entry; - struct neighbour *n = neigh_work->n; + struct neighbour *n = net_work->n; unsigned char ha[ETH_ALEN]; bool entry_connected; u8 nud_state, dead; @@ -2074,13 +2074,13 @@ static void mlxsw_sp_router_neigh_event_work(struct work_struct *work) out: rtnl_unlock(); neigh_release(n); - kfree(neigh_work); + kfree(net_work); } static int mlxsw_sp_router_netevent_event(struct notifier_block *unused, unsigned long event, void *ptr) { - struct mlxsw_sp_neigh_event_work *neigh_work; + struct mlxsw_sp_netevent_work *net_work; struct mlxsw_sp_port *mlxsw_sp_port; struct mlxsw_sp *mlxsw_sp; unsigned long interval; @@ -2119,22 +2119,22 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *unused, if (!mlxsw_sp_port) return NOTIFY_DONE; - neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC); - if (!neigh_work) { + net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC); + if (!net_work) { mlxsw_sp_port_dev_put(mlxsw_sp_port); return NOTIFY_BAD; } - INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work); - neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - neigh_work->n = n; + INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work); + net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + net_work->n = n; /* Take a reference to ensure the neighbour won't be * destructed until we drop the reference in delayed * work. */ neigh_clone(n); - mlxsw_core_schedule_work(&neigh_work->work); + mlxsw_core_schedule_work(&net_work->work); mlxsw_sp_port_dev_put(mlxsw_sp_port); break; } -- cgit v1.2.3 From e471859b72fc2d9fe0f3163a7c238029271259ec Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 2 Nov 2017 17:14:08 +0100 Subject: mlxsw: reg: Add Router ECMP Configuration Register Version 2 The RECRv2 register is used for setting up the router's ECMP hash configuration. Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 132 ++++++++++++++++++++++++++++++ 1 file changed, 132 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index db6cd263dd61..5066553dd0b6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -5844,6 +5844,137 @@ static inline void mlxsw_reg_rigr2_erif_entry_pack(char *payload, int index, mlxsw_reg_rigr2_erif_entry_erif_set(payload, index, erif); } +/* RECR-V2 - Router ECMP Configuration Version 2 Register + * ------------------------------------------------------ + */ +#define MLXSW_REG_RECR2_ID 0x8025 +#define MLXSW_REG_RECR2_LEN 0x38 + +MLXSW_REG_DEFINE(recr2, MLXSW_REG_RECR2_ID, MLXSW_REG_RECR2_LEN); + +/* reg_recr2_pp + * Per-port configuration + * Access: Index + */ +MLXSW_ITEM32(reg, recr2, pp, 0x00, 24, 1); + +/* reg_recr2_sh + * Symmetric hash + * Access: RW + */ +MLXSW_ITEM32(reg, recr2, sh, 0x00, 8, 1); + +/* reg_recr2_seed + * Seed + * Access: RW + */ +MLXSW_ITEM32(reg, recr2, seed, 0x08, 0, 32); + +enum { + /* Enable IPv4 fields if packet is not TCP and not UDP */ + MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP = 3, + /* Enable IPv4 fields if packet is TCP or UDP */ + MLXSW_REG_RECR2_IPV4_EN_TCP_UDP = 4, + /* Enable IPv6 fields if packet is not TCP and not UDP */ + MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP = 5, + /* Enable IPv6 fields if packet is TCP or UDP */ + MLXSW_REG_RECR2_IPV6_EN_TCP_UDP = 6, + /* Enable TCP/UDP header fields if packet is IPv4 */ + MLXSW_REG_RECR2_TCP_UDP_EN_IPV4 = 7, + /* Enable TCP/UDP header fields if packet is IPv6 */ + MLXSW_REG_RECR2_TCP_UDP_EN_IPV6 = 8, +}; + +/* reg_recr2_outer_header_enables + * Bit mask where each bit enables a specific layer to be included in + * the hash calculation. + * Access: RW + */ +MLXSW_ITEM_BIT_ARRAY(reg, recr2, outer_header_enables, 0x10, 0x04, 1); + +enum { + /* IPv4 Source IP */ + MLXSW_REG_RECR2_IPV4_SIP0 = 9, + MLXSW_REG_RECR2_IPV4_SIP3 = 12, + /* IPv4 Destination IP */ + MLXSW_REG_RECR2_IPV4_DIP0 = 13, + MLXSW_REG_RECR2_IPV4_DIP3 = 16, + /* IP Protocol */ + MLXSW_REG_RECR2_IPV4_PROTOCOL = 17, + /* IPv6 Source IP */ + MLXSW_REG_RECR2_IPV6_SIP0_7 = 21, + MLXSW_REG_RECR2_IPV6_SIP8 = 29, + MLXSW_REG_RECR2_IPV6_SIP15 = 36, + /* IPv6 Destination IP */ + MLXSW_REG_RECR2_IPV6_DIP0_7 = 37, + MLXSW_REG_RECR2_IPV6_DIP8 = 45, + MLXSW_REG_RECR2_IPV6_DIP15 = 52, + /* IPv6 Next Header */ + MLXSW_REG_RECR2_IPV6_NEXT_HEADER = 53, + /* IPv6 Flow Label */ + MLXSW_REG_RECR2_IPV6_FLOW_LABEL = 57, + /* TCP/UDP Source Port */ + MLXSW_REG_RECR2_TCP_UDP_SPORT = 74, + /* TCP/UDP Destination Port */ + MLXSW_REG_RECR2_TCP_UDP_DPORT = 75, +}; + +/* reg_recr2_outer_header_fields_enable + * Packet fields to enable for ECMP hash subject to outer_header_enable. 
+ * Access: RW + */ +MLXSW_ITEM_BIT_ARRAY(reg, recr2, outer_header_fields_enable, 0x14, 0x14, 1); + +static inline void mlxsw_reg_recr2_ipv4_sip_enable(char *payload) +{ + int i; + + for (i = MLXSW_REG_RECR2_IPV4_SIP0; i <= MLXSW_REG_RECR2_IPV4_SIP3; i++) + mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i, + true); +} + +static inline void mlxsw_reg_recr2_ipv4_dip_enable(char *payload) +{ + int i; + + for (i = MLXSW_REG_RECR2_IPV4_DIP0; i <= MLXSW_REG_RECR2_IPV4_DIP3; i++) + mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i, + true); +} + +static inline void mlxsw_reg_recr2_ipv6_sip_enable(char *payload) +{ + int i = MLXSW_REG_RECR2_IPV6_SIP0_7; + + mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i, true); + + i = MLXSW_REG_RECR2_IPV6_SIP8; + for (; i <= MLXSW_REG_RECR2_IPV6_SIP15; i++) + mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i, + true); +} + +static inline void mlxsw_reg_recr2_ipv6_dip_enable(char *payload) +{ + int i = MLXSW_REG_RECR2_IPV6_DIP0_7; + + mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i, true); + + i = MLXSW_REG_RECR2_IPV6_DIP8; + for (; i <= MLXSW_REG_RECR2_IPV6_DIP15; i++) + mlxsw_reg_recr2_outer_header_fields_enable_set(payload, i, + true); +} + +static inline void mlxsw_reg_recr2_pack(char *payload, u32 seed) +{ + MLXSW_REG_ZERO(recr2, payload); + mlxsw_reg_recr2_pp_set(payload, false); + mlxsw_reg_recr2_sh_set(payload, true); + mlxsw_reg_recr2_seed_set(payload, seed); +} + /* RMFT-V2 - Router Multicast Forwarding Table Version 2 Register * -------------------------------------------------------------- * The RMFT_V2 register is used to configure and query the multicast table. @@ -7313,6 +7444,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(raleu), MLXSW_REG(rauhtd), MLXSW_REG(rigr2), + MLXSW_REG(recr2), MLXSW_REG(rmft2), MLXSW_REG(mfcr), MLXSW_REG(mfsc), -- cgit v1.2.3 From af658b6a0e6da7a9d9b82fa536d610a7457f37fd Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 2 Nov 2017 17:14:09 +0100 Subject: mlxsw: spectrum_router: Align multipath hash parameters with kernel's Up until now we used the hardware's defaults for multipath hash computation. This patch aligns the hardware's multipath parameters with the kernel's. For IPv4 packets, the parameters are determined according to the 'fib_multipath_hash_policy' sysctl during module initialization. In case L3-mode is requested, only the source and destination IP addresses are used. There is no special handling of ICMP error packets. In case L4-mode is requested, a 5-tuple is used: source and destination IP addresses, source and destination ports and IP protocol. Note that the layer 4 fields are not considered for fragmented packets. For IPv6 packets, the source and destination IP addresses are used, as well as the flow label and the next header fields. Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Signed-off-by: Jiri Pirko Signed-off-by: David S. 
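For reference, the kernel-side policy consulted during initialization here is the net.ipv4.fib_multipath_hash_policy sysctl (read from init_net as sysctl_fib_multipath_hash_policy): 0 selects the L3 source/destination-address policy and 1 selects the L4 five-tuple policy. So the sysctl has to be set, e.g. with "sysctl -w net.ipv4.fib_multipath_hash_policy=1", before the driver initializes for the five-tuple configuration described above to be programmed; a follow-up patch below updates the device at runtime as well.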
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 64 ++++++++++++++++++++++ 1 file changed, 64 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index d5094b81adbf..fe99d245dd91 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include #include @@ -6644,6 +6645,64 @@ static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb) mlxsw_sp_router_fib_flush(router->mlxsw_sp); } +#ifdef CONFIG_IP_ROUTE_MULTIPATH +static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header) +{ + mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true); +} + +static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field) +{ + mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true); +} + +static void mlxsw_sp_mp4_hash_init(char *recr2_pl) +{ + bool only_l3 = !init_net.ipv4.sysctl_fib_multipath_hash_policy; + + mlxsw_sp_mp_hash_header_set(recr2_pl, + MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP); + mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP); + mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl); + mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl); + if (only_l3) + return; + mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4); + mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL); + mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT); + mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT); +} + +static void mlxsw_sp_mp6_hash_init(char *recr2_pl) +{ + mlxsw_sp_mp_hash_header_set(recr2_pl, + MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP); + mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP); + mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl); + mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl); + mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_FLOW_LABEL); + mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER); +} + +static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp) +{ + char recr2_pl[MLXSW_REG_RECR2_LEN]; + u32 seed; + + get_random_bytes(&seed, sizeof(seed)); + mlxsw_reg_recr2_pack(recr2_pl, seed); + mlxsw_sp_mp4_hash_init(recr2_pl); + mlxsw_sp_mp6_hash_init(recr2_pl); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl); +} +#else +static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp) +{ + return 0; +} +#endif + static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) { char rgcr_pl[MLXSW_REG_RGCR_LEN]; @@ -6727,6 +6786,10 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) if (err) goto err_register_netevent_notifier; + err = mlxsw_sp_mp_hash_init(mlxsw_sp); + if (err) + goto err_mp_hash_init; + mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event; err = register_fib_notifier(&mlxsw_sp->router->fib_nb, mlxsw_sp_router_fib_dump_flush); @@ -6736,6 +6799,7 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) return 0; err_register_fib_notifier: +err_mp_hash_init: unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb); err_register_netevent_notifier: mlxsw_sp_neigh_fini(mlxsw_sp); -- cgit v1.2.3 From 28678f07f127d151354ff12b0d05557ae897e972 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 2 Nov 2017 17:14:10 +0100 Subject: mlxsw: spectrum_router: Update multipath hash parameters upon netevents Make sure the device and the kernel are performing 
the multipath hash according to the same parameters by updating the device whenever the relevant netevent is generated. Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 31 +++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index fe99d245dd91..d657f01f2d79 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2078,15 +2078,29 @@ out: kfree(net_work); } -static int mlxsw_sp_router_netevent_event(struct notifier_block *unused, +static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp); + +static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work) +{ + struct mlxsw_sp_netevent_work *net_work = + container_of(work, struct mlxsw_sp_netevent_work, work); + struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp; + + mlxsw_sp_mp_hash_init(mlxsw_sp); + kfree(net_work); +} + +static int mlxsw_sp_router_netevent_event(struct notifier_block *nb, unsigned long event, void *ptr) { struct mlxsw_sp_netevent_work *net_work; struct mlxsw_sp_port *mlxsw_sp_port; + struct mlxsw_sp_router *router; struct mlxsw_sp *mlxsw_sp; unsigned long interval; struct neigh_parms *p; struct neighbour *n; + struct net *net; switch (event) { case NETEVENT_DELAY_PROBE_TIME_UPDATE: @@ -2138,6 +2152,21 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *unused, mlxsw_core_schedule_work(&net_work->work); mlxsw_sp_port_dev_put(mlxsw_sp_port); break; + case NETEVENT_MULTIPATH_HASH_UPDATE: + net = ptr; + + if (!net_eq(net, &init_net)) + return NOTIFY_DONE; + + net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC); + if (!net_work) + return NOTIFY_BAD; + + router = container_of(nb, struct mlxsw_sp_router, netevent_nb); + INIT_WORK(&net_work->work, mlxsw_sp_router_mp_hash_event_work); + net_work->mlxsw_sp = router->mlxsw_sp; + mlxsw_core_schedule_work(&net_work->work); + break; } return NOTIFY_DONE; -- cgit v1.2.3 From 36bf994a80571aeee2549db1bc93e34342f40c24 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Thu, 2 Nov 2017 19:26:58 +0530 Subject: cxgb4: add new T6 pci device id's Add 0x6086 T6 device id. Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index 8c22bb8c9fbf..60cf9e02de5d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -205,6 +205,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x6083), /* Custom T62100-CR QSFP28 */ CH_PCI_ID_TABLE_FENTRY(0x6084), /* Custom T64100-CR QSFP28 */ CH_PCI_ID_TABLE_FENTRY(0x6085), /* Custom T6240-SO */ + CH_PCI_ID_TABLE_FENTRY(0x6086), /* Custom T6225-SO-CR */ CH_PCI_DEVICE_ID_TABLE_DEFINE_END; #endif /* __T4_PCI_ID_TBL_H__ */ -- cgit v1.2.3 From 9a7b96b3462679a2fcf7205d396dbf1f8f28454c Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Thu, 2 Nov 2017 19:28:20 +0530 Subject: cxgb4vf: define get_fecparam ethtool callback Add support to new ethtool operation get_fecparam to fetch FEC parameters. 
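Once a driver wires up .get_fecparam, the FEC state becomes visible from userspace via "ethtool --show-fec <dev>" (assuming an ethtool binary recent enough to support FEC reporting), which prints both the supported/configured encodings and the currently active one.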
Original Work by: Casey Leedom Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- .../net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 58 ++++++++++++++++++++++ 1 file changed, 58 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 8996ebbd222e..b48361cfdc78 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -1401,6 +1401,63 @@ static int cxgb4vf_get_link_ksettings(struct net_device *dev, return 0; } +/* Translate the Firmware FEC value into the ethtool value. */ +static inline unsigned int fwcap_to_eth_fec(unsigned int fw_fec) +{ + unsigned int eth_fec = 0; + + if (fw_fec & FW_PORT_CAP32_FEC_RS) + eth_fec |= ETHTOOL_FEC_RS; + if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS) + eth_fec |= ETHTOOL_FEC_BASER; + + /* if nothing is set, then FEC is off */ + if (!eth_fec) + eth_fec = ETHTOOL_FEC_OFF; + + return eth_fec; +} + +/* Translate Common Code FEC value into ethtool value. */ +static inline unsigned int cc_to_eth_fec(unsigned int cc_fec) +{ + unsigned int eth_fec = 0; + + if (cc_fec & FEC_AUTO) + eth_fec |= ETHTOOL_FEC_AUTO; + if (cc_fec & FEC_RS) + eth_fec |= ETHTOOL_FEC_RS; + if (cc_fec & FEC_BASER_RS) + eth_fec |= ETHTOOL_FEC_BASER; + + /* if nothing is set, then FEC is off */ + if (!eth_fec) + eth_fec = ETHTOOL_FEC_OFF; + + return eth_fec; +} + +static int cxgb4vf_get_fecparam(struct net_device *dev, + struct ethtool_fecparam *fec) +{ + const struct port_info *pi = netdev_priv(dev); + const struct link_config *lc = &pi->link_cfg; + + /* Translate the Firmware FEC Support into the ethtool value. We + * always support IEEE 802.3 "automatic" selection of Link FEC type if + * any FEC is supported. + */ + fec->fec = fwcap_to_eth_fec(lc->pcaps); + if (fec->fec != ETHTOOL_FEC_OFF) + fec->fec |= ETHTOOL_FEC_AUTO; + + /* Translate the current internal FEC parameters into the + * ethtool values. + */ + fec->active_fec = cc_to_eth_fec(lc->fec); + return 0; +} + /* * Return our driver information. */ @@ -1774,6 +1831,7 @@ static void cxgb4vf_get_wol(struct net_device *dev, static const struct ethtool_ops cxgb4vf_ethtool_ops = { .get_link_ksettings = cxgb4vf_get_link_ksettings, + .get_fecparam = cxgb4vf_get_fecparam, .get_drvinfo = cxgb4vf_get_drvinfo, .get_msglevel = cxgb4vf_get_msglevel, .set_msglevel = cxgb4vf_set_msglevel, -- cgit v1.2.3 From de4a10ef6eff0eb0ced97a39dc3edd0d3101b6ed Mon Sep 17 00:00:00 2001 From: Andy Gospodarek Date: Fri, 3 Nov 2017 03:32:38 -0400 Subject: bnxt_en: fix typo in bnxt_set_coalesce Recent refactoring of coalesce settings contained a typo that prevents receive settings from being set properly. Fixes: 18775aa8a91f ("bnxt_en: Reorganize the coalescing parameters.") Signed-off-by: Andy Gospodarek Signed-off-by: Michael Chan Signed-off-by: David S. 
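With the bug described above, the tx-side values handed in through ethtool's coalescing interface landed in the rx structure instead. A quick way to exercise the fixed path is to set distinct values with "ethtool -C <dev> rx-usecs 50 tx-usecs 100" and read them back with "ethtool -c <dev>" (interface name and values are illustrative only).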
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 5cd1a501c62b..7ce1d4b7e67d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -84,7 +84,7 @@ static int bnxt_set_coalesce(struct net_device *dev, hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq; hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult; - hw_coal = &bp->rx_coal; + hw_coal = &bp->tx_coal; mult = hw_coal->bufs_per_record; hw_coal->coal_ticks = coal->tx_coalesce_usecs; hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult; -- cgit v1.2.3 From b153cbc507946f52d5aa687fd64f45d82cb36a3b Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Fri, 3 Nov 2017 03:32:39 -0400 Subject: bnxt_en: Fix IRQ coalescing regression. Recent IRQ coalescing clean up has removed a guard-rail for the max DMA buffer coalescing value. This is a 6-bit value and must not be 0. We already have a check for 0 but 64 is equivalent to 0 and will cause non-stop interrupts. Fix it by adding the proper check. Fixes: f8503969d27b ("bnxt_en: Refactor and simplify coalescing code.") Reported-by: Andy Gospodarek Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index c3dfaa5151aa..4e3d569bf32e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4548,9 +4548,13 @@ static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal, val = clamp_t(u16, hw_coal->coal_bufs, 1, max); req->num_cmpl_aggr_int = cpu_to_le16(val); + + /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ + val = min_t(u16, val, 63); req->num_cmpl_dma_aggr = cpu_to_le16(val); - val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, max); + /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */ + val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 63); req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val); tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks); -- cgit v1.2.3 From 16b5e50147c21edef7133f204c43465f0c03c3f5 Mon Sep 17 00:00:00 2001 From: Fuyun Liang Date: Fri, 3 Nov 2017 12:18:25 +0800 Subject: net: hns3: fix for getting autoneg in hns3_get_link_ksettings This patch fixes a bug for ethtool's get_link_ksettings(). When phy exists, we should get autoneg from phy rather than from mac. Because the value of mac.autoneg is invalid when phy exists. Fixes: 496d03e (net: hns3: Add Ethtool support to HNS3 driver) Signed-off-by: Fuyun Liang Signed-off-by: Lipeng Signed-off-by: David S. 
Miller --- .../ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c | 30 +++++++++++----------- 1 file changed, 15 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c index 5cd163bdbf14..367b20cef294 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c @@ -9,6 +9,7 @@ #include #include +#include #include "hns3_enet.h" @@ -571,26 +572,25 @@ static int hns3_get_link_ksettings(struct net_device *netdev, u32 advertised_caps; u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN; u8 link_stat; - u8 auto_neg; - u8 duplex; - u32 speed; if (!h->ae_algo || !h->ae_algo->ops) return -EOPNOTSUPP; /* 1.auto_neg & speed & duplex from cmd */ - if (h->ae_algo->ops->get_ksettings_an_result) { - h->ae_algo->ops->get_ksettings_an_result(h, &auto_neg, - &speed, &duplex); - cmd->base.autoneg = auto_neg; - cmd->base.speed = speed; - cmd->base.duplex = duplex; - - link_stat = hns3_get_link(netdev); - if (!link_stat) { - cmd->base.speed = (u32)SPEED_UNKNOWN; - cmd->base.duplex = DUPLEX_UNKNOWN; - } + if (netdev->phydev) + phy_ethtool_ksettings_get(netdev->phydev, cmd); + else if (h->ae_algo->ops->get_ksettings_an_result) + h->ae_algo->ops->get_ksettings_an_result(h, + &cmd->base.autoneg, + &cmd->base.speed, + &cmd->base.duplex); + else + return -EOPNOTSUPP; + + link_stat = hns3_get_link(netdev); + if (!link_stat) { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; } /* 2.media_type get from bios parameter block */ -- cgit v1.2.3 From 2b39cabb2a283cea0c3d96d9370575371726164f Mon Sep 17 00:00:00 2001 From: Fuyun Liang Date: Fri, 3 Nov 2017 12:18:26 +0800 Subject: net: hns3: fix for getting advertised_caps in hns3_get_link_ksettings This patch fixes a bug for ethtool's get_link_ksettings(). The autoneg advertising bit is always added to advertised_caps, whether autoneg is enabled or disabled. This patch fixes it. Fixes: 496d03e (net: hns3: Add Ethtool support to HNS3 driver) Signed-off-by: Fuyun Liang Signed-off-by: Lipeng Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c index 367b20cef294..0e10a43e29b3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c @@ -640,6 +640,9 @@ static int hns3_get_link_ksettings(struct net_device *netdev, break; } + if (!cmd->base.autoneg) + advertised_caps &= ~HNS3_LM_AUTONEG_BIT; + /* now, map driver link modes to ethtool link modes */ hns3_driv_to_eth_caps(supported_caps, cmd, false); hns3_driv_to_eth_caps(advertised_caps, cmd, true); -- cgit v1.2.3 From 3e1a8f10a1375133ea4a943f21138f00b4d06dc2 Mon Sep 17 00:00:00 2001 From: Fuyun Liang Date: Fri, 3 Nov 2017 12:18:27 +0800 Subject: net: hns3: fix a bug in hns3_driv_to_eth_caps The values of link_modes.advertising and link_modes.supported are zeroed on every iteration of the for loop in hns3_driv_to_eth_caps(), but we only want to set specific bits in them; the per-iteration initialization is unnecessary and wipes out the bits set on earlier iterations. This patch fixes it. Fixes: 496d03e (net: hns3: Add Ethtool support to HNS3 driver) Signed-off-by: Fuyun Liang Signed-off-by: Lipeng Signed-off-by: David S. 
Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c index 7fe193b1ccaf..a21470c72da3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c @@ -832,6 +832,23 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) } } +static int hns3_nway_reset(struct net_device *netdev) +{ + struct phy_device *phy = netdev->phydev; + + if (!netif_running(netdev)) + return 0; + + /* Only support nway_reset for netdev with phy attached for now */ + if (!phy) + return -EOPNOTSUPP; + + if (phy->autoneg != AUTONEG_ENABLE) + return -EINVAL; + + return genphy_restart_aneg(phy); +} + static const struct ethtool_ops hns3_ethtool_ops = { .self_test = hns3_self_test, .get_drvinfo = hns3_get_drvinfo, @@ -850,6 +867,7 @@ static const struct ethtool_ops hns3_ethtool_ops = { .set_rxfh = hns3_set_rss, .get_link_ksettings = hns3_get_link_ksettings, .set_link_ksettings = hns3_set_link_ksettings, + .nway_reset = hns3_nway_reset, }; void hns3_ethtool_set_ops(struct net_device *netdev) -- cgit v1.2.3 From 439adf885e6dd3b2a64941a167b4c18d3728c6dc Mon Sep 17 00:00:00 2001 From: Fuyun Liang Date: Fri, 3 Nov 2017 12:18:30 +0800 Subject: net: hns3: fix a bug for phy supported feature initialization This patch fixes a bug for phy supported feature initialization. Currently, the value of phydev->supported is initialized by kernel. So it includes many features that we do not support, such as SUPPORTED_FIBRE and SUPPORTED_BNC. This patch fixes it. Fixes: 256727d (net: hns3: Add MDIO support to HNS3 Ethernet driver for hip08 SoC) Signed-off-by: Fuyun Liang Signed-off-by: Lipeng Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index f32d719c4f77..7069e9408d7d 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -14,6 +14,13 @@ #include "hclge_main.h" #include "hclge_mdio.h" +#define HCLGE_PHY_SUPPORTED_FEATURES (SUPPORTED_Autoneg | \ + SUPPORTED_TP | \ + SUPPORTED_Pause | \ + PHY_10BT_FEATURES | \ + PHY_100BT_FEATURES | \ + PHY_1000BT_FEATURES) + enum hclge_mdio_c22_op_seq { HCLGE_MDIO_C22_WRITE = 1, HCLGE_MDIO_C22_READ = 2 @@ -195,6 +202,9 @@ int hclge_mac_start_phy(struct hclge_dev *hdev) return ret; } + phydev->supported &= HCLGE_PHY_SUPPORTED_FEATURES; + phydev->advertising = phydev->supported; + phy_start(phydev); return 0; -- cgit v1.2.3 From 796ec7769d452fd75a3afca3d768f25c120b6c50 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 3 Nov 2017 10:03:29 +0100 Subject: mlxsw: spectrum: Rename IPIP-related netdevice handlers To distinguish between events related to tunnel device itself and its bound device, rename a number of functions related to handling tunneling netdevice events to include _ol_ (for "overlay") in the name. That leaves room in the namespace for underlay-related functions, which would have _ul_ in the name. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 5 ++- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 12 +++--- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 45 +++++++++++----------- 3 files changed, 32 insertions(+), 30 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 52f38b480669..55bb3669bbcc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -4542,8 +4542,9 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb, int err = 0; mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb); - if (mlxsw_sp_netdev_is_ipip(mlxsw_sp, dev)) - err = mlxsw_sp_netdevice_ipip_event(mlxsw_sp, dev, event, ptr); + if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev)) + err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev, + event, ptr); else if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU) err = mlxsw_sp_netdevice_router_port_event(dev); else if (mlxsw_sp_is_vrf_event(event, ptr)) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index b2393bb8cef9..f01b5cb04963 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -396,13 +396,13 @@ int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused, unsigned long event, void *ptr); int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, struct netdev_notifier_changeupper_info *info); -bool mlxsw_sp_netdev_is_ipip(const struct mlxsw_sp *mlxsw_sp, - const struct net_device *dev); +bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *dev); int -mlxsw_sp_netdevice_ipip_event(struct mlxsw_sp *mlxsw_sp, - struct net_device *l3_dev, - unsigned long event, - struct netdev_notifier_changeupper_info *info); +mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *l3_dev, + unsigned long event, + struct netdev_notifier_changeupper_info *info); void mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index d657f01f2d79..96729331eb21 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1306,8 +1306,8 @@ static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp, return false; } -bool mlxsw_sp_netdev_is_ipip(const struct mlxsw_sp *mlxsw_sp, - const struct net_device *dev) +bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *dev) { return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL); } @@ -1326,8 +1326,8 @@ mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp, return NULL; } -static int mlxsw_sp_netdevice_ipip_reg_event(struct mlxsw_sp *mlxsw_sp, - struct net_device *ol_dev) +static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *ol_dev) { struct mlxsw_sp_router *router = mlxsw_sp->router; struct mlxsw_sp_ipip_entry *ipip_entry; @@ -1347,8 +1347,8 @@ static int mlxsw_sp_netdevice_ipip_reg_event(struct mlxsw_sp *mlxsw_sp, return 0; } -static void mlxsw_sp_netdevice_ipip_unreg_event(struct mlxsw_sp *mlxsw_sp, - struct net_device *ol_dev) +static void 
mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *ol_dev) { struct mlxsw_sp_ipip_entry *ipip_entry; @@ -1357,8 +1357,8 @@ static void mlxsw_sp_netdevice_ipip_unreg_event(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry); } -static int mlxsw_sp_netdevice_ipip_up_event(struct mlxsw_sp *mlxsw_sp, - struct net_device *ol_dev) +static int mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *ol_dev) { struct mlxsw_sp_fib_entry *decap_fib_entry; struct mlxsw_sp_ipip_entry *ipip_entry; @@ -1375,8 +1375,8 @@ static int mlxsw_sp_netdevice_ipip_up_event(struct mlxsw_sp *mlxsw_sp, return 0; } -static void mlxsw_sp_netdevice_ipip_down_event(struct mlxsw_sp *mlxsw_sp, - struct net_device *ol_dev) +static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *ol_dev) { struct mlxsw_sp_ipip_entry *ipip_entry; @@ -1385,8 +1385,8 @@ static void mlxsw_sp_netdevice_ipip_down_event(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry); } -static int mlxsw_sp_netdevice_ipip_vrf_event(struct mlxsw_sp *mlxsw_sp, - struct net_device *ol_dev) +static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *ol_dev) { struct mlxsw_sp_fib_entry *decap_fib_entry; struct mlxsw_sp_ipip_entry *ipip_entry; @@ -1424,26 +1424,27 @@ static int mlxsw_sp_netdevice_ipip_vrf_event(struct mlxsw_sp *mlxsw_sp, return 0; } -int mlxsw_sp_netdevice_ipip_event(struct mlxsw_sp *mlxsw_sp, - struct net_device *ol_dev, - unsigned long event, - struct netdev_notifier_changeupper_info *info) +int +mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *ol_dev, + unsigned long event, + struct netdev_notifier_changeupper_info *info) { switch (event) { case NETDEV_REGISTER: - return mlxsw_sp_netdevice_ipip_reg_event(mlxsw_sp, ol_dev); + return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev); case NETDEV_UNREGISTER: - mlxsw_sp_netdevice_ipip_unreg_event(mlxsw_sp, ol_dev); + mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev); return 0; case NETDEV_UP: - return mlxsw_sp_netdevice_ipip_up_event(mlxsw_sp, ol_dev); + return mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev); case NETDEV_DOWN: - mlxsw_sp_netdevice_ipip_down_event(mlxsw_sp, ol_dev); + mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev); return 0; case NETDEV_CHANGEUPPER: if (netif_is_l3_master(info->upper_dev)) - return mlxsw_sp_netdevice_ipip_vrf_event(mlxsw_sp, - ol_dev); + return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp, + ol_dev); return 0; } return 0; -- cgit v1.2.3 From cafdb2a0d4216c694971a06edf26029a08026ba4 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 3 Nov 2017 10:03:30 +0100 Subject: mlxsw: spectrum_router: Extract mlxsw_sp_netdevice_ipip_can_offload() Some of the code down the road needs this logic as well. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 96729331eb21..97f062a4ca64 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1326,18 +1326,28 @@ mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp, return NULL; } +static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp, + const struct net_device *ol_dev, + enum mlxsw_sp_ipip_type ipipt) +{ + const struct mlxsw_sp_ipip_ops *ops + = mlxsw_sp->router->ipip_ops_arr[ipipt]; + + /* For deciding whether decap should be offloaded, we don't care about + * overlay protocol, so ask whether either one is supported. + */ + return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) || + ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6); +} + static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp, struct net_device *ol_dev) { - struct mlxsw_sp_router *router = mlxsw_sp->router; struct mlxsw_sp_ipip_entry *ipip_entry; enum mlxsw_sp_ipip_type ipipt; mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt); - if (router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, ol_dev, - MLXSW_SP_L3_PROTO_IPV4) || - router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, ol_dev, - MLXSW_SP_L3_PROTO_IPV6)) { + if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) { ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt, ol_dev); if (IS_ERR(ipip_entry)) -- cgit v1.2.3 From 474f0ff618ae4305637e972746b42fabe2245b99 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 3 Nov 2017 10:03:31 +0100 Subject: mlxsw: spectrum: Move mlxsw_sp_ipip_netdev_{s, d}addr{, 4}() These functions ideologically belong to the IPIP module, and some follow-up work will benefit from their presence there. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 53 ++++++++++++++++++++++ .../net/ethernet/mellanox/mlxsw/spectrum_ipip.h | 4 ++ .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 53 ---------------------- .../net/ethernet/mellanox/mlxsw/spectrum_router.h | 7 --- 4 files changed, 57 insertions(+), 60 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c index 702fe945227c..8a9fbb64f4ad 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c @@ -68,6 +68,59 @@ static u32 mlxsw_sp_ipip_netdev_okey(const struct net_device *ol_dev) be32_to_cpu(tun->parms.o_key) : 0; } +static __be32 +mlxsw_sp_ipip_netdev_saddr4(const struct net_device *ol_dev) +{ + struct ip_tunnel *tun = netdev_priv(ol_dev); + + return tun->parms.iph.saddr; +} + +union mlxsw_sp_l3addr +mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, + const struct net_device *ol_dev) +{ + switch (proto) { + case MLXSW_SP_L3_PROTO_IPV4: + return (union mlxsw_sp_l3addr) { + .addr4 = mlxsw_sp_ipip_netdev_saddr4(ol_dev), + }; + case MLXSW_SP_L3_PROTO_IPV6: + break; + } + + WARN_ON(1); + return (union mlxsw_sp_l3addr) { + .addr4 = 0, + }; +} + +static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev) +{ + struct ip_tunnel *tun = netdev_priv(ol_dev); + + return tun->parms.iph.daddr; +} + +static union mlxsw_sp_l3addr +mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto, + const struct net_device *ol_dev) +{ + switch (proto) { + case MLXSW_SP_L3_PROTO_IPV4: + return (union mlxsw_sp_l3addr) { + .addr4 = mlxsw_sp_ipip_netdev_daddr4(ol_dev), + }; + case MLXSW_SP_L3_PROTO_IPV6: + break; + } + + WARN_ON(1); + return (union mlxsw_sp_l3addr) { + .addr4 = 0, + }; +} + static int mlxsw_sp_ipip_nexthop_update_gre4(struct mlxsw_sp *mlxsw_sp, u32 adj_index, struct mlxsw_sp_ipip_entry *ipip_entry) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h index 6fb49129ce87..87becd152a5c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h @@ -38,6 +38,10 @@ #include "spectrum_router.h" #include +union mlxsw_sp_l3addr +mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, + const struct net_device *ol_dev); + enum mlxsw_sp_ipip_type { MLXSW_SP_IPIP_TYPE_GRE4, MLXSW_SP_IPIP_TYPE_MAX, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 97f062a4ca64..ec90c6b6d126 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1016,59 +1016,6 @@ mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry) kfree(ipip_entry); } -static __be32 -mlxsw_sp_ipip_netdev_saddr4(const struct net_device *ol_dev) -{ - struct ip_tunnel *tun = netdev_priv(ol_dev); - - return tun->parms.iph.saddr; -} - -union mlxsw_sp_l3addr -mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, - const struct net_device *ol_dev) -{ - switch (proto) { - case MLXSW_SP_L3_PROTO_IPV4: - return (union mlxsw_sp_l3addr) { - .addr4 = mlxsw_sp_ipip_netdev_saddr4(ol_dev), - }; - case MLXSW_SP_L3_PROTO_IPV6: - break; - }; - - WARN_ON(1); - return (union mlxsw_sp_l3addr) { - .addr4 = 0, - }; -} - -__be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev) -{ - struct ip_tunnel *tun = 
netdev_priv(ol_dev); - - return tun->parms.iph.daddr; -} - -union mlxsw_sp_l3addr -mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto, - const struct net_device *ol_dev) -{ - switch (proto) { - case MLXSW_SP_L3_PROTO_IPV4: - return (union mlxsw_sp_l3addr) { - .addr4 = mlxsw_sp_ipip_netdev_daddr4(ol_dev), - }; - case MLXSW_SP_L3_PROTO_IPV6: - break; - }; - - WARN_ON(1); - return (union mlxsw_sp_l3addr) { - .addr4 = 0, - }; -} - static bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1, const union mlxsw_sp_l3addr *addr2) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 39e5811ed263..8120b01a9c36 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -103,13 +103,6 @@ mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_neigh_entry *neigh_entry, bool adding); bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry); -union mlxsw_sp_l3addr -mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, - const struct net_device *ol_dev); -union mlxsw_sp_l3addr -mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto, - const struct net_device *ol_dev); -__be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev); struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router, struct mlxsw_sp_nexthop *nh); bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh); -- cgit v1.2.3 From 9fb7bd77d11ab03b4a969279de9f54d8fd6fe988 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 3 Nov 2017 10:03:32 +0100 Subject: mlxsw: spectrum_ipip: Split accessor functions To implement NETDEV_CHANGE notifications on IP-in-IP tunnels, the handler needs to figure out what actually changed, to understand how exactly to update the offloads. It will do so by storing struct ip_tunnel_parm with previous configuration, and comparing that to the new version. To facilitate these comparisons, extract the code that operates on struct ip_tunnel_parm from the existing accessor functions, and make those a thin wrapper that extracts tunnel parameters and dispatches. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 100 ++++++++++++++------- .../net/ethernet/mellanox/mlxsw/spectrum_ipip.h | 3 + 2 files changed, 70 insertions(+), 33 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c index 8a9fbb64f4ad..1850080aacbc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c @@ -36,54 +36,49 @@ #include "spectrum_ipip.h" -static bool -mlxsw_sp_ipip_netdev_has_ikey(const struct net_device *ol_dev) +struct ip_tunnel_parm +mlxsw_sp_ipip_netdev_parms(const struct net_device *ol_dev) { struct ip_tunnel *tun = netdev_priv(ol_dev); - return !!(tun->parms.i_flags & TUNNEL_KEY); + return tun->parms; } -static bool -mlxsw_sp_ipip_netdev_has_okey(const struct net_device *ol_dev) +static bool mlxsw_sp_ipip_parms_has_ikey(struct ip_tunnel_parm parms) { - struct ip_tunnel *tun = netdev_priv(ol_dev); - - return !!(tun->parms.o_flags & TUNNEL_KEY); + return !!(parms.i_flags & TUNNEL_KEY); } -static u32 mlxsw_sp_ipip_netdev_ikey(const struct net_device *ol_dev) +static bool mlxsw_sp_ipip_parms_has_okey(struct ip_tunnel_parm parms) { - struct ip_tunnel *tun = netdev_priv(ol_dev); - - return mlxsw_sp_ipip_netdev_has_ikey(ol_dev) ? - be32_to_cpu(tun->parms.i_key) : 0; + return !!(parms.o_flags & TUNNEL_KEY); } -static u32 mlxsw_sp_ipip_netdev_okey(const struct net_device *ol_dev) +static u32 mlxsw_sp_ipip_parms_ikey(struct ip_tunnel_parm parms) { - struct ip_tunnel *tun = netdev_priv(ol_dev); - - return mlxsw_sp_ipip_netdev_has_okey(ol_dev) ? - be32_to_cpu(tun->parms.o_key) : 0; + return mlxsw_sp_ipip_parms_has_ikey(parms) ? + be32_to_cpu(parms.i_key) : 0; } -static __be32 -mlxsw_sp_ipip_netdev_saddr4(const struct net_device *ol_dev) +static u32 mlxsw_sp_ipip_parms_okey(struct ip_tunnel_parm parms) { - struct ip_tunnel *tun = netdev_priv(ol_dev); + return mlxsw_sp_ipip_parms_has_okey(parms) ? 
+ be32_to_cpu(parms.o_key) : 0; +} - return tun->parms.iph.saddr; +static __be32 mlxsw_sp_ipip_parms_saddr4(struct ip_tunnel_parm parms) +{ + return parms.iph.saddr; } -union mlxsw_sp_l3addr -mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, - const struct net_device *ol_dev) +static union mlxsw_sp_l3addr +mlxsw_sp_ipip_parms_saddr(enum mlxsw_sp_l3proto proto, + struct ip_tunnel_parm parms) { switch (proto) { case MLXSW_SP_L3_PROTO_IPV4: return (union mlxsw_sp_l3addr) { - .addr4 = mlxsw_sp_ipip_netdev_saddr4(ol_dev), + .addr4 = mlxsw_sp_ipip_parms_saddr4(parms), }; case MLXSW_SP_L3_PROTO_IPV6: break; @@ -95,21 +90,19 @@ mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, }; } -static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev) +static __be32 mlxsw_sp_ipip_parms_daddr4(struct ip_tunnel_parm parms) { - struct ip_tunnel *tun = netdev_priv(ol_dev); - - return tun->parms.iph.daddr; + return parms.iph.daddr; } static union mlxsw_sp_l3addr -mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto, - const struct net_device *ol_dev) +mlxsw_sp_ipip_parms_daddr(enum mlxsw_sp_l3proto proto, + struct ip_tunnel_parm parms) { switch (proto) { case MLXSW_SP_L3_PROTO_IPV4: return (union mlxsw_sp_l3addr) { - .addr4 = mlxsw_sp_ipip_netdev_daddr4(ol_dev), + .addr4 = mlxsw_sp_ipip_parms_daddr4(parms), }; case MLXSW_SP_L3_PROTO_IPV6: break; @@ -121,6 +114,47 @@ mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto, }; } +static bool mlxsw_sp_ipip_netdev_has_ikey(const struct net_device *ol_dev) +{ + return mlxsw_sp_ipip_parms_has_ikey(mlxsw_sp_ipip_netdev_parms(ol_dev)); +} + +static bool mlxsw_sp_ipip_netdev_has_okey(const struct net_device *ol_dev) +{ + return mlxsw_sp_ipip_parms_has_okey(mlxsw_sp_ipip_netdev_parms(ol_dev)); +} + +static u32 mlxsw_sp_ipip_netdev_ikey(const struct net_device *ol_dev) +{ + return mlxsw_sp_ipip_parms_ikey(mlxsw_sp_ipip_netdev_parms(ol_dev)); +} + +static u32 mlxsw_sp_ipip_netdev_okey(const struct net_device *ol_dev) +{ + return mlxsw_sp_ipip_parms_okey(mlxsw_sp_ipip_netdev_parms(ol_dev)); +} + +union mlxsw_sp_l3addr +mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, + const struct net_device *ol_dev) +{ + return mlxsw_sp_ipip_parms_saddr(proto, + mlxsw_sp_ipip_netdev_parms(ol_dev)); +} + +static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev) +{ + return mlxsw_sp_ipip_parms_daddr4(mlxsw_sp_ipip_netdev_parms(ol_dev)); +} + +static union mlxsw_sp_l3addr +mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto, + const struct net_device *ol_dev) +{ + return mlxsw_sp_ipip_parms_daddr(proto, + mlxsw_sp_ipip_netdev_parms(ol_dev)); +} + static int mlxsw_sp_ipip_nexthop_update_gre4(struct mlxsw_sp *mlxsw_sp, u32 adj_index, struct mlxsw_sp_ipip_entry *ipip_entry) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h index 87becd152a5c..918d74b4e8d7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h @@ -38,6 +38,9 @@ #include "spectrum_router.h" #include +struct ip_tunnel_parm +mlxsw_sp_ipip_netdev_parms(const struct net_device *ol_dev); + union mlxsw_sp_l3addr mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, const struct net_device *ol_dev); -- cgit v1.2.3 From a3fe198ecda678e7360c9a08942f0a0e43b6bb2c Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 3 Nov 2017 10:03:33 +0100 Subject: mlxsw: spectrum_router: Extract mlxsw_sp_ipip_entry_ol_down_event() Signed-off-by: Petr Machata 
Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index ec90c6b6d126..da8fe7ef8f1d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1332,14 +1332,22 @@ static int mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp, return 0; } +static void +mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry) +{ + if (ipip_entry->decap_fib_entry) + mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry); +} + static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp, struct net_device *ol_dev) { struct mlxsw_sp_ipip_entry *ipip_entry; ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev); - if (ipip_entry && ipip_entry->decap_fib_entry) - mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry); + if (ipip_entry) + mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry); } static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp, -- cgit v1.2.3 From 6d4de44550a8a434b89666088a8f98850e6348c2 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 3 Nov 2017 10:03:34 +0100 Subject: mlxsw: spectrum_router: Make mlxsw_sp_netdevice_ipip_ol_up_event() void This function only ever returns 0, so don't pretend it returns anything useful and just make it void. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index da8fe7ef8f1d..2b05f9ff7ff4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1314,8 +1314,8 @@ static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry); } -static int mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp, - struct net_device *ol_dev) +static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *ol_dev) { struct mlxsw_sp_fib_entry *decap_fib_entry; struct mlxsw_sp_ipip_entry *ipip_entry; @@ -1329,7 +1329,6 @@ static int mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp, decap_fib_entry); } - return 0; } static void @@ -1402,7 +1401,8 @@ mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev); return 0; case NETDEV_UP: - return mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev); + mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev); + return 0; case NETDEV_DOWN: mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev); return 0; -- cgit v1.2.3 From 47518ca5d293dd62ca428581941ee51271a4e468 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 3 Nov 2017 10:03:35 +0100 Subject: mlxsw: spectrum_router: Extract mlxsw_sp_ipip_entry_ol_up_event() The piece of logic to promote decap route, if any, is useful for generic tunnel updates, not just for handling of NETDEV_UP events on tunnel interfaces. 
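A generic update path can then simply re-run the promotion after reconfiguring a tunnel; a minimal sketch of such a caller (hypothetical function name, assuming only the helper extracted below, and mirroring what a later patch in this series does):

/* Hypothetical caller: after a tunnel update, re-promote the decap
 * route if the overlay device is administratively up.
 */
static void example_refresh_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  const struct net_device *ol_dev)
{
	if (ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
}
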
Extract it to a separate function. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 23 +++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 2b05f9ff7ff4..ce0d4625c996 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1314,21 +1314,26 @@ static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry); } +static void +mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry) +{ + struct mlxsw_sp_fib_entry *decap_fib_entry; + + decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry); + if (decap_fib_entry) + mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry, + decap_fib_entry); +} + static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp, struct net_device *ol_dev) { - struct mlxsw_sp_fib_entry *decap_fib_entry; struct mlxsw_sp_ipip_entry *ipip_entry; ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev); - if (ipip_entry) { - decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, - ipip_entry); - if (decap_fib_entry) - mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry, - decap_fib_entry); - } - + if (ipip_entry) + mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry); } static void -- cgit v1.2.3 From 7e75af6366b90bbd0cfb62c9c5aeb5e3ec37bcd4 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 3 Nov 2017 10:03:36 +0100 Subject: mlxsw: spectrum: Propagate extack for tunnel events The function mlxsw_sp_rif_create() takes an extack parameter. So far, for creation of loopback interfaces, NULL was passed. For some events however the extack can be extracted and passed along. So do that for NETDEV_CHANGEUPPER handler. Use the opportunity to update the type of info argument that mlxsw_sp_netdevice_ipip_ol_event() takes. Follow-up patches will introduce handling of more changes, and some of them carry an extack as well, but in an info structure of a different type. Though not strictly erroneous (the pointer could be cast whichever way), it makes no sense to pretend the value is always of a certain type, when in fact it isn't. So change the prototype of the above-mentioned function as well. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 9 +++---- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 31 +++++++++++++--------- 2 files changed, 23 insertions(+), 17 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index f01b5cb04963..07cba529b8a9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -398,11 +398,10 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, struct netdev_notifier_changeupper_info *info); bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp, const struct net_device *dev); -int -mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp, - struct net_device *l3_dev, - unsigned long event, - struct netdev_notifier_changeupper_info *info); +int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *l3_dev, + unsigned long event, + struct netdev_notifier_info *info); void mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index ce0d4625c996..c4f1881cfedf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -961,7 +961,8 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, static struct mlxsw_sp_rif_ipip_lb * mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp, enum mlxsw_sp_ipip_type ipipt, - struct net_device *ol_dev) + struct net_device *ol_dev, + struct netlink_ext_ack *extack) { struct mlxsw_sp_rif_params_ipip_lb lb_params; const struct mlxsw_sp_ipip_ops *ipip_ops; @@ -974,7 +975,7 @@ mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp, .lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev), }; - rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, NULL); + rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack); if (IS_ERR(rif)) return ERR_CAST(rif); return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common); @@ -993,7 +994,7 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp, return ERR_PTR(-ENOMEM); ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt, - ol_dev); + ol_dev, NULL); if (IS_ERR(ipip_entry->ol_lb)) { ret = ERR_CAST(ipip_entry->ol_lb); goto err_ol_ipip_lb_create; @@ -1355,7 +1356,8 @@ static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp, } static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp, - struct net_device *ol_dev) + struct net_device *ol_dev, + struct netlink_ext_ack *extack) { struct mlxsw_sp_fib_entry *decap_fib_entry; struct mlxsw_sp_ipip_entry *ipip_entry; @@ -1376,7 +1378,7 @@ static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry); lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipip_entry->ipipt, - ol_dev); + ol_dev, extack); if (IS_ERR(lb_rif)) return PTR_ERR(lb_rif); mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common); @@ -1393,12 +1395,14 @@ static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp, return 0; } -int -mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp, - struct net_device *ol_dev, - unsigned long event, - struct netdev_notifier_changeupper_info *info) +int mlxsw_sp_netdevice_ipip_ol_event(struct 
mlxsw_sp *mlxsw_sp, + struct net_device *ol_dev, + unsigned long event, + struct netdev_notifier_info *info) { + struct netdev_notifier_changeupper_info *chup; + struct netlink_ext_ack *extack; + switch (event) { case NETDEV_REGISTER: return mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev); @@ -1412,9 +1416,12 @@ mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev); return 0; case NETDEV_CHANGEUPPER: - if (netif_is_l3_master(info->upper_dev)) + chup = container_of(info, typeof(*chup), info); + extack = info->extack; + if (netif_is_l3_master(chup->upper_dev)) return mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp, - ol_dev); + ol_dev, + extack); return 0; } return 0; -- cgit v1.2.3 From 65a6121b30a65bb4b61322c895bf835fedd6e315 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 3 Nov 2017 10:03:37 +0100 Subject: mlxsw: spectrum_router: Extract __mlxsw_sp_ipip_entry_update_tunnel() The work that's done by mlxsw_sp_netdevice_ipip_ol_vrf_event() is a good basis for a more versatile function that would take care of all sorts of tunnel updates requests: __mlxsw_sp_ipip_entry_update_tunnel(). Extract that function. Factor out a helper mlxsw_sp_ipip_entry_ol_lb_update() as well. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 74 ++++++++++++++-------- 1 file changed, 46 insertions(+), 28 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index c4f1881cfedf..e2795b889068 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1355,46 +1355,64 @@ static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry); } -static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp, - struct net_device *ol_dev, - struct netlink_ext_ack *extack) -{ - struct mlxsw_sp_fib_entry *decap_fib_entry; - struct mlxsw_sp_ipip_entry *ipip_entry; - struct mlxsw_sp_rif_ipip_lb *lb_rif; +static int +mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb; + struct mlxsw_sp_rif_ipip_lb *new_lb_rif; + + new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, + ipip_entry->ipipt, + ipip_entry->ol_dev, + extack); + if (IS_ERR(new_lb_rif)) + return PTR_ERR(new_lb_rif); + ipip_entry->ol_lb = new_lb_rif; + mlxsw_sp_rif_destroy(&old_lb_rif->common); - ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev); - if (!ipip_entry) - return 0; + return 0; +} + +int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + struct netlink_ext_ack *extack) +{ + int err; - /* When a tunneling device is moved to a different VRF, we need to - * update the backing loopback. Since RIFs can't be edited, we need to - * destroy and recreate it. That might create a window of opportunity - * where RALUE and RATR registers end up referencing a RIF that's - * already gone. RATRs are handled by the RIF destroy, and to take care + /* RIFs can't be edited, so to update loopback, we need to destroy and + * recreate it. 
That creates a window of opportunity where RALUE and + * RATR registers end up referencing a RIF that's already gone. RATRs + * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care * of RALUE, demote the decap route back. */ if (ipip_entry->decap_fib_entry) mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry); - lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipip_entry->ipipt, - ol_dev, extack); - if (IS_ERR(lb_rif)) - return PTR_ERR(lb_rif); - mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common); - ipip_entry->ol_lb = lb_rif; + err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry, extack); + if (err) + return err; - if (ol_dev->flags & IFF_UP) { - decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, - ipip_entry); - if (decap_fib_entry) - mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry, - decap_fib_entry); - } + if (ipip_entry->ol_dev->flags & IFF_UP) + mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry); return 0; } +static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *ol_dev, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_ipip_entry *ipip_entry = + mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev); + + if (!ipip_entry) + return 0; + return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry, + extack); +} + int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp, struct net_device *ol_dev, unsigned long event, -- cgit v1.2.3 From 0c5f1cd5ba8c03567c67910816a7a0fb9fee5746 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 3 Nov 2017 10:03:38 +0100 Subject: mlxsw: spectrum_router: Generalize __mlxsw_sp_ipip_entry_update_tunnel() The work that needs to be done to update HW configuration in response to changes is similar to what __mlxsw_sp_ipip_entry_update_tunnel() already does, but with a number of twists: each change requires a different subset of things to happen. Extend the function to support all these uses, and allow finely-grained configuration of what should happen at each call through a suite of function arguments. Publish the updated function to allow use from the spectrum_ipip module. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 47 ++++++++++++++++++++-- .../net/ethernet/mellanox/mlxsw/spectrum_router.h | 7 ++++ 2 files changed, 50 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index e2795b889068..1376a9738b3c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1355,9 +1355,12 @@ static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry); } +static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *rif); static int mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_ipip_entry *ipip_entry, + bool keep_encap, struct netlink_ext_ack *extack) { struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb; @@ -1370,13 +1373,32 @@ mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp, if (IS_ERR(new_lb_rif)) return PTR_ERR(new_lb_rif); ipip_entry->ol_lb = new_lb_rif; + + if (keep_encap) { + list_splice_init(&old_lb_rif->common.nexthop_list, + &new_lb_rif->common.nexthop_list); + mlxsw_sp_nexthop_rif_update(mlxsw_sp, &new_lb_rif->common); + } + mlxsw_sp_rif_destroy(&old_lb_rif->common); return 0; } +/** + * Update the offload related to an IPIP entry. This always updates decap, and + * in addition to that it also: + * @recreate_loopback: recreates the associated loopback RIF + * @keep_encap: updates next hops that use the tunnel netdevice. This is only + * relevant when recreate_loopback is true. + * @update_nexthops: updates next hops, keeping the current loopback RIF. This + * is only relevant when recreate_loopback is false. 
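+ * + * In every mode, the decap route, if offloaded, is demoted up front and re-promoted at the end if the overlay device is up.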
+ */ int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_ipip_entry *ipip_entry, + bool recreate_loopback, + bool keep_encap, + bool update_nexthops, struct netlink_ext_ack *extack) { int err; @@ -1390,9 +1412,15 @@ int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp, if (ipip_entry->decap_fib_entry) mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry); - err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry, extack); - if (err) - return err; + if (recreate_loopback) { + err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry, + keep_encap, extack); + if (err) + return err; + } else if (update_nexthops) { + mlxsw_sp_nexthop_rif_update(mlxsw_sp, + &ipip_entry->ol_lb->common); + } if (ipip_entry->ol_dev->flags & IFF_UP) mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry); @@ -1410,7 +1438,7 @@ static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp, if (!ipip_entry) return 0; return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry, - extack); + true, false, false, extack); } int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp, @@ -3285,6 +3313,17 @@ static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); } +static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *rif) +{ + struct mlxsw_sp_nexthop *nh; + + list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) { + __mlxsw_sp_nexthop_neigh_update(nh, false); + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); + } +} + static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 8120b01a9c36..4b8a12a4f493 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -63,6 +63,7 @@ enum mlxsw_sp_rif_counter_dir { struct mlxsw_sp_neigh_entry; struct mlxsw_sp_nexthop; +struct mlxsw_sp_ipip_entry; struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp, u16 rif_index); @@ -103,6 +104,12 @@ mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_neigh_entry *neigh_entry, bool adding); bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry); +int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + bool recreate_loopback, + bool keep_encap, + bool update_nexthops, + struct netlink_ext_ack *extack); struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router, struct mlxsw_sp_nexthop *nh); bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh); -- cgit v1.2.3 From 4526cc8aed2b4bf481709911fc1fee9f040ccda1 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 3 Nov 2017 10:03:39 +0100 Subject: mlxsw: spectrum_router: Fix saddr deduction in mlxsw_sp_ipip_entry_create() When trying to determine whether there are other offloaded tunnels with the same local address, mlxsw_sp_ipip_entry_create() should look for a tunnel with matching UL protocol, matching saddr, in the same VRF. However instead of taking into account the UL protocol of the tunnel netdevice (which mlxsw_sp_ipip_entry_saddr_matches() then compares to the UL protocol of inspected IPIP entry), it deduces the UL protocol from the inspected IPIP entry (and that's compared to itself). 
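Condensed, the flawed loop reads as follows (a simplification of the diff below, not the verbatim driver code):

list_for_each_entry(ipip_entry, &router->ipip_list, ipip_list_node) {
	/* Bug: ul_proto is taken from the entry under inspection, so
	 * the protocol part of the match below holds by construction.
	 */
	ul_proto = router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
	if (mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
					      ul_tb_id, ipip_entry))
		return ERR_PTR(-EEXIST);
}

The fix hoists the computation out of the loop and takes ul_proto from ipipt, the type of the tunnel being created, so each entry is compared against the new tunnel rather than against itself.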
This is currently immaterial, because only one tunnel type is offloaded, and therefore the UL protocol always matches, but introducing support for a tunnel with IPv6 underlay would uncover this error. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 1376a9738b3c..897a3841e52f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1169,10 +1169,10 @@ mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp, * in the same underlay table needs special treatment in the HW. That is * currently not implemented in the driver. */ + ul_proto = router->ipip_ops_arr[ipipt]->ul_proto; + saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev); list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list, ipip_list_node) { - ul_proto = router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto; - saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev); if (mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr, ul_tb_id, ipip_entry)) return ERR_PTR(-EEXIST); -- cgit v1.2.3 From af641713e97da4126439c3fb1dee031f7e497654 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 3 Nov 2017 10:03:40 +0100 Subject: mlxsw: spectrum_router: Onload conflicting tunnels The approach for offloading IP tunnels implemented currently by mlxsw doesn't allow two tunnels that have the same local IP address in the same (underlay) VRF. Previously, offloads were introduced on demand as encap routes were formed. When such a route was created that would cause offload of a conflicting tunnel, mlxsw_sp_ipip_entry_create() would detect it and return -EEXIST, which would propagate up and cause FIB abort. Now however IPIP entries are created as soon as an offloadable netdevice is created, and the failure prevents creation of such device. Furthermore, if the driver is installed at the point where such conflicting tunnels exist, the failure actually prevents successful modprobe. Furthermore, follow-up patches implement handling of NETDEV_CHANGE due to the local address change. However, NETDEV_CHANGE can't be vetoed. The failure merely means that the offloads weren't updated, but the change in Linux configuration is not rolled back. It is thus desirable to have a robust way of handling these conflicts, which can later be reused for handling NETDEV_CHANGE as well. To fix this, when a conflicting tunnel is created, instead of failing, simply pull the old tunnel to slow path and reject offloading the new one. Introduce two functions: mlxsw_sp_ipip_entry_demote_tunnel() and mlxsw_sp_ipip_demote_tunnel_by_saddr() to handle this. Make them both public, because they will be useful later on in this patchset. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 73 +++++++++++++++------- .../net/ethernet/mellanox/mlxsw/spectrum_router.h | 8 +++ 2 files changed, 60 insertions(+), 21 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 897a3841e52f..832bfa125512 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1159,24 +1159,7 @@ mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp, enum mlxsw_sp_ipip_type ipipt, struct net_device *ol_dev) { - u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev); - struct mlxsw_sp_router *router = mlxsw_sp->router; struct mlxsw_sp_ipip_entry *ipip_entry; - enum mlxsw_sp_l3proto ul_proto; - union mlxsw_sp_l3addr saddr; - - /* The configuration where several tunnels have the same local address - * in the same underlay table needs special treatment in the HW. That is - * currently not implemented in the driver. - */ - ul_proto = router->ipip_ops_arr[ipipt]->ul_proto; - saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev); - list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list, - ipip_list_node) { - if (mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr, - ul_tb_id, ipip_entry)) - return ERR_PTR(-EEXIST); - } ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev); if (IS_ERR(ipip_entry)) @@ -1292,14 +1275,24 @@ static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp, struct net_device *ol_dev) { struct mlxsw_sp_ipip_entry *ipip_entry; + enum mlxsw_sp_l3proto ul_proto; enum mlxsw_sp_ipip_type ipipt; + union mlxsw_sp_l3addr saddr; + u32 ul_tb_id; mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt); if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) { - ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt, - ol_dev); - if (IS_ERR(ipip_entry)) - return PTR_ERR(ipip_entry); + ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev); + ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto; + saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev); + if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto, + saddr, ul_tb_id, + NULL)) { + ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt, + ol_dev); + if (IS_ERR(ipip_entry)) + return PTR_ERR(ipip_entry); + } } return 0; @@ -1441,6 +1434,44 @@ static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp, true, false, false, extack); } +void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry) +{ + struct net_device *ol_dev = ipip_entry->ol_dev; + + if (ol_dev->flags & IFF_UP) + mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry); + mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry); +} + +/* The configuration where several tunnels have the same local address in the + * same underlay table needs special treatment in the HW. That is currently not + * implemented in the driver. This function finds and demotes the first tunnel + * with a given source address, except the one passed in in the argument + * `except'. 
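+ * + * Returns true iff such a tunnel was found and demoted.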
+ */ +bool +mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_l3proto ul_proto, + union mlxsw_sp_l3addr saddr, + u32 ul_tb_id, + const struct mlxsw_sp_ipip_entry *except) +{ + struct mlxsw_sp_ipip_entry *ipip_entry, *tmp; + + list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list, + ipip_list_node) { + if (ipip_entry != except && + mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr, + ul_tb_id, ipip_entry)) { + mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry); + return true; + } + } + + return false; +} + int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp, struct net_device *ol_dev, unsigned long event, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 4b8a12a4f493..5dd650bdcff6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -110,6 +110,14 @@ int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp, bool keep_encap, bool update_nexthops, struct netlink_ext_ack *extack); +void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry); +bool +mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_l3proto ul_proto, + union mlxsw_sp_l3addr saddr, + u32 ul_tb_id, + const struct mlxsw_sp_ipip_entry *except); struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router, struct mlxsw_sp_nexthop *nh); bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh); -- cgit v1.2.3 From 61481f2fcea9112944330b34767192d7f1696fca Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 3 Nov 2017 10:03:41 +0100 Subject: mlxsw: spectrum: Support IPIP underlay VRF migration When a bound device of a tunnel netdevice changes VRF, the loopback RIF that backs the tunnel needs to be updated and existing encapsulating routes need to be refreshed. Note that several tunnels can share the same bound device, in which case all the impacted tunnels need to be updated. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 3 + drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 7 ++ .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 100 +++++++++++++++++++++ 3 files changed, 110 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 55bb3669bbcc..63e50877796b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -4545,6 +4545,9 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb, if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev)) err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev, event, ptr); + else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev)) + err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev, + event, ptr); else if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU) err = mlxsw_sp_netdevice_router_port_event(dev); else if (mlxsw_sp_is_vrf_event(event, ptr)) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 07cba529b8a9..47dd7e06fd29 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -398,10 +398,17 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, struct netdev_notifier_changeupper_info *info); bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp, const struct net_device *dev); +bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *dev); int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp, struct net_device *l3_dev, unsigned long event, struct netdev_notifier_info *info); +int +mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *l3_dev, + unsigned long event, + struct netdev_notifier_info *info); void mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 832bfa125512..aa7b820e8408 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1257,6 +1257,33 @@ mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp, return NULL; } +static struct mlxsw_sp_ipip_entry * +mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *ul_dev, + struct mlxsw_sp_ipip_entry *start) +{ + struct mlxsw_sp_ipip_entry *ipip_entry; + + ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list, + ipip_list_node); + list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list, + ipip_list_node) { + struct net_device *ipip_ul_dev = + __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev); + + if (ipip_ul_dev == ul_dev) + return ipip_entry; + } + + return NULL; +} + +bool mlxsw_sp_netdev_is_ipip_ul(const struct mlxsw_sp *mlxsw_sp, + const struct net_device *dev) +{ + return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL); +} + static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp, const struct net_device *ol_dev, enum mlxsw_sp_ipip_type ipipt) @@ -1434,6 +1461,16 @@ static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp, true, false, false, extack); } +static int +mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry 
*ipip_entry, + struct net_device *ul_dev, + struct netlink_ext_ack *extack) +{ + return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry, + true, true, false, extack); +} + void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_ipip_entry *ipip_entry) { @@ -1472,6 +1509,21 @@ mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp, return false; } +static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp, + struct net_device *ul_dev) +{ + struct mlxsw_sp_ipip_entry *ipip_entry, *tmp; + + list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list, + ipip_list_node) { + struct net_device *ipip_ul_dev = + __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev); + + if (ipip_ul_dev == ul_dev) + mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry); + } +} + int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp, struct net_device *ol_dev, unsigned long event, @@ -1504,6 +1556,54 @@ int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp, return 0; } +static int +__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + struct net_device *ul_dev, + unsigned long event, + struct netdev_notifier_info *info) +{ + struct netdev_notifier_changeupper_info *chup; + struct netlink_ext_ack *extack; + + switch (event) { + case NETDEV_CHANGEUPPER: + chup = container_of(info, typeof(*chup), info); + extack = info->extack; + if (netif_is_l3_master(chup->upper_dev)) + return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp, + ipip_entry, + ul_dev, + extack); + break; + } + return 0; +} + +int +mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *ul_dev, + unsigned long event, + struct netdev_notifier_info *info) +{ + struct mlxsw_sp_ipip_entry *ipip_entry = NULL; + int err; + + while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, + ul_dev, + ipip_entry))) { + err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry, + ul_dev, event, info); + if (err) { + mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp, + ul_dev); + return err; + } + } + + return 0; +} + struct mlxsw_sp_neigh_key { struct neighbour *n; }; -- cgit v1.2.3 From 4cf04f3ff4da9dd536d9f70127868908a03aaf0a Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 3 Nov 2017 10:03:42 +0100 Subject: mlxsw: spectrum: Handle NETDEV_CHANGE on L3 tunnels Changes to L3 tunnel netdevices (through `ip tunnel change' as well as `ip link set') lead to NETDEV_CHANGE being generated on the tunnel device. Because what is relevant for the tunnel in question depends on the tunnel type, handling of the event is dispatched to the IPIP module through a newly-added interface mlxsw_sp_ipip_ops.ol_netdev_change(). IPIP tunnels now remember the last set of tunnel parameters in struct mlxsw_sp_ipip_entry.parms, and use it to figure out what exactly has changed. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 67 ++++++++++++++++++++++ .../net/ethernet/mellanox/mlxsw/spectrum_ipip.h | 5 ++ .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 42 +++++++++++--- .../net/ethernet/mellanox/mlxsw/spectrum_router.h | 7 +++ 4 files changed, 114 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c index 1850080aacbc..5f78fc5e7724 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c @@ -287,6 +287,72 @@ mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp, }; } +static int +mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + struct netlink_ext_ack *extack) +{ + union mlxsw_sp_l3addr old_saddr, new_saddr; + union mlxsw_sp_l3addr old_daddr, new_daddr; + struct ip_tunnel_parm new_parms; + bool update_tunnel = false; + bool update_decap = false; + bool update_nhs = false; + int err = 0; + + new_parms = mlxsw_sp_ipip_netdev_parms(ipip_entry->ol_dev); + + new_saddr = mlxsw_sp_ipip_parms_saddr(MLXSW_SP_L3_PROTO_IPV4, + new_parms); + old_saddr = mlxsw_sp_ipip_parms_saddr(MLXSW_SP_L3_PROTO_IPV4, + ipip_entry->parms); + new_daddr = mlxsw_sp_ipip_parms_daddr(MLXSW_SP_L3_PROTO_IPV4, + new_parms); + old_daddr = mlxsw_sp_ipip_parms_daddr(MLXSW_SP_L3_PROTO_IPV4, + ipip_entry->parms); + + if (!mlxsw_sp_l3addr_eq(&new_saddr, &old_saddr)) { + u16 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev); + + /* Since the local address has changed, if there is another + * tunnel with a matching saddr, both need to be demoted. + */ + if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, + MLXSW_SP_L3_PROTO_IPV4, + new_saddr, ul_tb_id, + ipip_entry)) { + mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry); + return 0; + } + + update_tunnel = true; + } else if (mlxsw_sp_ipip_parms_okey(ipip_entry->parms) != + mlxsw_sp_ipip_parms_okey(new_parms)) { + update_tunnel = true; + } else if (!mlxsw_sp_l3addr_eq(&new_daddr, &old_daddr)) { + update_nhs = true; + } else if (mlxsw_sp_ipip_parms_ikey(ipip_entry->parms) != + mlxsw_sp_ipip_parms_ikey(new_parms)) { + update_decap = true; + } + + if (update_tunnel) + err = __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry, + true, true, true, + extack); + else if (update_nhs) + err = __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry, + false, false, true, + extack); + else if (update_decap) + err = __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry, + false, false, false, + extack); + + ipip_entry->parms = new_parms; + return err; +} + static const struct mlxsw_sp_ipip_ops mlxsw_sp_ipip_gre4_ops = { .dev_type = ARPHRD_IPGRE, .ul_proto = MLXSW_SP_L3_PROTO_IPV4, @@ -294,6 +360,7 @@ static const struct mlxsw_sp_ipip_ops mlxsw_sp_ipip_gre4_ops = { .fib_entry_op = mlxsw_sp_ipip_fib_entry_op_gre4, .can_offload = mlxsw_sp_ipip_can_offload_gre4, .ol_loopback_config = mlxsw_sp_ipip_ol_loopback_config_gre4, + .ol_netdev_change = mlxsw_sp_ipip_ol_netdev_change_gre4, }; const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[] = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h index 918d74b4e8d7..04b08d9d76e9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h @@ -56,6 +56,7 @@ struct mlxsw_sp_ipip_entry { struct 
mlxsw_sp_rif_ipip_lb *ol_lb; struct mlxsw_sp_fib_entry *decap_fib_entry; struct list_head ipip_list_node; + struct ip_tunnel_parm parms; }; struct mlxsw_sp_ipip_ops { @@ -78,6 +79,10 @@ struct mlxsw_sp_ipip_ops { struct mlxsw_sp_ipip_entry *ipip_entry, enum mlxsw_reg_ralue_op op, u32 tunnel_index); + + int (*ol_netdev_change)(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + struct netlink_ext_ack *extack); }; extern const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[]; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index aa7b820e8408..c1928561c412 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -943,7 +943,7 @@ __mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev) return __dev_get_by_index(net, tun->parms.link); } -static u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev) +u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev) { struct net_device *d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); @@ -1002,6 +1002,7 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp, ipip_entry->ipipt = ipipt; ipip_entry->ol_dev = ol_dev; + ipip_entry->parms = mlxsw_sp_ipip_netdev_parms(ol_dev); return ipip_entry; @@ -1017,12 +1018,6 @@ mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry) kfree(ipip_entry); } -static bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1, - const union mlxsw_sp_l3addr *addr2) -{ - return !memcmp(addr1, addr2, sizeof(*addr1)); -} - static bool mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp, const enum mlxsw_sp_l3proto ul_proto, @@ -1471,6 +1466,35 @@ mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp, true, true, false, extack); } +static int +mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *ol_dev, + struct netlink_ext_ack *extack) +{ + const struct mlxsw_sp_ipip_ops *ipip_ops; + struct mlxsw_sp_ipip_entry *ipip_entry; + int err; + + ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev); + if (!ipip_entry) + /* A change might make a tunnel eligible for offloading, but + * that is currently not implemented. What falls to slow path + * stays there. + */ + return 0; + + /* A change might make a tunnel not eligible for offloading. 
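In that case, demote the tunnel so its traffic falls back to the slow path.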
*/ + if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, + ipip_entry->ipipt)) { + mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry); + return 0; + } + + ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]; + err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack); + return err; +} + void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_ipip_entry *ipip_entry) { @@ -1552,6 +1576,10 @@ int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp, ol_dev, extack); return 0; + case NETDEV_CHANGE: + extack = info->extack; + return mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp, + ol_dev, extack); } return 0; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index 5dd650bdcff6..1fb82246ce96 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -70,6 +70,7 @@ struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp, u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif); u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif); u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif); +u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev); int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif); u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp); const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif); @@ -138,4 +139,10 @@ void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp, void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh); +static inline bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1, + const union mlxsw_sp_l3addr *addr2) +{ + return !memcmp(addr1, addr2, sizeof(*addr1)); +} + #endif /* _MLXSW_ROUTER_H_*/ -- cgit v1.2.3 From 89c2b7dabaafee2220e516d314c9b7757fc8176e Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 3 Nov 2017 10:03:43 +0100 Subject: mlxsw: spectrum_ipip: Handle underlay device change When a bound device of an IP-in-IP tunnel changes, such as through 'ip tunnel change name $name dev $dev', the loopback backing the tunnel needs to be recreated. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c index 5f78fc5e7724..7502e53447bd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c @@ -326,8 +326,9 @@ mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp, } update_tunnel = true; - } else if (mlxsw_sp_ipip_parms_okey(ipip_entry->parms) != - mlxsw_sp_ipip_parms_okey(new_parms)) { + } else if ((mlxsw_sp_ipip_parms_okey(ipip_entry->parms) != + mlxsw_sp_ipip_parms_okey(new_parms)) || + ipip_entry->parms.link != new_parms.link) { update_tunnel = true; } else if (!mlxsw_sp_l3addr_eq(&new_daddr, &old_daddr)) { update_nhs = true; -- cgit v1.2.3 From 44b0fff1d8a461a5cd66cfc3a15ff05959d77df5 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 3 Nov 2017 10:03:44 +0100 Subject: mlxsw: spectrum_router: Handle down of tunnel underlay When the bound device of a tunnel device is down, encapsulated packets are not egressed anymore, but tunnel decap still works. Extend mlxsw_sp_nexthop_rif_update() to take IFF_UP into consideration when deciding whether a given next hop should be offloaded. Because the new logic was added to mlxsw_sp_nexthop_rif_update(), this fixes the case where a newly-added tunnel has a down bound device, which would previously be fully offloaded. Now the down state of the bound device is noted and next hops forwarding to such tunnel are not offloaded. In addition to that, notice NETDEV_UP and NETDEV_DOWN of a bound device to force refresh of tunnel encap route offloads. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 57 +++++++++++++++++++++- 1 file changed, 55 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index c1928561c412..e9187841d82a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1466,6 +1466,28 @@ mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp, true, true, false, extack); } +static int +mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + struct net_device *ul_dev) +{ + return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry, + false, false, true, NULL); +} + +static int +mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_ipip_entry *ipip_entry, + struct net_device *ul_dev) +{ + /* A down underlay device causes encapsulated packets to not be + * forwarded, but decap still works. So refresh next hops without + * touching anything else. 
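+ * mlxsw_sp_nexthop_rif_update() then sees that the underlay is down and withdraws the affected next hops.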
+ */ + return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry, + false, false, true, NULL); +} + static int mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp, struct net_device *ol_dev, @@ -1604,6 +1626,14 @@ __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp, ul_dev, extack); break; + + case NETDEV_UP: + return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry, + ul_dev); + case NETDEV_DOWN: + return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp, + ipip_entry, + ul_dev); } return 0; } @@ -3297,10 +3327,19 @@ static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp, neigh_release(n); } +static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev) +{ + struct net_device *ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev); + + return ul_dev ? (ul_dev->flags & IFF_UP) : true; +} + static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh, struct net_device *ol_dev) { + bool removing; + if (!nh->nh_grp->gateway || nh->ipip_entry) return 0; @@ -3308,7 +3347,8 @@ static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp, if (!nh->ipip_entry) return -ENOENT; - __mlxsw_sp_nexthop_neigh_update(nh, false); + removing = !mlxsw_sp_ipip_netdev_ul_up(ol_dev); + __mlxsw_sp_nexthop_neigh_update(nh, removing); return 0; } @@ -3476,9 +3516,22 @@ static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif) { struct mlxsw_sp_nexthop *nh; + bool removing; list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) { - __mlxsw_sp_nexthop_neigh_update(nh, false); + switch (nh->type) { + case MLXSW_SP_NEXTHOP_TYPE_ETH: + removing = false; + break; + case MLXSW_SP_NEXTHOP_TYPE_IPIP: + removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev); + break; + default: + WARN_ON(1); + continue; + } + + __mlxsw_sp_nexthop_neigh_update(nh, removing); mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp); } } -- cgit v1.2.3 From bf5345882bd18bc1b4966d170c0491ebe5c9a7d6 Mon Sep 17 00:00:00 2001 From: Vijaya Mohan Guvva Date: Fri, 3 Nov 2017 12:17:44 -0700 Subject: liquidio: Fix an issue with multiple switchdev enable disables Return success if the same dispatch function is being registered for a given opcode and subcode, thereby allowing multiple switchdev enables and disables. Signed-off-by: Vijaya Mohan Guvva Signed-off-by: Satanand Burla Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/octeon_device.c | 4 ++++ drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 4 ++-- drivers/net/ethernet/cavium/liquidio/octeon_droq.h | 3 +++ 3 files changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index e4aa3395a578..2c615ab09e64 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -1180,6 +1180,10 @@ octeon_register_dispatch_fn(struct octeon_device *oct, spin_unlock_bh(&oct->dispatch.lock); } else { + if (pfn == fn && + octeon_get_dispatch_arg(oct, opcode, subcode) == fn_arg) + return 0; + dev_err(&oct->pci_dev->dev, "Found previously registered dispatch fn for opcode/subcode: %x/%x\n", opcode, subcode); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c index 9372d4ce9954..3461d65ff4eb 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c @@ -52,8 +52,8 @@ struct __dispatch { * @return Failure: NULL * */ -static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev, - u16 opcode, u16 subcode) +void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev, + u16 opcode, u16 subcode) { int idx; struct list_head *dispatch; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h index f91bc84d1719..815a9f56fd59 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h @@ -400,6 +400,9 @@ int octeon_register_dispatch_fn(struct octeon_device *oct, u16 subcode, octeon_dispatch_fn_t fn, void *fn_arg); +void *octeon_get_dispatch_arg(struct octeon_device *oct, + u16 opcode, u16 subcode); + void octeon_droq_print_stats(void); u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq); -- cgit v1.2.3 From c02762eb20cb57ec5b7c037b056c37d5838c803f Mon Sep 17 00:00:00 2001 From: Huy Nguyen Date: Tue, 18 Jul 2017 16:03:17 -0500 Subject: net/mlx5: QCAM register firmware command support The QCAM register provides the capability bits for all the QoS registers accessed through the ACCESS_REG command.
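As a hedged sketch (not part of the patch): once the QCAM contents are cached in dev->caps.qcam, a caller can gate access to a QoS register on the advertised bits through the macros this patch introduces; the helper name below is hypothetical.

	/* Hypothetical helper: report whether the QPTS register may be
	 * accessed, based on the qcam_reg HCA capability and the qpts
	 * access bit cached from the QCAM query.
	 */
	static bool mlx5_qpts_supported(struct mlx5_core_dev *dev)
	{
		return MLX5_CAP_GEN(dev, qcam_reg) &&
		       MLX5_CAP_QCAM_REG(dev, qpts);
	}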
Signed-off-by: Huy Nguyen Reviewed-by: Parav Pandit Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 10 ++++++ .../net/ethernet/mellanox/mlx5/core/mlx5_core.h | 2 ++ drivers/net/ethernet/mellanox/mlx5/core/port.c | 12 +++++++ include/linux/mlx5/device.h | 14 ++++++++ include/linux/mlx5/driver.h | 2 ++ include/linux/mlx5/mlx5_ifc.h | 40 +++++++++++++++++++++- 6 files changed, 79 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 2c71557d1cee..5ef1b56b6a96 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -106,6 +106,13 @@ static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev) MLX5_MCAM_REGS_FIRST_128); } +static int mlx5_get_qcam_reg(struct mlx5_core_dev *dev) +{ + return mlx5_query_qcam_reg(dev, dev->caps.qcam, + MLX5_QCAM_FEATURE_ENHANCED_FEATURES, + MLX5_QCAM_REGS_FIRST_128); +} + int mlx5_query_hca_caps(struct mlx5_core_dev *dev) { int err; @@ -182,6 +189,9 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) if (MLX5_CAP_GEN(dev, mcam_reg)) mlx5_get_mcam_reg(dev); + if (MLX5_CAP_GEN(dev, qcam_reg)) + mlx5_get_qcam_reg(dev); + return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 8f00de2fe283..ff4a0b889a6f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -122,6 +122,8 @@ int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group, u8 access_reg_group); int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group, u8 access_reg_group); +int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam, + u8 feature_group, u8 access_reg_group); void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev); void mlx5_lag_remove(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index e07061f565d6..b6553be841f9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -98,6 +98,18 @@ int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcam, u8 feature_group, return mlx5_core_access_reg(dev, in, sz, mcam, sz, MLX5_REG_MCAM, 0, 0); } +int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam, + u8 feature_group, u8 access_reg_group) +{ + u32 in[MLX5_ST_SZ_DW(qcam_reg)] = {}; + int sz = MLX5_ST_SZ_BYTES(qcam_reg); + + MLX5_SET(qcam_reg, in, feature_group, feature_group); + MLX5_SET(qcam_reg, in, access_reg_group, access_reg_group); + + return mlx5_core_access_reg(mdev, in, sz, qcam, sz, MLX5_REG_QCAM, 0, 0); +} + struct mlx5_reg_pcap { u8 rsvd0; u8 port_num; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index e32dbc4934db..6d79b3f79458 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1000,6 +1000,14 @@ enum mlx5_mcam_feature_groups { MLX5_MCAM_FEATURE_ENHANCED_FEATURES = 0x0, }; +enum mlx5_qcam_reg_groups { + MLX5_QCAM_REGS_FIRST_128 = 0x0, +}; + +enum mlx5_qcam_feature_groups { + MLX5_QCAM_FEATURE_ENHANCED_FEATURES = 0x0, +}; + /* GET Dev Caps macros */ #define MLX5_CAP_GEN(mdev, cap) \ MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap) @@ -1108,6 +1116,12 @@ enum mlx5_mcam_feature_groups { #define MLX5_CAP_MCAM_FEATURE(mdev, fld) \ 
MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld) +#define MLX5_CAP_QCAM_REG(mdev, fld) \ + MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_access_reg_cap_mask.reg_cap.fld) + +#define MLX5_CAP_QCAM_FEATURE(mdev, fld) \ + MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_feature_cap_mask.feature_cap.fld) + #define MLX5_CAP_FPGA(mdev, cap) \ MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap) diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 08c77b7e59cb..ed5be52282ea 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -109,6 +109,7 @@ enum { enum { MLX5_REG_QETCR = 0x4005, MLX5_REG_QTCT = 0x400a, + MLX5_REG_QCAM = 0x4019, MLX5_REG_DCBX_PARAM = 0x4020, MLX5_REG_DCBX_APP = 0x4021, MLX5_REG_FPGA_CAP = 0x4022, @@ -798,6 +799,7 @@ struct mlx5_core_dev { u32 pcam[MLX5_ST_SZ_DW(pcam_reg)]; u32 mcam[MLX5_ST_SZ_DW(mcam_reg)]; u32 fpga[MLX5_ST_SZ_DW(fpga_cap)]; + u32 qcam[MLX5_ST_SZ_DW(qcam_reg)]; } caps; phys_addr_t iseg_base; struct mlx5_init_seg __iomem *iseg; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 69772347f866..f127c5b310c5 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -838,7 +838,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 cc_modify_allowed[0x1]; u8 start_pad[0x1]; u8 cache_line_128byte[0x1]; - u8 reserved_at_165[0xb]; + u8 reserved_at_165[0xa]; + u8 qcam_reg[0x1]; u8 gid_table_size[0x10]; u8 out_of_seq_cnt[0x1]; @@ -7890,6 +7891,43 @@ struct mlx5_ifc_mcam_reg_bits { u8 reserved_at_1c0[0x80]; }; +struct mlx5_ifc_qcam_access_reg_cap_mask { + u8 qcam_access_reg_cap_mask_127_to_20[0x6C]; + u8 qpdpm[0x1]; + u8 qcam_access_reg_cap_mask_18_to_4[0x0F]; + u8 qdpm[0x1]; + u8 qpts[0x1]; + u8 qcap[0x1]; + u8 qcam_access_reg_cap_mask_0[0x1]; +}; + +struct mlx5_ifc_qcam_qos_feature_cap_mask { + u8 qcam_qos_feature_cap_mask_127_to_1[0x7F]; + u8 qpts_trust_both[0x1]; +}; + +struct mlx5_ifc_qcam_reg_bits { + u8 reserved_at_0[0x8]; + u8 feature_group[0x8]; + u8 reserved_at_10[0x8]; + u8 access_reg_group[0x8]; + u8 reserved_at_20[0x20]; + + union { + struct mlx5_ifc_qcam_access_reg_cap_mask reg_cap; + u8 reserved_at_0[0x80]; + } qos_access_reg_cap_mask; + + u8 reserved_at_c0[0x80]; + + union { + struct mlx5_ifc_qcam_qos_feature_cap_mask feature_cap; + u8 reserved_at_0[0x80]; + } qos_feature_cap_mask; + + u8 reserved_at_1c0[0x80]; +}; + struct mlx5_ifc_pcap_reg_bits { u8 reserved_at_0[0x8]; u8 local_port[0x8]; -- cgit v1.2.3 From 415a64aa8dc6b4fc478609c549ca652d95a12f13 Mon Sep 17 00:00:00 2001 From: Huy Nguyen Date: Tue, 18 Jul 2017 16:08:46 -0500 Subject: net/mlx5: QPTS and QPDPM register firmware command support The QPTS register allows changing the priority trust state between pcp and dscp. Add support to get/set trust state from device. When the port is in pcp/dscp trust state, packets are routed by hardware to the matching priority based on their pcp/dscp value, respectively. The QPDPM register allows changing the dscp to priority mapping. Add support to get/set dscp to priority mapping from device. Note that to change a dscp mapping, the "e" bit of this dscp structure must be set in the QPDPM firmware command.
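A minimal usage sketch (illustrative, not from the patch), combining the helpers added below with the mlx5_qpts_trust_state enum introduced alongside them; the function name and the DSCP/priority values are examples only:

	/* Example: move the port to DSCP trust, then map DSCP 46 (EF)
	 * to priority 5. Both helpers drive ACCESS_REG under the hood.
	 */
	static int example_dscp_setup(struct mlx5_core_dev *mdev)
	{
		int err;

		err = mlx5_set_trust_state(mdev, MLX5_QPTS_TRUST_DSCP);
		if (err)
			return err;

		return mlx5_set_dscp2prio(mdev, 46, 5);
	}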
Signed-off-by: Huy Nguyen Reviewed-by: Parav Pandit Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/port.c | 99 ++++++++++++++++++++++++++ include/linux/mlx5/driver.h | 7 ++ include/linux/mlx5/mlx5_ifc.h | 20 ++++++ include/linux/mlx5/port.h | 5 ++ 4 files changed, 131 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index b6553be841f9..c37d00cd472a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -971,3 +971,102 @@ int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode) return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_MTPPSE, 0, 1); } + +int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state) +{ + u32 out[MLX5_ST_SZ_DW(qpts_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(qpts_reg)] = {}; + int err; + + MLX5_SET(qpts_reg, in, local_port, 1); + MLX5_SET(qpts_reg, in, trust_state, trust_state); + + err = mlx5_core_access_reg(mdev, in, sizeof(in), out, + sizeof(out), MLX5_REG_QPTS, 0, 1); + return err; +} + +int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state) +{ + u32 out[MLX5_ST_SZ_DW(qpts_reg)] = {}; + u32 in[MLX5_ST_SZ_DW(qpts_reg)] = {}; + int err; + + MLX5_SET(qpts_reg, in, local_port, 1); + + err = mlx5_core_access_reg(mdev, in, sizeof(in), out, + sizeof(out), MLX5_REG_QPTS, 0, 0); + if (!err) + *trust_state = MLX5_GET(qpts_reg, out, trust_state); + + return err; +} + +int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio) +{ + int sz = MLX5_ST_SZ_BYTES(qpdpm_reg); + void *qpdpm_dscp; + void *out; + void *in; + int err; + + in = kzalloc(sz, GFP_KERNEL); + out = kzalloc(sz, GFP_KERNEL); + if (!in || !out) { + err = -ENOMEM; + goto out; + } + + MLX5_SET(qpdpm_reg, in, local_port, 1); + err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_QPDPM, 0, 0); + if (err) + goto out; + + memcpy(in, out, sz); + MLX5_SET(qpdpm_reg, in, local_port, 1); + + /* Update the corresponding dscp entry */ + qpdpm_dscp = MLX5_ADDR_OF(qpdpm_reg, in, dscp[dscp]); + MLX5_SET16(qpdpm_dscp_reg, qpdpm_dscp, prio, prio); + MLX5_SET16(qpdpm_dscp_reg, qpdpm_dscp, e, 1); + err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_QPDPM, 0, 1); + +out: + kfree(in); + kfree(out); + return err; +} + +/* dscp2prio[i]: priority that dscp i mapped to */ +#define MLX5E_SUPPORTED_DSCP 64 +int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio) +{ + int sz = MLX5_ST_SZ_BYTES(qpdpm_reg); + void *qpdpm_dscp; + void *out; + void *in; + int err; + int i; + + in = kzalloc(sz, GFP_KERNEL); + out = kzalloc(sz, GFP_KERNEL); + if (!in || !out) { + err = -ENOMEM; + goto out; + } + + MLX5_SET(qpdpm_reg, in, local_port, 1); + err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_QPDPM, 0, 0); + if (err) + goto out; + + for (i = 0; i < (MLX5E_SUPPORTED_DSCP); i++) { + qpdpm_dscp = MLX5_ADDR_OF(qpdpm_reg, out, dscp[i]); + dscp2prio[i] = MLX5_GET16(qpdpm_dscp_reg, qpdpm_dscp, prio); + } + +out: + kfree(in); + kfree(out); + return err; +} diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index ed5be52282ea..a886b51511ab 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -107,8 +107,10 @@ enum { }; enum { + MLX5_REG_QPTS = 0x4002, MLX5_REG_QETCR = 0x4005, MLX5_REG_QTCT = 0x400a, + MLX5_REG_QPDPM = 0x4013, MLX5_REG_QCAM = 0x4019, MLX5_REG_DCBX_PARAM = 0x4020, 
MLX5_REG_DCBX_APP = 0x4021, @@ -142,6 +144,11 @@ enum { MLX5_REG_MCAM = 0x907f, }; +enum mlx5_qpts_trust_state { + MLX5_QPTS_TRUST_PCP = 1, + MLX5_QPTS_TRUST_DSCP = 2, +}; + enum mlx5_dcbx_oper_mode { MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0, MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index f127c5b310c5..3e5363f760dd 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -8578,6 +8578,26 @@ struct mlx5_ifc_qetc_reg_bits { struct mlx5_ifc_ets_global_config_reg_bits global_configuration; }; +struct mlx5_ifc_qpdpm_dscp_reg_bits { + u8 e[0x1]; + u8 reserved_at_01[0x0b]; + u8 prio[0x04]; +}; + +struct mlx5_ifc_qpdpm_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x10]; + struct mlx5_ifc_qpdpm_dscp_reg_bits dscp[64]; +}; + +struct mlx5_ifc_qpts_reg_bits { + u8 reserved_at_0[0x8]; + u8 local_port[0x8]; + u8 reserved_at_10[0x2d]; + u8 trust_state[0x3]; +}; + struct mlx5_ifc_qtct_reg_bits { u8 reserved_at_0[0x8]; u8 port_number[0x8]; diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index c59af8ab753a..035f0d4dc9fe 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -179,4 +179,9 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out); int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in); + +int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state); +int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state); +int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio); +int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio); #endif /* __MLX5_PORT_H__ */ -- cgit v1.2.3 From 2a5e7a1344f4dff71bb921ee0c9ecf7f5932e570 Mon Sep 17 00:00:00 2001 From: Huy Nguyen Date: Tue, 18 Jul 2017 16:23:36 -0500 Subject: net/mlx5e: Add dcbnl dscp to priority support This patch implements dcbnl hooks to set and delete DSCP to priority map as defined by the DCB subsystem. The device maintains an internal trust state, which needs to be set to DSCP state for performing DSCP to priority mapping. When the first dscp to priority APP entry is added by the user, the trust state is changed to dscp. When the last dscp to priority APP entry is deleted by the user, the trust state is changed to pcp. If the user sends multiple dscp to priority APP entries for the same dscp, the last one sent takes effect; all previously sent entries are deleted. The dscp to priority APP entries are added and deleted in the net/dcb APP database using dcb_ieee_setapp/getapp.
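For reference, a sketch of the APP entry these hooks consume (values illustrative); the DCB subsystem hands such an entry to the ieee_setapp/ieee_delapp callbacks:

	/* Illustrative dcb_app entry: map DSCP 22 to priority 3. */
	struct dcb_app app = {
		.selector = IEEE_8021QAZ_APP_SEL_DSCP,
		.protocol = 22,	/* the DSCP value */
		.priority = 3,	/* the priority it maps to */
	};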
Signed-off-by: Huy Nguyen Reviewed-by: Parav Pandit Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 15 +- drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 204 +++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 15 +- 3 files changed, 232 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index e613ce02216d..ab6f0c18850f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -57,6 +57,7 @@ #define MLX5E_HW2SW_MTU(priv, hwmtu) ((hwmtu) - ((priv)->hard_mtu)) #define MLX5E_SW2HW_MTU(priv, swmtu) ((swmtu) + ((priv)->hard_mtu)) +#define MLX5E_MAX_DSCP 64 #define MLX5E_MAX_NUM_TC 8 #define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6 @@ -260,11 +261,17 @@ enum { struct mlx5e_dcbx { enum mlx5_dcbx_oper_mode mode; struct mlx5e_cee_config cee_cfg; /* pending configuration */ + u8 dscp_app_cnt; /* The only setting that cannot be read from FW */ u8 tc_tsa[IEEE_8021QAZ_MAX_TCS]; u8 cap; }; + +struct mlx5e_dcbx_dp { + u8 dscp2prio[MLX5E_MAX_DSCP]; + u8 trust_state; +}; #endif enum { @@ -742,6 +749,9 @@ struct mlx5e_priv { /* priv data path fields - start */ struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC]; int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC]; +#ifdef CONFIG_MLX5_CORE_EN_DCB + struct mlx5e_dcbx_dp dcbx_dp; +#endif /* priv data path fields - end */ unsigned long state; @@ -800,6 +810,8 @@ struct mlx5e_profile { mlx5e_fp_handle_rx_cqe handle_rx_cqe; mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe; } rx_handlers; + void (*netdev_registered_init)(struct mlx5e_priv *priv); + void (*netdev_registered_remove)(struct mlx5e_priv *priv); int max_tc; }; @@ -968,6 +980,8 @@ extern const struct ethtool_ops mlx5e_ethtool_ops; extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops; int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets); void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv); +void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv); +void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv); #endif #ifndef CONFIG_RFS_ACCEL @@ -1069,5 +1083,4 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv); void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params, u16 max_channels); - #endif /* __MLX5_EN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 51c4cc00a186..aa59c4324159 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -46,6 +46,13 @@ enum { MLX5E_LOWEST_PRIO_GROUP = 0, }; +#define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg) && \ + MLX5_CAP_QCAM_REG(mdev, qpts) && \ + MLX5_CAP_QCAM_REG(mdev, qpdpm)) + +static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state); +static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio); + /* If dcbx mode is non-host set the dcbx mode to host. 
*/ static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv, @@ -381,6 +388,113 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode) return 0; } +static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct dcb_app temp; + bool is_new; + int err; + + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP) + return -EINVAL; + + if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager)) + return -EINVAL; + + if (!MLX5_DSCP_SUPPORTED(priv->mdev)) + return -EINVAL; + + if (app->protocol >= MLX5E_MAX_DSCP) + return -EINVAL; + + /* Save the old entry info */ + temp.selector = IEEE_8021QAZ_APP_SEL_DSCP; + temp.protocol = app->protocol; + temp.priority = priv->dcbx_dp.dscp2prio[app->protocol]; + + /* Check if need to switch to dscp trust state */ + if (!priv->dcbx.dscp_app_cnt) { + err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_DSCP); + if (err) + return err; + } + + /* Skip the fw command if new and old mapping are the same */ + if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) { + err = mlx5e_set_dscp2prio(priv, app->protocol, app->priority); + if (err) + goto fw_err; + } + + /* Delete the old entry if exists */ + is_new = false; + err = dcb_ieee_delapp(dev, &temp); + if (err) + is_new = true; + + /* Add new entry and update counter */ + err = dcb_ieee_setapp(dev, app); + if (err) + return err; + + if (is_new) + priv->dcbx.dscp_app_cnt++; + + return err; + +fw_err: + mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP); + return err; +} + +static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int err; + + if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP) + return -EINVAL; + + if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager)) + return -EINVAL; + + if (!MLX5_DSCP_SUPPORTED(priv->mdev)) + return -EINVAL; + + if (app->protocol >= MLX5E_MAX_DSCP) + return -EINVAL; + + /* Skip if no dscp app entry */ + if (!priv->dcbx.dscp_app_cnt) + return -ENOENT; + + /* Check if the entry matches fw setting */ + if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) + return -ENOENT; + + /* Delete the app entry */ + err = dcb_ieee_delapp(dev, app); + if (err) + return err; + + /* Reset the priority mapping back to zero */ + err = mlx5e_set_dscp2prio(priv, app->protocol, 0); + if (err) + goto fw_err; + + priv->dcbx.dscp_app_cnt--; + + /* Check if need to switch to pcp trust state */ + if (!priv->dcbx.dscp_app_cnt) + err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP); + + return err; + +fw_err: + mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP); + return err; +} + static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev, struct ieee_maxrate *maxrate) { @@ -740,6 +854,8 @@ const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = { .ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate, .ieee_getpfc = mlx5e_dcbnl_ieee_getpfc, .ieee_setpfc = mlx5e_dcbnl_ieee_setpfc, + .ieee_setapp = mlx5e_dcbnl_ieee_setapp, + .ieee_delapp = mlx5e_dcbnl_ieee_delapp, .getdcbx = mlx5e_dcbnl_getdcbx, .setdcbx = mlx5e_dcbnl_setdcbx, @@ -801,10 +917,98 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv) mlx5e_dcbnl_ieee_setets_core(priv, &ets); } +enum { + INIT, + DELETE, +}; + +static void mlx5e_dcbnl_dscp_app(struct mlx5e_priv *priv, int action) +{ + struct dcb_app temp; + int i; + + if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager)) + return; + + if (!MLX5_DSCP_SUPPORTED(priv->mdev)) + return; + + /* No SEL_DSCP entry in non DSCP state */ + if (priv->dcbx_dp.trust_state != 
MLX5_QPTS_TRUST_DSCP) + return; + + temp.selector = IEEE_8021QAZ_APP_SEL_DSCP; + for (i = 0; i < MLX5E_MAX_DSCP; i++) { + temp.protocol = i; + temp.priority = priv->dcbx_dp.dscp2prio[i]; + if (action == INIT) + dcb_ieee_setapp(priv->netdev, &temp); + else + dcb_ieee_delapp(priv->netdev, &temp); + } + + priv->dcbx.dscp_app_cnt = (action == INIT) ? MLX5E_MAX_DSCP : 0; +} + +void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv) +{ + mlx5e_dcbnl_dscp_app(priv, INIT); +} + +void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv) +{ + mlx5e_dcbnl_dscp_app(priv, DELETE); +} + +static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state) +{ + int err; + + err = mlx5_set_trust_state(priv->mdev, trust_state); + if (err) + return err; + priv->dcbx_dp.trust_state = trust_state; + + return err; +} + +static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio) +{ + int err; + + err = mlx5_set_dscp2prio(priv->mdev, dscp, prio); + if (err) + return err; + + priv->dcbx_dp.dscp2prio[dscp] = prio; + return err; +} + +static int mlx5e_trust_initialize(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + int err; + + if (!MLX5_DSCP_SUPPORTED(mdev)) + return 0; + + err = mlx5_query_trust_state(priv->mdev, &priv->dcbx_dp.trust_state); + if (err) + return err; + + err = mlx5_query_dscp2prio(priv->mdev, priv->dcbx_dp.dscp2prio); + if (err) + return err; + + return 0; +} + void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv) { struct mlx5e_dcbx *dcbx = &priv->dcbx; + mlx5e_trust_initialize(priv); + if (!MLX5_CAP_GEN(priv->mdev, qos)) return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 28ae00b3eb88..8633476fb536 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4374,7 +4374,9 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) if (netdev->reg_state != NETREG_REGISTERED) return; - +#ifdef CONFIG_MLX5_CORE_EN_DCB + mlx5e_dcbnl_init_app(priv); +#endif /* Device already registered: sync netdev system state */ if (mlx5e_vxlan_allowed(mdev)) { rtnl_lock(); @@ -4395,6 +4397,11 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; +#ifdef CONFIG_MLX5_CORE_EN_DCB + if (priv->netdev->reg_state == NETREG_REGISTERED) + mlx5e_dcbnl_delete_app(priv); +#endif + rtnl_lock(); if (netif_running(priv->netdev)) mlx5e_close(priv->netdev); @@ -4615,6 +4622,9 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) goto err_detach; } +#ifdef CONFIG_MLX5_CORE_EN_DCB + mlx5e_dcbnl_init_app(priv); +#endif return priv; err_detach: @@ -4631,6 +4641,9 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv) struct mlx5e_priv *priv = vpriv; void *ppriv = priv->ppriv; +#ifdef CONFIG_MLX5_CORE_EN_DCB + mlx5e_dcbnl_delete_app(priv); +#endif unregister_netdev(priv->netdev); mlx5e_detach(mdev, vpriv); mlx5e_destroy_netdev(priv); -- cgit v1.2.3 From fbcb127e89ba8a4ccbec609a27f8d110474044c8 Mon Sep 17 00:00:00 2001 From: Huy Nguyen Date: Tue, 18 Jul 2017 16:34:51 -0500 Subject: net/mlx5e: Support DSCP trust state to Ethernet's IP packet on SQ If the port is in DSCP trust state, packets are placed in the right priority queue based on the dscp value. This is done by selecting the transmit queue based on the dscp of the skb. Until now select_queue honors priority only from the vlan header. 
However that is not sufficient in cases where port trust state is DSCP mode as packet might not even contain vlan header. Therefore if the port is in dscp trust state and vport's min inline mode is not NONE, copy the IP header to the eseg's inline header if the skb has it. This is done by changing the transmit queue sq's min inline mode to L3. Note that the min inline mode of sqs that belong to other features such as xdpsq, icosq are not modified. Signed-off-by: Huy Nguyen Reviewed-by: Parav Pandit Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + .../net/ethernet/mellanox/mlx5/core/en_common.c | 12 +++++++ drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 37 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 5 +-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 24 ++++++++++++-- 5 files changed, 73 insertions(+), 6 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index ab6f0c18850f..fae7b62d173f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -1083,4 +1083,5 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv); void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params, u16 max_channels); +u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev); #endif /* __MLX5_EN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 157d02917237..784e282803db 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -171,3 +171,15 @@ out: return err; } + +u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev) +{ + u8 min_inline_mode; + + mlx5_query_min_inline(mdev, &min_inline_mode); + if (min_inline_mode == MLX5_INLINE_MODE_NONE && + !MLX5_CAP_ETH(mdev, wqe_vlan_insert)) + min_inline_mode = MLX5_INLINE_MODE_L2; + + return min_inline_mode; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index aa59c4324159..b402d69a701b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -960,6 +960,40 @@ void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv) mlx5e_dcbnl_dscp_app(priv, DELETE); } +static void mlx5e_trust_update_tx_min_inline_mode(struct mlx5e_priv *priv, + struct mlx5e_params *params) +{ + params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(priv->mdev); + if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP && + params->tx_min_inline_mode == MLX5_INLINE_MODE_L2) + params->tx_min_inline_mode = MLX5_INLINE_MODE_IP; +} + +static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv) +{ + struct mlx5e_channels new_channels = {}; + + mutex_lock(&priv->state_lock); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + goto out; + + new_channels.params = priv->channels.params; + mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params); + + /* Skip if tx_min_inline is the same */ + if (new_channels.params.tx_min_inline_mode == + priv->channels.params.tx_min_inline_mode) + goto out; + + if (mlx5e_open_channels(priv, &new_channels)) + goto out; + mlx5e_switch_priv_channels(priv, &new_channels, NULL); + +out: + mutex_unlock(&priv->state_lock); +} + static int mlx5e_set_trust_state(struct 
mlx5e_priv *priv, u8 trust_state) { int err; @@ -968,6 +1002,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state) if (err) return err; priv->dcbx_dp.trust_state = trust_state; + mlx5e_trust_update_sq_inline_mode(priv); return err; } @@ -996,6 +1031,8 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv) if (err) return err; + mlx5e_trust_update_tx_min_inline_mode(priv, &priv->channels.params); + err = mlx5_query_dscp2prio(priv->mdev, priv->dcbx_dp.dscp2prio); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 8633476fb536..a97ee38143aa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4071,10 +4071,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, /* TX inline */ params->tx_max_inline = mlx5e_get_max_inline_cap(mdev); - mlx5_query_min_inline(mdev, &params->tx_min_inline_mode); - if (params->tx_min_inline_mode == MLX5_INLINE_MODE_NONE && - !MLX5_CAP_ETH(mdev, wqe_vlan_insert)) - params->tx_min_inline_mode = MLX5_INLINE_MODE_L2; + params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev); /* RSS */ params->rss_hfunc = ETH_RSS_HASH_XOR; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index a7c208a1ad83..de651de35c9b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -32,6 +32,7 @@ #include <linux/tcp.h> #include <linux/if_vlan.h> +#include <net/dsfield.h> #include "en.h" #include "ipoib/ipoib.h" #include "en_accel/ipsec_rxtx.h" @@ -86,6 +87,20 @@ static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma) } } +#ifdef CONFIG_MLX5_CORE_EN_DCB +static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb) +{ + int dscp_cp = 0; + + if (skb->protocol == htons(ETH_P_IP)) + dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2; + else if (skb->protocol == htons(ETH_P_IPV6)) + dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2; + + return priv->dcbx_dp.dscp2prio[dscp_cp]; +} +#endif + u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) { @@ -97,8 +112,13 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, if (!netdev_get_num_tc(dev)) return channel_ix; +#ifdef CONFIG_MLX5_CORE_EN_DCB + if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP) + up = mlx5e_get_dscp_up(priv, skb); + else +#endif + if (skb_vlan_tag_present(skb)) + up = skb->vlan_tci >> VLAN_PRIO_SHIFT; - if (skb_vlan_tag_present(skb)) - up = skb->vlan_tci >> VLAN_PRIO_SHIFT; /* channel_ix can be larger than num_channels since * dev->num_real_tx_queues = num_channels * num_tc -- cgit v1.2.3 From 79c48764e1da40341b0e8149417c00efc9849b43 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Tue, 28 Jul 2015 09:35:31 +0300 Subject: net/mlx5e: Add ethtool msglvl support Use 'ethtool -s <dev> msglvl <type> on/off' to toggle debug messages.
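For example, assuming an interface named eth0 (device name and message type illustrative):

	ethtool -s eth0 msglvl link on    # enable 'link' debug messages
	ethtool eth0                      # 'Current message level' reflects the change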
Signed-off-by: Gal Pressman Signed-off-by: Inbar Karmy Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 11 +++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 13 +++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1 + 3 files changed, 25 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index fae7b62d173f..8c872e2e1aa0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -127,6 +127,16 @@ #define MLX5E_NUM_MAIN_GROUPS 9 +#define MLX5E_MSG_LEVEL NETIF_MSG_LINK + +#define mlx5e_dbg(mlevel, priv, format, ...) \ +do { \ + if (NETIF_MSG_##mlevel & (priv)->msglevel) \ + netdev_warn(priv->netdev, format, \ + ##__VA_ARGS__); \ +} while (0) + + static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size) { switch (wq_type) { @@ -754,6 +764,7 @@ struct mlx5e_priv { #endif /* priv data path fields - end */ + u32 msglevel; unsigned long state; struct mutex state_lock; /* Protects Interface state */ struct mlx5e_rq drop_rq; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index b34aa8efb036..63d1ac695a75 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1340,6 +1340,16 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) return mlx5_set_port_wol(mdev, mlx5_wol_mode); } +static u32 mlx5e_get_msglevel(struct net_device *dev) +{ + return ((struct mlx5e_priv *)netdev_priv(dev))->msglevel; +} + +static void mlx5e_set_msglevel(struct net_device *dev, u32 val) +{ + ((struct mlx5e_priv *)netdev_priv(dev))->msglevel = val; +} + static int mlx5e_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) { @@ -1672,4 +1682,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .get_priv_flags = mlx5e_get_priv_flags, .set_priv_flags = mlx5e_set_priv_flags, .self_test = mlx5e_self_test, + .get_msglevel = mlx5e_get_msglevel, + .set_msglevel = mlx5e_set_msglevel, + }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index a97ee38143aa..73d7c672c4ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4091,6 +4091,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev, priv->netdev = netdev; priv->profile = profile; priv->ppriv = ppriv; + priv->msglevel = MLX5E_MSG_LEVEL; priv->hard_mtu = MLX5E_ETH_HARD_MTU; mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev)); -- cgit v1.2.3 From 5da8bc3effb61f0f165ca45c80f4818a234c9f91 Mon Sep 17 00:00:00 2001 From: Inbar Karmy Date: Sun, 18 Jun 2017 09:47:35 +0300 Subject: net/mlx5e: DCBNL, Add debug messages log Add debug prints when changing the QoS configuration through dcbnl. Use 'ethtool -s <dev> msglvl hw on/off' to toggle debug messages.
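These prints are gated by the mlx5e_dbg() helper from the previous patch; a sketch of what a call like mlx5e_dbg(HW, priv, ...) boils down to (message text illustrative):

	/* Fires only when the 'hw' message-level bit has been enabled,
	 * e.g. via 'ethtool -s <dev> msglvl hw on'.
	 */
	if (NETIF_MSG_HW & priv->msglevel)
		netdev_warn(priv->netdev, "QoS configuration changed\n");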
Signed-off-by: Inbar Karmy Reviewed-by: Gal Pressman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 24 +++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index b402d69a701b..c6d90b6dd80e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -241,7 +241,7 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets) u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS]; u8 tc_group[IEEE_8021QAZ_MAX_TCS]; int max_tc = mlx5_max_tc(mdev); - int err; + int err, i; mlx5e_build_tc_group(ets, tc_group, max_tc); mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc); @@ -260,6 +260,14 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets) return err; memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa)); + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + mlx5e_dbg(HW, priv, "%s: prio_%d <=> tc_%d\n", + __func__, i, ets->prio_tc[i]); + mlx5e_dbg(HW, priv, "%s: tc_%d <=> tx_bw_%d%%, group_%d\n", + __func__, i, tc_tx_bw[i], tc_group[i]); + } + return err; } @@ -345,6 +353,11 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev, ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en); mlx5_toggle_port_link(mdev); + if (!ret) { + mlx5e_dbg(HW, priv, + "%s: PFC per priority bit mask: 0x%x\n", + __func__, pfc->pfc_en); + } return ret; } @@ -560,6 +573,11 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev, } } + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + mlx5e_dbg(HW, priv, "%s: tc_%d <=> max_bw %d Gbps\n", + __func__, i, max_bw_value[i]); + } + return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit); } @@ -585,6 +603,10 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev) ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i]; ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS; ets.prio_tc[i] = cee_cfg->prio_to_pg_map[i]; + mlx5e_dbg(HW, priv, + "%s: Priority group %d: tx_bw %d, rx_bw %d, prio_tc %d\n", + __func__, i, ets.tc_tx_bw[i], ets.tc_rx_bw[i], + ets.prio_tc[i]); } err = mlx5e_dbcnl_validate_ets(netdev, &ets); -- cgit v1.2.3 From 21b9c1449d21f347e57ba5e69eec460066e5182a Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Thu, 12 Jan 2017 16:19:29 +0200 Subject: net/mlx5: Enlarge the NIC TC offload table size The NIC TC offload table size was hard coded to 1k. Change it to be min(max NIC RX table size, min(max flow counters, 64k) * num flow groups) where the max values are read from the firmware and the number of flow groups is hard-coded as before this change. We don't know upfront the division of flows to groups (== different masks). This setup allows each group to be of size up to where we want to go (when supported, all offloaded flows use counters). Thus, we don't expect multiple occurrences of a group, which in turn would add steering hops.
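A worked instance of the sizing formula in the hunk below (capability values hypothetical):

	/* With max_flow_counter = 0x10000 (65536) read from firmware,
	 * MLX5E_TC_TABLE_NUM_GROUPS = 4 and log_max_ft_size = 18:
	 *
	 *   tc_grp_size = min(65536, 1 << 16)     = 65536
	 *   tc_tbl_size = min(65536 * 4, 1 << 18) = min(262144, 262144)
	 *               = 262144 entries
	 */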
Signed-off-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 9ba1f72060aa..55979ec2e88a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -90,8 +90,8 @@ enum { MLX5_HEADER_TYPE_NVGRE = 0x1, }; -#define MLX5E_TC_TABLE_NUM_ENTRIES 1024 #define MLX5E_TC_TABLE_NUM_GROUPS 4 +#define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16) struct mod_hdr_key { int num_actions; @@ -263,10 +263,21 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, } if (IS_ERR_OR_NULL(priv->fs.tc.t)) { + int tc_grp_size, tc_tbl_size; + u32 max_flow_counter; + + max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) | + MLX5_CAP_GEN(dev, max_flow_counter_15_0); + + tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE); + + tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS, + BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size))); + priv->fs.tc.t = mlx5_create_auto_grouped_flow_table(priv->fs.ns, MLX5E_TC_PRIO, - MLX5E_TC_TABLE_NUM_ENTRIES, + tc_tbl_size, MLX5E_TC_TABLE_NUM_GROUPS, 0, 0); if (IS_ERR(priv->fs.tc.t)) { -- cgit v1.2.3 From 4c5009c5256d065696d280f3a8f16af090bea3e2 Mon Sep 17 00:00:00 2001 From: Rabie Loulou Date: Wed, 18 Oct 2017 17:58:42 +0300 Subject: net/mlx5: Initialize destination_flow struct to 0 This is needed in order to enlarge it with more members that will get value of 0 when not set. Signed-off-by: Rabie Loulou Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 10 +++++----- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 8 ++++---- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 6 +++--- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 4 ++-- 5 files changed, 15 insertions(+), 15 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index 12d3ced61114..610d485c4b03 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -92,7 +92,7 @@ static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type) static int arfs_disable(struct mlx5e_priv *priv) { - struct mlx5_flow_destination dest; + struct mlx5_flow_destination dest = {}; struct mlx5e_tir *tir = priv->indir_tir; int err = 0; int tt; @@ -126,7 +126,7 @@ int mlx5e_arfs_disable(struct mlx5e_priv *priv) int mlx5e_arfs_enable(struct mlx5e_priv *priv) { - struct mlx5_flow_destination dest; + struct mlx5_flow_destination dest = {}; int err = 0; int tt; int i; @@ -175,7 +175,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv, { struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type]; struct mlx5e_tir *tir = priv->indir_tir; - struct mlx5_flow_destination dest; + struct mlx5_flow_destination dest = {}; MLX5_DECLARE_FLOW_ACT(flow_act); struct mlx5_flow_spec *spec; enum mlx5e_traffic_types tt; @@ -466,7 +466,7 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv, struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; struct arfs_tuple *tuple = &arfs_rule->tuple; struct mlx5_flow_handle *rule = NULL; - struct mlx5_flow_destination dest; + struct 
mlx5_flow_destination dest = {}; MLX5_DECLARE_FLOW_ACT(flow_act); struct arfs_table *arfs_table; struct mlx5_flow_spec *spec; @@ -557,7 +557,7 @@ out: static void arfs_modify_rule_rq(struct mlx5e_priv *priv, struct mlx5_flow_handle *rule, u16 rxq) { - struct mlx5_flow_destination dst; + struct mlx5_flow_destination dst = {}; int err = 0; dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 850cdc980ab5..8016c8aa946d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -162,7 +162,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, u16 vid, struct mlx5_flow_spec *spec) { struct mlx5_flow_table *ft = priv->fs.vlan.ft.t; - struct mlx5_flow_destination dest; + struct mlx5_flow_destination dest = {}; struct mlx5_flow_handle **rule_p; MLX5_DECLARE_FLOW_ACT(flow_act); int err = 0; @@ -738,7 +738,7 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv, static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv) { - struct mlx5_flow_destination dest; + struct mlx5_flow_destination dest = {}; struct mlx5e_ttc_table *ttc; struct mlx5_flow_handle **rules; struct mlx5_flow_table *ft; @@ -909,7 +909,7 @@ mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv, static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv) { - struct mlx5_flow_destination dest; + struct mlx5_flow_destination dest = {}; struct mlx5_flow_handle **rules; struct mlx5e_ttc_table *ttc; struct mlx5_flow_table *ft; @@ -1106,7 +1106,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, struct mlx5e_l2_rule *ai, int type) { struct mlx5_flow_table *ft = priv->fs.l2.ft.t; - struct mlx5_flow_destination dest; + struct mlx5_flow_destination dest = {}; MLX5_DECLARE_FLOW_ACT(flow_act); struct mlx5_flow_spec *spec; int err = 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index c77f4c0c7769..bbb140f517c4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -157,7 +157,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, MLX5_MATCH_OUTER_HEADERS); struct mlx5_flow_handle *flow_rule = NULL; struct mlx5_flow_act flow_act = {0}; - struct mlx5_flow_destination dest; + struct mlx5_flow_destination dest = {}; struct mlx5_flow_spec *spec; void *mv_misc = NULL; void *mc_misc = NULL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index d9fd8570b07c..1143d80119bd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -306,7 +306,7 @@ static struct mlx5_flow_handle * mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn) { struct mlx5_flow_act flow_act = {0}; - struct mlx5_flow_destination dest; + struct mlx5_flow_destination dest = {}; struct mlx5_flow_handle *flow_rule; struct mlx5_flow_spec *spec; void *misc; @@ -395,7 +395,7 @@ out_err: static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) { struct mlx5_flow_act flow_act = {0}; - struct mlx5_flow_destination dest; + struct mlx5_flow_destination dest = {}; struct mlx5_flow_handle *flow_rule = NULL; struct mlx5_flow_spec *spec; int err = 0; @@ -670,7 +670,7 @@ struct mlx5_flow_handle * mlx5_eswitch_create_vport_rx_rule(struct 
mlx5_eswitch *esw, int vport, u32 tirn) { struct mlx5_flow_act flow_act = {0}; - struct mlx5_flow_destination dest; + struct mlx5_flow_destination dest = {}; struct mlx5_flow_handle *flow_rule; struct mlx5_flow_spec *spec; void *misc; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index c7fa1389bace..c70fd663a633 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -912,7 +912,7 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev, struct mlx5_flow_table *new_next_ft, struct mlx5_flow_table *old_next_ft) { - struct mlx5_flow_destination dest; + struct mlx5_flow_destination dest = {}; struct mlx5_flow_rule *iter; int err = 0; @@ -1820,7 +1820,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft, int dest_num) { struct mlx5_flow_root_namespace *root = find_root(&ft->node); - struct mlx5_flow_destination gen_dest; + struct mlx5_flow_destination gen_dest = {}; struct mlx5_flow_table *next_ft = NULL; struct mlx5_flow_handle *handle = NULL; u32 sw_action = flow_act->action; -- cgit v1.2.3 From 458821c72bd02fcd484b9e46526c55e4ab6f57a4 Mon Sep 17 00:00:00 2001 From: Feras Daoud Date: Thu, 7 Sep 2017 16:05:10 +0300 Subject: net/mlx5e: IPoIB, Add inner TTC table to IPoIB flow steering For supported platforms, add inner TTC flow table to enhanced IPoIB flow steering. Signed-off-by: Feras Daoud Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 3 +++ drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 12 +++++++++++- 3 files changed, 16 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 8c872e2e1aa0..95facdf62c77 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -1045,6 +1045,9 @@ void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt); int mlx5e_create_ttc_table(struct mlx5e_priv *priv); void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv); +int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv); +void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv); + int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc, u32 underlay_qpn, u32 *tisn); void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 8016c8aa946d..f0d11ad05ed2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -1005,7 +1005,7 @@ err: return err; } -static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv) +int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv) { struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc; struct mlx5_flow_table_attr ft_attr = {}; @@ -1041,7 +1041,7 @@ err: return err; } -static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv) +void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv) { struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index abf270d7f556..d2a66dc4adc6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -255,15 +255,24 @@ static int 
mlx5i_create_flow_steering(struct mlx5e_priv *priv) priv->netdev->hw_features &= ~NETIF_F_NTUPLE; } + err = mlx5e_create_inner_ttc_table(priv); + if (err) { + netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n", + err); + goto err_destroy_arfs_tables; + } + err = mlx5e_create_ttc_table(priv); if (err) { netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", err); - goto err_destroy_arfs_tables; + goto err_destroy_inner_ttc_table; } return 0; +err_destroy_inner_ttc_table: + mlx5e_destroy_inner_ttc_table(priv); err_destroy_arfs_tables: mlx5e_arfs_destroy_tables(priv); @@ -273,6 +282,7 @@ err_destroy_arfs_tables: static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv) { mlx5e_destroy_ttc_table(priv); + mlx5e_destroy_inner_ttc_table(priv); mlx5e_arfs_destroy_tables(priv); } -- cgit v1.2.3 From 0088cbbc4b66b287132a8a04b3e2509d44a6387c Mon Sep 17 00:00:00 2001 From: Tal Gilboa Date: Tue, 26 Sep 2017 16:20:43 +0300 Subject: net/mlx5e: Enable CQE based moderation on TX CQ By using CQE based moderation on TX CQ we can reduce the TX interrupt rate. Besides the benefit of fewer interrupts, this also allows the kernel to better utilize TSO. Since TSO has some CPU overhead, it might not aggregate when the CPU is under high stress. By reducing the interrupt rate and the CPU utilization, we can get better aggregation and better overall throughput. The feature is enabled by default and has a private flag in ethtool for control. Throughput, interrupt rate and TSO utilization improvements: (ConnectX-4Lx 40GbE, unidirectional, 1/16 TCP streams, 64B packets)
---------------------------------------------------------
Metric   | Streams | CQE Based | EQE Based | improvement
---------------------------------------------------------
BW       | 1       | 2.4Gb/s   | 2.15Gb/s  | +11.6%
IR       | 1       | 27Kips    | 50.6Kips  | -46.7%
TSO Util | 1       | 74.6%     | 71%       | +5%
BW       | 16      | 29Gb/s    | 25.85Gb/s | +12.2%
IR       | 16      | 482Kips   | 745Kips   | -35.3%
TSO Util | 16      | 69.1%     | 49%       | +41.1%
*BW = Bandwidth, IR = Interrupt rate, ips = interrupts per second.
TSO Util = bytes in TSO sessions / all bytes transferred Signed-off-by: Tal Gilboa Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 9 +++-- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 39 +++++++++++++++++----- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 38 +++++++++++++++------ drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c | 8 +++-- 4 files changed, 71 insertions(+), 23 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 95facdf62c77..751f62cae969 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -106,6 +106,7 @@ #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3 #define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10 +#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20 #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80 #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2 @@ -198,12 +199,14 @@ extern const char mlx5e_self_tests[][ETH_GSTRING_LEN]; static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = { "rx_cqe_moder", + "tx_cqe_moder", "rx_cqe_compress", }; enum mlx5e_priv_flag { MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0), - MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 1), + MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1), + MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2), }; #define MLX5E_SET_PFLAG(params, pflag, enable) \ @@ -223,6 +226,7 @@ enum mlx5e_priv_flag { struct mlx5e_cq_moder { u16 usec; u16 pkts; + u8 cq_period_mode; }; struct mlx5e_params { @@ -234,7 +238,6 @@ struct mlx5e_params { u8 log_rq_size; u16 num_channels; u8 num_tc; - u8 rx_cq_period_mode; bool rx_cqe_compress_def; struct mlx5e_cq_moder rx_cq_moderation; struct mlx5e_cq_moder tx_cq_moderation; @@ -926,6 +929,8 @@ void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, int num_channels); int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed); +void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, + u8 cq_period_mode); void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 63d1ac695a75..23425f028405 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1454,29 +1454,36 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev, typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable); -static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable) +static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable, + bool is_rx_cq) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_channels new_channels = {}; - bool rx_mode_changed; - u8 rx_cq_period_mode; + bool mode_changed; + u8 cq_period_mode, current_cq_period_mode; int err = 0; - rx_cq_period_mode = enable ? + cq_period_mode = enable ? MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE; - rx_mode_changed = rx_cq_period_mode != priv->channels.params.rx_cq_period_mode; + current_cq_period_mode = is_rx_cq ? 
+ priv->channels.params.rx_cq_moderation.cq_period_mode : + priv->channels.params.tx_cq_moderation.cq_period_mode; + mode_changed = cq_period_mode != current_cq_period_mode; - if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE && + if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE && !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe)) return -EOPNOTSUPP; - if (!rx_mode_changed) + if (!mode_changed) return 0; new_channels.params = priv->channels.params; - mlx5e_set_rx_cq_mode_params(&new_channels.params, rx_cq_period_mode); + if (is_rx_cq) + mlx5e_set_rx_cq_mode_params(&new_channels.params, cq_period_mode); + else + mlx5e_set_tx_cq_mode_params(&new_channels.params, cq_period_mode); if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { priv->channels.params = new_channels.params; @@ -1491,6 +1498,16 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable) return 0; } +static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable) +{ + return set_pflag_cqe_based_moder(netdev, enable, false); +} + +static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable) +{ + return set_pflag_cqe_based_moder(netdev, enable, true); +} + int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val) { bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS); @@ -1578,6 +1595,12 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags) if (err) goto out; + err = mlx5e_handle_pflag(netdev, pflags, + MLX5E_PFLAG_TX_CQE_BASED_MODER, + set_pflag_tx_cqe_based_moder); + if (err) + goto out; + err = mlx5e_handle_pflag(netdev, pflags, MLX5E_PFLAG_RX_CQE_COMPRESS, set_pflag_rx_cqe_compress); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 73d7c672c4ff..d1c3dc946486 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -681,7 +681,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, } INIT_WORK(&rq->am.work, mlx5e_rx_am_work); - rq->am.mode = params->rx_cq_period_mode; + rq->am.mode = params->rx_cq_moderation.cq_period_mode; rq->page_cache.head = 0; rq->page_cache.tail = 0; @@ -1974,7 +1974,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, } mlx5e_build_common_cq_param(priv, param); - param->cq_period_mode = params->rx_cq_period_mode; + param->cq_period_mode = params->rx_cq_moderation.cq_period_mode; } static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, @@ -1986,8 +1986,7 @@ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv, MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size); mlx5e_build_common_cq_param(priv, param); - - param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE; + param->cq_period_mode = params->tx_cq_moderation.cq_period_mode; } static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv, @@ -3987,14 +3986,32 @@ static bool hw_lro_heuristic(u32 link_speed, u32 pci_bw) (pci_bw <= 16000) && (pci_bw < link_speed)); } +void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) +{ + params->tx_cq_moderation.cq_period_mode = cq_period_mode; + + params->tx_cq_moderation.pkts = + MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; + params->tx_cq_moderation.usec = + MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; + + if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) + params->tx_cq_moderation.usec = + MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE; + + MLX5E_SET_PFLAG(params, 
MLX5E_PFLAG_TX_CQE_BASED_MODER, + params->tx_cq_moderation.cq_period_mode == + MLX5_CQ_PERIOD_MODE_START_FROM_CQE); +} + void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) { - params->rx_cq_period_mode = cq_period_mode; + params->rx_cq_moderation.cq_period_mode = cq_period_mode; params->rx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; params->rx_cq_moderation.usec = - MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) params->rx_cq_moderation.usec = @@ -4002,10 +4019,11 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) if (params->rx_am_enabled) params->rx_cq_moderation = - mlx5e_am_get_def_profile(params->rx_cq_period_mode); + mlx5e_am_get_def_profile(cq_period_mode); MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER, - params->rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE); + params->rx_cq_moderation.cq_period_mode == + MLX5_CQ_PERIOD_MODE_START_FROM_CQE); } u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) @@ -4065,9 +4083,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, MLX5_CQ_PERIOD_MODE_START_FROM_EQE; params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation); mlx5e_set_rx_cq_mode_params(params, cq_period_mode); - - params->tx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; - params->tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; + mlx5e_set_tx_cq_mode_params(params, cq_period_mode); /* TX inline */ params->tx_max_inline = mlx5e_get_max_inline_cap(mdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c index acf32fe952cd..e401d9d245f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c @@ -63,7 +63,11 @@ profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = { static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix) { - return profile[cq_period_mode][ix]; + struct mlx5e_cq_moder cq_moder; + + cq_moder = profile[cq_period_mode][ix]; + cq_moder.cq_period_mode = cq_period_mode; + return cq_moder; } struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode) @@ -75,7 +79,7 @@ struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode) else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */ default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE; - return profile[rx_cq_period_mode][default_profile_ix]; + return mlx5e_am_get_profile(rx_cq_period_mode, default_profile_ix); } /* Adaptive moderation logic */ -- cgit v1.2.3 From f21506cb42112b1c0b391dae7a700e69a42128e8 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 3 Nov 2017 13:52:24 +0100 Subject: dpaa_eth: avoid uninitialized variable false-positive warning We can now build this driver on ARM, so I ran into a randconfig build warning that presumably had existed on powerpc already. drivers/net/ethernet/freescale/dpaa/dpaa_eth.c: In function 'sg_fd_to_skb': drivers/net/ethernet/freescale/dpaa/dpaa_eth.c:1712:18: error: 'skb' may be used uninitialized in this function [-Werror=maybe-uninitialized] I'm slightly changing the logic here, to make it obvious to the compiler that 'skb' is always initialized. Signed-off-by: Arnd Bergmann Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 969f6b12952e..ebc55b6a6349 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -1721,6 +1721,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, /* Iterate through the SGT entries and add data buffers to the skb */ sgt = vaddr + fd_off; + skb = NULL; for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) { /* Extension bit is not supported */ WARN_ON(qm_sg_entry_is_ext(&sgt[i])); @@ -1738,7 +1739,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size, DMA_FROM_DEVICE); - if (i == 0) { + if (!skb) { sz = dpaa_bp->size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); skb = build_skb(sg_vaddr, sz); -- cgit v1.2.3 From f4e63525ee35f9c02e9f51f90571718363e9a9a9 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 3 Nov 2017 13:56:16 -0700 Subject: net: bpf: rename ndo_xdp to ndo_bpf ndo_xdp is a control path callback for setting up XDP in the driver. We can reuse it for other forms of communication between the eBPF stack and the drivers. Rename the callback and associated structures and definitions. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Reviewed-by: Quentin Monnet Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h | 2 +- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 4 +-- drivers/net/ethernet/intel/i40e/i40e_main.c | 6 ++-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 4 +-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 6 ++-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 +-- .../net/ethernet/netronome/nfp/nfp_net_common.c | 4 +-- drivers/net/ethernet/qlogic/qede/qede.h | 2 +- drivers/net/ethernet/qlogic/qede/qede_filter.c | 2 +- drivers/net/ethernet/qlogic/qede/qede_main.c | 4 +-- drivers/net/tun.c | 4 +-- drivers/net/virtio_net.c | 4 +-- include/linux/netdevice.h | 23 ++++++++------- net/core/dev.c | 34 +++++++++++----------- net/core/rtnetlink.c | 4 +-- 17 files changed, 56 insertions(+), 55 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 4e3d569bf32e..96416f5d97f3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7775,7 +7775,7 @@ static const struct net_device_ops bnxt_netdev_ops = { #endif .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, - .ndo_xdp = bnxt_xdp, + .ndo_bpf = bnxt_xdp, .ndo_bridge_getlink = bnxt_bridge_getlink, .ndo_bridge_setlink = bnxt_bridge_setlink, .ndo_get_phys_port_name = bnxt_get_phys_port_name diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 06ce63c00821..261e5847557a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -208,7 +208,7 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog) return 0; } -int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp) +int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp) 
{ struct bnxt *bp = netdev_priv(dev); int rc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h index 12a5ad66b564..414b748038ca 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h @@ -16,6 +16,6 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts); bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons, struct page *page, u8 **data_ptr, unsigned int *len, u8 *event); -int bnxt_xdp(struct net_device *dev, struct netdev_xdp *xdp); +int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp); #endif diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 71989e180289..a063c36c4c58 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1741,7 +1741,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog) return 0; } -static int nicvf_xdp(struct net_device *netdev, struct netdev_xdp *xdp) +static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp) { struct nicvf *nic = netdev_priv(netdev); @@ -1774,7 +1774,7 @@ static const struct net_device_ops nicvf_netdev_ops = { .ndo_tx_timeout = nicvf_tx_timeout, .ndo_fix_features = nicvf_fix_features, .ndo_set_features = nicvf_set_features, - .ndo_xdp = nicvf_xdp, + .ndo_bpf = nicvf_xdp, }; static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index dfecaeda0654..05b94d87a6c3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -11648,12 +11648,12 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, } /** - * i40e_xdp - implements ndo_xdp for i40e + * i40e_xdp - implements ndo_bpf for i40e * @dev: netdevice * @xdp: XDP command **/ static int i40e_xdp(struct net_device *dev, - struct netdev_xdp *xdp) + struct netdev_bpf *xdp) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; @@ -11705,7 +11705,7 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_features_check = i40e_features_check, .ndo_bridge_getlink = i40e_ndo_bridge_getlink, .ndo_bridge_setlink = i40e_ndo_bridge_setlink, - .ndo_xdp = i40e_xdp, + .ndo_bpf = i40e_xdp, }; /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 507977994a03..e5dcb25be398 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -10004,7 +10004,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) return 0; } -static int ixgbe_xdp(struct net_device *dev, struct netdev_xdp *xdp) +static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp) { struct ixgbe_adapter *adapter = netdev_priv(dev); @@ -10113,7 +10113,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port, .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port, .ndo_features_check = ixgbe_features_check, - .ndo_xdp = ixgbe_xdp, + .ndo_bpf = ixgbe_xdp, .ndo_xdp_xmit = ixgbe_xdp_xmit, .ndo_xdp_flush = ixgbe_xdp_flush, }; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index d611df2f274d..736a6ccaf05e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ 
b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2916,7 +2916,7 @@ static u32 mlx4_xdp_query(struct net_device *dev) return prog_id; } -static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp) +static int mlx4_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: @@ -2958,7 +2958,7 @@ static const struct net_device_ops mlx4_netdev_ops = { .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, .ndo_features_check = mlx4_en_features_check, .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, - .ndo_xdp = mlx4_xdp, + .ndo_bpf = mlx4_xdp, }; static const struct net_device_ops mlx4_netdev_ops_master = { @@ -2995,7 +2995,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = { .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port, .ndo_features_check = mlx4_en_features_check, .ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate, - .ndo_xdp = mlx4_xdp, + .ndo_bpf = mlx4_xdp, }; struct mlx4_en_bond { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 28ae00b3eb88..3b7b7bb84eb0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3831,7 +3831,7 @@ static u32 mlx5e_xdp_query(struct net_device *dev) return prog_id; } -static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp) +static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: @@ -3883,7 +3883,7 @@ static const struct net_device_ops mlx5e_netdev_ops = { .ndo_rx_flow_steer = mlx5e_rx_flow_steer, #endif .ndo_tx_timeout = mlx5e_tx_timeout, - .ndo_xdp = mlx5e_xdp, + .ndo_bpf = mlx5e_xdp, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mlx5e_netpoll, #endif diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 185a3dd35a3f..f6c6ad4e8a59 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -3378,7 +3378,7 @@ nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog, u32 flags, return 0; } -static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp) +static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp) { struct nfp_net *nn = netdev_priv(netdev); @@ -3441,7 +3441,7 @@ const struct net_device_ops nfp_net_netdev_ops = { .ndo_get_phys_port_name = nfp_port_get_phys_port_name, .ndo_udp_tunnel_add = nfp_net_add_vxlan_port, .ndo_udp_tunnel_del = nfp_net_del_vxlan_port, - .ndo_xdp = nfp_net_xdp, + .ndo_bpf = nfp_net_xdp, }; /** diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index adb700512baa..a3a70ade411f 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -503,7 +503,7 @@ void qede_fill_rss_params(struct qede_dev *edev, void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti); void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti); -int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp); +int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp); #ifdef CONFIG_DCB void qede_set_dcbnl_ops(struct net_device *ndev); diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index f79e36e4060a..c1a0708a7d7c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -1065,7 +1065,7 @@ 
static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog) return 0; } -int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp) +int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp) { struct qede_dev *edev = netdev_priv(dev); diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index e5ee9f274a71..8f9b3eb82137 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -556,7 +556,7 @@ static const struct net_device_ops qede_netdev_ops = { .ndo_udp_tunnel_add = qede_udp_tunnel_add, .ndo_udp_tunnel_del = qede_udp_tunnel_del, .ndo_features_check = qede_features_check, - .ndo_xdp = qede_xdp, + .ndo_bpf = qede_xdp, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = qede_rx_flow_steer, #endif @@ -594,7 +594,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = { .ndo_udp_tunnel_add = qede_udp_tunnel_add, .ndo_udp_tunnel_del = qede_udp_tunnel_del, .ndo_features_check = qede_features_check, - .ndo_xdp = qede_xdp, + .ndo_bpf = qede_xdp, }; /* ------------------------------------------------------------------------- diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 8125956f62a1..1a326b697221 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1141,7 +1141,7 @@ static u32 tun_xdp_query(struct net_device *dev) return 0; } -static int tun_xdp(struct net_device *dev, struct netdev_xdp *xdp) +static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: @@ -1185,7 +1185,7 @@ static const struct net_device_ops tap_netdev_ops = { .ndo_features_check = passthru_features_check, .ndo_set_rx_headroom = tun_set_headroom, .ndo_get_stats64 = tun_net_get_stats64, - .ndo_xdp = tun_xdp, + .ndo_bpf = tun_xdp, }; static void tun_flow_init(struct tun_struct *tun) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index fc059f193e7d..edf984406ba0 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2088,7 +2088,7 @@ static u32 virtnet_xdp_query(struct net_device *dev) return 0; } -static int virtnet_xdp(struct net_device *dev, struct netdev_xdp *xdp) +static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp) { switch (xdp->command) { case XDP_SETUP_PROG: @@ -2115,7 +2115,7 @@ static const struct net_device_ops virtnet_netdev = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = virtnet_netpoll, #endif - .ndo_xdp = virtnet_xdp, + .ndo_bpf = virtnet_xdp, .ndo_xdp_xmit = virtnet_xdp_xmit, .ndo_xdp_flush = virtnet_xdp_flush, .ndo_features_check = passthru_features_check, diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 7de7656550c2..9af9feaaeb64 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -779,10 +779,10 @@ enum tc_setup_type { TC_SETUP_CBS, }; -/* These structures hold the attributes of xdp state that are being passed - * to the netdevice through the xdp op. +/* These structures hold the attributes of bpf state that are being passed + * to the netdevice through the bpf op. */ -enum xdp_netdev_command { +enum bpf_netdev_command { /* Set or clear a bpf program used in the earliest stages of packet * rx. The prog will have been loaded as BPF_PROG_TYPE_XDP. 
The callee * is responsible for calling bpf_prog_put on any old progs that are @@ -801,8 +801,8 @@ enum xdp_netdev_command { struct netlink_ext_ack; -struct netdev_xdp { - enum xdp_netdev_command command; +struct netdev_bpf { + enum bpf_netdev_command command; union { /* XDP_SETUP_PROG */ struct { @@ -1124,9 +1124,10 @@ struct dev_ifalias { * appropriate rx headroom value allows avoiding skb head copy on * forward. Setting a negative value resets the rx headroom to the * default value. - * int (*ndo_xdp)(struct net_device *dev, struct netdev_xdp *xdp); + * int (*ndo_bpf)(struct net_device *dev, struct netdev_bpf *bpf); * This function is used to set or query state related to XDP on the - * netdevice. See definition of enum xdp_netdev_command for details. + * netdevice and manage BPF offload. See definition of + * enum bpf_netdev_command for details. * int (*ndo_xdp_xmit)(struct net_device *dev, struct xdp_buff *xdp); * This function is used to submit a XDP packet for transmit on a * netdevice. @@ -1315,8 +1316,8 @@ struct net_device_ops { struct sk_buff *skb); void (*ndo_set_rx_headroom)(struct net_device *dev, int needed_headroom); - int (*ndo_xdp)(struct net_device *dev, - struct netdev_xdp *xdp); + int (*ndo_bpf)(struct net_device *dev, + struct netdev_bpf *bpf); int (*ndo_xdp_xmit)(struct net_device *dev, struct xdp_buff *xdp); void (*ndo_xdp_flush)(struct net_device *dev); @@ -3311,10 +3312,10 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, int *ret); -typedef int (*xdp_op_t)(struct net_device *dev, struct netdev_xdp *xdp); +typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, u32 flags); -u8 __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op, u32 *prog_id); +u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t xdp_op, u32 *prog_id); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); diff --git a/net/core/dev.c b/net/core/dev.c index 1423cf4d695c..10cde58d3275 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4545,7 +4545,7 @@ static int __netif_receive_skb(struct sk_buff *skb) return ret; } -static int generic_xdp_install(struct net_device *dev, struct netdev_xdp *xdp) +static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp) { struct bpf_prog *old = rtnl_dereference(dev->xdp_prog); struct bpf_prog *new = xdp->prog; @@ -7090,26 +7090,26 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down) } EXPORT_SYMBOL(dev_change_proto_down); -u8 __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op, u32 *prog_id) +u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t bpf_op, u32 *prog_id) { - struct netdev_xdp xdp; + struct netdev_bpf xdp; memset(&xdp, 0, sizeof(xdp)); xdp.command = XDP_QUERY_PROG; /* Query must always succeed. 
*/ - WARN_ON(xdp_op(dev, &xdp) < 0); + WARN_ON(bpf_op(dev, &xdp) < 0); if (prog_id) *prog_id = xdp.prog_id; return xdp.prog_attached; } -static int dev_xdp_install(struct net_device *dev, xdp_op_t xdp_op, +static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op, struct netlink_ext_ack *extack, u32 flags, struct bpf_prog *prog) { - struct netdev_xdp xdp; + struct netdev_bpf xdp; memset(&xdp, 0, sizeof(xdp)); if (flags & XDP_FLAGS_HW_MODE) @@ -7120,7 +7120,7 @@ static int dev_xdp_install(struct net_device *dev, xdp_op_t xdp_op, xdp.flags = flags; xdp.prog = prog; - return xdp_op(dev, &xdp); + return bpf_op(dev, &xdp); } /** @@ -7137,24 +7137,24 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, { const struct net_device_ops *ops = dev->netdev_ops; struct bpf_prog *prog = NULL; - xdp_op_t xdp_op, xdp_chk; + bpf_op_t bpf_op, bpf_chk; int err; ASSERT_RTNL(); - xdp_op = xdp_chk = ops->ndo_xdp; - if (!xdp_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) + bpf_op = bpf_chk = ops->ndo_bpf; + if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) return -EOPNOTSUPP; - if (!xdp_op || (flags & XDP_FLAGS_SKB_MODE)) - xdp_op = generic_xdp_install; - if (xdp_op == xdp_chk) - xdp_chk = generic_xdp_install; + if (!bpf_op || (flags & XDP_FLAGS_SKB_MODE)) + bpf_op = generic_xdp_install; + if (bpf_op == bpf_chk) + bpf_chk = generic_xdp_install; if (fd >= 0) { - if (xdp_chk && __dev_xdp_attached(dev, xdp_chk, NULL)) + if (bpf_chk && __dev_xdp_attached(dev, bpf_chk, NULL)) return -EEXIST; if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && - __dev_xdp_attached(dev, xdp_op, NULL)) + __dev_xdp_attached(dev, bpf_op, NULL)) return -EBUSY; prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP); @@ -7162,7 +7162,7 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, return PTR_ERR(prog); } - err = dev_xdp_install(dev, xdp_op, extack, flags, prog); + err = dev_xdp_install(dev, bpf_op, extack, flags, prog); if (err < 0 && prog) bpf_prog_put(prog); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 8a8c51937edf..dc5ad84ac096 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1270,10 +1270,10 @@ static u8 rtnl_xdp_attached_mode(struct net_device *dev, u32 *prog_id) *prog_id = generic_xdp_prog->aux->id; return XDP_ATTACHED_SKB; } - if (!ops->ndo_xdp) + if (!ops->ndo_bpf) return XDP_ATTACHED_NONE; - return __dev_xdp_attached(dev, ops->ndo_xdp, prog_id); + return __dev_xdp_attached(dev, ops->ndo_bpf, prog_id); } static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev) -- cgit v1.2.3 From 012bb8a8b5a2688590f829884acc83697d68a96d Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 3 Nov 2017 13:56:22 -0700 Subject: nfp: bpf: drop support for cls_bpf with legacy actions Only support BPF_PROG_TYPE_SCHED_CLS programs in direct action mode. This simplifies preparing the offload since there will now be only one mode of operation for that type of program. We need to know the attachment mode type of cls_bpf programs, because exit codes are interpreted differently for legacy vs DA mode. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 87 ++--------------- drivers/net/ethernet/netronome/nfp/bpf/main.c | 33 ++----- drivers/net/ethernet/netronome/nfp/bpf/main.h | 30 +----- drivers/net/ethernet/netronome/nfp/bpf/offload.c | 108 +--------------------- drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 11 +-- 5 files changed, 22 insertions(+), 247 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 2609a2487100..e1907a1d269e 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -201,47 +201,6 @@ emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer) BR_CSS_NONE, addr, defer); } -static void -__emit_br_byte(struct nfp_prog *nfp_prog, u8 areg, u8 breg, bool imm8, - u8 byte, bool equal, u16 addr, u8 defer, bool src_lmextn) -{ - u16 addr_lo, addr_hi; - u64 insn; - - addr_lo = addr & (OP_BB_ADDR_LO >> __bf_shf(OP_BB_ADDR_LO)); - addr_hi = addr != addr_lo; - - insn = OP_BBYTE_BASE | - FIELD_PREP(OP_BB_A_SRC, areg) | - FIELD_PREP(OP_BB_BYTE, byte) | - FIELD_PREP(OP_BB_B_SRC, breg) | - FIELD_PREP(OP_BB_I8, imm8) | - FIELD_PREP(OP_BB_EQ, equal) | - FIELD_PREP(OP_BB_DEFBR, defer) | - FIELD_PREP(OP_BB_ADDR_LO, addr_lo) | - FIELD_PREP(OP_BB_ADDR_HI, addr_hi) | - FIELD_PREP(OP_BB_SRC_LMEXTN, src_lmextn); - - nfp_prog_push(nfp_prog, insn); -} - -static void -emit_br_byte_neq(struct nfp_prog *nfp_prog, - swreg src, u8 imm, u8 byte, u16 addr, u8 defer) -{ - struct nfp_insn_re_regs reg; - int err; - - err = swreg_to_restricted(reg_none(), src, reg_imm(imm), ®, true); - if (err) { - nfp_prog->error = err; - return; - } - - __emit_br_byte(nfp_prog, reg.areg, reg.breg, reg.i8, byte, false, addr, - defer, reg.src_lmextn); -} - static void __emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi, enum immed_width width, bool invert, @@ -1547,7 +1506,7 @@ mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, unsigned int size) { if (meta->ptr.type == PTR_TO_CTX) { - if (nfp_prog->act == NN_ACT_XDP) + if (nfp_prog->type == BPF_PROG_TYPE_XDP) return mem_ldx_xdp(nfp_prog, meta, size); else return mem_ldx_skb(nfp_prog, meta, size); @@ -2022,34 +1981,6 @@ static void nfp_intro(struct nfp_prog *nfp_prog) plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog)); } -static void nfp_outro_tc_legacy(struct nfp_prog *nfp_prog) -{ - const u8 act2code[] = { - [NN_ACT_TC_DROP] = 0x22, - [NN_ACT_TC_REDIR] = 0x24 - }; - /* Target for aborts */ - nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog); - wrp_immed(nfp_prog, reg_both(0), 0); - - /* Target for normal exits */ - nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog); - /* Legacy TC mode: - * 0 0x11 -> pass, count as stat0 - * -1 drop 0x22 -> drop, count as stat1 - * redir 0x24 -> redir, count as stat1 - * ife mark 0x21 -> pass, count as stat1 - * ife + tx 0x24 -> redir, count as stat1 - */ - emit_br_byte_neq(nfp_prog, reg_b(0), 0xff, 0, nfp_prog->tgt_done, 2); - wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS); - emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16); - - emit_br(nfp_prog, BR_UNC, nfp_prog->tgt_done, 1); - emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(act2code[nfp_prog->act]), - SHF_SC_L_SHF, 16); -} - static void nfp_outro_tc_da(struct nfp_prog *nfp_prog) { /* TC direct-action mode: @@ -2142,17 +2073,15 @@ static void nfp_outro_xdp(struct nfp_prog *nfp_prog) static void nfp_outro(struct nfp_prog *nfp_prog) { - 
switch (nfp_prog->act) { - case NN_ACT_DIRECT: + switch (nfp_prog->type) { + case BPF_PROG_TYPE_SCHED_CLS: nfp_outro_tc_da(nfp_prog); break; - case NN_ACT_TC_DROP: - case NN_ACT_TC_REDIR: - nfp_outro_tc_legacy(nfp_prog); - break; - case NN_ACT_XDP: + case BPF_PROG_TYPE_XDP: nfp_outro_xdp(nfp_prog); break; + default: + WARN_ON(1); } } @@ -2351,7 +2280,6 @@ static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore) * nfp_bpf_jit() - translate BPF code into NFP assembly * @filter: kernel BPF filter struct * @prog_mem: memory to store assembler instructions - * @act: action attached to this eBPF program * @prog_start: offset of the first instruction when loaded * @prog_done: where to jump on exit * @prog_sz: size of @prog_mem in instructions @@ -2359,7 +2287,6 @@ static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore) */ int nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem, - enum nfp_bpf_action_type act, unsigned int prog_start, unsigned int prog_done, unsigned int prog_sz, struct nfp_bpf_result *res) { @@ -2371,7 +2298,7 @@ nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem, return -ENOMEM; INIT_LIST_HEAD(&nfp_prog->insns); - nfp_prog->act = act; + nfp_prog->type = filter->type; nfp_prog->start_off = prog_start; nfp_prog->tgt_done = prog_done; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 8e3e89cace8d..2ff97f12c160 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -85,34 +85,10 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn) return nfp_net_ebpf_capable(nn) ? "BPF" : ""; } -static int -nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) -{ - struct nfp_net_bpf_priv *priv; - int ret; - - priv = kmalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - nn->app_priv = priv; - spin_lock_init(&priv->rx_filter_lock); - priv->nn = nn; - timer_setup(&priv->rx_filter_stats_timer, - nfp_net_filter_stats_timer, 0); - - ret = nfp_app_nic_vnic_alloc(app, nn, id); - if (ret) - kfree(priv); - - return ret; -} - static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn) { if (nn->dp.bpf_offload_xdp) nfp_bpf_xdp_offload(app, nn, NULL); - kfree(nn->app_priv); } static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type, @@ -133,6 +109,13 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type, if (nn->dp.bpf_offload_xdp) return -EBUSY; + /* Only support TC direct action */ + if (!cls_bpf->exts_integrated || + tcf_exts_has_actions(cls_bpf->exts)) { + nn_err(nn, "only direct action with no legacy actions supported\n"); + return -EOPNOTSUPP; + } + return nfp_net_bpf_offload(nn, cls_bpf); default: return -EOPNOTSUPP; @@ -184,7 +167,7 @@ const struct nfp_app_type app_bpf = { .extra_cap = nfp_bpf_extra_cap, - .vnic_alloc = nfp_bpf_vnic_alloc, + .vnic_alloc = nfp_app_nic_vnic_alloc, .vnic_free = nfp_bpf_vnic_free, .setup_tc = nfp_bpf_setup_tc, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index bc604030ff6c..c5280de2ab14 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -65,13 +65,6 @@ enum pkt_vec { PKT_VEC_PKT_PTR = 2, }; -enum nfp_bpf_action_type { - NN_ACT_TC_DROP, - NN_ACT_TC_REDIR, - NN_ACT_DIRECT, - NN_ACT_XDP, -}; - #define pv_len(np) reg_lm(1, PKT_VEC_PKT_LEN) #define pv_ctm_ptr(np) reg_lm(1, PKT_VEC_PKT_PTR) @@ -147,7 +140,7 @@ 
static inline u8 mbpf_mode(const struct nfp_insn_meta *meta) * @prog: machine code * @prog_len: number of valid instructions in @prog array * @__prog_alloc_len: alloc size of @prog array - * @act: BPF program/action type (TC DA, TC with action, XDP etc.) + * @type: BPF program type * @num_regs: number of registers used by this program * @regs_per_thread: number of basic registers allocated per thread * @start_off: address of the first instruction in the memory @@ -164,7 +157,7 @@ struct nfp_prog { unsigned int prog_len; unsigned int __prog_alloc_len; - enum nfp_bpf_action_type act; + enum bpf_prog_type type; unsigned int num_regs; unsigned int regs_per_thread; @@ -188,7 +181,7 @@ struct nfp_bpf_result { }; int -nfp_bpf_jit(struct bpf_prog *filter, void *prog, enum nfp_bpf_action_type act, +nfp_bpf_jit(struct bpf_prog *filter, void *prog, unsigned int prog_start, unsigned int prog_done, unsigned int prog_sz, struct nfp_bpf_result *res); @@ -197,23 +190,6 @@ int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog); struct nfp_net; struct tc_cls_bpf_offload; -/** - * struct nfp_net_bpf_priv - per-vNIC BPF private data - * @rx_filter: Filter offload statistics - dropped packets/bytes - * @rx_filter_prev: Filter offload statistics - values from previous update - * @rx_filter_change: Jiffies when statistics last changed - * @rx_filter_stats_timer: Timer for polling filter offload statistics - * @rx_filter_lock: Lock protecting timer state changes (teardown) - */ -struct nfp_net_bpf_priv { - struct nfp_stat_pair rx_filter, rx_filter_prev; - unsigned long rx_filter_change; - struct timer_list rx_filter_stats_timer; - struct nfp_net *nn; - spinlock_t rx_filter_lock; -}; - int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf); -void nfp_net_filter_stats_timer(struct timer_list *t); #endif diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 6d576f631392..b9b5d675c4d3 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -51,92 +51,6 @@ #include "../nfp_net_ctrl.h" #include "../nfp_net.h" -void nfp_net_filter_stats_timer(struct timer_list *t) -{ - struct nfp_net_bpf_priv *priv = from_timer(priv, t, - rx_filter_stats_timer); - struct nfp_net *nn = priv->nn; - struct nfp_stat_pair latest; - - spin_lock_bh(&priv->rx_filter_lock); - - if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) - mod_timer(&priv->rx_filter_stats_timer, - jiffies + NFP_NET_STAT_POLL_IVL); - - spin_unlock_bh(&priv->rx_filter_lock); - - latest.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES); - latest.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES); - - if (latest.pkts != priv->rx_filter.pkts) - priv->rx_filter_change = jiffies; - - priv->rx_filter = latest; -} - -static void nfp_net_bpf_stats_reset(struct nfp_net *nn) -{ - struct nfp_net_bpf_priv *priv = nn->app_priv; - - priv->rx_filter.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES); - priv->rx_filter.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES); - priv->rx_filter_prev = priv->rx_filter; - priv->rx_filter_change = jiffies; -} - -static int -nfp_net_bpf_stats_update(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) -{ - struct nfp_net_bpf_priv *priv = nn->app_priv; - u64 bytes, pkts; - - pkts = priv->rx_filter.pkts - priv->rx_filter_prev.pkts; - bytes = priv->rx_filter.bytes - priv->rx_filter_prev.bytes; - bytes -= pkts * ETH_HLEN; - - priv->rx_filter_prev = priv->rx_filter; - - 
tcf_exts_stats_update(cls_bpf->exts, - bytes, pkts, priv->rx_filter_change); - - return 0; -} - -static int -nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) -{ - const struct tc_action *a; - LIST_HEAD(actions); - - if (!cls_bpf->exts) - return NN_ACT_XDP; - - /* TC direct action */ - if (cls_bpf->exts_integrated) { - if (!tcf_exts_has_actions(cls_bpf->exts)) - return NN_ACT_DIRECT; - - return -EOPNOTSUPP; - } - - /* TC legacy mode */ - if (!tcf_exts_has_one_action(cls_bpf->exts)) - return -EOPNOTSUPP; - - tcf_exts_to_list(cls_bpf->exts, &actions); - list_for_each_entry(a, &actions, list) { - if (is_tcf_gact_shot(a)) - return NN_ACT_TC_DROP; - - if (is_tcf_mirred_egress_redirect(a) && - tcf_mirred_ifindex(a) == nn->dp.netdev->ifindex) - return NN_ACT_TC_REDIR; - } - - return -EOPNOTSUPP; -} - static int nfp_net_bpf_offload_prepare(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf, @@ -144,17 +58,11 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn, void **code, dma_addr_t *dma_addr, u16 max_instr) { unsigned int code_sz = max_instr * sizeof(u64); - enum nfp_bpf_action_type act; unsigned int stack_size; u16 start_off, done_off; unsigned int max_mtu; int ret; - ret = nfp_net_bpf_get_act(nn, cls_bpf); - if (ret < 0) - return ret; - act = ret; - max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32; if (max_mtu < nn->dp.netdev->mtu) { nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n"); @@ -175,7 +83,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn, if (!*code) return -ENOMEM; - ret = nfp_bpf_jit(cls_bpf->prog, *code, act, start_off, done_off, + ret = nfp_bpf_jit(cls_bpf->prog, *code, start_off, done_off, max_instr, res); if (ret) goto out; @@ -193,7 +101,6 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags, unsigned int code_sz, unsigned int n_instr, bool dense_mode) { - struct nfp_net_bpf_priv *priv = nn->app_priv; u64 bpf_addr = dma_addr; int err; @@ -218,25 +125,15 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags, nn_err(nn, "FW command error while enabling BPF: %d\n", err); dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr); - - nfp_net_bpf_stats_reset(nn); - mod_timer(&priv->rx_filter_stats_timer, - jiffies + NFP_NET_STAT_POLL_IVL); } static int nfp_net_bpf_stop(struct nfp_net *nn) { - struct nfp_net_bpf_priv *priv = nn->app_priv; - if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)) return 0; - spin_lock_bh(&priv->rx_filter_lock); nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF; - spin_unlock_bh(&priv->rx_filter_lock); nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl); - - del_timer_sync(&priv->rx_filter_stats_timer); nn->dp.bpf_offload_skip_sw = 0; return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN); @@ -292,9 +189,6 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) case TC_CLSBPF_DESTROY: return nfp_net_bpf_stop(nn); - case TC_CLSBPF_STATS: - return nfp_net_bpf_stats_update(nn, cls_bpf); - default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index a8c7615546a9..4f31bdefd331 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -81,7 +81,7 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog, const struct bpf_reg_state *reg0 = cur_regs(env) + BPF_REG_0; u64 imm; - if (nfp_prog->act == NN_ACT_XDP) + if (nfp_prog->type == BPF_PROG_TYPE_XDP) return 0; if (!(reg0->type == SCALAR_VALUE && tnum_is_const(reg0->var_off))) { @@ 
-94,13 +94,8 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog, } imm = reg0->var_off.value; - if (nfp_prog->act != NN_ACT_DIRECT && imm != 0 && (imm & ~0U) != ~0U) { - pr_info("unsupported exit state: %d, imm: %llx\n", - reg0->type, imm); - return -EINVAL; - } - - if (nfp_prog->act == NN_ACT_DIRECT && imm <= TC_ACT_REDIRECT && + if (nfp_prog->type == BPF_PROG_TYPE_SCHED_CLS && + imm <= TC_ACT_REDIRECT && imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN && imm != TC_ACT_QUEUED) { pr_info("unsupported exit state: %d, imm: %llx\n", -- cgit v1.2.3 From 94508438e8ea4391696c5171527678e9dbd08789 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 3 Nov 2017 13:56:23 -0700 Subject: nfp: bpf: remove the register renumbering leftovers The register renumbering was removed and will not be coming back in its old, naive form, given that it would be fundamentally incompatible with calling functions. Remove the leftovers. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 4 ---- drivers/net/ethernet/netronome/nfp/bpf/main.h | 6 ------ drivers/net/ethernet/netronome/nfp/bpf/offload.c | 13 ++++--------- 3 files changed, 4 insertions(+), 19 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index e1907a1d269e..ff150c27f411 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -2314,9 +2314,6 @@ nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem, if (ret) goto out; - nfp_prog->num_regs = MAX_BPF_REG; - nfp_prog->regs_per_thread = 32; - nfp_prog->prog = prog_mem; nfp_prog->__prog_alloc_len = prog_sz; @@ -2331,7 +2328,6 @@ nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem, ret = nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)prog_mem); res->n_instr = nfp_prog->prog_len; - res->dense_mode = false; out: nfp_prog_free(nfp_prog); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index c5280de2ab14..85b7d9398cda 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -141,8 +141,6 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta) * @prog_len: number of valid instructions in @prog array * @__prog_alloc_len: alloc size of @prog array * @type: BPF program type - * @num_regs: number of registers used by this program - * @regs_per_thread: number of basic registers allocated per thread * @start_off: address of the first instruction in the memory * @tgt_out: jump target for normal exit * @tgt_abort: jump target for abort (e.g. 
access outside of packet buffer) @@ -159,9 +157,6 @@ struct nfp_prog { enum bpf_prog_type type; - unsigned int num_regs; - unsigned int regs_per_thread; - unsigned int start_off; unsigned int tgt_out; unsigned int tgt_abort; @@ -177,7 +172,6 @@ struct nfp_prog { struct nfp_bpf_result { unsigned int n_instr; - bool dense_mode; }; int diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index b9b5d675c4d3..268ba1ba82db 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -98,19 +98,14 @@ out: static void nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags, void *code, dma_addr_t dma_addr, - unsigned int code_sz, unsigned int n_instr, - bool dense_mode) + unsigned int code_sz, unsigned int n_instr) { - u64 bpf_addr = dma_addr; int err; nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW); - if (dense_mode) - bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX; - nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr); - nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, bpf_addr); + nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr); /* Load up the JITed code */ err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF); @@ -169,7 +164,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) nfp_net_bpf_stop(nn); nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code, dma_addr, max_instr * sizeof(u64), - res.n_instr, res.dense_mode); + res.n_instr); return 0; case TC_CLSBPF_ADD: @@ -183,7 +178,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code, dma_addr, max_instr * sizeof(u64), - res.n_instr, res.dense_mode); + res.n_instr); return 0; case TC_CLSBPF_DESTROY: -- cgit v1.2.3 From 5559eedb78127d6b76c36e3918a75bbc2801653a Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 3 Nov 2017 13:56:24 -0700 Subject: nfp: bpf: remove unnecessary include of nfp_net.h BPF offload's main header does not need to include nfp_net.h. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/main.h | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 85b7d9398cda..9f0df6a9786d 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -41,7 +41,6 @@ #include #include "../nfp_asm.h" -#include "../nfp_net.h" /* For branch fixup logic use up-most byte of branch instruction as scratch * area. Remember to clear this before sending instructions to HW! -- cgit v1.2.3 From 9ce7a956327ad6c14e1a7eb9f4cb5300c8b61db6 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 3 Nov 2017 13:56:25 -0700 Subject: nfp: bpf: refactor offload logic We currently create a fake cls_bpf offload object when we want to offload XDP. Simplify and clarify the code by moving the TC/XDP specific logic out of common offload code. This is easy now that we don't support legacy TC actions. We only need the bpf program and state of the skip_sw flag. Temporarily set @code to NULL in nfp_net_bpf_offload(), compilers seem to have trouble recognizing it's always initialized. Next patches will eliminate that variable. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/bpf/main.c | 67 +++++++++++----------- drivers/net/ethernet/netronome/nfp/bpf/main.h | 4 +- drivers/net/ethernet/netronome/nfp/bpf/offload.c | 73 ++++++++++-------------- 3 files changed, 67 insertions(+), 77 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 2ff97f12c160..9e1286346d42 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -54,28 +54,25 @@ static int nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn, struct bpf_prog *prog) { - struct tc_cls_bpf_offload cmd = { - .prog = prog, - }; + bool running, xdp_running; int ret; if (!nfp_net_ebpf_capable(nn)) return -EINVAL; - if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) { - if (!nn->dp.bpf_offload_xdp) - return prog ? -EBUSY : 0; - cmd.command = prog ? TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY; - } else { - if (!prog) - return 0; - cmd.command = TC_CLSBPF_ADD; - } + running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF; + xdp_running = running && nn->dp.bpf_offload_xdp; + + if (!prog && !xdp_running) + return 0; + if (prog && running && !xdp_running) + return -EBUSY; - ret = nfp_net_bpf_offload(nn, &cmd); + ret = nfp_net_bpf_offload(nn, prog, running, true); /* Stop offload if replace not possible */ - if (ret && cmd.command == TC_CLSBPF_REPLACE) + if (ret && prog) nfp_bpf_xdp_offload(app, nn, NULL); + nn->dp.bpf_offload_xdp = prog && !ret; return ret; } @@ -96,27 +93,33 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type, { struct tc_cls_bpf_offload *cls_bpf = type_data; struct nfp_net *nn = cb_priv; + bool skip_sw; + + if (type != TC_SETUP_CLSBPF || + !tc_can_offload(nn->dp.netdev) || + !nfp_net_ebpf_capable(nn) || + cls_bpf->common.protocol != htons(ETH_P_ALL) || + cls_bpf->common.chain_index) + return -EOPNOTSUPP; + if (nn->dp.bpf_offload_xdp) + return -EBUSY; - if (!tc_can_offload(nn->dp.netdev)) + /* Only support TC direct action */ + if (!cls_bpf->exts_integrated || + tcf_exts_has_actions(cls_bpf->exts)) { + nn_err(nn, "only direct action with no legacy actions supported\n"); return -EOPNOTSUPP; + } - switch (type) { - case TC_SETUP_CLSBPF: - if (!nfp_net_ebpf_capable(nn) || - cls_bpf->common.protocol != htons(ETH_P_ALL) || - cls_bpf->common.chain_index) - return -EOPNOTSUPP; - if (nn->dp.bpf_offload_xdp) - return -EBUSY; - - /* Only support TC direct action */ - if (!cls_bpf->exts_integrated || - tcf_exts_has_actions(cls_bpf->exts)) { - nn_err(nn, "only direct action with no legacy actions supported\n"); - return -EOPNOTSUPP; - } - - return nfp_net_bpf_offload(nn, cls_bpf); + skip_sw = !!(cls_bpf->gen_flags & TCA_CLS_FLAGS_SKIP_SW); + + switch (cls_bpf->command) { + case TC_CLSBPF_REPLACE: + return nfp_net_bpf_offload(nn, cls_bpf->prog, true, !skip_sw); + case TC_CLSBPF_ADD: + return nfp_net_bpf_offload(nn, cls_bpf->prog, false, !skip_sw); + case TC_CLSBPF_DESTROY: + return nfp_net_bpf_offload(nn, NULL, true, !skip_sw); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 9f0df6a9786d..6dddab95d57a 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -181,8 +181,8 @@ nfp_bpf_jit(struct bpf_prog *filter, void *prog, int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog); struct nfp_net; -struct tc_cls_bpf_offload; -int nfp_net_bpf_offload(struct 
nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf); +int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, + bool old_prog, bool sw_fallback); #endif diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 268ba1ba82db..c09efa1a9649 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -52,8 +52,7 @@ #include "../nfp_net.h" static int -nfp_net_bpf_offload_prepare(struct nfp_net *nn, - struct tc_cls_bpf_offload *cls_bpf, +nfp_net_bpf_offload_prepare(struct nfp_net *nn, struct bpf_prog *prog, struct nfp_bpf_result *res, void **code, dma_addr_t *dma_addr, u16 max_instr) { @@ -73,9 +72,9 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn, done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE); stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64; - if (cls_bpf->prog->aux->stack_depth > stack_size) { + if (prog->aux->stack_depth > stack_size) { nn_info(nn, "stack too large: program %dB > FW stack %dB\n", - cls_bpf->prog->aux->stack_depth, stack_size); + prog->aux->stack_depth, stack_size); return -EOPNOTSUPP; } @@ -83,8 +82,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn, if (!*code) return -ENOMEM; - ret = nfp_bpf_jit(cls_bpf->prog, *code, start_off, done_off, - max_instr, res); + ret = nfp_bpf_jit(prog, *code, start_off, done_off, max_instr, res); if (ret) goto out; @@ -96,13 +94,13 @@ out: } static void -nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags, +nfp_net_bpf_load_and_start(struct nfp_net *nn, bool sw_fallback, void *code, dma_addr_t dma_addr, unsigned int code_sz, unsigned int n_instr) { int err; - nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW); + nn->dp.bpf_offload_skip_sw = !sw_fallback; nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr); nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr); @@ -134,7 +132,8 @@ static int nfp_net_bpf_stop(struct nfp_net *nn) return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN); } -int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) +int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, + bool old_prog, bool sw_fallback) { struct nfp_bpf_result res; dma_addr_t dma_addr; @@ -142,49 +141,37 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) void *code; int err; + /* There is nothing stopping us from implementing seamless + * replace but the simple method of loading I adopted in + * the firmware does not handle atomic replace (i.e. we have to + * stop the BPF offload and re-enable it). Leaking-in a few + * frames which didn't have BPF applied in the hardware should + * be fine if software fallback is available, though. + */ + if (prog && old_prog && nn->dp.bpf_offload_skip_sw) + return -EBUSY; + + /* Something else is loaded, different program type? */ + if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) + return -EBUSY; + max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN); + code = NULL; - switch (cls_bpf->command) { - case TC_CLSBPF_REPLACE: - /* There is nothing stopping us from implementing seamless - * replace but the simple method of loading I adopted in - * the firmware does not handle atomic replace (i.e. we have to - * stop the BPF offload and re-enable it). Leaking-in a few - * frames which didn't have BPF applied in the hardware should - * be fine if software fallback is available, though. 
- */ - if (nn->dp.bpf_offload_skip_sw) - return -EBUSY; - - err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code, + if (prog) { + err = nfp_net_bpf_offload_prepare(nn, prog, &res, &code, &dma_addr, max_instr); if (err) return err; + } + if (old_prog) nfp_net_bpf_stop(nn); - nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code, - dma_addr, max_instr * sizeof(u64), - res.n_instr); - return 0; - - case TC_CLSBPF_ADD: - if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) - return -EBUSY; - - err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code, - &dma_addr, max_instr); - if (err) - return err; - nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code, + if (prog) + nfp_net_bpf_load_and_start(nn, sw_fallback, code, dma_addr, max_instr * sizeof(u64), res.n_instr); - return 0; - case TC_CLSBPF_DESTROY: - return nfp_net_bpf_stop(nn); - - default: - return -EOPNOTSUPP; - } + return 0; } -- cgit v1.2.3 From e4a91cd565e2c4e299abe9eb906c506ecc01032a Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 3 Nov 2017 13:56:26 -0700 Subject: nfp: bpf: require seamless reload for program replace Firmware supports live replacement of programs for quite some time now. Remove the software-fallback related logic and depend on the FW for program replace. Seamless reload will become a requirement if maps are present, anyway. Load and start stages have to be split now, since replace only needs a load, start has already been done on add. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/main.c | 11 ++--- drivers/net/ethernet/netronome/nfp/bpf/main.h | 2 +- drivers/net/ethernet/netronome/nfp/bpf/offload.c | 62 ++++++++++++------------ drivers/net/ethernet/netronome/nfp/nfp_net.h | 2 - 4 files changed, 35 insertions(+), 42 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 9e1286346d42..7ae7528cd96b 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -68,7 +68,7 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn, if (prog && running && !xdp_running) return -EBUSY; - ret = nfp_net_bpf_offload(nn, prog, running, true); + ret = nfp_net_bpf_offload(nn, prog, running); /* Stop offload if replace not possible */ if (ret && prog) nfp_bpf_xdp_offload(app, nn, NULL); @@ -93,7 +93,6 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type, { struct tc_cls_bpf_offload *cls_bpf = type_data; struct nfp_net *nn = cb_priv; - bool skip_sw; if (type != TC_SETUP_CLSBPF || !tc_can_offload(nn->dp.netdev) || @@ -111,15 +110,13 @@ static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type, return -EOPNOTSUPP; } - skip_sw = !!(cls_bpf->gen_flags & TCA_CLS_FLAGS_SKIP_SW); - switch (cls_bpf->command) { case TC_CLSBPF_REPLACE: - return nfp_net_bpf_offload(nn, cls_bpf->prog, true, !skip_sw); + return nfp_net_bpf_offload(nn, cls_bpf->prog, true); case TC_CLSBPF_ADD: - return nfp_net_bpf_offload(nn, cls_bpf->prog, false, !skip_sw); + return nfp_net_bpf_offload(nn, cls_bpf->prog, false); case TC_CLSBPF_DESTROY: - return nfp_net_bpf_offload(nn, NULL, true, !skip_sw); + return nfp_net_bpf_offload(nn, NULL, true); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 6dddab95d57a..df56f40fea7c 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ 
b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -183,6 +183,6 @@ int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog); struct nfp_net; int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, - bool old_prog, bool sw_fallback); + bool old_prog); #endif diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index c09efa1a9649..f4b9a46c844d 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -94,14 +94,11 @@ out: } static void -nfp_net_bpf_load_and_start(struct nfp_net *nn, bool sw_fallback, - void *code, dma_addr_t dma_addr, - unsigned int code_sz, unsigned int n_instr) +nfp_net_bpf_load(struct nfp_net *nn, void *code, dma_addr_t dma_addr, + unsigned int code_sz, unsigned int n_instr) { int err; - nn->dp.bpf_offload_skip_sw = !sw_fallback; - nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr); nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr); @@ -110,14 +107,19 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, bool sw_fallback, if (err) nn_err(nn, "FW command error while loading BPF: %d\n", err); + dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr); +} + +static void nfp_net_bpf_start(struct nfp_net *nn) +{ + int err; + /* Enable passing packets through BPF function */ nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF; nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl); err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN); if (err) nn_err(nn, "FW command error while enabling BPF: %d\n", err); - - dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr); } static int nfp_net_bpf_stop(struct nfp_net *nn) @@ -127,13 +129,12 @@ static int nfp_net_bpf_stop(struct nfp_net *nn) nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF; nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl); - nn->dp.bpf_offload_skip_sw = 0; return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN); } int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, - bool old_prog, bool sw_fallback) + bool old_prog) { struct nfp_bpf_result res; dma_addr_t dma_addr; @@ -141,37 +142,34 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, void *code; int err; - /* There is nothing stopping us from implementing seamless - * replace but the simple method of loading I adopted in - * the firmware does not handle atomic replace (i.e. we have to - * stop the BPF offload and re-enable it). Leaking-in a few - * frames which didn't have BPF applied in the hardware should - * be fine if software fallback is available, though. - */ - if (prog && old_prog && nn->dp.bpf_offload_skip_sw) - return -EBUSY; + if (prog && old_prog) { + u8 cap; + + cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP); + if (!(cap & NFP_NET_BPF_CAP_RELO)) { + nn_err(nn, "FW does not support live reload\n"); + return -EBUSY; + } + } /* Something else is loaded, different program type? 
*/ if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) return -EBUSY; - max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN); - code = NULL; + if (old_prog && !prog) + return nfp_net_bpf_stop(nn); - if (prog) { - err = nfp_net_bpf_offload_prepare(nn, prog, &res, &code, - &dma_addr, max_instr); - if (err) - return err; - } + max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN); - if (old_prog) - nfp_net_bpf_stop(nn); + err = nfp_net_bpf_offload_prepare(nn, prog, &res, &code, &dma_addr, + max_instr); + if (err) + return err; - if (prog) - nfp_net_bpf_load_and_start(nn, sw_fallback, code, - dma_addr, max_instr * sizeof(u64), - res.n_instr); + nfp_net_bpf_load(nn, code, dma_addr, max_instr * sizeof(u64), + res.n_instr); + if (!old_prog) + nfp_net_bpf_start(nn); return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 3d411f0d15b6..7f9857c276b1 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -476,7 +476,6 @@ struct nfp_stat_pair { * @dev: Backpointer to struct device * @netdev: Backpointer to net_device structure * @is_vf: Is the driver attached to a VF? - * @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf * @bpf_offload_xdp: Offloaded BPF program is XDP * @chained_metadata_format: Firemware will use new metadata format * @rx_dma_dir: Mapping direction for RX buffers @@ -502,7 +501,6 @@ struct nfp_net_dp { struct net_device *netdev; u8 is_vf:1; - u8 bpf_offload_skip_sw:1; u8 bpf_offload_xdp:1; u8 chained_metadata_format:1; -- cgit v1.2.3 From c1c88eae8a8155c55dbbc7363f1f127c43e1b5d1 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 3 Nov 2017 13:56:27 -0700 Subject: nfp: bpf: move program prepare and free into offload.c Most of offload/translation prepare logic will be moved to offload.c. To help git generate more reasonable diffs move nfp_prog_prepare() and nfp_prog_free() functions there as a first step. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 33 ------------------------ drivers/net/ethernet/netronome/nfp/bpf/main.h | 5 ++++ drivers/net/ethernet/netronome/nfp/bpf/offload.c | 33 ++++++++++++++++++++++++ 3 files changed, 38 insertions(+), 33 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index ff150c27f411..2eddbb45fd60 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -77,17 +77,6 @@ nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return meta->l.prev != &nfp_prog->insns; } -static void nfp_prog_free(struct nfp_prog *nfp_prog) -{ - struct nfp_insn_meta *meta, *tmp; - - list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) { - list_del(&meta->l); - kfree(meta); - } - kfree(nfp_prog); -} - static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn) { if (nfp_prog->__prog_alloc_len == nfp_prog->prog_len) { @@ -2127,28 +2116,6 @@ static int nfp_translate(struct nfp_prog *nfp_prog) return nfp_fixup_branches(nfp_prog); } -static int -nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog, - unsigned int cnt) -{ - unsigned int i; - - for (i = 0; i < cnt; i++) { - struct nfp_insn_meta *meta; - - meta = kzalloc(sizeof(*meta), GFP_KERNEL); - if (!meta) - return -ENOMEM; - - meta->insn = prog[i]; - meta->n = i; - - list_add_tail(&meta->l, &nfp_prog->insns); - } - - return 0; -} - /* --- Optimizations --- */ static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog) { diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index df56f40fea7c..b77231a134b9 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -173,6 +173,11 @@ struct nfp_bpf_result { unsigned int n_instr; }; +int +nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog, + unsigned int cnt); +void nfp_prog_free(struct nfp_prog *nfp_prog); + int nfp_bpf_jit(struct bpf_prog *filter, void *prog, unsigned int prog_start, unsigned int prog_done, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index f4b9a46c844d..3eeee200051e 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -51,6 +51,39 @@ #include "../nfp_net_ctrl.h" #include "../nfp_net.h" +int +nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog, + unsigned int cnt) +{ + unsigned int i; + + for (i = 0; i < cnt; i++) { + struct nfp_insn_meta *meta; + + meta = kzalloc(sizeof(*meta), GFP_KERNEL); + if (!meta) + return -ENOMEM; + + meta->insn = prog[i]; + meta->n = i; + + list_add_tail(&meta->l, &nfp_prog->insns); + } + + return 0; +} + +void nfp_prog_free(struct nfp_prog *nfp_prog) +{ + struct nfp_insn_meta *meta, *tmp; + + list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) { + list_del(&meta->l); + kfree(meta); + } + kfree(nfp_prog); +} + static int nfp_net_bpf_offload_prepare(struct nfp_net *nn, struct bpf_prog *prog, struct nfp_bpf_result *res, -- cgit v1.2.3 From 9314c442d7ddf749d29c09ab48ffa5333d2bf48e Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 3 Nov 2017 13:56:28 -0700 Subject: nfp: bpf: move translation prepare to offload.c struct nfp_prog is currently only used internally by the translator. 
This means there is a lot of parameter passing going on, between the translator and different stages of offload. Simplify things by allocating nfp_prog in offload.c already. We will now use kmalloc() to allocate the program area and only DMA map it for the time of loading (instead of allocating DMA coherent memory upfront). Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 43 ++------ drivers/net/ethernet/netronome/nfp/bpf/main.h | 14 +-- drivers/net/ethernet/netronome/nfp/bpf/offload.c | 128 +++++++++++++++-------- 3 files changed, 94 insertions(+), 91 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 2eddbb45fd60..eae7a137a7a8 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -2245,58 +2245,27 @@ static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore) /** * nfp_bpf_jit() - translate BPF code into NFP assembly + * @nfp_prog: nfp_prog prepared based on @filter * @filter: kernel BPF filter struct - * @prog_mem: memory to store assembler instructions - * @prog_start: offset of the first instruction when loaded - * @prog_done: where to jump on exit - * @prog_sz: size of @prog_mem in instructions - * @res: achieved parameters of translation results */ -int -nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem, - unsigned int prog_start, unsigned int prog_done, - unsigned int prog_sz, struct nfp_bpf_result *res) +int nfp_bpf_jit(struct nfp_prog *nfp_prog, struct bpf_prog *filter) { - struct nfp_prog *nfp_prog; int ret; - nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL); - if (!nfp_prog) - return -ENOMEM; - - INIT_LIST_HEAD(&nfp_prog->insns); - nfp_prog->type = filter->type; - nfp_prog->start_off = prog_start; - nfp_prog->tgt_done = prog_done; - - ret = nfp_prog_prepare(nfp_prog, filter->insnsi, filter->len); - if (ret) - goto out; - ret = nfp_prog_verify(nfp_prog, filter); if (ret) - goto out; + return ret; ret = nfp_bpf_optimize(nfp_prog); if (ret) - goto out; - - nfp_prog->prog = prog_mem; - nfp_prog->__prog_alloc_len = prog_sz; + return ret; ret = nfp_translate(nfp_prog); if (ret) { pr_err("Translation failed with error %d (translated: %u)\n", ret, nfp_prog->n_translated); - ret = -EINVAL; - goto out; + return -EINVAL; } - ret = nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)prog_mem); - - res->n_instr = nfp_prog->prog_len; -out: - nfp_prog_free(nfp_prog); - - return ret; + return nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)nfp_prog->prog); } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index b77231a134b9..36b4eda2d3f8 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -169,19 +169,7 @@ struct nfp_prog { struct list_head insns; }; -struct nfp_bpf_result { - unsigned int n_instr; -}; - -int -nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog, - unsigned int cnt); -void nfp_prog_free(struct nfp_prog *nfp_prog); - -int -nfp_bpf_jit(struct bpf_prog *filter, void *prog, - unsigned int prog_start, unsigned int prog_done, - unsigned int prog_sz, struct nfp_bpf_result *res); +int nfp_bpf_jit(struct nfp_prog *nfp_prog, struct bpf_prog *filter); int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c 
b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 3eeee200051e..c5546c0e87d8 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -51,7 +51,7 @@ #include "../nfp_net_ctrl.h" #include "../nfp_net.h" -int +static int nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog, unsigned int cnt) { @@ -73,7 +73,7 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog, return 0; } -void nfp_prog_free(struct nfp_prog *nfp_prog) +static void nfp_prog_free(struct nfp_prog *nfp_prog) { struct nfp_insn_meta *meta, *tmp; @@ -84,25 +84,36 @@ void nfp_prog_free(struct nfp_prog *nfp_prog) kfree(nfp_prog); } -static int -nfp_net_bpf_offload_prepare(struct nfp_net *nn, struct bpf_prog *prog, - struct nfp_bpf_result *res, - void **code, dma_addr_t *dma_addr, u16 max_instr) +static struct nfp_prog *nfp_bpf_verifier_prep(struct bpf_prog *prog) { - unsigned int code_sz = max_instr * sizeof(u64); - unsigned int stack_size; - u16 start_off, done_off; - unsigned int max_mtu; + struct nfp_prog *nfp_prog; int ret; - max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32; - if (max_mtu < nn->dp.netdev->mtu) { - nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n"); - return -EOPNOTSUPP; - } + nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL); + if (!nfp_prog) + return NULL; + + INIT_LIST_HEAD(&nfp_prog->insns); + nfp_prog->type = prog->type; - start_off = nn_readw(nn, NFP_NET_CFG_BPF_START); - done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE); + ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len); + if (ret) + goto err_free; + + return nfp_prog; + +err_free: + nfp_prog_free(nfp_prog); + + return NULL; +} + +static int +nfp_bpf_translate(struct nfp_net *nn, struct nfp_prog *nfp_prog, + struct bpf_prog *prog) +{ + unsigned int stack_size; + unsigned int max_instr; stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64; if (prog->aux->stack_depth > stack_size) { @@ -111,28 +122,68 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn, struct bpf_prog *prog, return -EOPNOTSUPP; } - *code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL); - if (!*code) + nfp_prog->stack_depth = prog->aux->stack_depth; + nfp_prog->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START); + nfp_prog->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE); + + max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN); + nfp_prog->__prog_alloc_len = max_instr * sizeof(u64); + + nfp_prog->prog = kmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL); + if (!nfp_prog->prog) return -ENOMEM; - ret = nfp_bpf_jit(prog, *code, start_off, done_off, max_instr, res); - if (ret) - goto out; + return nfp_bpf_jit(nfp_prog, prog); +} + +static void nfp_bpf_destroy(struct nfp_prog *nfp_prog) +{ + kfree(nfp_prog->prog); + nfp_prog_free(nfp_prog); +} + +static struct nfp_prog * +nfp_net_bpf_offload_prepare(struct nfp_net *nn, struct bpf_prog *prog, + dma_addr_t *dma_addr) +{ + struct nfp_prog *nfp_prog; + unsigned int max_mtu; + int err; + + max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32; + if (max_mtu < nn->dp.netdev->mtu) { + nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n"); + return NULL; + } + + nfp_prog = nfp_bpf_verifier_prep(prog); + if (!nfp_prog) + return NULL; + + err = nfp_bpf_translate(nn, nfp_prog, prog); + if (err) + goto err_destroy_prog; + + *dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog, + nfp_prog->prog_len * sizeof(u64), + DMA_TO_DEVICE); + if 
(dma_mapping_error(nn->dp.dev, *dma_addr)) + goto err_destroy_prog; return 0; -out: - dma_free_coherent(nn->dp.dev, code_sz, *code, *dma_addr); - return ret; +err_destroy_prog: + nfp_bpf_destroy(nfp_prog); + return NULL; } static void -nfp_net_bpf_load(struct nfp_net *nn, void *code, dma_addr_t dma_addr, - unsigned int code_sz, unsigned int n_instr) +nfp_net_bpf_load(struct nfp_net *nn, struct nfp_prog *nfp_prog, + dma_addr_t dma_addr) { int err; - nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr); + nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len); nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr); /* Load up the JITed code */ @@ -140,7 +191,9 @@ nfp_net_bpf_load(struct nfp_net *nn, void *code, dma_addr_t dma_addr, if (err) nn_err(nn, "FW command error while loading BPF: %d\n", err); - dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr); + dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64), + DMA_TO_DEVICE); + nfp_bpf_destroy(nfp_prog); } static void nfp_net_bpf_start(struct nfp_net *nn) @@ -169,11 +222,8 @@ static int nfp_net_bpf_stop(struct nfp_net *nn) int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, bool old_prog) { - struct nfp_bpf_result res; + struct nfp_prog *nfp_prog; dma_addr_t dma_addr; - u16 max_instr; - void *code; - int err; if (prog && old_prog) { u8 cap; @@ -192,15 +242,11 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, if (old_prog && !prog) return nfp_net_bpf_stop(nn); - max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN); - - err = nfp_net_bpf_offload_prepare(nn, prog, &res, &code, &dma_addr, - max_instr); - if (err) - return err; + nfp_prog = nfp_net_bpf_offload_prepare(nn, prog, &dma_addr); + if (!nfp_prog) + return -EINVAL; - nfp_net_bpf_load(nn, code, dma_addr, max_instr * sizeof(u64), - res.n_instr); + nfp_net_bpf_load(nn, nfp_prog, dma_addr); if (!old_prog) nfp_net_bpf_start(nn); -- cgit v1.2.3 From c6c580d7bc390f864488c66153a487057e76d9d8 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 3 Nov 2017 13:56:29 -0700 Subject: nfp: bpf: move to new BPF program offload infrastructure Following steps are taken in the driver to offload an XDP program: XDP_SETUP_PROG: * prepare: - allocate program state; - run verifier (bpf_analyzer()); - run translation; * load: - stop old program if needed; - load program; - enable BPF if not enabled; * clean up: - free program image. With new infrastructure the flow will look like this: BPF_OFFLOAD_VERIFIER_PREP: - allocate program state; BPF_OFFLOAD_TRANSLATE: - run translation; XDP_SETUP_PROG: - stop old program if needed; - load program; - enable BPF if not enabled; BPF_OFFLOAD_DESTROY: - free program image. Take advantage of the new infrastructure. Allocation of driver metadata has to be moved from jit.c to offload.c since it's now done at a different stage. Since there is no separate driver private data for verification step, move temporary nfp_meta pointer into nfp_prog. We will now use user space context offsets. Signed-off-by: Jakub Kicinski Reviewed-by: Quentin Monnet Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 35 ++++----- drivers/net/ethernet/netronome/nfp/bpf/main.c | 4 + drivers/net/ethernet/netronome/nfp/bpf/main.h | 15 +++- drivers/net/ethernet/netronome/nfp/bpf/offload.c | 85 ++++++++++------------ drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 43 ++--------- drivers/net/ethernet/netronome/nfp/nfp_app.h | 37 ++++++++++ .../net/ethernet/netronome/nfp/nfp_net_common.c | 8 ++ 7 files changed, 121 insertions(+), 106 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index eae7a137a7a8..995e95410b11 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1427,19 +1427,18 @@ static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, swreg dst = reg_both(meta->insn.dst_reg * 2); switch (meta->insn.off) { - case offsetof(struct sk_buff, len): - if (size != FIELD_SIZEOF(struct sk_buff, len)) + case offsetof(struct __sk_buff, len): + if (size != FIELD_SIZEOF(struct __sk_buff, len)) return -EOPNOTSUPP; wrp_mov(nfp_prog, dst, plen_reg(nfp_prog)); break; - case offsetof(struct sk_buff, data): - if (size != sizeof(void *)) + case offsetof(struct __sk_buff, data): + if (size != FIELD_SIZEOF(struct __sk_buff, data)) return -EOPNOTSUPP; wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); break; - case offsetof(struct sk_buff, cb) + - offsetof(struct bpf_skb_data_end, data_end): - if (size != sizeof(void *)) + case offsetof(struct __sk_buff, data_end): + if (size != FIELD_SIZEOF(struct __sk_buff, data_end)) return -EOPNOTSUPP; emit_alu(nfp_prog, dst, plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); @@ -1458,14 +1457,15 @@ static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, { swreg dst = reg_both(meta->insn.dst_reg * 2); - if (size != sizeof(void *)) - return -EINVAL; - switch (meta->insn.off) { - case offsetof(struct xdp_buff, data): + case offsetof(struct xdp_md, data): + if (size != FIELD_SIZEOF(struct xdp_md, data)) + return -EOPNOTSUPP; wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); break; - case offsetof(struct xdp_buff, data_end): + case offsetof(struct xdp_md, data_end): + if (size != FIELD_SIZEOF(struct xdp_md, data_end)) + return -EOPNOTSUPP; emit_alu(nfp_prog, dst, plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); break; @@ -2243,19 +2243,10 @@ static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore) return 0; } -/** - * nfp_bpf_jit() - translate BPF code into NFP assembly - * @nfp_prog: nfp_prog prepared based on @filter - * @filter: kernel BPF filter struct - */ -int nfp_bpf_jit(struct nfp_prog *nfp_prog, struct bpf_prog *filter) +int nfp_bpf_jit(struct nfp_prog *nfp_prog) { int ret; - ret = nfp_prog_verify(nfp_prog, filter); - if (ret) - return ret; - ret = nfp_bpf_optimize(nfp_prog); if (ret) return ret; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 7ae7528cd96b..e379b78e86ef 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -173,4 +173,8 @@ const struct nfp_app_type app_bpf = { .setup_tc = nfp_bpf_setup_tc, .tc_busy = nfp_bpf_tc_busy, .xdp_offload = nfp_bpf_xdp_offload, + + .bpf_verifier_prep = nfp_bpf_verifier_prep, + .bpf_translate = nfp_bpf_translate, + .bpf_destroy = nfp_bpf_destroy, }; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h 
b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 36b4eda2d3f8..082a15f6dfb5 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -139,6 +139,7 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta) * @prog: machine code * @prog_len: number of valid instructions in @prog array * @__prog_alloc_len: alloc size of @prog array + * @verifier_meta: temporary storage for verifier's insn meta * @type: BPF program type * @start_off: address of the first instruction in the memory * @tgt_out: jump target for normal exit @@ -154,6 +155,8 @@ struct nfp_prog { unsigned int prog_len; unsigned int __prog_alloc_len; + struct nfp_insn_meta *verifier_meta; + enum bpf_prog_type type; unsigned int start_off; @@ -169,13 +172,21 @@ struct nfp_prog { struct list_head insns; }; -int nfp_bpf_jit(struct nfp_prog *nfp_prog, struct bpf_prog *filter); +int nfp_bpf_jit(struct nfp_prog *prog); -int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog); +extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops; +struct netdev_bpf; +struct nfp_app; struct nfp_net; int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, bool old_prog); +int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn, + struct netdev_bpf *bpf); +int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn, + struct bpf_prog *prog); +int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn, + struct bpf_prog *prog); #endif diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index c5546c0e87d8..b6cee71f49d3 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -84,14 +84,17 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog) kfree(nfp_prog); } -static struct nfp_prog *nfp_bpf_verifier_prep(struct bpf_prog *prog) +int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn, + struct netdev_bpf *bpf) { + struct bpf_prog *prog = bpf->verifier.prog; struct nfp_prog *nfp_prog; int ret; nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL); if (!nfp_prog) - return NULL; + return -ENOMEM; + prog->aux->offload->dev_priv = nfp_prog; INIT_LIST_HEAD(&nfp_prog->insns); nfp_prog->type = prog->type; @@ -100,18 +103,21 @@ static struct nfp_prog *nfp_bpf_verifier_prep(struct bpf_prog *prog) if (ret) goto err_free; - return nfp_prog; + nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog); + bpf->verifier.ops = &nfp_bpf_analyzer_ops; + + return 0; err_free: nfp_prog_free(nfp_prog); - return NULL; + return ret; } -static int -nfp_bpf_translate(struct nfp_net *nn, struct nfp_prog *nfp_prog, - struct bpf_prog *prog) +int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn, + struct bpf_prog *prog) { + struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; unsigned int stack_size; unsigned int max_instr; @@ -133,55 +139,38 @@ nfp_bpf_translate(struct nfp_net *nn, struct nfp_prog *nfp_prog, if (!nfp_prog->prog) return -ENOMEM; - return nfp_bpf_jit(nfp_prog, prog); + return nfp_bpf_jit(nfp_prog); } -static void nfp_bpf_destroy(struct nfp_prog *nfp_prog) +int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn, + struct bpf_prog *prog) { + struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; + kfree(nfp_prog->prog); nfp_prog_free(nfp_prog); + + return 0; } -static struct nfp_prog * -nfp_net_bpf_offload_prepare(struct nfp_net *nn, struct bpf_prog *prog, - dma_addr_t *dma_addr) +static int nfp_net_bpf_load(struct 
nfp_net *nn, struct bpf_prog *prog) { - struct nfp_prog *nfp_prog; + struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; unsigned int max_mtu; + dma_addr_t dma_addr; int err; max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32; if (max_mtu < nn->dp.netdev->mtu) { nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n"); - return NULL; + return -EOPNOTSUPP; } - nfp_prog = nfp_bpf_verifier_prep(prog); - if (!nfp_prog) - return NULL; - - err = nfp_bpf_translate(nn, nfp_prog, prog); - if (err) - goto err_destroy_prog; - - *dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog, - nfp_prog->prog_len * sizeof(u64), - DMA_TO_DEVICE); - if (dma_mapping_error(nn->dp.dev, *dma_addr)) - goto err_destroy_prog; - - return 0; - -err_destroy_prog: - nfp_bpf_destroy(nfp_prog); - return NULL; -} - -static void -nfp_net_bpf_load(struct nfp_net *nn, struct nfp_prog *nfp_prog, - dma_addr_t dma_addr) -{ - int err; + dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog, + nfp_prog->prog_len * sizeof(u64), + DMA_TO_DEVICE); + if (dma_mapping_error(nn->dp.dev, dma_addr)) + return -ENOMEM; nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len); nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr); @@ -193,7 +182,8 @@ nfp_net_bpf_load(struct nfp_net *nn, struct nfp_prog *nfp_prog, dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64), DMA_TO_DEVICE); - nfp_bpf_destroy(nfp_prog); + + return err; } static void nfp_net_bpf_start(struct nfp_net *nn) @@ -222,8 +212,10 @@ static int nfp_net_bpf_stop(struct nfp_net *nn) int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, bool old_prog) { - struct nfp_prog *nfp_prog; - dma_addr_t dma_addr; + int err; + + if (prog && !prog->aux->offload) + return -EINVAL; if (prog && old_prog) { u8 cap; @@ -242,11 +234,10 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, if (old_prog && !prog) return nfp_net_bpf_stop(nn); - nfp_prog = nfp_net_bpf_offload_prepare(nn, prog, &dma_addr); - if (!nfp_prog) - return -EINVAL; + err = nfp_net_bpf_load(nn, prog); + if (err) + return err; - nfp_net_bpf_load(nn, nfp_prog, dma_addr); if (!old_prog) nfp_net_bpf_start(nn); diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 4f31bdefd331..8d43491ddd6b 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -40,12 +40,6 @@ #include "main.h" -/* Analyzer/verifier definitions */ -struct nfp_bpf_analyzer_priv { - struct nfp_prog *prog; - struct nfp_insn_meta *meta; -}; - static struct nfp_insn_meta * nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, unsigned int insn_idx, unsigned int n_insns) @@ -171,11 +165,11 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, static int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) { - struct nfp_bpf_analyzer_priv *priv = env->analyzer_priv; - struct nfp_insn_meta *meta = priv->meta; + struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv; + struct nfp_insn_meta *meta = nfp_prog->verifier_meta; - meta = nfp_bpf_goto_meta(priv->prog, meta, insn_idx, env->prog->len); - priv->meta = meta; + meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len); + nfp_prog->verifier_meta = meta; if (meta->insn.src_reg >= MAX_BPF_REG || meta->insn.dst_reg >= MAX_BPF_REG) { @@ -184,39 +178,18 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) } 
if (meta->insn.code == (BPF_JMP | BPF_EXIT)) - return nfp_bpf_check_exit(priv->prog, env); + return nfp_bpf_check_exit(nfp_prog, env); if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM)) - return nfp_bpf_check_ptr(priv->prog, meta, env, + return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.src_reg); if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM)) - return nfp_bpf_check_ptr(priv->prog, meta, env, + return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg); return 0; } -static const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = { +const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = { .insn_hook = nfp_verify_insn, }; - -int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog) -{ - struct nfp_bpf_analyzer_priv *priv; - int ret; - - nfp_prog->stack_depth = prog->aux->stack_depth; - - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - - priv->prog = nfp_prog; - priv->meta = nfp_prog_first_meta(nfp_prog); - - ret = bpf_analyzer(prog, &nfp_bpf_analyzer_ops, priv); - - kfree(priv); - - return ret; -} diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index 857bb33020ba..54b67c9b8d5b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -42,6 +42,7 @@ struct bpf_prog; struct net_device; +struct netdev_bpf; struct pci_dev; struct sk_buff; struct sk_buff; @@ -83,6 +84,9 @@ extern const struct nfp_app_type app_flower; * @setup_tc: setup TC ndo * @tc_busy: TC HW offload busy (rules loaded) * @xdp_offload: offload an XDP program + * @bpf_verifier_prep: verifier prep for dev-specific BPF programs + * @bpf_translate: translate call for dev-specific BPF programs + * @bpf_destroy: destroy for dev-specific BPF programs * @eswitch_mode_get: get SR-IOV eswitch mode * @sriov_enable: app-specific sriov initialisation * @sriov_disable: app-specific sriov clean-up @@ -118,6 +122,12 @@ struct nfp_app_type { bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn); int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn, struct bpf_prog *prog); + int (*bpf_verifier_prep)(struct nfp_app *app, struct nfp_net *nn, + struct netdev_bpf *bpf); + int (*bpf_translate)(struct nfp_app *app, struct nfp_net *nn, + struct bpf_prog *prog); + int (*bpf_destroy)(struct nfp_app *app, struct nfp_net *nn, + struct bpf_prog *prog); int (*sriov_enable)(struct nfp_app *app, int num_vfs); void (*sriov_disable)(struct nfp_app *app); @@ -271,6 +281,33 @@ static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn, return app->type->xdp_offload(app, nn, prog); } +static inline int +nfp_app_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn, + struct netdev_bpf *bpf) +{ + if (!app || !app->type->bpf_verifier_prep) + return -EOPNOTSUPP; + return app->type->bpf_verifier_prep(app, nn, bpf); +} + +static inline int +nfp_app_bpf_translate(struct nfp_app *app, struct nfp_net *nn, + struct bpf_prog *prog) +{ + if (!app || !app->type->bpf_translate) + return -EOPNOTSUPP; + return app->type->bpf_translate(app, nn, prog); +} + +static inline int +nfp_app_bpf_destroy(struct nfp_app *app, struct nfp_net *nn, + struct bpf_prog *prog) +{ + if (!app || !app->type->bpf_destroy) + return -EOPNOTSUPP; + return app->type->bpf_destroy(app, nn, prog); +} + static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb) { trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0, diff --git 
a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index f6c6ad4e8a59..232044b1b7aa 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -3393,6 +3393,14 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp) xdp->prog_attached = XDP_ATTACHED_HW; xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0; return 0; + case BPF_OFFLOAD_VERIFIER_PREP: + return nfp_app_bpf_verifier_prep(nn->app, nn, xdp); + case BPF_OFFLOAD_TRANSLATE: + return nfp_app_bpf_translate(nn->app, nn, + xdp->offload.prog); + case BPF_OFFLOAD_DESTROY: + return nfp_app_bpf_destroy(nn->app, nn, + xdp->offload.prog); default: return -EINVAL; } -- cgit v1.2.3 From 952484610cc2f67303be4feedb0e52a519c31470 Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Fri, 3 Nov 2017 14:32:33 -0700 Subject: liquidio: do not consider packets dropped by network stack as driver Rx dropped netdev->rx_dropped was including packets dropped by napi_gro_receive. If a packet is dropped by network stack, it should not be counted under driver Rx dropped. Made necessary changes to not include network stack drops under netdev->rx_dropped. Signed-off-by: Intiyaz Basha Signed-off-by: Satanand Burla Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_core.c | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 89b7820d59ce..32ae63b6f20e 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -467,7 +467,6 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), if (netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; - int packet_was_received; /* Do not proceed if the interface is not in RUNNING state. */ if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) { @@ -570,18 +569,10 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag); } - packet_was_received = (napi_gro_receive(napi, skb) != GRO_DROP); - - if (packet_was_received) { - droq->stats.rx_bytes_received += len; - droq->stats.rx_pkts_received++; - } else { - droq->stats.rx_dropped++; - netif_info(lio, rx_err, lio->netdev, - "droq:%d error rx_dropped:%llu\n", - droq->q_no, droq->stats.rx_dropped); - } + napi_gro_receive(napi, skb); + droq->stats.rx_bytes_received += len; + droq->stats.rx_pkts_received++; } else { recv_buffer_free(skb); } -- cgit v1.2.3 From 4e59532541c865c85c92d42be4edf2ba6aa4af64 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sat, 4 Nov 2017 16:48:54 +0100 Subject: nfp: don't depend on compiler constant propagation Matthias reports: nfp_eth_set_bit_config() is marked as __always_inline to allow gcc to identify the 'mask' parameter as known to be constant at compile time, which is required to use the FIELD_GET() macro. 
The forced inlining does the trick for gcc, but for kernel builds with clang it results in undefined symbols: drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.o: In function `__nfp_eth_set_aneg': drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c:(.text+0x787): undefined reference to `__compiletime_assert_492' drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c:(.text+0x7b1): undefined reference to `__compiletime_assert_496' These __compiletime_assert_xyx() calls would have been optimized away if the compiler had seen 'mask' as a constant. Add a macro to extract the mask and shift and pass those to nfp_eth_set_bit_config() separately. Reported-by: Matthias Kaehlcke Signed-off-by: Jakub Kicinski Tested-by: Dirk van der Merwe Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- .../ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c | 23 ++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c index f6f7c085f8e0..47251396fcae 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c @@ -469,10 +469,10 @@ int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, bool configed) return nfp_eth_config_commit_end(nsp); } -/* Force inline, FIELD_* macroes require masks to be compilation-time known */ -static __always_inline int +static int nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx, - const u64 mask, unsigned int val, const u64 ctrl_bit) + const u64 mask, const unsigned int shift, + unsigned int val, const u64 ctrl_bit) { union eth_table_entry *entries = nfp_nsp_config_entries(nsp); unsigned int idx = nfp_nsp_config_idx(nsp); @@ -489,11 +489,11 @@ nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx, /* Check if we are already in requested state */ reg = le64_to_cpu(entries[idx].raw[raw_idx]); - if (val == FIELD_GET(mask, reg)) + if (val == (reg & mask) >> shift) return 0; reg &= ~mask; - reg |= FIELD_PREP(mask, val); + reg |= (val << shift) & mask; entries[idx].raw[raw_idx] = cpu_to_le64(reg); entries[idx].control |= cpu_to_le64(ctrl_bit); @@ -503,6 +503,13 @@ nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx, return 0; } +#define NFP_ETH_SET_BIT_CONFIG(nsp, raw_idx, mask, val, ctrl_bit) \ + ({ \ + __BF_FIELD_CHECK(mask, 0ULL, val, "NFP_ETH_SET_BIT_CONFIG: "); \ + nfp_eth_set_bit_config(nsp, raw_idx, mask, __bf_shf(mask), \ + val, ctrl_bit); \ + }) + /** * __nfp_eth_set_aneg() - set PHY autonegotiation control bit * @nsp: NFP NSP handle returned from nfp_eth_config_start() @@ -515,7 +522,7 @@ nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx, */ int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode) { - return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_STATE, + return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, NSP_ETH_STATE_ANEG, mode, NSP_ETH_CTRL_SET_ANEG); } @@ -544,7 +551,7 @@ int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed) return -EINVAL; } - return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_STATE, + return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, NSP_ETH_STATE_RATE, rate, NSP_ETH_CTRL_SET_RATE); } @@ -561,6 +568,6 @@ int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed) */ int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes) { - return nfp_eth_set_bit_config(nsp, NSP_ETH_RAW_PORT, 
NSP_ETH_PORT_LANES, + return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_PORT, NSP_ETH_PORT_LANES, lanes, NSP_ETH_CTRL_SET_LANES); } -- cgit v1.2.3 From 7717c319d8c025aba426f10f41a9d7f9ea8af192 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Sat, 4 Nov 2017 16:48:55 +0100 Subject: nfp: make use of MAC reinit Recent management FW images can perform a full reinit of the MAC cores without requiring a reboot. When loading the driver, check if there are changes pending and, if so, call NSP MAC reinit. Full application FW reload is still required, and all MACs need to be reinited at the same time (not only the ones which have been reconfigured), potentially causing disruption to unrelated netdevs; therefore, for now, changing the MAC config without reloading the driver remains future work. Signed-off-by: Jakub Kicinski Tested-by: Dirk van der Merwe Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 28 +++++++++++++++++++++- .../net/ethernet/netronome/nfp/nfp_net_ethtool.c | 2 +- drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 2 +- .../net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 5 ++++ .../net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h | 6 +++++ 5 files changed, 40 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index f8fa63b66739..35eaccbece36 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -346,6 +346,32 @@ exit_release_fw: return err < 0 ? err : 1; } +static void +nfp_nsp_init_ports(struct pci_dev *pdev, struct nfp_pf *pf, + struct nfp_nsp *nsp) +{ + bool needs_reinit = false; + int i; + + pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp); + if (!pf->eth_tbl) + return; + + if (!nfp_nsp_has_mac_reinit(nsp)) + return; + + for (i = 0; i < pf->eth_tbl->count; i++) + needs_reinit |= pf->eth_tbl->ports[i].override_changed; + if (!needs_reinit) + return; + + kfree(pf->eth_tbl); + if (nfp_nsp_mac_reinit(nsp)) + dev_warn(&pdev->dev, "MAC reinit failed\n"); + + pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp); +} + static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf) { struct nfp_nsp *nsp; @@ -366,7 +392,7 @@ static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf) if (err < 0) goto exit_close_nsp; - pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp); + nfp_nsp_init_ports(pdev, pf, nsp); pf->nspi = __nfp_nsp_identify(nsp); if (pf->nspi) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index c67b90c8d8b7..0061097c271e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -328,7 +328,7 @@ nfp_net_set_link_ksettings(struct net_device *netdev, return -EOPNOTSUPP; if (netif_running(netdev)) { - netdev_warn(netdev, "Changing settings not allowed on an active interface. It may cause the port to be disabled until reboot.\n"); + netdev_warn(netdev, "Changing settings not allowed on an active interface. 
It may cause the port to be disabled until driver reload.\n"); return -EBUSY; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index ff373acd28f3..0beb9b21557b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -597,7 +597,7 @@ nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port, return -EIO; } if (eth_port->override_changed) { - nfp_warn(cpp, "Port #%d config changed, unregistering. Reboot required before port will be operational again.\n", port->eth_id); + nfp_warn(cpp, "Port #%d config changed, unregistering. Driver reload required before port will be operational again.\n", port->eth_id); port->type = NFP_PORT_INVALID; } diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index 37364555c42b..14a6d1ba51a9 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -477,6 +477,11 @@ int nfp_nsp_device_soft_reset(struct nfp_nsp *state) return nfp_nsp_command(state, SPCODE_SOFT_RESET, 0, 0, 0); } +int nfp_nsp_mac_reinit(struct nfp_nsp *state) +{ + return nfp_nsp_command(state, SPCODE_MAC_INIT, 0, 0, 0); +} + int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw) { return nfp_nsp_command_buf(state, SPCODE_FW_LOAD, fw->size, fw->data, diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h index e2f028027c6f..47486d42f2d7 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h @@ -48,6 +48,12 @@ u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state); int nfp_nsp_wait(struct nfp_nsp *state); int nfp_nsp_device_soft_reset(struct nfp_nsp *state); int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw); +int nfp_nsp_mac_reinit(struct nfp_nsp *state); + +static inline bool nfp_nsp_has_mac_reinit(struct nfp_nsp *state) +{ + return nfp_nsp_get_abi_ver_minor(state) > 20; +} enum nfp_eth_interface { NFP_INTERFACE_NONE = 0, -- cgit v1.2.3 From 51ccc37d9d3392884024b272089fd3e864d3cf3c Mon Sep 17 00:00:00 2001 From: Dirk van der Merwe Date: Sat, 4 Nov 2017 16:48:56 +0100 Subject: nfp: refactor nfp_app_reprs_set The criterion that reprs cannot be replaced with a new set of reprs has been removed. This check is not needed since the only use case that could exercise this at the moment would be to modify the number of SRIOV VFs without first disabling them. This case is explicitly disallowed anyway, and subsequent patches in this series need to be able to replace the running set of reprs. All places where the return code of the nfp_app_reprs_set function used to be checked have been removed. As stated above, it is not possible for the current code to encounter a case where reprs exist and need to be replaced. Signed-off-by: Dirk van der Merwe Signed-off-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/flower/main.c | 16 ++++------------ drivers/net/ethernet/netronome/nfp/nfp_app.c | 6 ------ 2 files changed, 4 insertions(+), 18 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index e46e7c60d491..e0283bb24f06 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -142,8 +142,8 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, { u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp); struct nfp_flower_priv *priv = app->priv; - struct nfp_reprs *reprs, *old_reprs; enum nfp_port_type port_type; + struct nfp_reprs *reprs; const u8 queue = 0; int i, err; @@ -194,11 +194,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, reprs->reprs[i]->name); } - old_reprs = nfp_app_reprs_set(app, repr_type, reprs); - if (IS_ERR(old_reprs)) { - err = PTR_ERR(old_reprs); - goto err_reprs_clean; - } + nfp_app_reprs_set(app, repr_type, reprs); return 0; err_reprs_clean: @@ -222,8 +218,8 @@ static int nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) { struct nfp_eth_table *eth_tbl = app->pf->eth_tbl; - struct nfp_reprs *reprs, *old_reprs; struct sk_buff *ctrl_skb; + struct nfp_reprs *reprs; unsigned int i; int err; @@ -280,11 +276,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) phys_port, reprs->reprs[phys_port]->name); } - old_reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs); - if (IS_ERR(old_reprs)) { - err = PTR_ERR(old_reprs); - goto err_reprs_clean; - } + nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs); /* The MAC_REPR control message should be sent after the MAC * representors are registered using nfp_app_reprs_set(). This is diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c index 3644d74fe304..955a9f44d244 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c @@ -106,14 +106,8 @@ nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type, old = rcu_dereference_protected(app->reprs[type], lockdep_is_held(&app->pf->lock)); - if (reprs && old) { - old = ERR_PTR(-EBUSY); - goto exit_unlock; - } - rcu_assign_pointer(app->reprs[type], reprs); -exit_unlock: return old; } -- cgit v1.2.3 From 5fa27d59af2a36c32156e56b6370387f60b67052 Mon Sep 17 00:00:00 2001 From: Dirk van der Merwe Date: Sat, 4 Nov 2017 16:48:57 +0100 Subject: nfp: resync repr state when port table sync If the NSP port table has been refreshed, resync the representor state with the new port information. At the moment, this only entails looking for invalid ports and killing off representors associated with them. The repr instance becomes NULL which is safe since the app accessor function for reprs returns NULL when it cannot access a repr. Signed-off-by: Dirk van der Merwe Signed-off-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 6 +++ drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 47 +++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/nfp_net_repr.h | 1 + 3 files changed, 54 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 0beb9b21557b..c505014121c4 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -611,6 +611,7 @@ int nfp_net_refresh_port_table_sync(struct nfp_pf *pf) struct nfp_eth_table *eth_table; struct nfp_net *nn, *next; struct nfp_port *port; + int err; lockdep_assert_held(&pf->lock); @@ -640,6 +641,11 @@ int nfp_net_refresh_port_table_sync(struct nfp_pf *pf) kfree(eth_table); + /* Resync repr state. This may cause reprs to be removed. */ + err = nfp_reprs_resync_phys_ports(pf->app); + if (err) + return err; + /* Shoot off the ports which became invalid */ list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) { if (!nn->port || nn->port->type != NFP_PORT_INVALID) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index d540a9dc77b3..1bce8c131bb9 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -390,3 +390,50 @@ struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs) return reprs; } + +int nfp_reprs_resync_phys_ports(struct nfp_app *app) +{ + struct nfp_reprs *reprs, *old_reprs; + struct nfp_repr *repr; + int i; + + old_reprs = + rcu_dereference_protected(app->reprs[NFP_REPR_TYPE_PHYS_PORT], + lockdep_is_held(&app->pf->lock)); + if (!old_reprs) + return 0; + + reprs = nfp_reprs_alloc(old_reprs->num_reprs); + if (!reprs) + return -ENOMEM; + + for (i = 0; i < old_reprs->num_reprs; i++) { + if (!old_reprs->reprs[i]) + continue; + + repr = netdev_priv(old_reprs->reprs[i]); + if (repr->port->type == NFP_PORT_INVALID) + continue; + + reprs->reprs[i] = old_reprs->reprs[i]; + } + + old_reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs); + synchronize_rcu(); + + /* Now we free up removed representors */ + for (i = 0; i < old_reprs->num_reprs; i++) { + if (!old_reprs->reprs[i]) + continue; + + repr = netdev_priv(old_reprs->reprs[i]); + if (repr->port->type != NFP_PORT_INVALID) + continue; + + nfp_app_repr_stop(app, repr); + nfp_repr_clean(repr); + } + + kfree(old_reprs); + return 0; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h index 32179cad062a..5d4d897bc9c6 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h @@ -124,5 +124,6 @@ void nfp_reprs_clean_and_free_by_type(struct nfp_app *app, enum nfp_repr_type type); struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs); +int nfp_reprs_resync_phys_ports(struct nfp_app *app); #endif /* NFP_NET_REPR_H */ -- cgit v1.2.3 From a564d30ec2b859205d5fdd521df3fb6d342dc461 Mon Sep 17 00:00:00 2001 From: Dirk van der Merwe Date: Sat, 4 Nov 2017 16:48:58 +0100 Subject: nfp: add get/set link settings ndos to representors Since it is now safe to modify link settings for representors, we can attach the get/set link settings ndos to it. The get/set link settings are nfp_port based operations. If a port becomes invalid, the representor will be removed in the same way a vnic would be. 
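As a minimal sketch of the nfp_port based pattern this relies on — illustrative only: nfp_port_from_netdev() and nfp_port_get_eth_port() are the driver's real accessors (they appear in the ethtool diffs later in this series), while demo_repr_get_link_ksettings() is a hypothetical reduction, not the literal driver code:

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include "nfp_port.h"	/* nfp_port_from_netdev(), nfp_port_get_eth_port() */

/* Hedged sketch, not driver code: every nfp_port based ethtool op first
 * resolves the port behind the netdev; the lookup returns NULL for a port
 * that has gone invalid, so a stale representor simply fails the call
 * instead of touching freed state.
 */
static int demo_repr_get_link_ksettings(struct net_device *netdev,
					struct ethtool_link_ksettings *cmd)
{
	struct nfp_eth_table_port *eth_port;
	struct nfp_port *port;

	port = nfp_port_from_netdev(netdev);
	eth_port = nfp_port_get_eth_port(port);
	if (!eth_port)
		return -EOPNOTSUPP;	/* port invalid: refuse, don't crash */

	cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ?
		AUTONEG_ENABLE : AUTONEG_DISABLE;
	return 0;
}

The ops wired up in the diff below have the same shape; the failing port lookup is what makes it safe to leave the ndos attached while a representor is being torn down.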
Signed-off-by: Dirk van der Merwe Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 0061097c271e..d0028894667c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -1155,6 +1155,8 @@ const struct ethtool_ops nfp_port_ethtool_ops = { .set_dump = nfp_app_set_dump, .get_dump_flag = nfp_app_get_dump_flag, .get_dump_data = nfp_app_get_dump_data, + .get_link_ksettings = nfp_net_get_link_ksettings, + .set_link_ksettings = nfp_net_set_link_ksettings, }; void nfp_net_set_ethtool_ops(struct net_device *netdev) -- cgit v1.2.3 From b471232e2caa054e006fa4b5fd4bf15544b00b0f Mon Sep 17 00:00:00 2001 From: Dirk van der Merwe Date: Sat, 4 Nov 2017 16:48:59 +0100 Subject: nfp: add helpers for FEC support Implement helpers to determine and modify FEC modes via the NSP. The NSP advertises FEC capabilities on a per port basis and provides support for: * Auto mode selection * Reed Solomon * BaseR * None/Off Signed-off-by: Dirk van der Merwe Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- .../net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h | 30 ++++++++++ .../ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c | 64 ++++++++++++++++++++++ 2 files changed, 94 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h index 47486d42f2d7..650ca1a5bd21 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h @@ -79,6 +79,18 @@ enum nfp_eth_aneg { NFP_ANEG_DISABLED, }; +enum nfp_eth_fec { + NFP_FEC_AUTO_BIT = 0, + NFP_FEC_BASER_BIT, + NFP_FEC_REED_SOLOMON_BIT, + NFP_FEC_DISABLED_BIT, +}; + +#define NFP_FEC_AUTO BIT(NFP_FEC_AUTO_BIT) +#define NFP_FEC_BASER BIT(NFP_FEC_BASER_BIT) +#define NFP_FEC_REED_SOLOMON BIT(NFP_FEC_REED_SOLOMON_BIT) +#define NFP_FEC_DISABLED BIT(NFP_FEC_DISABLED_BIT) + /** * struct nfp_eth_table - ETH table information * @count: number of table entries @@ -93,6 +105,7 @@ enum nfp_eth_aneg { * @speed: interface speed (in Mbps) * @interface: interface (module) plugged in * @media: media type of the @interface + * @fec: forward error correction mode * @aneg: auto negotiation mode * @mac_addr: interface MAC address * @label_port: port id @@ -105,6 +118,7 @@ enum nfp_eth_aneg { * @port_type: one of %PORT_* defines for ethtool * @port_lanes: total number of lanes on the port (sum of lanes of all subports) * @is_split: is interface part of a split port + * @fec_modes_supported: bitmap of FEC modes supported */ struct nfp_eth_table { unsigned int count; @@ -120,6 +134,7 @@ struct nfp_eth_table { unsigned int interface; enum nfp_eth_media media; + enum nfp_eth_fec fec; enum nfp_eth_aneg aneg; u8 mac_addr[ETH_ALEN]; @@ -139,6 +154,8 @@ struct nfp_eth_table { unsigned int port_lanes; bool is_split; + + unsigned int fec_modes_supported; } ports[0]; }; @@ -149,6 +166,19 @@ __nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp); int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable); int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, bool configed); +int +nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode); + +static 
inline bool nfp_eth_can_support_fec(struct nfp_eth_table_port *eth_port) +{ + return !!eth_port->fec_modes_supported; +} + +static inline unsigned int +nfp_eth_supported_fec_modes(struct nfp_eth_table_port *eth_port) +{ + return eth_port->fec_modes_supported; +} struct nfp_nsp *nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx); int nfp_eth_config_commit_end(struct nfp_nsp *nsp); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c index 47251396fcae..7ca589660e4d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c @@ -55,6 +55,8 @@ #define NSP_ETH_PORT_INDEX GENMASK_ULL(15, 8) #define NSP_ETH_PORT_LABEL GENMASK_ULL(53, 48) #define NSP_ETH_PORT_PHYLABEL GENMASK_ULL(59, 54) +#define NSP_ETH_PORT_FEC_SUPP_BASER BIT_ULL(60) +#define NSP_ETH_PORT_FEC_SUPP_RS BIT_ULL(61) #define NSP_ETH_PORT_LANES_MASK cpu_to_le64(NSP_ETH_PORT_LANES) @@ -67,6 +69,7 @@ #define NSP_ETH_STATE_MEDIA GENMASK_ULL(21, 20) #define NSP_ETH_STATE_OVRD_CHNG BIT_ULL(22) #define NSP_ETH_STATE_ANEG GENMASK_ULL(25, 23) +#define NSP_ETH_STATE_FEC GENMASK_ULL(27, 26) #define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0) #define NSP_ETH_CTRL_ENABLED BIT_ULL(1) @@ -75,6 +78,7 @@ #define NSP_ETH_CTRL_SET_RATE BIT_ULL(4) #define NSP_ETH_CTRL_SET_LANES BIT_ULL(5) #define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6) +#define NSP_ETH_CTRL_SET_FEC BIT_ULL(7) enum nfp_eth_raw { NSP_ETH_RAW_PORT = 0, @@ -152,6 +156,7 @@ nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src, unsigned int index, struct nfp_eth_table_port *dst) { unsigned int rate; + unsigned int fec; u64 port, state; port = le64_to_cpu(src->port); @@ -183,6 +188,18 @@ nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src, dst->override_changed = FIELD_GET(NSP_ETH_STATE_OVRD_CHNG, state); dst->aneg = FIELD_GET(NSP_ETH_STATE_ANEG, state); + + if (nfp_nsp_get_abi_ver_minor(nsp) < 22) + return; + + fec = FIELD_GET(NSP_ETH_PORT_FEC_SUPP_BASER, port); + dst->fec_modes_supported |= fec << NFP_FEC_BASER_BIT; + fec = FIELD_GET(NSP_ETH_PORT_FEC_SUPP_RS, port); + dst->fec_modes_supported |= fec << NFP_FEC_REED_SOLOMON_BIT; + if (dst->fec_modes_supported) + dst->fec_modes_supported |= NFP_FEC_AUTO | NFP_FEC_DISABLED; + + dst->fec = 1 << FIELD_GET(NSP_ETH_STATE_FEC, state); } static void @@ -527,6 +544,53 @@ int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode) NSP_ETH_CTRL_SET_ANEG); } +/** + * __nfp_eth_set_fec() - set PHY forward error correction control bit + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * @mode: Desired fec mode + * + * Set the PHY module forward error correction mode. + * Will write to hwinfo overrides in the flash (persistent config). + * + * Return: 0 or -ERRNO. + */ +static int __nfp_eth_set_fec(struct nfp_nsp *nsp, enum nfp_eth_fec mode) +{ + return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, + NSP_ETH_STATE_FEC, mode, + NSP_ETH_CTRL_SET_FEC); +} + +/** + * nfp_eth_set_fec() - set PHY forward error correction control mode + * @cpp: NFP CPP handle + * @idx: NFP chip-wide port index + * @mode: Desired fec mode + * + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. 
+ */ +int +nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode) +{ + struct nfp_nsp *nsp; + int err; + + nsp = nfp_eth_config_start(cpp, idx); + if (IS_ERR(nsp)) + return PTR_ERR(nsp); + + err = __nfp_eth_set_fec(nsp, mode); + if (err) { + nfp_eth_config_cleanup_end(nsp); + return err; + } + + return nfp_eth_config_commit_end(nsp); +} + /** * __nfp_eth_set_speed() - set interface speed/rate * @nsp: NFP NSP handle returned from nfp_eth_config_start() -- cgit v1.2.3 From 0d08709383377087bc50825db4b47c058c7ab70a Mon Sep 17 00:00:00 2001 From: Dirk van der Merwe Date: Sat, 4 Nov 2017 16:49:00 +0100 Subject: nfp: implement ethtool FEC mode settings Add support in the driver ethtool ops to modify the NFP FEC modes. The FEC modes can be set for vNIC associated with physical ports or for MAC representor netdevs. Signed-off-by: Dirk van der Merwe Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- .../net/ethernet/netronome/nfp/nfp_net_ethtool.c | 117 ++++++++++++++++++++- 1 file changed, 116 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index d0028894667c..60c8d733a37d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -244,6 +244,30 @@ nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) nfp_get_drvinfo(app, app->pdev, "*", drvinfo); } +static void +nfp_net_set_fec_link_mode(struct nfp_eth_table_port *eth_port, + struct ethtool_link_ksettings *c) +{ + unsigned int modes; + + ethtool_link_ksettings_add_link_mode(c, supported, FEC_NONE); + if (!nfp_eth_can_support_fec(eth_port)) { + ethtool_link_ksettings_add_link_mode(c, advertising, FEC_NONE); + return; + } + + modes = nfp_eth_supported_fec_modes(eth_port); + if (modes & NFP_FEC_BASER) { + ethtool_link_ksettings_add_link_mode(c, supported, FEC_BASER); + ethtool_link_ksettings_add_link_mode(c, advertising, FEC_BASER); + } + + if (modes & NFP_FEC_REED_SOLOMON) { + ethtool_link_ksettings_add_link_mode(c, supported, FEC_RS); + ethtool_link_ksettings_add_link_mode(c, advertising, FEC_RS); + } +} + /** * nfp_net_get_link_ksettings - Get Link Speed settings * @netdev: network interface device structure @@ -278,9 +302,11 @@ nfp_net_get_link_ksettings(struct net_device *netdev, port = nfp_port_from_netdev(netdev); eth_port = nfp_port_get_eth_port(port); - if (eth_port) + if (eth_port) { cmd->base.autoneg = eth_port->aneg != NFP_ANEG_DISABLED ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; + nfp_net_set_fec_link_mode(eth_port, cmd); + } if (!netif_carrier_ok(netdev)) return 0; @@ -686,6 +712,91 @@ static int nfp_port_get_sset_count(struct net_device *netdev, int sset) } } +static int nfp_port_fec_ethtool_to_nsp(u32 fec) +{ + switch (fec) { + case ETHTOOL_FEC_AUTO: + return NFP_FEC_AUTO_BIT; + case ETHTOOL_FEC_OFF: + return NFP_FEC_DISABLED_BIT; + case ETHTOOL_FEC_RS: + return NFP_FEC_REED_SOLOMON_BIT; + case ETHTOOL_FEC_BASER: + return NFP_FEC_BASER_BIT; + default: + /* NSP only supports a single mode at a time */ + return -EOPNOTSUPP; + } +} + +static u32 nfp_port_fec_nsp_to_ethtool(u32 fec) +{ + u32 result = 0; + + if (fec & NFP_FEC_AUTO) + result |= ETHTOOL_FEC_AUTO; + if (fec & NFP_FEC_BASER) + result |= ETHTOOL_FEC_BASER; + if (fec & NFP_FEC_REED_SOLOMON) + result |= ETHTOOL_FEC_RS; + if (fec & NFP_FEC_DISABLED) + result |= ETHTOOL_FEC_OFF; + + return result ?: ETHTOOL_FEC_NONE; +} + +static int +nfp_port_get_fecparam(struct net_device *netdev, + struct ethtool_fecparam *param) +{ + struct nfp_eth_table_port *eth_port; + struct nfp_port *port; + + param->active_fec = ETHTOOL_FEC_NONE_BIT; + param->fec = ETHTOOL_FEC_NONE_BIT; + + port = nfp_port_from_netdev(netdev); + eth_port = nfp_port_get_eth_port(port); + if (!eth_port) + return -EOPNOTSUPP; + + if (!nfp_eth_can_support_fec(eth_port)) + return 0; + + param->fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec_modes_supported); + param->active_fec = nfp_port_fec_nsp_to_ethtool(eth_port->fec); + + return 0; +} + +static int +nfp_port_set_fecparam(struct net_device *netdev, + struct ethtool_fecparam *param) +{ + struct nfp_eth_table_port *eth_port; + struct nfp_port *port; + int err, fec; + + port = nfp_port_from_netdev(netdev); + eth_port = nfp_port_get_eth_port(port); + if (!eth_port) + return -EOPNOTSUPP; + + if (!nfp_eth_can_support_fec(eth_port)) + return -EOPNOTSUPP; + + fec = nfp_port_fec_ethtool_to_nsp(param->fec); + if (fec < 0) + return fec; + + err = nfp_eth_set_fec(port->app->cpp, eth_port->index, fec); + if (!err) + /* Only refresh if we did something */ + nfp_net_refresh_port_table(port); + + return err < 0 ? err : 0; +} + /* RX network flow classification (RSS, filters, etc) */ static u32 ethtool_flow_to_nfp_flag(u32 flow_type) @@ -1144,6 +1255,8 @@ static const struct ethtool_ops nfp_net_ethtool_ops = { .set_channels = nfp_net_set_channels, .get_link_ksettings = nfp_net_get_link_ksettings, .set_link_ksettings = nfp_net_set_link_ksettings, + .get_fecparam = nfp_port_get_fecparam, + .set_fecparam = nfp_port_set_fecparam, }; const struct ethtool_ops nfp_port_ethtool_ops = { @@ -1157,6 +1270,8 @@ const struct ethtool_ops nfp_port_ethtool_ops = { .get_dump_data = nfp_app_get_dump_data, .get_link_ksettings = nfp_net_get_link_ksettings, .set_link_ksettings = nfp_net_set_link_ksettings, + .get_fecparam = nfp_port_get_fecparam, + .set_fecparam = nfp_port_set_fecparam, }; void nfp_net_set_ethtool_ops(struct net_device *netdev) -- cgit v1.2.3 From 4ad1ceec05e49175d0f967cc87628101e79176f6 Mon Sep 17 00:00:00 2001 From: Troy Kisky Date: Fri, 3 Nov 2017 10:29:59 -0700 Subject: net: fec: Let fec_ptp have its own interrupt routine This is better for code locality and should slightly speed up normal interrupts. This also allows PPS clock output to start working for i.mx7. This is because i.mx7 was already using the limit of 3 interrupts, and needed another. Signed-off-by: Troy Kisky Acked-by: Fugang Duan Signed-off-by: David S. 
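For reference, the interrupt lookup order described above can be isolated into a small sketch (fec_get_pps_irq is an illustrative helper name, not part of the patch). The driver first asks for the interrupt by its binding name and only then falls back to a positional index, so device trees that predate the interrupt-names property keep working:

        static int fec_get_pps_irq(struct platform_device *pdev, int irq_idx)
        {
                int irq;

                /* prefer the named interrupt from the binding */
                irq = platform_get_irq_byname(pdev, "pps");
                if (irq < 0)
                        /* fall back to a positional index for older DTs */
                        irq = platform_get_irq(pdev, irq_idx);
                return irq;     /* may stay negative: the PPS irq is optional */
        }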
Miller --- Documentation/devicetree/bindings/net/fsl-fec.txt | 13 ++++ drivers/net/ethernet/freescale/fec.h | 3 +- drivers/net/ethernet/freescale/fec_main.c | 31 ++++++--- drivers/net/ethernet/freescale/fec_ptp.c | 82 +++++++++++++---------- 4 files changed, 84 insertions(+), 45 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt index 6f55bdd52f8a..f0dc94409107 100644 --- a/Documentation/devicetree/bindings/net/fsl-fec.txt +++ b/Documentation/devicetree/bindings/net/fsl-fec.txt @@ -34,6 +34,19 @@ Optional properties: - fsl,err006687-workaround-present: If present indicates that the system has the hardware workaround for ERR006687 applied and does not need a software workaround. + - interrupt-names: names of the interrupts listed in the interrupts property, in + the same order. The defaults if not specified are + __Number of interrupts__ __Default__ + 1 "int0" + 2 "int0", "pps" + 3 "int0", "int1", "int2" + 4 "int0", "int1", "int2", "pps" + The order may be changed as long as they correspond to the interrupts + property. Currently, only i.mx7 uses "int1" and "int2". They correspond to + tx/rx queues 1 and 2. "int0" will be used for queue 0 and ENET_MII interrupts. + For imx6sx, "int0" handles all 3 queues and ENET_MII. "pps" is for the pulse + per second interrupt associated with the 1588 precision time protocol (PTP). + Optional subnodes: - mdio : specifies the mdio bus in the FEC, used as a container for phy nodes diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 44720f83af27..5385074b3b7d 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -583,12 +583,11 @@ struct fec_enet_private { u64 ethtool_stats[0]; }; -void fec_ptp_init(struct platform_device *pdev); +void fec_ptp_init(struct platform_device *pdev, int irq_idx); void fec_ptp_stop(struct platform_device *pdev); void fec_ptp_start_cyclecounter(struct net_device *ndev); int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr); -uint fec_ptp_check_pps_event(struct fec_enet_private *fep); /****************************************************************************/ #endif /* FEC_H */ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 3dc2d771a222..610573855213 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1602,10 +1602,6 @@ fec_enet_interrupt(int irq, void *dev_id) ret = IRQ_HANDLED; complete(&fep->mdio_done); } - - if (fep->ptp_clock) - if (fec_ptp_check_pps_event(fep)) - ret = IRQ_HANDLED; return ret; } @@ -3312,6 +3308,19 @@ fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) } +static int fec_enet_get_irq_cnt(struct platform_device *pdev) +{ + int irq_cnt = platform_irq_count(pdev); + + if (irq_cnt > FEC_IRQ_NUM) + irq_cnt = FEC_IRQ_NUM; /* last for pps */ + else if (irq_cnt == 2) + irq_cnt = 1; /* last for pps */ + else if (irq_cnt <= 0) + irq_cnt = 1; /* At least 1 irq is needed */ + return irq_cnt; +} + static int fec_probe(struct platform_device *pdev) { @@ -3325,6 +3334,8 @@ fec_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node, *phy_node; int num_tx_qs; int num_rx_qs; + char irq_name[8]; + int irq_cnt; fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); @@ -3465,18 +3476,20 @@ fec_probe(struct
platform_device *pdev) if (ret) goto failed_reset; + irq_cnt = fec_enet_get_irq_cnt(pdev); if (fep->bufdesc_ex) - fec_ptp_init(pdev); + fec_ptp_init(pdev, irq_cnt); ret = fec_enet_init(ndev); if (ret) goto failed_init; - for (i = 0; i < FEC_IRQ_NUM; i++) { - irq = platform_get_irq(pdev, i); + for (i = 0; i < irq_cnt; i++) { + sprintf(irq_name, "int%d", i); + irq = platform_get_irq_byname(pdev, irq_name); + if (irq < 0) + irq = platform_get_irq(pdev, i); if (irq < 0) { - if (i) - break; ret = irq; goto failed_irq; } diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 6ebad3fac81d..f81439796ac7 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -549,6 +549,37 @@ static void fec_time_keep(struct work_struct *work) schedule_delayed_work(&fep->time_keep, HZ); } +/* This function checks the pps event and reloads the timer compare counter. */ +static irqreturn_t fec_pps_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct fec_enet_private *fep = netdev_priv(ndev); + u32 val; + u8 channel = fep->pps_channel; + struct ptp_clock_event event; + + val = readl(fep->hwp + FEC_TCSR(channel)); + if (val & FEC_T_TF_MASK) { + /* Write the next next compare(not the next according the spec) + * value to the register + */ + writel(fep->next_counter, fep->hwp + FEC_TCCR(channel)); + do { + writel(val, fep->hwp + FEC_TCSR(channel)); + } while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK); + + /* Update the counter; */ + fep->next_counter = (fep->next_counter + fep->reload_period) & + fep->cc.mask; + + event.type = PTP_CLOCK_PPS; + ptp_clock_event(fep->ptp_clock, &event); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + /** * fec_ptp_init * @ndev: The FEC network adapter @@ -558,10 +589,12 @@ static void fec_time_keep(struct work_struct *work) * cyclecounter init routine and exits. */ -void fec_ptp_init(struct platform_device *pdev) +void fec_ptp_init(struct platform_device *pdev, int irq_idx) { struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); + int irq; + int ret; fep->ptp_caps.owner = THIS_MODULE; snprintf(fep->ptp_caps.name, 16, "fec ptp"); @@ -587,6 +620,20 @@ void fec_ptp_init(struct platform_device *pdev) INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep); + irq = platform_get_irq_byname(pdev, "pps"); + if (irq < 0) + irq = platform_get_irq(pdev, irq_idx); + /* Failure to get an irq is not fatal, + * only the PTP_CLOCK_PPS clock events should stop + */ + if (irq >= 0) { + ret = devm_request_irq(&pdev->dev, irq, fec_pps_interrupt, + 0, pdev->name, ndev); + if (ret < 0) + dev_warn(&pdev->dev, "request for pps irq failed(%d)\n", + ret); + } + fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev); if (IS_ERR(fep->ptp_clock)) { fep->ptp_clock = NULL; @@ -605,36 +652,3 @@ void fec_ptp_stop(struct platform_device *pdev) if (fep->ptp_clock) ptp_clock_unregister(fep->ptp_clock); } - -/** - * fec_ptp_check_pps_event - * @fep: the fec_enet_private structure handle - * - * This function check the pps event and reload the timer compare counter. 
- */ -uint fec_ptp_check_pps_event(struct fec_enet_private *fep) -{ - u32 val; - u8 channel = fep->pps_channel; - struct ptp_clock_event event; - - val = readl(fep->hwp + FEC_TCSR(channel)); - if (val & FEC_T_TF_MASK) { - /* Write the next next compare(not the next according the spec) - * value to the register - */ - writel(fep->next_counter, fep->hwp + FEC_TCCR(channel)); - do { - writel(val, fep->hwp + FEC_TCSR(channel)); - } while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK); - - /* Update the counter; */ - fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask; - - event.type = PTP_CLOCK_PPS; - ptp_clock_event(fep->ptp_clock, &event); - return 1; - } - - return 0; -} -- cgit v1.2.3 From 575ed7d39e2fbe602a3894bc766a8cb49af83bd3 Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Mon, 6 Nov 2017 07:23:42 +0100 Subject: net_sch: mqprio: Change TC_SETUP_MQPRIO to TC_SETUP_QDISC_MQPRIO Change TC_SETUP_MQPRIO to TC_SETUP_QDISC_MQPRIO to match the new convention. Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 2 +- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 +- drivers/net/ethernet/sfc/falcon/tx.c | 2 +- drivers/net/ethernet/sfc/tx.c | 2 +- drivers/net/ethernet/ti/netcp_core.c | 2 +- include/linux/netdevice.h | 2 +- net/sched/sch_mqprio.c | 5 +++-- 15 files changed, 17 insertions(+), 16 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 3d53153ce751..a74a8fbad53a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -2206,7 +2206,7 @@ static int xgbe_setup_tc(struct net_device *netdev, enum tc_setup_type type, struct tc_mqprio_qopt *mqprio = type_data; u8 tc; - if (type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_QDISC_MQPRIO) return -EOPNOTSUPP; mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 1216c1f1e052..4c739d5355d2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4289,7 +4289,7 @@ int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type, { struct tc_mqprio_qopt *mqprio = type_data; - if (type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_QDISC_MQPRIO) return -EOPNOTSUPP; mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 96416f5d97f3..e5472e5ae7b2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7388,7 +7388,7 @@ static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, switch (type) { case TC_SETUP_BLOCK: return bnxt_setup_tc_block(dev, type_data); - case TC_SETUP_MQPRIO: { + case TC_SETUP_QDISC_MQPRIO: { struct tc_mqprio_qopt *mqprio = type_data; mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git 
a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index ebc55b6a6349..784dbf5a3e12 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -351,7 +351,7 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type, u8 num_tc; int i; - if (type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_QDISC_MQPRIO) return -EOPNOTSUPP; mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 2a0af11c9b59..59415090ff0f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -1252,7 +1252,7 @@ out: static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { - if (type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_QDISC_MQPRIO) return -EOPNOTSUPP; return hns3_setup_tc(dev, type_data); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 81e4425f0529..adc62fb38c49 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1389,7 +1389,7 @@ static int __fm10k_setup_tc(struct net_device *dev, enum tc_setup_type type, { struct tc_mqprio_qopt *mqprio = type_data; - if (type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_QDISC_MQPRIO) return -EOPNOTSUPP; mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 05b94d87a6c3..17e6f64299cf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -7550,7 +7550,7 @@ static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data) { switch (type) { - case TC_SETUP_MQPRIO: + case TC_SETUP_QDISC_MQPRIO: return i40e_setup_tc(netdev, type_data); case TC_SETUP_BLOCK: return i40e_setup_tc_block(netdev, type_data); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index e5dcb25be398..6eaca8366ac8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9431,7 +9431,7 @@ static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type, switch (type) { case TC_SETUP_BLOCK: return ixgbe_setup_tc_block(dev, type_data); - case TC_SETUP_MQPRIO: + case TC_SETUP_QDISC_MQPRIO: return ixgbe_setup_tc_mqprio(dev, type_data); default: return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 736a6ccaf05e..99051a294fa6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -135,7 +135,7 @@ static int __mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type, { struct tc_mqprio_qopt *mqprio = type_data; - if (type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_QDISC_MQPRIO) return -EOPNOTSUPP; if (mqprio->num_tc && mqprio->num_tc != MLX4_EN_NUM_UP_HIGH) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index f877f2f5f2a5..5d5d2e50e4bf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3146,7 +3146,7 @@ int mlx5e_setup_tc(struct net_device *dev, enum 
tc_setup_type type, case TC_SETUP_BLOCK: return mlx5e_setup_tc_block(dev, type_data); #endif - case TC_SETUP_MQPRIO: + case TC_SETUP_QDISC_MQPRIO: return mlx5e_setup_tc_mqprio(dev, type_data); default: return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c index 6a75f4140a4b..1b978d69e702 100644 --- a/drivers/net/ethernet/sfc/falcon/tx.c +++ b/drivers/net/ethernet/sfc/falcon/tx.c @@ -435,7 +435,7 @@ int ef4_setup_tc(struct net_device *net_dev, enum tc_setup_type type, unsigned tc, num_tc; int rc; - if (type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_QDISC_MQPRIO) return -EOPNOTSUPP; num_tc = mqprio->num_tc; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 32bf1fecf864..ea27b8a7f465 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -663,7 +663,7 @@ int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, unsigned tc, num_tc; int rc; - if (type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_QDISC_MQPRIO) return -EOPNOTSUPP; num_tc = mqprio->num_tc; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 437d36289786..15e2e3031d36 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1887,7 +1887,7 @@ static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type, /* setup tc must be called under rtnl lock */ ASSERT_RTNL(); - if (type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_QDISC_MQPRIO) return -EOPNOTSUPP; mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 71968a2ca9f3..703885aed856 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -770,7 +770,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, struct sk_buff *skb); enum tc_setup_type { - TC_SETUP_MQPRIO, + TC_SETUP_QDISC_MQPRIO, TC_SETUP_CLSU32, TC_SETUP_CLSFLOWER, TC_SETUP_CLSMATCHALL, diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index 4d5ed45123f0..b85885a9d8a1 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -50,7 +50,8 @@ static void mqprio_destroy(struct Qdisc *sch) switch (priv->mode) { case TC_MQPRIO_MODE_DCB: case TC_MQPRIO_MODE_CHANNEL: - dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO, + dev->netdev_ops->ndo_setup_tc(dev, + TC_SETUP_QDISC_MQPRIO, &mqprio); break; default: @@ -265,7 +266,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) return -EINVAL; } err = dev->netdev_ops->ndo_setup_tc(dev, - TC_SETUP_MQPRIO, + TC_SETUP_QDISC_MQPRIO, &mqprio); if (err) return err; -- cgit v1.2.3 From 8521db4c7e155d12fb280686c0552e47f77e9110 Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Mon, 6 Nov 2017 07:23:43 +0100 Subject: net_sch: cbs: Change TC_SETUP_CBS to TC_SETUP_QDISC_CBS Change TC_SETUP_CBS to TC_SETUP_QDISC_CBS to match the new convention. Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Reviewed-by: Simon Horman Acked-by: Vinicius Costa Gomes Signed-off-by: David S.
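Taken together with the previous patch, the renamed values make a driver's ndo_setup_tc dispatch read as follows. This is a sketch only, with foo_* as illustrative names rather than code from any driver touched here:

        static int foo_setup_tc(struct net_device *dev, enum tc_setup_type type,
                                void *type_data)
        {
                switch (type) {
                case TC_SETUP_QDISC_MQPRIO:     /* was TC_SETUP_MQPRIO */
                        return foo_setup_tc_mqprio(dev, type_data);
                case TC_SETUP_QDISC_CBS:        /* was TC_SETUP_CBS */
                        return foo_setup_tc_cbs(dev, type_data);
                default:
                        return -EOPNOTSUPP;
                }
        }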
Miller --- drivers/net/ethernet/intel/igb/igb_main.c | 2 +- include/linux/netdevice.h | 2 +- net/sched/sch_cbs.c | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index e22bce7cdacd..43cf39527660 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2488,7 +2488,7 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type, struct igb_adapter *adapter = netdev_priv(dev); switch (type) { - case TC_SETUP_CBS: + case TC_SETUP_QDISC_CBS: return igb_offload_cbs(adapter, type_data); default: diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 703885aed856..30f0f2928808 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -776,7 +776,7 @@ enum tc_setup_type { TC_SETUP_CLSMATCHALL, TC_SETUP_CLSBPF, TC_SETUP_BLOCK, - TC_SETUP_CBS, + TC_SETUP_QDISC_CBS, TC_SETUP_QDISC_RED, }; diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c index bdb533b7fb8c..7a72980c1509 100644 --- a/net/sched/sch_cbs.c +++ b/net/sched/sch_cbs.c @@ -212,7 +212,7 @@ static void cbs_disable_offload(struct net_device *dev, cbs.queue = q->queue; cbs.enable = 0; - err = ops->ndo_setup_tc(dev, TC_SETUP_CBS, &cbs); + err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_CBS, &cbs); if (err < 0) pr_warn("Couldn't disable CBS offload for queue %d\n", cbs.queue); @@ -236,7 +236,7 @@ static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q, cbs.idleslope = opt->idleslope; cbs.sendslope = opt->sendslope; - err = ops->ndo_setup_tc(dev, TC_SETUP_CBS, &cbs); + err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_CBS, &cbs); if (err < 0) return err; -- cgit v1.2.3 From ad53fa06c126d2d739563802cc412cdcc9c32e63 Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Mon, 6 Nov 2017 07:23:44 +0100 Subject: mlxsw: reg: Add cwtp & cwtpm registers This patch adds 2 new registers: - Congestion WRED ECN TClass Profile Register [CWTP] - Congestion WRED ECN TClass and Pool Mapping Register [CWTPM] These registers would later be needed to offload RED-related functionality to the HW. Signed-off-by: Yuval Mintz Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
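The intended call sequence for the two registers, as the next patch in this series uses them, is: program the WRED/ECN thresholds and marking probability into a profile via CWTP, then bind the {port, traffic class} to that profile and enable WRED/ECN via CWTPM. A condensed sketch (the local variable names are illustrative):

        char cwtp_pl[MLXSW_REG_CWTP_LEN];
        char cwtpm_pl[MLXSW_REG_CWTPM_LEN];
        int err;

        /* step 1: thresholds and marking probability of the profile */
        mlxsw_reg_cwtp_pack(cwtp_pl, local_port, tclass);
        mlxsw_reg_cwtp_profile_pack(cwtp_pl, MLXSW_REG_CWTP_DEFAULT_PROFILE,
                                    min, max, probability);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_pl);
        if (err)
                return err;

        /* step 2: map the traffic class to the profile, enable WRED/ECN */
        mlxsw_reg_cwtpm_pack(cwtpm_pl, local_port, tclass,
                             MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_pl);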
Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 187 ++++++++++++++++++++++++++++++ 1 file changed, 187 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 5066553dd0b6..db394ec2a4dc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -1758,6 +1758,191 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port, } } +/* CWTP - Congestion WRED ECN TClass Profile + * ---------------------------------------- + * Configures the profiles for queues of egress port and traffic class + */ +#define MLXSW_REG_CWTP_ID 0x2802 +#define MLXSW_REG_CWTP_BASE_LEN 0x28 +#define MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN 0x08 +#define MLXSW_REG_CWTP_LEN 0x40 + +MLXSW_REG_DEFINE(cwtp, MLXSW_REG_CWTP_ID, MLXSW_REG_CWTP_LEN); + +/* reg_cwtp_local_port + * Local port number + * Not supported for CPU port + * Access: Index + */ +MLXSW_ITEM32(reg, cwtp, local_port, 0, 16, 8); + +/* reg_cwtp_traffic_class + * Traffic Class to configure + * Access: Index + */ +MLXSW_ITEM32(reg, cwtp, traffic_class, 32, 0, 8); + +/* reg_cwtp_profile_min + * Minimum Average Queue Size of the profile in cells. + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, cwtp, profile_min, MLXSW_REG_CWTP_BASE_LEN, + 0, 20, MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN, 0, false); + +/* reg_cwtp_profile_percent + * Percentage of WRED and ECN marking for maximum Average Queue size + * Range is 0 to 100, units of integer percentage + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, cwtp, profile_percent, MLXSW_REG_CWTP_BASE_LEN, + 24, 7, MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN, 4, false); + +/* reg_cwtp_profile_max + * Maximum Average Queue size of the profile in cells + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, cwtp, profile_max, MLXSW_REG_CWTP_BASE_LEN, + 0, 20, MLXSW_REG_CWTP_PROFILE_DATA_REC_LEN, 4, false); + +#define MLXSW_REG_CWTP_MIN_VALUE 64 +#define MLXSW_REG_CWTP_MAX_PROFILE 2 +#define MLXSW_REG_CWTP_DEFAULT_PROFILE 1 + +static inline void mlxsw_reg_cwtp_pack(char *payload, u8 local_port, + u8 traffic_class) +{ + int i; + + MLXSW_REG_ZERO(cwtp, payload); + mlxsw_reg_cwtp_local_port_set(payload, local_port); + mlxsw_reg_cwtp_traffic_class_set(payload, traffic_class); + + for (i = 0; i <= MLXSW_REG_CWTP_MAX_PROFILE; i++) { + mlxsw_reg_cwtp_profile_min_set(payload, i, + MLXSW_REG_CWTP_MIN_VALUE); + mlxsw_reg_cwtp_profile_max_set(payload, i, + MLXSW_REG_CWTP_MIN_VALUE); + } +} + +#define MLXSW_REG_CWTP_PROFILE_TO_INDEX(profile) (profile - 1) + +static inline void +mlxsw_reg_cwtp_profile_pack(char *payload, u8 profile, u32 min, u32 max, + u32 probability) +{ + u8 index = MLXSW_REG_CWTP_PROFILE_TO_INDEX(profile); + + mlxsw_reg_cwtp_profile_min_set(payload, index, min); + mlxsw_reg_cwtp_profile_max_set(payload, index, max); + mlxsw_reg_cwtp_profile_percent_set(payload, index, probability); +} + +/* CWTPM - Congestion WRED ECN TClass and Pool Mapping + * --------------------------------------------------- + * The CWTPM register maps each egress port and traffic class to profile num.
+ */ +#define MLXSW_REG_CWTPM_ID 0x2803 +#define MLXSW_REG_CWTPM_LEN 0x44 + +MLXSW_REG_DEFINE(cwtpm, MLXSW_REG_CWTPM_ID, MLXSW_REG_CWTPM_LEN); + +/* reg_cwtpm_local_port + * Local port number + * Not supported for CPU port + * Access: Index + */ +MLXSW_ITEM32(reg, cwtpm, local_port, 0, 16, 8); + +/* reg_cwtpm_traffic_class + * Traffic Class to configure + * Access: Index + */ +MLXSW_ITEM32(reg, cwtpm, traffic_class, 32, 0, 8); + +/* reg_cwtpm_ew + * Control enablement of WRED for traffic class: + * 0 - Disable + * 1 - Enable + * Access: RW + */ +MLXSW_ITEM32(reg, cwtpm, ew, 36, 1, 1); + +/* reg_cwtpm_ee + * Control enablement of ECN for traffic class: + * 0 - Disable + * 1 - Enable + * Access: RW + */ +MLXSW_ITEM32(reg, cwtpm, ee, 36, 0, 1); + +/* reg_cwtpm_tcp_g + * TCP Green Profile. + * Index of the profile within {port, traffic class} to use. + * 0 for disabling both WRED and ECN for this type of traffic. + * Access: RW + */ +MLXSW_ITEM32(reg, cwtpm, tcp_g, 52, 0, 2); + +/* reg_cwtpm_tcp_y + * TCP Yellow Profile. + * Index of the profile within {port, traffic class} to use. + * 0 for disabling both WRED and ECN for this type of traffic. + * Access: RW + */ +MLXSW_ITEM32(reg, cwtpm, tcp_y, 56, 16, 2); + +/* reg_cwtpm_tcp_r + * TCP Red Profile. + * Index of the profile within {port, traffic class} to use. + * 0 for disabling both WRED and ECN for this type of traffic. + * Access: RW + */ +MLXSW_ITEM32(reg, cwtpm, tcp_r, 56, 0, 2); + +/* reg_cwtpm_ntcp_g + * Non-TCP Green Profile. + * Index of the profile within {port, traffic class} to use. + * 0 for disabling both WRED and ECN for this type of traffic. + * Access: RW + */ +MLXSW_ITEM32(reg, cwtpm, ntcp_g, 60, 0, 2); + +/* reg_cwtpm_ntcp_y + * Non-TCP Yellow Profile. + * Index of the profile within {port, traffic class} to use. + * 0 for disabling both WRED and ECN for this type of traffic. + * Access: RW + */ +MLXSW_ITEM32(reg, cwtpm, ntcp_y, 64, 16, 2); + +/* reg_cwtpm_ntcp_r + * Non-TCP Red Profile. + * Index of the profile within {port, traffic class} to use. + * 0 for disabling both WRED and ECN for this type of traffic. + * Access: RW + */ +MLXSW_ITEM32(reg, cwtpm, ntcp_r, 64, 0, 2); + +#define MLXSW_REG_CWTPM_RESET_PROFILE 0 + +static inline void mlxsw_reg_cwtpm_pack(char *payload, u8 local_port, + u8 traffic_class, u8 profile, + bool wred, bool ecn) +{ + MLXSW_REG_ZERO(cwtpm, payload); + mlxsw_reg_cwtpm_local_port_set(payload, local_port); + mlxsw_reg_cwtpm_traffic_class_set(payload, traffic_class); + mlxsw_reg_cwtpm_ew_set(payload, wred); + mlxsw_reg_cwtpm_ee_set(payload, ecn); + mlxsw_reg_cwtpm_tcp_g_set(payload, profile); + mlxsw_reg_cwtpm_tcp_y_set(payload, profile); + mlxsw_reg_cwtpm_tcp_r_set(payload, profile); + mlxsw_reg_cwtpm_ntcp_g_set(payload, profile); + mlxsw_reg_cwtpm_ntcp_y_set(payload, profile); + mlxsw_reg_cwtpm_ntcp_r_set(payload, profile); +} + /* PPBT - Policy-Engine Port Binding Table * --------------------------------------- * This register is used for configuration of the Port Binding Table. @@ -7405,6 +7590,8 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(svpe), MLXSW_REG(sfmr), MLXSW_REG(spvmlr), + MLXSW_REG(cwtp), + MLXSW_REG(cwtpm), MLXSW_REG(ppbt), MLXSW_REG(pacl), MLXSW_REG(pagt), -- cgit v1.2.3 From 96f17e0776c285b7373bdccbfc7300dbeac3878c Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Mon, 6 Nov 2017 07:23:45 +0100 Subject: mlxsw: spectrum: Support RED qdisc offload Add support for ndo_setup_tc with enum tc_setup_type value of TC_SETUP_RED. 
This call sets a RED qdisc on a traffic class. This patch supports the RED qdisc only as a root qdisc, and sets it on the default tclass. It can be set with or without ECN. Signed-off-by: Yuval Mintz Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/Makefile | 3 +- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2 + drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 15 ++ .../net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 174 +++++++++++++++++++++ 4 files changed, 193 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 80f4efd3e82f..9463c3fa254f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -19,7 +19,8 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_acl.o spectrum_flower.o \ spectrum_cnt.o spectrum_fid.o \ spectrum_ipip.o spectrum_acl_flex_actions.o \ - spectrum_mr.o spectrum_mr_tcam.o + spectrum_mr.o spectrum_mr_tcam.o \ + spectrum_qdisc.o mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 63e50877796b..e42b3e7bd588 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1797,6 +1797,8 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, switch (type) { case TC_SETUP_BLOCK: return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data); + case TC_SETUP_QDISC_RED: + return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 47dd7e06fd29..76ebd58b6248 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -203,6 +203,16 @@ struct mlxsw_sp_port_vlan { struct list_head bridge_vlan_node; }; +enum mlxsw_sp_qdisc_type { + MLXSW_SP_QDISC_NO_QDISC, + MLXSW_SP_QDISC_RED, +}; + +struct mlxsw_sp_qdisc { + u32 handle; + enum mlxsw_sp_qdisc_type type; +}; + struct mlxsw_sp_port { struct net_device *dev; struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats; @@ -236,6 +246,7 @@ struct mlxsw_sp_port { } periodic_hw_stats; struct mlxsw_sp_port_sample *sample; struct list_head vlans_list; + struct mlxsw_sp_qdisc root_qdisc; }; static inline bool @@ -546,6 +557,10 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, struct tc_cls_flower_offload *f); +/* spectrum_qdisc.c */ +int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_red_qopt_offload *p); + /* spectrum_fid.c */ int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid, enum mlxsw_sp_flood_type packet_type, u8 local_port, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c new file mode 100644 index 000000000000..c33e51a2b538 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@ -0,0 +1,174 @@ +/* + * drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c + * Copyright (c) 2017 Mellanox
Technologies. All rights reserved. + * Copyright (c) 2017 Nogah Frankel + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include + +#include "spectrum.h" +#include "reg.h" + +static int +mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port, + int tclass_num, u32 min, u32 max, + u32 probability, bool is_ecn) +{ + char cwtp_cmd[max_t(u8, MLXSW_REG_CWTP_LEN, MLXSW_REG_CWTPM_LEN)]; + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + int err; + + mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num); + mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE, + roundup(min, MLXSW_REG_CWTP_MIN_VALUE), + roundup(max, MLXSW_REG_CWTP_MIN_VALUE), + probability); + + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd); + if (err) + return err; + + mlxsw_reg_cwtpm_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num, + MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtp_cmd); +} + +static int +mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port, + int tclass_num) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char cwtpm_cmd[MLXSW_REG_CWTPM_LEN]; + + mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num, + MLXSW_REG_CWTPM_RESET_PROFILE, false, false); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd); +} + +static int +mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + int tclass_num) +{ + int err; + + if (mlxsw_sp_qdisc->handle != handle) + return 0; + + err = mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num); + mlxsw_sp_qdisc->handle = TC_H_UNSPEC; + mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_NO_QDISC; + + return err; +} + +static int +mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port 
*mlxsw_sp_port, u32 handle, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + int tclass_num, + struct tc_red_qopt_offload_params *p) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u32 min, max; + u64 prob; + int err = 0; + + if (p->min > p->max) { + dev_err(mlxsw_sp->bus_info->dev, + "spectrum: RED: min %u is bigger than max %u\n", p->min, + p->max); + goto err_bad_param; + } + if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) { + dev_err(mlxsw_sp->bus_info->dev, + "spectrum: RED: max value %u is too big\n", p->max); + goto err_bad_param; + } + if (p->min == 0 || p->max == 0) { + dev_err(mlxsw_sp->bus_info->dev, + "spectrum: RED: 0 value is illegal for min and max\n"); + goto err_bad_param; + } + + /* calculate probability in percentage */ + prob = p->probability; + prob *= 100; + prob = DIV_ROUND_UP(prob, 1 << 16); + prob = DIV_ROUND_UP(prob, 1 << 16); + min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min); + max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max); + err = mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min, + max, prob, p->is_ecn); + if (err) + goto err_config; + + mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_RED; + mlxsw_sp_qdisc->handle = handle; + return 0; + +err_bad_param: + err = -EINVAL; +err_config: + mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, mlxsw_sp_qdisc->handle, + mlxsw_sp_qdisc, tclass_num); + return err; +} + +#define MLXSW_SP_PORT_DEFAULT_TCLASS 0 + +int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port, + struct tc_red_qopt_offload *p) +{ + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc; + int tclass_num; + + if (p->parent != TC_H_ROOT) + return -EOPNOTSUPP; + + mlxsw_sp_qdisc = &mlxsw_sp_port->root_qdisc; + tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS; + + switch (p->command) { + case TC_RED_REPLACE: + return mlxsw_sp_qdisc_red_replace(mlxsw_sp_port, p->handle, + mlxsw_sp_qdisc, tclass_num, + &p->set); + case TC_RED_DESTROY: + return mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, p->handle, + mlxsw_sp_qdisc, tclass_num); + default: + return -EOPNOTSUPP; + } +} -- cgit v1.2.3 From 0afc1221ffecbbe4a9fdd6b46697cc7c31ecf8aa Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Mon, 6 Nov 2017 07:23:46 +0100 Subject: mlxsw: reg: Add ext and tc-cong counter groups This adds the counter group definitions for 2 new counter groups which are necessary for obtaining the ECN & WRED counters. Signed-off-by: Yuval Mintz Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S.
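Returning briefly to the probability conversion in mlxsw_sp_qdisc_red_replace() above: p->probability carries the RED marking probability as a fraction scaled by 2^32 (sch_red's max_P), and the 2^32 divisor is applied in two 2^16 steps because a single 1 << 32 shift would overflow a 32-bit integer constant. A worked example, assuming a configured probability of 10%:

        u64 prob = 429496729;                   /* ~= 0.1 * 2^32 */

        prob *= 100;                            /* 42949672900 */
        prob = DIV_ROUND_UP(prob, 1 << 16);     /* 655360 */
        prob = DIV_ROUND_UP(prob, 1 << 16);     /* 10, i.e. 10 percent */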
Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index db394ec2a4dc..6c4e08b8058a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3341,8 +3341,10 @@ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2); enum mlxsw_reg_ppcnt_grp { MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0, + MLXSW_REG_PPCNT_EXT_CNT = 0x5, MLXSW_REG_PPCNT_PRIO_CNT = 0x10, MLXSW_REG_PPCNT_TC_CNT = 0x11, + MLXSW_REG_PPCNT_TC_CONG_TC = 0x13, }; /* reg_ppcnt_grp @@ -3358,6 +3360,7 @@ enum mlxsw_reg_ppcnt_grp { * 0x10: Per Priority Counters * 0x11: Per Traffic Class Counters * 0x12: Physical Layer Counters + * 0x13: Per Traffic Class Congestion Counters * Access: Index */ MLXSW_ITEM32(reg, ppcnt, grp, 0x00, 0, 6); @@ -3496,6 +3499,14 @@ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received, MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted, MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64); +/* Ethernet Extended Counter Group Counters */ + +/* reg_ppcnt_ecn_marked + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ecn_marked, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64); + /* Ethernet Per Priority Group Counters */ /* reg_ppcnt_rx_octets @@ -3571,6 +3582,14 @@ MLXSW_ITEM64(reg, ppcnt, tc_transmit_queue, MLXSW_ITEM64(reg, ppcnt, tc_no_buffer_discard_uc, MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64); +/* Ethernet Per Traffic Class Congestion Group Counters */ + +/* reg_ppcnt_wred_discard + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, wred_discard, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64); + static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port, enum mlxsw_reg_ppcnt_grp grp, u8 prio_tc) -- cgit v1.2.3 From 075ab8adaf4e7443159bee6412cb85434c63ed15 Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Mon, 6 Nov 2017 07:23:47 +0100 Subject: mlxsw: spectrum: Collect tclass related stats periodically Add more statistics to be collected from the HW periodically. These stats are tclass based (besides the ECN-marked packets counter, which exists only per port). They are needed to expose RED qdisc stats and xstats correctly. Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S.
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 34 ++++++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 9 +++++++ 2 files changed, 43 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index e42b3e7bd588..1497b436be78 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1324,6 +1324,38 @@ out: return err; } +static void +mlxsw_sp_port_get_hw_xstats(struct net_device *dev, + struct mlxsw_sp_port_xstats *xstats) +{ + char ppcnt_pl[MLXSW_REG_PPCNT_LEN]; + int err, i; + + err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0, + ppcnt_pl); + if (!err) + xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl); + + for (i = 0; i < TC_MAX_QUEUE; i++) { + err = mlxsw_sp_port_get_stats_raw(dev, + MLXSW_REG_PPCNT_TC_CONG_TC, + i, ppcnt_pl); + if (!err) + xstats->wred_drop[i] = + mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl); + + err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT, + i, ppcnt_pl); + if (err) + continue; + + xstats->backlog[i] = + mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl); + xstats->tail_drop[i] = + mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl); + } +} + static void update_stats_cache(struct work_struct *work) { struct mlxsw_sp_port *mlxsw_sp_port = @@ -1335,6 +1367,8 @@ static void update_stats_cache(struct work_struct *work) mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev, &mlxsw_sp_port->periodic_hw_stats.stats); + mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev, + &mlxsw_sp_port->periodic_hw_stats.xstats); out: mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 76ebd58b6248..e68299e6a963 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -213,6 +213,14 @@ struct mlxsw_sp_qdisc { enum mlxsw_sp_qdisc_type type; }; +/* No need an internal lock; At worse - miss a single periodic iteration */ +struct mlxsw_sp_port_xstats { + u64 ecn; + u64 wred_drop[TC_MAX_QUEUE]; + u64 tail_drop[TC_MAX_QUEUE]; + u64 backlog[TC_MAX_QUEUE]; +}; + struct mlxsw_sp_port { struct net_device *dev; struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats; @@ -242,6 +250,7 @@ struct mlxsw_sp_port { struct { #define MLXSW_HW_STATS_UPDATE_TIME HZ struct rtnl_link_stats64 stats; + struct mlxsw_sp_port_xstats xstats; struct delayed_work update_dw; } periodic_hw_stats; struct mlxsw_sp_port_sample *sample; -- cgit v1.2.3 From 861fb8294d83ad950dfaa62b0bf8384c66e2cd5e Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Mon, 6 Nov 2017 07:23:48 +0100 Subject: mlxsw: spectrum: Support RED xstats Add support in ndo_setup_tc for the TC_RED_XSTATS command of the TC_SETUP_QDISC_RED offload. This call returns the RED qdisc xstats from the cache if the handle ID that is asked for matches the root qdisc ID, and fails otherwise. Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S.
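The patch below leans on a snapshot-and-delta pattern that is worth spelling out: the hardware counters collected by the periodic worker are free-running, but a freshly created qdisc must report statistics starting from zero. A base snapshot is therefore stored when the qdisc is (re)created, and each read returns the difference. In schematic form (the foo_* names are illustrative, not from the patch):

        struct foo_xstats {
                u64 base;       /* HW counter value at qdisc creation */
        };

        static u64 foo_xstats_read(const struct foo_xstats *xs, u64 hw_cnt)
        {
                return hw_cnt - xs->base;       /* delta since creation */
        }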
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 9 ++++ .../net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 51 ++++++++++++++++++++++ 2 files changed, 60 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index e68299e6a963..a86a493788dd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -48,6 +48,7 @@ #include #include #include +#include #include "port.h" #include "core.h" @@ -211,6 +212,14 @@ enum mlxsw_sp_qdisc_type { struct mlxsw_sp_qdisc { u32 handle; enum mlxsw_sp_qdisc_type type; + struct red_stats xstats_base; + union { + struct { + u64 tail_drop_base; + u64 ecn_base; + u64 wred_drop_base; + } red; + } xstats; }; /* No need an internal lock; At worse - miss a single periodic iteration */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c index c33e51a2b538..b97b30e08d3a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "spectrum.h" #include "reg.h" @@ -77,6 +78,27 @@ mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd); } +static void +mlxsw_sp_setup_tc_qdisc_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + int tclass_num) +{ + struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base; + struct mlxsw_sp_port_xstats *xstats; + + xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; + + switch (mlxsw_sp_qdisc->type) { + case MLXSW_SP_QDISC_RED: + xstats_base->prob_mark = xstats->ecn; + xstats_base->prob_drop = xstats->wred_drop[tclass_num]; + xstats_base->pdrop = xstats->tail_drop[tclass_num]; + break; + default: + break; + } +} + static int mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, @@ -135,6 +157,11 @@ mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, goto err_config; mlxsw_sp_qdisc->type = MLXSW_SP_QDISC_RED; + if (mlxsw_sp_qdisc->handle != handle) + mlxsw_sp_setup_tc_qdisc_clean_stats(mlxsw_sp_port, + mlxsw_sp_qdisc, + tclass_num); + mlxsw_sp_qdisc->handle = handle; return 0; @@ -146,6 +173,26 @@ err_config: return err; } +static int +mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + int tclass_num, struct red_stats *res) +{ + struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base; + struct mlxsw_sp_port_xstats *xstats; + + if (mlxsw_sp_qdisc->handle != handle || + mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED) + return -EOPNOTSUPP; + + xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; + + res->prob_drop = xstats->wred_drop[tclass_num] - xstats_base->prob_drop; + res->prob_mark = xstats->ecn - xstats_base->prob_mark; + res->pdrop = xstats->tail_drop[tclass_num] - xstats_base->pdrop; + return 0; +} + #define MLXSW_SP_PORT_DEFAULT_TCLASS 0 int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port, @@ -168,6 +215,10 @@ int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port, case TC_RED_DESTROY: return mlxsw_sp_qdisc_red_destroy(mlxsw_sp_port, p->handle, mlxsw_sp_qdisc, tclass_num); + case TC_RED_XSTATS: + return mlxsw_sp_qdisc_get_red_xstats(mlxsw_sp_port, p->handle, + mlxsw_sp_qdisc, 
tclass_num, + p->xstats); default: return -EOPNOTSUPP; } } -- cgit v1.2.3 From 3670756fe6f370c0748b0c9227f3807fddf0e1ac Mon Sep 17 00:00:00 2001 From: Nogah Frankel Date: Mon, 6 Nov 2017 07:23:49 +0100 Subject: mlxsw: spectrum: Support general qdisc stats Add support in ndo_setup_tc for the TC_RED_STATS command of the TC_SETUP_QDISC_RED offload. This call updates the generic qdisc stats from the cache if the handle ID that is asked for matches the root qdisc ID, and fails otherwise. Currently it doesn't support qlen and requeues. Signed-off-by: Nogah Frankel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 5 +++ .../net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 51 ++++++++++++++++++++++ 2 files changed, 56 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index a86a493788dd..58cf222fb985 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -220,6 +220,11 @@ struct mlxsw_sp_qdisc { u64 wred_drop_base; } red; } xstats; + + u64 tx_bytes; + u64 tx_packets; + u64 drops; + u64 overlimits; }; /* No need an internal lock; At worse - miss a single periodic iteration */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c index b97b30e08d3a..c33beac5def0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c @@ -85,14 +85,24 @@ mlxsw_sp_setup_tc_qdisc_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port, { struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base; struct mlxsw_sp_port_xstats *xstats; + struct rtnl_link_stats64 *stats; xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; + stats = &mlxsw_sp_port->periodic_hw_stats.stats; + + mlxsw_sp_qdisc->tx_packets = stats->tx_packets; + mlxsw_sp_qdisc->tx_bytes = stats->tx_bytes; switch (mlxsw_sp_qdisc->type) { case MLXSW_SP_QDISC_RED: xstats_base->prob_mark = xstats->ecn; xstats_base->prob_drop = xstats->wred_drop[tclass_num]; xstats_base->pdrop = xstats->tail_drop[tclass_num]; + + mlxsw_sp_qdisc->overlimits = xstats_base->prob_drop + + xstats_base->prob_mark; + mlxsw_sp_qdisc->drops = xstats_base->prob_drop + + xstats_base->pdrop; break; default: break; @@ -193,6 +203,43 @@ mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, return 0; } +static int +mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, + struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, + int tclass_num, + struct tc_red_qopt_offload_stats *res) +{ + u64 tx_bytes, tx_packets, overlimits, drops; + struct mlxsw_sp_port_xstats *xstats; + struct rtnl_link_stats64 *stats; + + if (mlxsw_sp_qdisc->handle != handle || + mlxsw_sp_qdisc->type != MLXSW_SP_QDISC_RED) + return -EOPNOTSUPP; + + xstats = &mlxsw_sp_port->periodic_hw_stats.xstats; + stats = &mlxsw_sp_port->periodic_hw_stats.stats; + + tx_bytes = stats->tx_bytes - mlxsw_sp_qdisc->tx_bytes; + tx_packets = stats->tx_packets - mlxsw_sp_qdisc->tx_packets; + overlimits = xstats->wred_drop[tclass_num] + xstats->ecn - + mlxsw_sp_qdisc->overlimits; + drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] - + mlxsw_sp_qdisc->drops; + + _bstats_update(res->bstats, tx_bytes, tx_packets); + res->qstats->overlimits += overlimits; + res->qstats->drops += drops; + res->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
xstats->backlog[tclass_num]); + + mlxsw_sp_qdisc->drops += drops; + mlxsw_sp_qdisc->overlimits += overlimits; + mlxsw_sp_qdisc->tx_bytes += tx_bytes; + mlxsw_sp_qdisc->tx_packets += tx_packets; + return 0; +} + #define MLXSW_SP_PORT_DEFAULT_TCLASS 0 int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port, @@ -219,6 +266,10 @@ int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_sp_qdisc_get_red_xstats(mlxsw_sp_port, p->handle, mlxsw_sp_qdisc, tclass_num, p->xstats); + case TC_RED_STATS: + return mlxsw_sp_qdisc_get_red_stats(mlxsw_sp_port, p->handle, + mlxsw_sp_qdisc, tclass_num, + &p->stats); default: return -EOPNOTSUPP; } -- cgit v1.2.3 From 29130853fe6dee04ad88d0586ff39182fa408a75 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Mon, 6 Nov 2017 11:12:08 +0000 Subject: dpaa_eth: fix error return code in dpaa_eth_probe() Fix to return a negative error code from the dpaa_bp_alloc() error handling case instead of 0, as done elsewhere in this function. Signed-off-by: Wei Yongjun Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 784dbf5a3e12..7caa8da48421 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2728,11 +2728,11 @@ static int dpaa_eth_probe(struct platform_device *pdev) /* bp init */ for (i = 0; i < DPAA_BPS_NUM; i++) { - int err; - dpaa_bps[i] = dpaa_bp_alloc(dev); - if (IS_ERR(dpaa_bps[i])) + if (IS_ERR(dpaa_bps[i])) { + err = PTR_ERR(dpaa_bps[i]); goto free_dpaa_bps; + } /* the raw size of the buffers used for reception */ dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM); /* avoid runtime computations by keeping the usable size here */ -- cgit v1.2.3 From d86fd113ebbb37726ef7c7cc6fd6d5ce377455d6 Mon Sep 17 00:00:00 2001 From: Wei Yongjun Date: Mon, 6 Nov 2017 11:11:28 +0000 Subject: mlxsw: spectrum: Fix error return code in mlxsw_sp_port_create() Fix to return a negative error code from the VID create error handling case instead of 0, as done elsewhere in this function. Fixes: c57529e1d5d8 ("mlxsw: spectrum: Replace vPorts with Port-VLAN") Signed-off-by: Wei Yongjun Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 1497b436be78..b2cd1ebf4e36 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3043,6 +3043,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, if (IS_ERR(mlxsw_sp_port_vlan)) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", mlxsw_sp_port->local_port); + err = PTR_ERR(mlxsw_sp_port_vlan); goto err_port_vlan_get; } -- cgit v1.2.3 From 42ca728b829b8fee8ac85adb79eaffd36f0b4e06 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 6 Nov 2017 14:43:01 +0300 Subject: bnxt: delete some unreachable code We return on the previous line so this "return 0;" statement should just be deleted. Signed-off-by: Dan Carpenter Signed-off-by: David S. 
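The two error-code fixes above (dpaa_eth and mlxsw) correct the same idiom, shown here in isolation as a schematic rather than driver code: when a helper returns an ERR_PTR-encoded pointer, the error must be extracted with PTR_ERR() before jumping to the cleanup label, otherwise the function exits with whatever value 'err' happened to hold, in both cases 0, i.e. false success:

        ptr = foo_alloc(dev);           /* hypothetical ERR_PTR-returning helper */
        if (IS_ERR(ptr)) {
                err = PTR_ERR(ptr);     /* propagate the real error code */
                goto free_resources;
        }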
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index b6aa7db99705..69186d188c43 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -148,7 +148,6 @@ static int bnxt_vf_rep_setup_tc_block(struct net_device *dev, return tcf_block_cb_register(f->block, bnxt_vf_rep_setup_tc_block_cb, vf_rep, vf_rep); - return 0; case TC_BLOCK_UNBIND: tcf_block_cb_unregister(f->block, bnxt_vf_rep_setup_tc_block_cb, vf_rep); -- cgit v1.2.3 From 7dfaa7bc99498da1c6c4a48bee8d2d5265161a8c Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 6 Nov 2017 15:04:39 +0100 Subject: bnxt: fix bnxt_hwrm_fw_set_time for y2038 On 32-bit architectures, rtc_time_to_tm() returns incorrect results in 2038 or later, and do_gettimeofday() is broken for the same reason. This changes the code to use ktime_get_real_seconds() and time64_to_tm() instead; both of them are 2038-safe, and we can also get rid of the CONFIG_RTC_LIB dependency that way. Signed-off-by: Arnd Bergmann Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index e5472e5ae7b2..33c49ad697e4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4915,16 +4915,14 @@ hwrm_ver_get_exit: int bnxt_hwrm_fw_set_time(struct bnxt *bp) { -#if IS_ENABLED(CONFIG_RTC_LIB) struct hwrm_fw_set_time_input req = {0}; - struct rtc_time tm; - struct timeval tv; + struct tm tm; + time64_t now = ktime_get_real_seconds(); if (bp->hwrm_spec_code < 0x10400) return -EOPNOTSUPP; - do_gettimeofday(&tv); - rtc_time_to_tm(tv.tv_sec, &tm); + time64_to_tm(now, 0, &tm); bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1); req.year = cpu_to_le16(1900 + tm.tm_year); req.month = 1 + tm.tm_mon; @@ -4933,9 +4931,6 @@ int bnxt_hwrm_fw_set_time(struct bnxt *bp) req.minute = tm.tm_min; req.second = tm.tm_sec; return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); -#else - return -EOPNOTSUPP; -#endif } static int bnxt_hwrm_port_qstats(struct bnxt *bp) -- cgit v1.2.3 From 5adb55c92918225005873aaac5e6af36789bf0ad Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 6 Nov 2017 22:53:29 +0100 Subject: fsl/fman: Remove a useless call to 'dev_set_drvdata()' Commit c6e26ea8c893 ("dpaa_eth: change device used") has removed usage of 'dev_set_drvdata()' in the 'mac_probe()' function. This call should also be axed. Signed-off-by: Christophe JAILLET Signed-off-by: David S.
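The 2038-safe pattern adopted in the bnxt fix above, reduced to its core (both helpers are existing kernel APIs): ktime_get_real_seconds() returns a time64_t on all architectures, and time64_to_tm() performs the calendar conversion without the 32-bit time_t truncation of the do_gettimeofday()/rtc_time_to_tm() pair it replaces:

        time64_t now = ktime_get_real_seconds();
        struct tm tm;

        time64_to_tm(now, 0, &tm);      /* offset 0 selects UTC */
        /* tm.tm_year counts from 1900 and tm.tm_mon from 0, hence the
         * "1900 + tm.tm_year" and "1 + tm.tm_mon" adjustments above */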
Miller --- drivers/net/ethernet/freescale/fman/mac.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 1d6da1ea7bfb..c27667a005f7 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -713,7 +713,6 @@ static int mac_probe(struct platform_device *_of_dev) __devm_release_region(dev, fman_get_mem_region(priv->fman), res.start, res.end + 1 - res.start); devm_kfree(dev, mac_dev); - dev_set_drvdata(dev, NULL); return -ENODEV; } -- cgit v1.2.3 From 336eac4347e74589f868e5da9ca0106953942aa8 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 6 Nov 2017 22:53:30 +0100 Subject: fsl/fman: Remove some useless code There is no need to release explicitly some devm_ allocated resources. If the 'mac_probe()' probe function fails, they will be released automatically, as already done in the other error handling paths of this function. Also goto '_return_of_get_parent' as in the other error handling paths. This is useless (priv->fixed_link is NULL at this point), but at least it is consistent. Signed-off-by: Christophe JAILLET Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fman/mac.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index c27667a005f7..ca12e28129ed 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -709,11 +709,8 @@ static int mac_probe(struct platform_device *_of_dev) } if (!of_device_is_available(mac_node)) { - devm_iounmap(dev, priv->vaddr); - __devm_release_region(dev, fman_get_mem_region(priv->fman), - res.start, res.end + 1 - res.start); - devm_kfree(dev, mac_dev); - return -ENODEV; + err = -ENODEV; + goto _return_of_get_parent; } /* Get the cell-index */ -- cgit v1.2.3 From 25850c31c8b5c2ce7fb922f5e80de7227ecf6be4 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 6 Nov 2017 22:53:31 +0100 Subject: fsl/fman: Add a missing 'of_node_put()' call in an error handling path If 'of_phy_find_device()' fails, we must undo the previous 'of_node_get()' call, as done in the following error handling code. Signed-off-by: Christophe JAILLET Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fman/mac.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index ca12e28129ed..86c1e69f44d6 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -821,6 +821,7 @@ static int mac_probe(struct platform_device *_of_dev) phy = of_phy_find_device(mac_dev->phy_node); if (!phy) { err = -EINVAL; + of_node_put(mac_dev->phy_node); goto _return_of_get_parent; } -- cgit v1.2.3 From e51f37bd3ae8e09d131bce5485ec7c5b03726b02 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 6 Nov 2017 22:53:32 +0100 Subject: fsl/fman: Remove a useless 'dev_err()' call Memory allocation functions already display some information in case of memory allocation failure. There is no need to add an extra 'dev_err' here. Signed-off-by: Christophe JAILLET Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/fman/mac.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 86c1e69f44d6..88c0a0636b44 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -615,7 +615,6 @@ static int mac_probe(struct platform_device *_of_dev) mac_dev = devm_kzalloc(dev, sizeof(*mac_dev), GFP_KERNEL); if (!mac_dev) { err = -ENOMEM; - dev_err(dev, "devm_kzalloc() = %d\n", err); goto _return; } priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); -- cgit v1.2.3 From 118d6298f6f0556e54331a6e86de2313d134fdbb Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Mon, 6 Nov 2017 22:56:53 +0100 Subject: net: mvpp2: add ethtool GOP statistics Add ethtool statistics support by reading the GOP statistics from the hardware counters. Also implement a workqueue to gather the statistics every second or some 32-bit counters could overflow. Suggested-by: Stefan Chulski Signed-off-by: Miquel Raynal Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 228 ++++++++++++++++++++++++++++++++++- 1 file changed, 223 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 965b6a829a5d..aa38bca597f2 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -799,6 +799,42 @@ enum mvpp2_bm_type { MVPP2_BM_SWF_SHORT }; +/* GMAC MIB Counters register definitions */ +#define MVPP21_MIB_COUNTERS_OFFSET 0x1000 +#define MVPP21_MIB_COUNTERS_PORT_SZ 0x400 +#define MVPP22_MIB_COUNTERS_OFFSET 0x0 +#define MVPP22_MIB_COUNTERS_PORT_SZ 0x100 + +#define MVPP2_MIB_GOOD_OCTETS_RCVD 0x0 +#define MVPP2_MIB_BAD_OCTETS_RCVD 0x8 +#define MVPP2_MIB_CRC_ERRORS_SENT 0xc +#define MVPP2_MIB_UNICAST_FRAMES_RCVD 0x10 +#define MVPP2_MIB_BROADCAST_FRAMES_RCVD 0x18 +#define MVPP2_MIB_MULTICAST_FRAMES_RCVD 0x1c +#define MVPP2_MIB_FRAMES_64_OCTETS 0x20 +#define MVPP2_MIB_FRAMES_65_TO_127_OCTETS 0x24 +#define MVPP2_MIB_FRAMES_128_TO_255_OCTETS 0x28 +#define MVPP2_MIB_FRAMES_256_TO_511_OCTETS 0x2c +#define MVPP2_MIB_FRAMES_512_TO_1023_OCTETS 0x30 +#define MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS 0x34 +#define MVPP2_MIB_GOOD_OCTETS_SENT 0x38 +#define MVPP2_MIB_UNICAST_FRAMES_SENT 0x40 +#define MVPP2_MIB_MULTICAST_FRAMES_SENT 0x48 +#define MVPP2_MIB_BROADCAST_FRAMES_SENT 0x4c +#define MVPP2_MIB_FC_SENT 0x54 +#define MVPP2_MIB_FC_RCVD 0x58 +#define MVPP2_MIB_RX_FIFO_OVERRUN 0x5c +#define MVPP2_MIB_UNDERSIZE_RCVD 0x60 +#define MVPP2_MIB_FRAGMENTS_RCVD 0x64 +#define MVPP2_MIB_OVERSIZE_RCVD 0x68 +#define MVPP2_MIB_JABBER_RCVD 0x6c +#define MVPP2_MIB_MAC_RCV_ERROR 0x70 +#define MVPP2_MIB_BAD_CRC_EVENT 0x74 +#define MVPP2_MIB_COLLISION 0x78 +#define MVPP2_MIB_LATE_COLLISION 0x7c + +#define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ) + /* Definitions */ /* Shared Packet Processor resources */ @@ -826,6 +862,7 @@ struct mvpp2 { struct clk *axi_clk; /* List of pointers to port structures */ + int port_count; struct mvpp2_port **port_list; /* Aggregated TXQs */ @@ -847,6 +884,12 @@ struct mvpp2 { /* Maximum number of RXQs per port */ unsigned int max_port_rxqs; + + /* Workqueue to gather hardware statistics with its lock */ + struct mutex gather_stats_lock; + struct delayed_work stats_work; + char queue_name[30]; + struct workqueue_struct *stats_queue; }; struct mvpp2_pcpu_stats { @@ -891,6 +934,7 @@ 
struct mvpp2_port { /* Per-port registers' base address */ void __iomem *base; + void __iomem *stats_base; struct mvpp2_rx_queue **rxqs; unsigned int nrxqs; @@ -909,6 +953,7 @@ struct mvpp2_port { u16 tx_ring_size; u16 rx_ring_size; struct mvpp2_pcpu_stats __percpu *stats; + u64 *ethtool_stats; phy_interface_t phy_interface; struct device_node *phy_node; @@ -4778,9 +4823,136 @@ static void mvpp2_port_loopback_set(struct mvpp2_port *port) writel(val, port->base + MVPP2_GMAC_CTRL_1_REG); } +struct mvpp2_ethtool_counter { + unsigned int offset; + const char string[ETH_GSTRING_LEN]; + bool reg_is_64b; +}; + +static u64 mvpp2_read_count(struct mvpp2_port *port, + const struct mvpp2_ethtool_counter *counter) +{ + u64 val; + + val = readl(port->stats_base + counter->offset); + if (counter->reg_is_64b) + val += (u64)readl(port->stats_base + counter->offset + 4) << 32; + + return val; +} + +/* Due to the fact that software statistics and hardware statistics are, by + * design, incremented at different moments in the chain of packet processing, + * it is very likely that incoming packets could have been dropped after being + * counted by hardware but before reaching software statistics (most probably + * multicast packets), and in the oppposite way, during transmission, FCS bytes + * are added in between as well as TSO skb will be split and header bytes added. + * Hence, statistics gathered from userspace with ifconfig (software) and + * ethtool (hardware) cannot be compared. + */ +static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = { + { MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true }, + { MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" }, + { MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" }, + { MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" }, + { MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" }, + { MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" }, + { MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" }, + { MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" }, + { MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" }, + { MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" }, + { MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" }, + { MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" }, + { MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true }, + { MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" }, + { MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" }, + { MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" }, + { MVPP2_MIB_FC_SENT, "fc_sent" }, + { MVPP2_MIB_FC_RCVD, "fc_received" }, + { MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" }, + { MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" }, + { MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" }, + { MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" }, + { MVPP2_MIB_JABBER_RCVD, "jabber_received" }, + { MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" }, + { MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" }, + { MVPP2_MIB_COLLISION, "collision" }, + { MVPP2_MIB_LATE_COLLISION, "late_collision" }, +}; + +static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset, + u8 *data) +{ + if (sset == ETH_SS_STATS) { + int i; + + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) + memcpy(data + i * ETH_GSTRING_LEN, + &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN); + } +} + +static void mvpp2_gather_hw_statistics(struct work_struct *work) +{ + struct delayed_work *del_work = to_delayed_work(work); + struct mvpp2 *priv = 
container_of(del_work, struct mvpp2, stats_work); + struct mvpp2_port *port; + u64 *pstats; + int i, j; + + mutex_lock(&priv->gather_stats_lock); + + for (i = 0; i < priv->port_count; i++) { + if (!priv->port_list[i]) + continue; + + port = priv->port_list[i]; + pstats = port->ethtool_stats; + for (j = 0; j < ARRAY_SIZE(mvpp2_ethtool_regs); j++) + *pstats++ += mvpp2_read_count(port, + &mvpp2_ethtool_regs[j]); + } + + /* No need to read again the counters right after this function if it + * was called asynchronously by the user (ie. use of ethtool). + */ + cancel_delayed_work(&priv->stats_work); + queue_delayed_work(priv->stats_queue, &priv->stats_work, + MVPP2_MIB_COUNTERS_STATS_DELAY); + + mutex_unlock(&priv->gather_stats_lock); +} + +static void mvpp2_ethtool_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct mvpp2_port *port = netdev_priv(dev); + + /* Update statistics for all ports, copy only those actually needed */ + mvpp2_gather_hw_statistics(&port->priv->stats_work.work); + + mutex_lock(&port->priv->gather_stats_lock); + memcpy(data, port->ethtool_stats, + sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs)); + mutex_unlock(&port->priv->gather_stats_lock); +} + +static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset) +{ + if (sset == ETH_SS_STATS) + return ARRAY_SIZE(mvpp2_ethtool_regs); + + return -EOPNOTSUPP; +} + static void mvpp2_port_reset(struct mvpp2_port *port) { u32 val; + unsigned int i; + + /* Read the GOP statistics to reset the hardware counters */ + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) + mvpp2_read_count(port, &mvpp2_ethtool_regs[i]); val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) & ~MVPP2_GMAC_PORT_RESET_MASK; @@ -6912,6 +7084,10 @@ static int mvpp2_open(struct net_device *dev) if (priv->hw_version == MVPP22) mvpp22_init_rss(port); + /* Start hardware statistics gathering */ + queue_delayed_work(priv->stats_queue, &priv->stats_work, + MVPP2_MIB_COUNTERS_STATS_DELAY); + return 0; err_free_link_irq: @@ -6956,6 +7132,9 @@ static int mvpp2_stop(struct net_device *dev) mvpp2_cleanup_rxqs(port); mvpp2_cleanup_txqs(port); + cancel_delayed_work_sync(&priv->stats_work); + flush_workqueue(priv->stats_queue); + return 0; } @@ -7267,6 +7446,9 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = { .get_drvinfo = mvpp2_ethtool_get_drvinfo, .get_ringparam = mvpp2_ethtool_get_ringparam, .set_ringparam = mvpp2_ethtool_set_ringparam, + .get_strings = mvpp2_ethtool_get_strings, + .get_ethtool_stats = mvpp2_ethtool_get_stats, + .get_sset_count = mvpp2_ethtool_get_sset_count, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, }; @@ -7670,6 +7852,10 @@ static int mvpp2_port_probe(struct platform_device *pdev, err = PTR_ERR(port->base); goto err_free_irq; } + + port->stats_base = port->priv->lms_base + + MVPP21_MIB_COUNTERS_OFFSET + + port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ; } else { if (of_property_read_u32(port_node, "gop-port-id", &port->gop_id)) { @@ -7679,15 +7865,26 @@ static int mvpp2_port_probe(struct platform_device *pdev, } port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id); + port->stats_base = port->priv->iface_base + + MVPP22_MIB_COUNTERS_OFFSET + + port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ; } - /* Alloc per-cpu stats */ + /* Alloc per-cpu and ethtool stats */ port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats); if (!port->stats) { err = -ENOMEM; goto err_free_irq; } + port->ethtool_stats = devm_kcalloc(&pdev->dev, + 
ARRAY_SIZE(mvpp2_ethtool_regs), + sizeof(u64), GFP_KERNEL); + if (!port->ethtool_stats) { + err = -ENOMEM; + goto err_free_stats; + } + mvpp2_port_copy_mac_addr(dev, priv, port_node, &mac_from); port->tx_ring_size = MVPP2_MAX_TXD; @@ -8010,7 +8207,7 @@ static int mvpp2_probe(struct platform_device *pdev) struct mvpp2 *priv; struct resource *res; void __iomem *base; - int port_count, i; + int i; int err; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); @@ -8125,14 +8322,14 @@ static int mvpp2_probe(struct platform_device *pdev) goto err_mg_clk; } - port_count = of_get_available_child_count(dn); - if (port_count == 0) { + priv->port_count = of_get_available_child_count(dn); + if (priv->port_count == 0) { dev_err(&pdev->dev, "no ports enabled\n"); err = -ENODEV; goto err_mg_clk; } - priv->port_list = devm_kcalloc(&pdev->dev, port_count, + priv->port_list = devm_kcalloc(&pdev->dev, priv->port_count, sizeof(*priv->port_list), GFP_KERNEL); if (!priv->port_list) { @@ -8149,6 +8346,24 @@ static int mvpp2_probe(struct platform_device *pdev) i++; } + /* Statistics must be gathered regularly because some of them (like + * packets counters) are 32-bit registers and could overflow quite + * quickly. For instance, a 10Gb link used at full bandwidth with the + * smallest packets (64B) will overflow a 32-bit counter in less than + * 30 seconds. Then, use a workqueue to fill 64-bit counters. + */ + mutex_init(&priv->gather_stats_lock); + snprintf(priv->queue_name, sizeof(priv->queue_name), + "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev), + priv->port_count > 1 ? "+" : ""); + priv->stats_queue = create_singlethread_workqueue(priv->queue_name); + if (!priv->stats_queue) { + err = -ENOMEM; + goto err_mg_clk; + } + + INIT_DELAYED_WORK(&priv->stats_work, mvpp2_gather_hw_statistics); + platform_set_drvdata(pdev, priv); return 0; @@ -8170,6 +8385,9 @@ static int mvpp2_remove(struct platform_device *pdev) struct device_node *port_node; int i = 0; + destroy_workqueue(priv->stats_queue); + mutex_destroy(&priv->gather_stats_lock); + for_each_available_child_of_node(dn, port_node) { if (priv->port_list[i]) mvpp2_port_remove(priv->port_list[i]); -- cgit v1.2.3 From 2b52a283907b7f6d439444bfdde7f2241ddde735 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Sun, 10 Sep 2017 17:51:10 +0300 Subject: net/mlx5e: Rename VLAN related variables and functions Rename VLAN related symbols to better reflect the fact that they are associated to C-tag VLAN. 
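The ambiguity this rename removes is visible at the driver boundary: the stack invokes the same .ndo_vlan_rx_add_vid callback for both 802.1Q (C-tag) and 802.1AD (S-tag) registrations, so per-protocol bookkeeping has to be explicit. A minimal sketch of that dispatch, using hypothetical foo_* names rather than the mlx5e symbols:

#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/netdevice.h>

struct foo_priv {
	DECLARE_BITMAP(active_cvlans, VLAN_N_VID);	/* 802.1Q VIDs */
	DECLARE_BITMAP(active_svlans, VLAN_N_VID);	/* 802.1AD VIDs */
};

/* 'proto' is the only way to tell C-tag and S-tag registrations apart */
static int foo_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct foo_priv *priv = netdev_priv(dev);

	if (proto == htons(ETH_P_8021Q)) {
		set_bit(vid, priv->active_cvlans);
		return 0;	/* a real driver programs a C-tag rule here */
	}
	if (proto == htons(ETH_P_8021AD)) {
		set_bit(vid, priv->active_svlans);
		return 0;	/* a real driver programs an S-tag rule here */
	}
	return -EOPNOTSUPP;
}

The 802.1ad patches later in this series add exactly this kind of split to mlx5e.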
Signed-off-by: Gal Pressman Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 10 ++-- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 56 +++++++++++------------ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 8 ++-- 3 files changed, 37 insertions(+), 37 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 751f62cae969..eba5db75214b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -655,12 +655,12 @@ struct mlx5e_tc_table { struct mlx5e_vlan_table { struct mlx5e_flow_table ft; - unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; - struct mlx5_flow_handle *active_vlans_rule[VLAN_N_VID]; + unsigned long active_cvlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID]; struct mlx5_flow_handle *untagged_rule; struct mlx5_flow_handle *any_cvlan_rule; struct mlx5_flow_handle *any_svlan_rule; - bool filter_disabled; + bool cvlan_filter_disabled; }; struct mlx5e_l2_table { @@ -887,8 +887,8 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); -void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv); -void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv); +void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv); +void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv); void mlx5e_timestamp_set(struct mlx5e_priv *priv); struct mlx5e_redirect_rqt_param { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index f0d11ad05ed2..53901537778b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -118,7 +118,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) int i; list_size = 0; - for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) + for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) list_size++; max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list); @@ -135,7 +135,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) return -ENOMEM; i = 0; - for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) { + for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) { if (i >= list_size) break; vlans[i++] = vlan; @@ -154,7 +154,7 @@ enum mlx5e_vlan_rule_type { MLX5E_VLAN_RULE_TYPE_UNTAGGED, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, - MLX5E_VLAN_RULE_TYPE_MATCH_VID, + MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, }; static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, @@ -190,8 +190,8 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, outer_headers.svlan_tag); MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1); break; - default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */ - rule_p = &priv->fs.vlan.active_vlans_rule[vid]; + default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */ + rule_p = &priv->fs.vlan.active_cvlans_rule[vid]; MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1); @@ -223,7 +223,7 @@ static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv, if (!spec) return -ENOMEM; - if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID) + if (rule_type == 
MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID) mlx5e_vport_context_update_vlans(priv); err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec); @@ -255,11 +255,11 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, priv->fs.vlan.any_svlan_rule = NULL; } break; - case MLX5E_VLAN_RULE_TYPE_MATCH_VID: + case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID: mlx5e_vport_context_update_vlans(priv); - if (priv->fs.vlan.active_vlans_rule[vid]) { - mlx5_del_flow_rules(priv->fs.vlan.active_vlans_rule[vid]); - priv->fs.vlan.active_vlans_rule[vid] = NULL; + if (priv->fs.vlan.active_cvlans_rule[vid]) { + mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]); + priv->fs.vlan.active_cvlans_rule[vid] = NULL; } mlx5e_vport_context_update_vlans(priv); break; @@ -283,23 +283,23 @@ static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv) return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0); } -void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) +void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv) { - if (!priv->fs.vlan.filter_disabled) + if (!priv->fs.vlan.cvlan_filter_disabled) return; - priv->fs.vlan.filter_disabled = false; + priv->fs.vlan.cvlan_filter_disabled = false; if (priv->netdev->flags & IFF_PROMISC) return; mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); } -void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) +void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv) { - if (priv->fs.vlan.filter_disabled) + if (priv->fs.vlan.cvlan_filter_disabled) return; - priv->fs.vlan.filter_disabled = true; + priv->fs.vlan.cvlan_filter_disabled = true; if (priv->netdev->flags & IFF_PROMISC) return; mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); @@ -310,9 +310,9 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, { struct mlx5e_priv *priv = netdev_priv(dev); - set_bit(vid, priv->fs.vlan.active_vlans); + set_bit(vid, priv->fs.vlan.active_cvlans); - return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); + return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); } int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, @@ -320,9 +320,9 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, { struct mlx5e_priv *priv = netdev_priv(dev); - clear_bit(vid, priv->fs.vlan.active_vlans); + clear_bit(vid, priv->fs.vlan.active_cvlans); - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid); + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); return 0; } @@ -333,11 +333,11 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv) mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); - for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) { - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i); + for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) { + mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i); } - if (priv->fs.vlan.filter_disabled && + if (priv->fs.vlan.cvlan_filter_disabled && !(priv->netdev->flags & IFF_PROMISC)) mlx5e_add_any_vid_rules(priv); } @@ -348,11 +348,11 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); - for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) { - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i); + for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) { + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i); } - if 
(priv->fs.vlan.filter_disabled && + if (priv->fs.vlan.cvlan_filter_disabled && !(priv->netdev->flags & IFF_PROMISC)) mlx5e_del_any_vid_rules(priv); } @@ -546,7 +546,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) if (enable_promisc) { mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC); - if (!priv->fs.vlan.filter_disabled) + if (!priv->fs.vlan.cvlan_filter_disabled) mlx5e_add_any_vid_rules(priv); } if (enable_allmulti) @@ -561,7 +561,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) if (disable_allmulti) mlx5e_del_l2_flow_rule(priv, &ea->allmulti); if (disable_promisc) { - if (!priv->fs.vlan.filter_disabled) + if (!priv->fs.vlan.cvlan_filter_disabled) mlx5e_del_any_vid_rules(priv); mlx5e_del_l2_flow_rule(priv, &ea->promisc); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 5d5d2e50e4bf..c21aa54122dd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3260,14 +3260,14 @@ out: return err; } -static int set_feature_vlan_filter(struct net_device *netdev, bool enable) +static int set_feature_cvlan_filter(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); if (enable) - mlx5e_enable_vlan_filter(priv); + mlx5e_enable_cvlan_filter(priv); else - mlx5e_disable_vlan_filter(priv); + mlx5e_disable_cvlan_filter(priv); return 0; } @@ -3378,7 +3378,7 @@ static int mlx5e_set_features(struct net_device *netdev, set_feature_lro); err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_FILTER, - set_feature_vlan_filter); + set_feature_cvlan_filter); err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC, set_feature_tc_num_filters); err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL, -- cgit v1.2.3 From 355368d530460aa19b9d4291e2da2c6fd8929c76 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Thu, 14 Sep 2017 16:24:19 +0300 Subject: net/mlx5e: Add rollback on add VLAN failure When add VLAN rule fails the active vlan bit should be cleared. Fixes: afb736e9330a ("net/mlx5: Ethernet resource handling files") Signed-off-by: Gal Pressman Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 53901537778b..d3d775a93183 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -309,10 +309,15 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid) { struct mlx5e_priv *priv = netdev_priv(dev); + int err; set_bit(vid, priv->fs.vlan.active_cvlans); - return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); + if (err) + clear_bit(vid, priv->fs.vlan.active_cvlans); + + return err; } int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, -- cgit v1.2.3 From 03eda9541f351fb289fe18c87cb111f8bfca9837 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Mon, 18 Sep 2017 13:09:18 +0300 Subject: net/mlx5e: Declare bitmap using kernel macro Replace explicit declaration of bitmap with DECLARE_BITMAP kernel macro. 
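The change is purely cosmetic: DECLARE_BITMAP() is a one-line macro from include/linux/types.h that hides the BITS_TO_LONGS() sizing arithmetic. A self-contained illustration:

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/if_vlan.h>

/* include/linux/types.h:
 *   #define DECLARE_BITMAP(name, bits) \
 *           unsigned long name[BITS_TO_LONGS(bits)]
 * so the two declarations below describe identical storage.
 */
static unsigned long open_coded[BITS_TO_LONGS(VLAN_N_VID)];
static DECLARE_BITMAP(with_macro, VLAN_N_VID);

static void foo_bitmap_demo(void)
{
	BUILD_BUG_ON(sizeof(open_coded) != sizeof(with_macro));

	set_bit(100, with_macro);	/* mark VID 100 active */
	if (test_bit(100, with_macro))
		clear_bit(100, with_macro);
}

Either form works with set_bit()/test_bit()/for_each_set_bit(); the macro simply states the intent and the bit count more directly.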
Signed-off-by: Gal Pressman Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index eba5db75214b..f952796e9e82 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -655,7 +655,7 @@ struct mlx5e_tc_table { struct mlx5e_vlan_table { struct mlx5e_flow_table ft; - unsigned long active_cvlans[BITS_TO_LONGS(VLAN_N_VID)]; + DECLARE_BITMAP(active_cvlans, VLAN_N_VID); struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID]; struct mlx5_flow_handle *untagged_rule; struct mlx5_flow_handle *any_cvlan_rule; -- cgit v1.2.3 From 7d92d580334a18800aaf66aaf2e103271c48bafb Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Sun, 10 Sep 2017 10:36:43 +0300 Subject: net/mlx5e: Add 802.1ad VLAN filter steering rules When a user chooses to use 802.1ad VLAN the proper steering rules will be added to the VLAN flow table (matching the specific S-tag VID). Due to current hardware limitation, when using 802.1ad, we must disable C-tag VLAN stripping on the RQs. Signed-off-by: Gal Pressman Reviewed-by: Maor Gottlieb Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 + drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 102 +++++++++++++++++++--- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 21 +++++ 3 files changed, 112 insertions(+), 13 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index f952796e9e82..2a0739d07a08 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -656,7 +656,9 @@ struct mlx5e_tc_table { struct mlx5e_vlan_table { struct mlx5e_flow_table ft; DECLARE_BITMAP(active_cvlans, VLAN_N_VID); + DECLARE_BITMAP(active_svlans, VLAN_N_VID); struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID]; + struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID]; struct mlx5_flow_handle *untagged_rule; struct mlx5_flow_handle *any_cvlan_rule; struct mlx5_flow_handle *any_svlan_rule; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index d3d775a93183..f771be99329e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -155,6 +155,7 @@ enum mlx5e_vlan_rule_type { MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, + MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, }; static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, @@ -174,6 +175,10 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, switch (rule_type) { case MLX5E_VLAN_RULE_TYPE_UNTAGGED: + /* cvlan_tag enabled in match criteria and + * disabled in match value means both S & C tags + * don't exist (untagged of both) + */ rule_p = &priv->fs.vlan.untagged_rule; MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); @@ -190,6 +195,16 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, outer_headers.svlan_tag); MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1); break; + case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID: + rule_p = &priv->fs.vlan.active_svlans_rule[vid]; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + 
outer_headers.svlan_tag); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.first_vid); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, + vid); + break; default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */ rule_p = &priv->fs.vlan.active_cvlans_rule[vid]; MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, @@ -255,6 +270,12 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, priv->fs.vlan.any_svlan_rule = NULL; } break; + case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID: + if (priv->fs.vlan.active_svlans_rule[vid]) { + mlx5_del_flow_rules(priv->fs.vlan.active_svlans_rule[vid]); + priv->fs.vlan.active_svlans_rule[vid] = NULL; + } + break; case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID: mlx5e_vport_context_update_vlans(priv); if (priv->fs.vlan.active_cvlans_rule[vid]) { @@ -305,10 +326,8 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv) mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); } -int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, - u16 vid) +static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid) { - struct mlx5e_priv *priv = netdev_priv(dev); int err; set_bit(vid, priv->fs.vlan.active_cvlans); @@ -320,14 +339,48 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, return err; } -int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, - u16 vid) +static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid) +{ + struct net_device *netdev = priv->netdev; + int err; + + set_bit(vid, priv->fs.vlan.active_svlans); + + err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid); + if (err) { + clear_bit(vid, priv->fs.vlan.active_svlans); + return err; + } + + /* Need to fix some features.. 
*/ + netdev_update_features(netdev); + return err; +} + +int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { struct mlx5e_priv *priv = netdev_priv(dev); - clear_bit(vid, priv->fs.vlan.active_cvlans); + if (be16_to_cpu(proto) == ETH_P_8021Q) + return mlx5e_vlan_rx_add_cvid(priv, vid); + else if (be16_to_cpu(proto) == ETH_P_8021AD) + return mlx5e_vlan_rx_add_svid(priv, vid); - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); + return -EOPNOTSUPP; +} + +int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + if (be16_to_cpu(proto) == ETH_P_8021Q) { + clear_bit(vid, priv->fs.vlan.active_cvlans); + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); + } else if (be16_to_cpu(proto) == ETH_P_8021AD) { + clear_bit(vid, priv->fs.vlan.active_svlans); + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid); + netdev_update_features(dev); + } return 0; } @@ -342,6 +395,9 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv) mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i); } + for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID) + mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i); + if (priv->fs.vlan.cvlan_filter_disabled && !(priv->netdev->flags & IFF_PROMISC)) mlx5e_add_any_vid_rules(priv); @@ -357,6 +413,9 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i); } + for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID) + mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i); + if (priv->fs.vlan.cvlan_filter_disabled && !(priv->netdev->flags & IFF_PROMISC)) mlx5e_del_any_vid_rules(priv); @@ -550,6 +609,9 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled; if (enable_promisc) { + if (!priv->channels.params.vlan_strip_disable) + netdev_warn_once(ndev, + "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n"); mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC); if (!priv->fs.vlan.cvlan_filter_disabled) mlx5e_add_any_vid_rules(priv); @@ -1270,13 +1332,15 @@ err_destroy_flow_table: return err; } -#define MLX5E_NUM_VLAN_GROUPS 3 +#define MLX5E_NUM_VLAN_GROUPS 4 #define MLX5E_VLAN_GROUP0_SIZE BIT(12) -#define MLX5E_VLAN_GROUP1_SIZE BIT(1) -#define MLX5E_VLAN_GROUP2_SIZE BIT(0) +#define MLX5E_VLAN_GROUP1_SIZE BIT(12) +#define MLX5E_VLAN_GROUP2_SIZE BIT(1) +#define MLX5E_VLAN_GROUP3_SIZE BIT(0) #define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\ MLX5E_VLAN_GROUP1_SIZE +\ - MLX5E_VLAN_GROUP2_SIZE) + MLX5E_VLAN_GROUP2_SIZE +\ + MLX5E_VLAN_GROUP3_SIZE) static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in, int inlen) @@ -1299,7 +1363,8 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in memset(in, 0, inlen); MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid); MLX5_SET_CFG(in, start_flow_index, ix); ix += MLX5E_VLAN_GROUP1_SIZE; MLX5_SET_CFG(in, end_flow_index, ix - 1); @@ -1310,7 +1375,7 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in memset(in, 0, inlen); MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - 
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag); MLX5_SET_CFG(in, start_flow_index, ix); ix += MLX5E_VLAN_GROUP2_SIZE; MLX5_SET_CFG(in, end_flow_index, ix - 1); @@ -1319,6 +1384,17 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in goto err_destroy_groups; ft->num_groups++; + memset(in, 0, inlen); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5E_VLAN_GROUP3_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); + if (IS_ERR(ft->g[ft->num_groups])) + goto err_destroy_groups; + ft->num_groups++; + return 0; err_destroy_groups: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index c21aa54122dd..59b8a2d62b8d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3395,6 +3395,25 @@ static int mlx5e_set_features(struct net_device *netdev, return err ? -EINVAL : 0; } +static netdev_features_t mlx5e_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + mutex_lock(&priv->state_lock); + if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) { + /* HW strips the outer C-tag header, this is a problem + * for S-tag traffic. + */ + features &= ~NETIF_F_HW_VLAN_CTAG_RX; + if (!priv->channels.params.vlan_strip_disable) + netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n"); + } + mutex_unlock(&priv->state_lock); + + return features; +} + static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -3872,6 +3891,7 @@ static const struct net_device_ops mlx5e_netdev_ops = { .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, .ndo_set_features = mlx5e_set_features, + .ndo_fix_features = mlx5e_fix_features, .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, @@ -4231,6 +4251,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) } netdev->features |= NETIF_F_HIGHDMA; + netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER; netdev->priv_flags |= IFF_UNICAST_FLT; -- cgit v1.2.3 From 4382c7b92a1db397874ca62c73aa8b023af6dba8 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Sun, 10 Sep 2017 13:22:51 +0300 Subject: net/mlx5e: Add 802.1ad VLAN insertion support Report VLAN insertion support for S-tagged packets and add support by choosing the correct VLAN type in the WQE. 
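For hardware insertion the stack keeps the tag out of band: skb_vlan_tag_present() and skb_vlan_tag_get() expose the TCI, and skb->vlan_proto carries the TPID, so the transmit path only has to translate that protocol into the right descriptor flag. A sketch of the decision, where the FOO_* descriptor bits are made-up placeholders for the hardware-specific ones:

#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>

#define FOO_WQE_INSERT_VLAN	BIT(15)	/* hypothetical "insert a tag" bit */
#define FOO_WQE_SVLAN		BIT(0)	/* hypothetical "tag is S-tag" bit */

static void foo_tx_fill_vlan(const struct sk_buff *skb,
			     u16 *desc_flags, u16 *desc_tci)
{
	if (!skb_vlan_tag_present(skb))
		return;

	*desc_flags |= FOO_WQE_INSERT_VLAN;
	if (skb->vlan_proto == htons(ETH_P_8021AD))
		*desc_flags |= FOO_WQE_SVLAN;	/* TPID 0x88a8 instead of 0x8100 */
	*desc_tci = skb_vlan_tag_get(skb);	/* PCP/DEI/VID to insert */
}

Advertising NETIF_F_HW_VLAN_STAG_TX is what makes the stack hand S-tags over this way in the first place, which is why the patch also sets it in hw_features.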
Signed-off-by: Gal Pressman Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 2 ++ include/linux/mlx5/qp.h | 1 + 3 files changed, 4 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 59b8a2d62b8d..c408b7efa42e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4194,6 +4194,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) { netdev->hw_features |= NETIF_F_GSO_PARTIAL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index de651de35c9b..c62305b214cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -369,6 +369,8 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS); } else if (skb_vlan_tag_present(skb)) { eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN); + if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD)) + eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN); eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb)); } diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 66d19b611fe4..62af7512dabb 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -221,6 +221,7 @@ enum { }; enum { + MLX5_ETH_WQE_SVLAN = 1 << 0, MLX5_ETH_WQE_INSERT_VLAN = 1 << 15, }; -- cgit v1.2.3 From f24686e878914c260331b1067898a3925b598c6e Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Sun, 10 Sep 2017 13:49:59 +0300 Subject: net/mlx5e: Add VLAN offloads statistics The following counters are now exposed through ethtool -S: rx[i]_removed_vlan_packets (per channel) rx_removed_vlan_packets tx[i]_added_vlan_packets (per channel) tx_added_vlan_packets rx_removed_vlan_packets: The number of packets that had their outer VLAN header stripped to the CQE by the hardware. tx_added_vlan_packets: The number of packets that had their outer VLAN header inserted by the hardware. 
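Counters of this kind are plain u64 fields in the per-ring statistics structures; each one is written only from its own ring's context, so no atomics are needed, and an ethtool -S read folds the rings into the global software stats. A simplified sketch of that folding pattern, with illustrative names:

#include <linux/string.h>
#include <linux/types.h>

struct foo_ring_stats {
	u64 removed_vlan_packets;	/* bumped in the RX datapath */
	u64 added_vlan_packets;		/* bumped in the TX datapath */
};

static void foo_fold_vlan_stats(const struct foo_ring_stats *rings,
				int nrings, struct foo_ring_stats *total)
{
	int i;

	memset(total, 0, sizeof(*total));
	for (i = 0; i < nrings; i++) {
		total->removed_vlan_packets += rings[i].removed_vlan_packets;
		total->added_vlan_packets += rings[i].added_vlan_packets;
	}
}

On 32-bit machines drivers usually wrap such u64 updates in a u64_stats_sync section so that readers cannot observe torn values.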
Signed-off-by: Gal Pressman Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 ++ drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 4 +++- drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 4 ++++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 4 ++++ drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 2 ++ 5 files changed, 15 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index c408b7efa42e..d2b057a3e512 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -196,6 +196,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) s->rx_bytes += rq_stats->bytes; s->rx_lro_packets += rq_stats->lro_packets; s->rx_lro_bytes += rq_stats->lro_bytes; + s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; s->rx_csum_none += rq_stats->csum_none; s->rx_csum_complete += rq_stats->csum_complete; s->rx_csum_unnecessary += rq_stats->csum_unnecessary; @@ -224,6 +225,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) s->tx_tso_bytes += sq_stats->tso_bytes; s->tx_tso_inner_packets += sq_stats->tso_inner_packets; s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes; + s->tx_added_vlan_packets += sq_stats->added_vlan_packets; s->tx_queue_stopped += sq_stats->stopped; s->tx_queue_wake += sq_stats->wake; s->tx_queue_dropped += sq_stats->dropped; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 6d7df4750e0f..d2b1549056d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -685,9 +685,11 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, if (likely(netdev->features & NETIF_F_RXHASH)) mlx5e_skb_set_hash(cqe, skb); - if (cqe_has_vlan(cqe)) + if (cqe_has_vlan(cqe)) { __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->vlan_info)); + rq->stats.removed_vlan_packets++; + } skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 8bc30484ecc1..b74ddc7984bc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -42,8 +42,10 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) }, @@ -733,6 +735,7 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) }, { 
MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, @@ -755,6 +758,7 @@ static const struct counter_desc sq_stats_desc[] = { { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) }, + { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) }, { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index d094663edd9b..d679e21f686e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -59,8 +59,10 @@ struct mlx5e_sw_stats { u64 tx_tso_bytes; u64 tx_tso_inner_packets; u64 tx_tso_inner_bytes; + u64 tx_added_vlan_packets; u64 rx_lro_packets; u64 rx_lro_bytes; + u64 rx_removed_vlan_packets; u64 rx_csum_unnecessary; u64 rx_csum_none; u64 rx_csum_complete; @@ -153,6 +155,7 @@ struct mlx5e_rq_stats { u64 csum_none; u64 lro_packets; u64 lro_bytes; + u64 removed_vlan_packets; u64 xdp_drop; u64 xdp_tx; u64 xdp_tx_full; @@ -180,6 +183,7 @@ struct mlx5e_sq_stats { u64 tso_inner_bytes; u64 csum_partial; u64 csum_partial_inner; + u64 added_vlan_packets; u64 nop; /* less likely accessed in data path */ u64 csum_none; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index c62305b214cc..569b42a01026 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -361,6 +361,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, if (skb_vlan_tag_present(skb)) { mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len); ihs += VLAN_HLEN; + sq->stats.added_vlan_packets++; } else { memcpy(eseg->inline_hdr.start, skb_data, ihs); mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs); @@ -372,6 +373,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD)) eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN); eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb)); + sq->stats.added_vlan_packets++; } headlen = skb_len - skb->data_len; -- cgit v1.2.3 From f938daeee95eb36ef6b431bf054a5cc6cdada112 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Wed, 30 Aug 2017 15:12:45 +0300 Subject: net/mlx5e: CHECKSUM_COMPLETE offload for VLAN/QinQ packets When the VLAN tag is present in the packet buffer (i.e VLAN stripping disabled, QinQ) the driver will currently report CHECKSUM_UNNECESSARY. Instead of using CHECKSUM_COMPLETE offload for packets with first ethertype of IPv4/6, use it for packets with last ethertype of IPv4/6 to cover the former cases as well. The checksum field present in the CQE is calculated from the IP header until the end of the packet. When the first ethertype is different than IPv4/6 (for ex. 802.1Q VLAN) a checksum of the VLAN header/s should be added. The small header/s checksum calculation will allow us to use CHECKSUM_COMPLETE instead of CHECKSUM_UNNECESSARY. 
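Concretely, the new check walks any stacked tags with the in-tree __vlan_get_protocol() helper, which also reports how deep the IP header sits; the helper this patch introduces boils down to:

#include <linux/if_vlan.h>

static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth)
{
	__be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;

	/* Skips any number of 802.1Q/802.1AD tags: for a QinQ frame
	 * *network_depth comes back as ETH_HLEN + 2 * VLAN_HLEN, while
	 * for an untagged frame it keeps the 0 the caller passed in.
	 */
	ethertype = __vlan_get_protocol(skb, ethertype, network_depth);
	return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
}

The bytes between ETH_HLEN and network_depth are exactly the VLAN headers that the NIC's from-the-IP-header checksum misses, and the csum_partial() call in the hunk below folds them back into skb->csum.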
Testing bandwidth of one and 8 TCP streams to a single RQ, LRO and VLAN stripping offloads disabled: CPU: Intel(R) Xeon(R) CPU E5-2660 v2 @ 2.20GHz NIC: Mellanox Technologies MT27710 Family [ConnectX-4 Lx]
Before:
+--------------+--------------------+---------------------+----------------------+
| Traffic type | 1 Stream BW [Mbps] | 8 Streams BW [Mbps] | Checksum offload     |
+--------------+--------------------+---------------------+----------------------+
| Untagged     | 28,247.35          | 24,716.88           | CHECKSUM_COMPLETE    |
| VLAN         | 27,516.69          | 23,752.26           | CHECKSUM_UNNECESSARY |
| QinQ         | 6,961.30           | 20,667.04           | CHECKSUM_UNNECESSARY |
+--------------+--------------------+---------------------+----------------------+
Now:
+--------------+--------------------+---------------------+-------------------+
| Traffic type | 1 Stream BW [Mbps] | 8 Streams BW [Mbps] | Checksum offload  |
+--------------+--------------------+---------------------+-------------------+
| Untagged     | 28,521.28          | 24,926.32           | CHECKSUM_COMPLETE |
| VLAN         | 27,389.37          | 23,715.34           | CHECKSUM_COMPLETE |
| QinQ         | 6,901.77           | 20,845.73           | CHECKSUM_COMPLETE |
+--------------+--------------------+---------------------+-------------------+
No performance degradation observed. Signed-off-by: Gal Pressman Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index d2b1549056d2..a9d08f292fbe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -563,7 +563,6 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) || (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA); - skb->mac_len = ETH_HLEN; proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth); tot_len = cqe_bcnt - network_depth; @@ -610,10 +609,11 @@ static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe, skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht); } -static inline bool is_first_ethertype_ip(struct sk_buff *skb) +static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth) { __be16 ethertype = ((struct ethhdr *)skb->data)->h_proto; + ethertype = __vlan_get_protocol(skb, ethertype, network_depth); return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6)); } @@ -623,6 +623,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, struct sk_buff *skb, bool lro) { + int network_depth = 0; + if (unlikely(!(netdev->features & NETIF_F_RXCSUM))) goto csum_none; @@ -632,9 +634,17 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, return; } - if (is_first_ethertype_ip(skb)) { + if (is_last_ethertype_ip(skb, &network_depth)) { skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = csum_unfold((__force __sum16)cqe->check_sum); + if (network_depth > ETH_HLEN) + /* CQE csum is calculated from the IP header and does + * not cover VLAN headers (if present). This will add + * the checksum manually. 
+ */ + skb->csum = csum_partial(skb->data + ETH_HLEN, + network_depth - ETH_HLEN, + skb->csum); rq->stats.csum_complete++; return; } @@ -664,6 +674,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, struct net_device *netdev = rq->netdev; int lro_num_seg; + skb->mac_len = ETH_HLEN; lro_num_seg = be32_to_cpu(cqe->srqn) >> 24; if (lro_num_seg > 1) { mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); -- cgit v1.2.3 From e4effc094c91706c5737530ef9ae6298e1d67512 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 7 Nov 2017 15:47:24 +0000 Subject: net: vxge: remove redundant assignments and pointers There are several pointers that are being assigned but never read so remove these as they are redundant. Also remove an assignment to function_mode that is never read. Cleans up several clang warnings: vxge-main.c:1139:2: warning: Value stored to 'hldev' is never read vxge-main.c:1294:2: warning: Value stored to 'hldev' is never read vxge-main.c:2188:2: warning: Value stored to 'dev' is never read vxge-main.c:2188:2: warning: Value stored to 'dev' is never read vxge-main.c:2723:2: warning: Value stored to 'function_mode' is never read Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/neterion/vxge/vxge-main.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 5d5b9855e24e..426c9a946eb4 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -1122,7 +1122,6 @@ static void vxge_set_multicast(struct net_device *dev) struct netdev_hw_addr *ha; struct vxgedev *vdev; int i, mcast_cnt = 0; - struct __vxge_hw_device *hldev; struct vxge_vpath *vpath; enum vxge_hw_status status = VXGE_HW_OK; struct macInfo mac_info; @@ -1136,7 +1135,6 @@ static void vxge_set_multicast(struct net_device *dev) "%s:%d", __func__, __LINE__); vdev = netdev_priv(dev); - hldev = vdev->devh; if (unlikely(!is_vxge_card_up(vdev))) return; @@ -1283,7 +1281,6 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p) { struct sockaddr *addr = p; struct vxgedev *vdev; - struct __vxge_hw_device *hldev; enum vxge_hw_status status = VXGE_HW_OK; struct macInfo mac_info_new, mac_info_old; int vpath_idx = 0; @@ -1291,7 +1288,6 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p) vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); vdev = netdev_priv(dev); - hldev = vdev->devh; if (!is_valid_ether_addr(addr->sa_data)) return -EINVAL; @@ -2177,7 +2173,6 @@ static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring) */ static irqreturn_t vxge_isr_napi(int irq, void *dev_id) { - struct net_device *dev; struct __vxge_hw_device *hldev; u64 reason; enum vxge_hw_status status; @@ -2185,7 +2180,6 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id) vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__); - dev = vdev->ndev; hldev = pci_get_drvdata(vdev->pdev); if (pci_channel_offline(vdev->pdev)) @@ -2713,14 +2707,13 @@ static int vxge_open(struct net_device *dev) struct vxge_vpath *vpath; int ret = 0; int i; - u64 val64, function_mode; + u64 val64; vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", dev->name, __func__, __LINE__); vdev = netdev_priv(dev); hldev = pci_get_drvdata(vdev->pdev); - function_mode = vdev->config.device_hw_info.function_mode; /* make sure you have link off by default every time Nic is * initialized */ -- cgit v1.2.3 From 
cbad52e92ad7f01f0be4ca58bde59462dc1afe3a Mon Sep 17 00:00:00 2001 From: Robert Stonehouse Date: Tue, 7 Nov 2017 17:30:30 +0000 Subject: sfc: don't warn on successful change of MAC Fixes: 535a61777f44e ("sfc: suppress handled MCDI failures when changing the MAC address") Signed-off-by: Bert Kenward Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 19a91881fbf9..46d60013564c 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -5734,7 +5734,7 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) * MCFW do not support VFs. */ rc = efx_ef10_vport_set_mac_address(efx); - } else { + } else if (rc) { efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC, sizeof(inbuf), NULL, 0, rc); } -- cgit v1.2.3 From bf068bdd3c1e29c516ef0dc5cfb3c2b95fd450d1 Mon Sep 17 00:00:00 2001 From: Manish Kurup Date: Tue, 7 Nov 2017 15:48:45 -0500 Subject: nfp flower action: Modified to use VLAN helper functions Modified netronome nfp flower action to use VLAN helper functions instead of accessing/referencing TC act_vlan private structures directly. Reviewed-by: Pieter Jansen van Vuuren Signed-off-by: Manish Kurup Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/action.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index de64cedf8b26..c1c595f8bb87 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -58,7 +58,6 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan, const struct tc_action *action) { size_t act_size = sizeof(struct nfp_fl_push_vlan); - struct tcf_vlan *vlan = to_vlan(action); u16 tmp_push_vlan_tci; push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN; @@ -67,8 +66,8 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan, push_vlan->vlan_tpid = tcf_vlan_push_proto(action); tmp_push_vlan_tci = - FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, vlan->tcfv_push_prio) | - FIELD_PREP(NFP_FL_PUSH_VLAN_VID, vlan->tcfv_push_vid) | + FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) | + FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) | NFP_FL_PUSH_VLAN_CFI; push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci); } -- cgit v1.2.3 From 39e2151f1012eb4163b3a9f414c27d92798e4cbe Mon Sep 17 00:00:00 2001 From: Fuyun Liang Date: Wed, 8 Nov 2017 15:52:22 +0800 Subject: net: hns3: fix a bug when getting phy address from NCL_config file The driver gets the phy address from the NCL_config file and uses it to initialize phydev. There are 5 bits for the phy address, and a C22 phy address also has 5 bits, so 0-31 are all valid phy addresses and the driver always sees what looks like a valid phy address. If there is in fact no phy, it will crash. This patch widens the phy address field to 8 bits and uses 0xff to indicate an invalid phy address. Fixes: 46a3df9f9718 (net: hns3: Add HNS3 Acceleration Engine & Compatibility Layer Support) Signed-off-by: Fuyun Liang Signed-off-by: Lipeng Signed-off-by: David S. 
--- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 844c83ea549e..ce5ed8845042 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -390,7 +390,7 @@ struct hclge_pf_res_cmd { #define HCLGE_CFG_TQP_DESC_N_S 16 #define HCLGE_CFG_TQP_DESC_N_M GENMASK(31, 16) #define HCLGE_CFG_PHY_ADDR_S 0 -#define HCLGE_CFG_PHY_ADDR_M GENMASK(4, 0) +#define HCLGE_CFG_PHY_ADDR_M GENMASK(7, 0) #define HCLGE_CFG_MEDIA_TP_S 8 #define HCLGE_CFG_MEDIA_TP_M GENMASK(15, 8) #define HCLGE_CFG_RX_BUF_LEN_S 16 -- cgit v1.2.3 From c040366bc4a58f719e61111dea4b550b71b2a0b4 Mon Sep 17 00:00:00 2001 From: Fuyun Liang Date: Wed, 8 Nov 2017 15:52:23 +0800 Subject: net: hns3: cleanup mac auto-negotiation state query in hclge_update_speed_duplex When checking whether auto-negotiation is on, the driver only needs to check the value of mac.autoneg (the SW state) directly; it does not need to query it from hardware, because this value is always kept in sync with the hardware's auto-negotiation state. This patch removes the mac auto-negotiation state query from hclge_update_speed_duplex(). Fixes: 46a3df9f9718 (net: hns3: Add HNS3 Acceleration Engine & Compatibility Layer Support) Signed-off-by: Fuyun Liang Signed-off-by: Lipeng Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index c6ba89089ef3..781d5a8cbb6a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2325,18 +2325,7 @@ static int hclge_update_speed_duplex(struct hclge_dev *hdev) /* get the speed and duplex as autoneg'result from mac cmd when phy * doesn't exit. */ - if (mac.phydev) - return 0; - - /* update mac->antoneg. */ - ret = hclge_query_autoneg_result(hdev); - if (ret) { - dev_err(&hdev->pdev->dev, - "autoneg result query failed %d\n", ret); - return ret; - } - - if (!mac.autoneg) + if (mac.phydev || !mac.autoneg) return 0; ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex); -- cgit v1.2.3 From e5c500eb298a9f5ef9b80d16fcea9662c89467b7 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Wed, 8 Nov 2017 08:59:40 +0100 Subject: net: mvpp2: fix GOP statistics loop start and stop conditions GOP statistics for all ports of one instance of the driver are gathered by a single work item that keeps rescheduling itself on a workqueue. The loop is started when a port comes up, and stopped when a port goes down. This last condition is obviously wrong: bringing any one port down stops statistics gathering for all ports. Fix this by having a work item per port. This way, starting and stopping it when the port goes up or down is correct, while unnecessary CPU usage is kept to a minimum. Fixes: 118d6298f6f0 ("net: mvpp2: add ethtool GOP statistics") Reported-by: Stefan Chulski Signed-off-by: Miquel Raynal Signed-off-by: David S.
Miller --- drivers/net/ethernet/marvell/mvpp2.c | 62 +++++++++++++++++------------------- 1 file changed, 30 insertions(+), 32 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index a79d2ff4f86e..6c20e811f973 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -885,9 +885,7 @@ struct mvpp2 { /* Maximum number of RXQs per port */ unsigned int max_port_rxqs; - /* Workqueue to gather hardware statistics with its lock */ - struct mutex gather_stats_lock; - struct delayed_work stats_work; + /* Workqueue to gather hardware statistics */ char queue_name[30]; struct workqueue_struct *stats_queue; }; @@ -955,6 +953,10 @@ struct mvpp2_port { struct mvpp2_pcpu_stats __percpu *stats; u64 *ethtool_stats; + /* Per-port work and its lock to gather hardware statistics */ + struct mutex gather_stats_lock; + struct delayed_work stats_work; + phy_interface_t phy_interface; struct device_node *phy_node; struct phy *comphy; @@ -4895,32 +4897,25 @@ static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset, static void mvpp2_gather_hw_statistics(struct work_struct *work) { struct delayed_work *del_work = to_delayed_work(work); - struct mvpp2 *priv = container_of(del_work, struct mvpp2, stats_work); - struct mvpp2_port *port; + struct mvpp2_port *port = container_of(del_work, struct mvpp2_port, + stats_work); u64 *pstats; - int i, j; - - mutex_lock(&priv->gather_stats_lock); + int i; - for (i = 0; i < priv->port_count; i++) { - if (!priv->port_list[i]) - continue; + mutex_lock(&port->gather_stats_lock); - port = priv->port_list[i]; - pstats = port->ethtool_stats; - for (j = 0; j < ARRAY_SIZE(mvpp2_ethtool_regs); j++) - *pstats++ += mvpp2_read_count(port, - &mvpp2_ethtool_regs[j]); - } + pstats = port->ethtool_stats; + for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++) + *pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]); /* No need to read again the counters right after this function if it * was called asynchronously by the user (ie. use of ethtool). */ - cancel_delayed_work(&priv->stats_work); - queue_delayed_work(priv->stats_queue, &priv->stats_work, + cancel_delayed_work(&port->stats_work); + queue_delayed_work(port->priv->stats_queue, &port->stats_work, MVPP2_MIB_COUNTERS_STATS_DELAY); - mutex_unlock(&priv->gather_stats_lock); + mutex_unlock(&port->gather_stats_lock); } static void mvpp2_ethtool_get_stats(struct net_device *dev, @@ -4928,13 +4923,15 @@ static void mvpp2_ethtool_get_stats(struct net_device *dev, { struct mvpp2_port *port = netdev_priv(dev); - /* Update statistics for all ports, copy only those actually needed */ - mvpp2_gather_hw_statistics(&port->priv->stats_work.work); + /* Update statistics for the given port, then take the lock to avoid + * concurrent accesses on the ethtool_stats structure during its copy. 
+ */ + mvpp2_gather_hw_statistics(&port->stats_work.work); - mutex_lock(&port->priv->gather_stats_lock); + mutex_lock(&port->gather_stats_lock); memcpy(data, port->ethtool_stats, sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs)); - mutex_unlock(&port->priv->gather_stats_lock); + mutex_unlock(&port->gather_stats_lock); } static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset) @@ -7089,7 +7086,7 @@ static int mvpp2_open(struct net_device *dev) mvpp22_init_rss(port); /* Start hardware statistics gathering */ - queue_delayed_work(priv->stats_queue, &priv->stats_work, + queue_delayed_work(priv->stats_queue, &port->stats_work, MVPP2_MIB_COUNTERS_STATS_DELAY); return 0; @@ -7136,8 +7133,7 @@ static int mvpp2_stop(struct net_device *dev) mvpp2_cleanup_rxqs(port); mvpp2_cleanup_txqs(port); - cancel_delayed_work_sync(&priv->stats_work); - flush_workqueue(priv->stats_queue); + cancel_delayed_work_sync(&port->stats_work); return 0; } @@ -7889,6 +7885,9 @@ static int mvpp2_port_probe(struct platform_device *pdev, goto err_free_stats; } + mutex_init(&port->gather_stats_lock); + INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics); + mvpp2_port_copy_mac_addr(dev, priv, port_node, &mac_from); port->tx_ring_size = MVPP2_MAX_TXD; @@ -8356,7 +8355,6 @@ static int mvpp2_probe(struct platform_device *pdev) * smallest packets (64B) will overflow a 32-bit counter in less than * 30 seconds. Then, use a workqueue to fill 64-bit counters. */ - mutex_init(&priv->gather_stats_lock); snprintf(priv->queue_name, sizeof(priv->queue_name), "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev), priv->port_count > 1 ? "+" : ""); @@ -8366,8 +8364,6 @@ static int mvpp2_probe(struct platform_device *pdev) goto err_mg_clk; } - INIT_DELAYED_WORK(&priv->stats_work, mvpp2_gather_hw_statistics); - platform_set_drvdata(pdev, priv); return 0; @@ -8389,12 +8385,14 @@ static int mvpp2_remove(struct platform_device *pdev) struct device_node *port_node; int i = 0; + flush_workqueue(priv->stats_queue); destroy_workqueue(priv->stats_queue); - mutex_destroy(&priv->gather_stats_lock); for_each_available_child_of_node(dn, port_node) { - if (priv->port_list[i]) + if (priv->port_list[i]) { + mutex_destroy(&priv->port_list[i]->gather_stats_lock); mvpp2_port_remove(priv->port_list[i]); + } i++; } -- cgit v1.2.3 From 9ce981401cce7852542cab267702b6e89f37a4b8 Mon Sep 17 00:00:00 2001 From: Michael Grzeschik Date: Wed, 8 Nov 2017 09:56:34 +0100 Subject: net: macb: add of_phy_deregister_fixed_link to error paths We add a call to of_phy_deregister_fixed_link() to all associated error paths to clean up memory. Signed-off-by: Michael Grzeschik Acked-by: Nicolas Ferre Signed-off-by: David S.
Miller --- drivers/net/ethernet/cadence/macb_main.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 5dafcde67e45..cc3f36a5c6e1 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -611,6 +611,8 @@ static int macb_mii_init(struct macb *bp) err_out_unregister_bus: mdiobus_unregister(bp->mii_bus); err_out_free_mdiobus: + if (np && of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); mdiobus_free(bp->mii_bus); err_out: return err; @@ -3550,6 +3552,8 @@ static int macb_probe(struct platform_device *pdev) err_out_unregister_mdio: phy_disconnect(dev->phydev); mdiobus_unregister(bp->mii_bus); + if (np && of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); mdiobus_free(bp->mii_bus); /* Shutdown the PHY if there is a GPIO reset */ @@ -3572,6 +3576,7 @@ static int macb_remove(struct platform_device *pdev) { struct net_device *dev; struct macb *bp; + struct device_node *np = pdev->dev.of_node; dev = platform_get_drvdata(pdev); @@ -3580,6 +3585,8 @@ static int macb_remove(struct platform_device *pdev) if (dev->phydev) phy_disconnect(dev->phydev); mdiobus_unregister(bp->mii_bus); + if (np && of_phy_is_fixed_link(np)) + of_phy_deregister_fixed_link(np); dev->phydev = NULL; mdiobus_free(bp->mii_bus); -- cgit v1.2.3 From 66ee6a06e620740b706b8dbde161492f6a405b26 Mon Sep 17 00:00:00 2001 From: Michael Grzeschik Date: Wed, 8 Nov 2017 09:56:35 +0100 Subject: net: macb: add of_node_put to error paths We add a call to of_node_put(bp->phy_node) to all associated error paths to clean up memory. Signed-off-by: Michael Grzeschik Acked-by: Nicolas Ferre Signed-off-by: David S. Miller --- drivers/net/ethernet/cadence/macb_main.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index cc3f36a5c6e1..72a67f74b97b 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -611,6 +611,7 @@ static int macb_mii_init(struct macb *bp) err_out_unregister_bus: mdiobus_unregister(bp->mii_bus); err_out_free_mdiobus: + of_node_put(bp->phy_node); if (np && of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); mdiobus_free(bp->mii_bus); @@ -3552,6 +3553,7 @@ static int macb_probe(struct platform_device *pdev) err_out_unregister_mdio: phy_disconnect(dev->phydev); mdiobus_unregister(bp->mii_bus); + of_node_put(bp->phy_node); if (np && of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); mdiobus_free(bp->mii_bus); -- cgit v1.2.3 From 37798d0211315d60d92452eb54b22af199cce11d Mon Sep 17 00:00:00 2001 From: Nathan Fontenot Date: Wed, 8 Nov 2017 11:23:56 -0600 Subject: ibmvnic: Add vnic client data to login buffer Update the login buffer to include client data for the vnic driver; this includes the OS name, LPAR name, and device name. This update makes the information available in the VIOS. Signed-off-by: Nathan Fontenot Signed-off-by: David S.
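Miller

For reference, the client data added below is a packed type/length/name TLV list; a minimal sketch (assuming the vnic_login_client_data layout from this patch; the walker helper itself is hypothetical, mirroring the pointer arithmetic in vnic_add_client_data()) of stepping from one entry to the next:

/* advance past one TLV entry: the header plus 'len' bytes of name */
static struct vnic_login_client_data *
vlcd_next(struct vnic_login_client_data *vlcd)
{
	return (struct vnic_login_client_data *)
		((char *)&vlcd->name + be16_to_cpu(vlcd->len));
}
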
--- drivers/net/ethernet/ibm/ibmvnic.c | 68 ++++++++++++++++++++++++++++++++++++-- drivers/net/ethernet/ibm/ibmvnic.h | 2 ++ 2 files changed, 68 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index d0cff2807d0b..b918bc2f2e4f 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -75,6 +75,7 @@ #include #include #include +#include <linux/utsname.h> #include "ibmvnic.h" @@ -2813,6 +2814,55 @@ static int send_version_xchg(struct ibmvnic_adapter *adapter) return ibmvnic_send_crq(adapter, &crq); } +struct vnic_login_client_data { + u8 type; + __be16 len; + char name; +} __packed; + +static int vnic_client_data_len(struct ibmvnic_adapter *adapter) +{ + int len; + + /* Calculate the amount of buffer space needed for the + * vnic client data in the login buffer. There are four entries, + * OS name, LPAR name, device name, and a null last entry. + */ + len = 4 * sizeof(struct vnic_login_client_data); + len += 6; /* "Linux" plus NULL */ + len += strlen(utsname()->nodename) + 1; + len += strlen(adapter->netdev->name) + 1; + + return len; +} + +static void vnic_add_client_data(struct ibmvnic_adapter *adapter, + struct vnic_login_client_data *vlcd) +{ + const char *os_name = "Linux"; + int len; + + /* Type 1 - LPAR OS */ + vlcd->type = 1; + len = strlen(os_name) + 1; + vlcd->len = cpu_to_be16(len); + strncpy(&vlcd->name, os_name, len); + vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len); + + /* Type 2 - LPAR name */ + vlcd->type = 2; + len = strlen(utsname()->nodename) + 1; + vlcd->len = cpu_to_be16(len); + strncpy(&vlcd->name, utsname()->nodename, len); + vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len); + + /* Type 3 - device name */ + vlcd->type = 3; + len = strlen(adapter->netdev->name) + 1; + vlcd->len = cpu_to_be16(len); + strncpy(&vlcd->name, adapter->netdev->name, len); +} + static void send_login(struct ibmvnic_adapter *adapter) { struct ibmvnic_login_rsp_buffer *login_rsp_buffer; @@ -2825,13 +2875,18 @@ static void send_login(struct ibmvnic_adapter *adapter) size_t buffer_size; __be64 *tx_list_p; __be64 *rx_list_p; + int client_data_len; + struct vnic_login_client_data *vlcd; int i; + client_data_len = vnic_client_data_len(adapter); + buffer_size = sizeof(struct ibmvnic_login_buffer) + - sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues); + sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) + + client_data_len; - login_buffer = kmalloc(buffer_size, GFP_ATOMIC); + login_buffer = kzalloc(buffer_size, GFP_ATOMIC); if (!login_buffer) goto buf_alloc_failed; @@ -2898,6 +2953,15 @@ static void send_login(struct ibmvnic_adapter *adapter) } } + /* Insert vNIC login client data */ + vlcd = (struct vnic_login_client_data *) + ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues)); + login_buffer->client_data_offset = + cpu_to_be32((char *)vlcd - (char *)login_buffer); + login_buffer->client_data_len = cpu_to_be32(client_data_len); + + vnic_add_client_data(adapter, vlcd); + netdev_dbg(adapter->netdev, "Login Buffer:\n"); for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) { netdev_dbg(adapter->netdev, "%016lx\n", diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 4670af80d612..8ed829c5b026 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -57,6 +57,8 @@ struct ibmvnic_login_buffer { __be32 off_rxcomp_subcrqs; __be32
login_rsp_ioba; __be32 login_rsp_len; + __be32 client_data_offset; + __be32 client_data_len; } __packed __aligned(8); struct ibmvnic_login_rsp_buffer { -- cgit v1.2.3 From d9b9c0e027c93160c94dae07b6d29acc5cdd6d54 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 8 Nov 2017 11:24:57 -0600 Subject: net: ethernet: bgmac: mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 1397972 Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bgmac-platform.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index d937083db9a4..894eda5b13cf 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -131,6 +131,7 @@ static void bgmac_nicpm_speed_set(struct net_device *net_dev) switch (bgmac->net_dev->phydev->speed) { default: netdev_err(net_dev, "Unsupported speed. Defaulting to 1000Mb\n"); + /* fall through */ case SPEED_1000: val |= NICPM_IOMUX_CTRL_SPD_1000M << NICPM_IOMUX_CTRL_SPD_SHIFT; break; -- cgit v1.2.3 From e4ec1384132ead18e972f1180e958aa0b69abd11 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 8 Nov 2017 11:57:13 -0600 Subject: fsl/fman_port: mark expected switch fall-throughs In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 1397960 Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fman/fman_port.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index 1789b206be58..6552d68ea6e1 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -1339,8 +1339,10 @@ int fman_port_config(struct fman_port *port, struct fman_port_params *params) switch (port->port_type) { case FMAN_PORT_TYPE_RX: set_rx_dflt_cfg(port, params); + /* fall through */ case FMAN_PORT_TYPE_TX: set_tx_dflt_cfg(port, params, &port->dts_params); + /* fall through */ default: set_dflt_cfg(port, params); } -- cgit v1.2.3 From 75d28f461ed700fe527312cc0a8b5de86d1c09c1 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 8 Nov 2017 21:44:38 -0600 Subject: net: 8390: pcnet_cs: mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114891 Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/8390/pcnet_cs.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c index eae9827035dc..bcad4a7fac9f 100644 --- a/drivers/net/ethernet/8390/pcnet_cs.c +++ b/drivers/net/ethernet/8390/pcnet_cs.c @@ -1107,6 +1107,7 @@ static int ei_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) switch (cmd) { case SIOCGMIIPHY: data->phy_id = info->phy_id; + /* fall through */ case SIOCGMIIREG: /* Read MII PHY register. 
*/ data->val_out = mdio_read(mii_addr, data->phy_id, data->reg_num & 0x1f); return 0; -- cgit v1.2.3 From 0aa3b413f68dfe4c883c8164d152a1a03ce789bf Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Wed, 8 Nov 2017 21:49:33 -0600 Subject: net: 3com: 3c574_cs: mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 114888 Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/3com/3c574_cs.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c index 48bc7fa0258c..3044a6f35f04 100644 --- a/drivers/net/ethernet/3com/3c574_cs.c +++ b/drivers/net/ethernet/3com/3c574_cs.c @@ -1046,6 +1046,7 @@ static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) switch(cmd) { case SIOCGMIIPHY: /* Get the address of the PHY in use. */ data->phy_id = phy; + /* fall through */ case SIOCGMIIREG: /* Read the specified MII register. */ { int saved_window; -- cgit v1.2.3 From 98b07e3ed019cbea5ad049df3892957d5fa90b9e Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 9 Nov 2017 07:52:15 +0000 Subject: qlge: remove duplicated assignment to mbcp The assignment to mbcp is identical to the initialized value assigned to mbcp at its declaration a few lines earlier, hence we can remove the second, redundant assignment. Cleans up clang warning: drivers/net/ethernet/qlogic/qlge/qlge_mpi.c:209:22: warning: Value stored to 'mbcp' during its initialization is never read Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlge/qlge_mpi.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c index 384c8bc874f3..4be65d6761b3 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_mpi.c @@ -213,7 +213,6 @@ static int ql_idc_req_aen(struct ql_adapter *qdev) /* Get the status data and start up a thread to * handle the request. */ - mbcp = &qdev->idc_mbc; mbcp->out_count = 4; status = ql_get_mb_sts(qdev, mbcp); if (status) { -- cgit v1.2.3 From 492d070f2495d4b200124ed44a35ab3d8f74ac93 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 9 Nov 2017 08:01:22 +0000 Subject: net: sfc: remove redundant variable start The variable start is assigned but never read, hence it is redundant and can be removed. Cleans up clang warning: drivers/net/ethernet/sfc/ptp.c:655:2: warning: Value stored to 'start' is never read Signed-off-by: Colin Ian King Acked-by: Bert Kenward Signed-off-by: David S.
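Miller

The dead-store warnings cited in these cleanups come from clang's static analyzer; a sketch of one way to reproduce them on a kernel tree (the exact invocation is an assumption, not part of the patch):

# scan-build wraps the build and reports dead stores such as
# "Value stored to 'start' is never read"
scan-build make drivers/net/ethernet/sfc/ptp.o
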
--- drivers/net/ethernet/sfc/ptp.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 60cdb97f58e2..4f54245df0ec 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -648,11 +648,9 @@ static void efx_ptp_send_times(struct efx_nic *efx, struct pps_event_time now; struct timespec64 limit; struct efx_ptp_data *ptp = efx->ptp_data; - struct timespec64 start; int *mc_running = ptp->start.addr; pps_get_ts(&now); - start = now.ts_real; limit = now.ts_real; timespec64_add_ns(&limit, SYNCHRONISE_PERIOD_NS); -- cgit v1.2.3 From 3d67a5075295982fb055be6a5d5c78b0e0be3591 Mon Sep 17 00:00:00 2001 From: Aleksey Makarov Date: Thu, 9 Nov 2017 14:58:57 +0300 Subject: net: thunderx: fix double free error This patch fixes an error in memory allocation/freeing in the ThunderX PF driver. I moved the allocation to the probe() function and made it managed. From Colin's email: While running static analysis on linux-next with CoverityScan I found 3 double free errors in the Cavium thunder driver. The issue occurs on the err_disable_device: label of function nic_probe when nic_free_lmacmem(nic) is called and a double free occurs on nic->duplex, nic->link and nic->speed. This occurs when nic_init_hw() fails: /* Initialize hardware */ err = nic_init_hw(nic); if (err) goto err_release_regions; nic_init_hw() calls nic_get_hw_info() and this calls nic_free_lmacmem() if any of the allocations fail. This free'ing occurs again by the call to nic_free_lmacmem() on the err_release_regions exit path in nic_probe(). Reported-by: Colin Ian King Signed-off-by: Aleksey Makarov Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/thunder/nic_main.c | 82 ++++++++++---------- 1 file changed, 30 insertions(+), 52 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 988c06a28e5e..8f1dd55b3e08 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -361,17 +361,8 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic) } } -static void nic_free_lmacmem(struct nicpf *nic) +static void nic_get_hw_info(struct nicpf *nic) { - kfree(nic->vf_lmac_map); - kfree(nic->link); - kfree(nic->duplex); - kfree(nic->speed); -} - -static int nic_get_hw_info(struct nicpf *nic) -{ - u8 max_lmac; u16 sdevid; struct hw_info *hw = nic->hw; @@ -419,41 +410,16 @@ static int nic_get_hw_info(struct nicpf *nic) break; } hw->tl4_cnt = MAX_QUEUES_PER_QSET * pci_sriov_get_totalvfs(nic->pdev); - - /* Allocate memory for LMAC tracking elements */ - max_lmac = hw->bgx_cnt * MAX_LMAC_PER_BGX; - nic->vf_lmac_map = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL); - if (!nic->vf_lmac_map) - goto error; - nic->link = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL); - if (!nic->link) - goto error; - nic->duplex = kmalloc_array(max_lmac, sizeof(u8), GFP_KERNEL); - if (!nic->duplex) - goto error; - nic->speed = kmalloc_array(max_lmac, sizeof(u32), GFP_KERNEL); - if (!nic->speed) - goto error; - return 0; - -error: - nic_free_lmacmem(nic); - return -ENOMEM; } #define BGX0_BLOCK 8 #define BGX1_BLOCK 9 -static int nic_init_hw(struct nicpf *nic) +static void nic_init_hw(struct nicpf *nic) { - int i, err; + int i; u64 cqm_cfg; - /* Get HW capability info */ - err = nic_get_hw_info(nic); - if (err) - return err; - /* Enable NIC HW block */
nic_reg_write(nic, NIC_PF_CFG, 0x3); @@ -498,8 +464,6 @@ static int nic_init_hw(struct nicpf *nic) cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG); if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL) nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL); - - return 0; } /* Channel parse index configuration */ @@ -1269,6 +1233,7 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct device *dev = &pdev->dev; struct nicpf *nic; + u8 max_lmac; int err; BUILD_BUG_ON(sizeof(union nic_mbx) > 16); @@ -1278,10 +1243,8 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENOMEM; nic->hw = devm_kzalloc(dev, sizeof(struct hw_info), GFP_KERNEL); - if (!nic->hw) { - devm_kfree(dev, nic); + if (!nic->hw) return -ENOMEM; - } pci_set_drvdata(pdev, nic); @@ -1322,11 +1285,33 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) nic->node = nic_get_node_id(pdev); - /* Initialize hardware */ - err = nic_init_hw(nic); - if (err) + /* Get HW capability info */ + nic_get_hw_info(nic); + + /* Allocate memory for LMAC tracking elements */ + err = -ENOMEM; + max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX; + + nic->vf_lmac_map = devm_kmalloc_array(dev, max_lmac, sizeof(u8), + GFP_KERNEL); + if (!nic->vf_lmac_map) + goto err_release_regions; + + nic->link = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL); + if (!nic->link) goto err_release_regions; + nic->duplex = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL); + if (!nic->duplex) + goto err_release_regions; + + nic->speed = devm_kmalloc_array(dev, max_lmac, sizeof(u32), GFP_KERNEL); + if (!nic->speed) + goto err_release_regions; + + /* Initialize hardware */ + nic_init_hw(nic); + nic_set_lmac_vf_mapping(nic); /* Register interrupts */ @@ -1360,9 +1345,6 @@ err_unregister_interrupts: err_release_regions: pci_release_regions(pdev); err_disable_device: - nic_free_lmacmem(nic); - devm_kfree(dev, nic->hw); - devm_kfree(dev, nic); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); return err; @@ -1384,10 +1366,6 @@ static void nic_remove(struct pci_dev *pdev) nic_unregister_interrupts(nic); pci_release_regions(pdev); - nic_free_lmacmem(nic); - devm_kfree(&pdev->dev, nic->hw); - devm_kfree(&pdev->dev, nic); - pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } -- cgit v1.2.3 From 940c9c458866725e0ade96d5c1c7dbe5fcf1cf85 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Fri, 10 Nov 2017 13:03:37 +0530 Subject: cxgb4: collect vpd info directly from hardware Collect vpd information directly from hardware instead of software adapter context. Move EEPROM physical address to virtual address translation logic to t4_hw.c and update relevant files. Fixes: 6f92a6544f1a ("cxgb4: collect hardware misc dumps") Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
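Miller

The physical-to-virtual translation being moved is easiest to see with worked values; a sketch (taking fn = 1 and sz = 1024 for illustration, so A = fn * sz = 1024):

/* phys 0x000 -> virt 0x7c00 (31K)   : [0..1K) maps to [31K..32K)
 * phys 0x400 -> virt 0x7800 (31K-A) : [1K..1K+A) maps to [31K-A..31K)
 * phys 0x800 -> virt 0x0000         : [1K+A..ES) maps to [0..ES-A-1K)
 */
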
--- drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 6 ++ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 77 ++++++++++++++++++---- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1 + drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 33 +--------- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 29 ++++++++ 5 files changed, 104 insertions(+), 42 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index 239c43084e77..1de1d811fde3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -166,6 +166,12 @@ struct cudbg_mps_tcam { u8 reserved[2]; }; +#define CUDBG_VPD_PF_SIZE 0x800 +#define CUDBG_SCFG_VER_ADDR 0x06 +#define CUDBG_SCFG_VER_LEN 4 +#define CUDBG_VPD_VER_ADDR 0x18c7 +#define CUDBG_VPD_VER_LEN 2 + struct cudbg_vpd_data { u8 sn[SERNUM_LEN + 1]; u8 bn[PN_LEN + 1]; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index fe3a9ef0ec3f..32c9858da110 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -68,6 +68,22 @@ struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i) (sizeof(struct cudbg_entity_hdr) * (i - 1))); } +static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len, + void *dest) +{ + int vaddr, rc; + + vaddr = t4_eeprom_ptov(addr, padap->pf, EEPROMPFSIZE); + if (vaddr < 0) + return vaddr; + + rc = pci_read_vpd(padap->pdev, vaddr, len, dest); + if (rc < 0) + return rc; + + return 0; +} + int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) @@ -1289,8 +1305,47 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init, { struct adapter *padap = pdbg_init->adap; struct cudbg_buffer temp_buff = { 0 }; + char vpd_str[CUDBG_VPD_VER_LEN + 1]; + u32 scfg_vers, vpd_vers, fw_vers; struct cudbg_vpd_data *vpd_data; - int rc; + struct vpd_params vpd = { 0 }; + int rc, ret; + + rc = t4_get_raw_vpd_params(padap, &vpd); + if (rc) + return rc; + + rc = t4_get_fw_version(padap, &fw_vers); + if (rc) + return rc; + + /* Serial Configuration Version is located beyond the PF's vpd size. + * Temporarily give access to entire EEPROM to get it.
+ */ + rc = pci_set_vpd_size(padap->pdev, EEPROMVSIZE); + if (rc < 0) + return rc; + + ret = cudbg_read_vpd_reg(padap, CUDBG_SCFG_VER_ADDR, CUDBG_SCFG_VER_LEN, + &scfg_vers); + + /* Restore back to original PF's vpd size */ + rc = pci_set_vpd_size(padap->pdev, CUDBG_VPD_PF_SIZE); + if (rc < 0) + return rc; + + if (ret) + return ret; + + rc = cudbg_read_vpd_reg(padap, CUDBG_VPD_VER_ADDR, CUDBG_VPD_VER_LEN, + vpd_str); + if (rc) + return rc; + + vpd_str[CUDBG_VPD_VER_LEN] = '\0'; + rc = kstrtouint(vpd_str, 0, &vpd_vers); + if (rc) + return rc; rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_vpd_data), &temp_buff); @@ -1298,16 +1353,16 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init, return rc; vpd_data = (struct cudbg_vpd_data *)temp_buff.data; - memcpy(vpd_data->sn, padap->params.vpd.sn, SERNUM_LEN + 1); - memcpy(vpd_data->bn, padap->params.vpd.pn, PN_LEN + 1); - memcpy(vpd_data->na, padap->params.vpd.na, MACADDR_LEN + 1); - memcpy(vpd_data->mn, padap->params.vpd.id, ID_LEN + 1); - vpd_data->scfg_vers = padap->params.scfg_vers; - vpd_data->vpd_vers = padap->params.vpd_vers; - vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(padap->params.fw_vers); - vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(padap->params.fw_vers); - vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(padap->params.fw_vers); - vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(padap->params.fw_vers); + memcpy(vpd_data->sn, vpd.sn, SERNUM_LEN + 1); + memcpy(vpd_data->bn, vpd.pn, PN_LEN + 1); + memcpy(vpd_data->na, vpd.na, MACADDR_LEN + 1); + memcpy(vpd_data->mn, vpd.id, ID_LEN + 1); + vpd_data->scfg_vers = scfg_vers; + vpd_data->vpd_vers = vpd_vers; + vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(fw_vers); + vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers); + vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers); + vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers); cudbg_write_and_release_buff(&temp_buff, dbg_buff); return rc; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 0c83ceb5a1a6..0de1a4b2223e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1459,6 +1459,7 @@ static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr, unsigned int t4_get_regs_len(struct adapter *adapter); void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size); +int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz); int t4_seeprom_wp(struct adapter *adapter, bool enable); int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p); int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 1b7f6b9ccc8b..eb338212f5af 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -1064,40 +1064,11 @@ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) return 0; } -/** - * eeprom_ptov - translate a physical EEPROM address to virtual - * @phys_addr: the physical EEPROM address - * @fn: the PCI function number - * @sz: size of function-specific area - * - * Translate a physical EEPROM address to virtual. The first 1K is - * accessed through virtual addresses starting at 31K, the rest is - * accessed through virtual addresses starting at 0. 
- * - * The mapping is as follows: - * [0..1K) -> [31K..32K) - * [1K..1K+A) -> [31K-A..31K) - * [1K+A..ES) -> [0..ES-A-1K) - * - * where A = @fn * @sz, and ES = EEPROM size. - */ -static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz) -{ - fn *= sz; - if (phys_addr < 1024) - return phys_addr + (31 << 10); - if (phys_addr < 1024 + fn) - return 31744 - fn + phys_addr - 1024; - if (phys_addr < EEPROMSIZE) - return phys_addr - 1024 - fn; - return -EINVAL; -} - /* The next two routines implement eeprom read/write from physical addresses. */ static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) { - int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE); + int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE); if (vaddr >= 0) vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v); @@ -1106,7 +1077,7 @@ static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v) { - int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE); + int vaddr = t4_eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE); if (vaddr >= 0) vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index efe9d3a20135..b4fad081ac78 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2638,6 +2638,35 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) #define VPD_LEN 1024 #define CHELSIO_VPD_UNIQUE_ID 0x82 +/** + * t4_eeprom_ptov - translate a physical EEPROM address to virtual + * @phys_addr: the physical EEPROM address + * @fn: the PCI function number + * @sz: size of function-specific area + * + * Translate a physical EEPROM address to virtual. The first 1K is + * accessed through virtual addresses starting at 31K, the rest is + * accessed through virtual addresses starting at 0. + * + * The mapping is as follows: + * [0..1K) -> [31K..32K) + * [1K..1K+A) -> [31K-A..31K) + * [1K+A..ES) -> [0..ES-A-1K) + * + * where A = @fn * @sz, and ES = EEPROM size. + */ +int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz) +{ + fn *= sz; + if (phys_addr < 1024) + return phys_addr + (31 << 10); + if (phys_addr < 1024 + fn) + return 31744 - fn + phys_addr - 1024; + if (phys_addr < EEPROMSIZE) + return phys_addr - 1024 - fn; + return -EINVAL; +} + /** * t4_seeprom_wp - enable/disable EEPROM write protection * @adapter: the adapter -- cgit v1.2.3 From 4d215ae730786f51b108873227c67dc7815dbdef Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 10 Nov 2017 11:33:25 -0800 Subject: net: bgmac: Pad packets to a minimum size In preparation for enabling Broadcom tags with b53, pad packets to a minimum size of 64 bytes (sans FCS) in order for the Broadcom switch to accept ingressing frames. Without this, we would typically be able to DHCP, but not resolve with ARP because packets are too small and get rejected by the switch. Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. 
--- drivers/net/ethernet/broadcom/bgmac.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 48d672b204a4..1d96cd594ade 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -15,6 +15,7 @@ #include #include #include +#include <net/dsa.h> #include "bgmac.h" static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask, @@ -127,6 +128,8 @@ bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring, dma_desc->ctl1 = cpu_to_le32(ctl1); } +#define ENET_BRCM_TAG_LEN 4 + static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, struct bgmac_dma_ring *ring, struct sk_buff *skb) @@ -139,6 +142,18 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac, u32 flags; int i; + /* The Ethernet switch we are interfaced with needs packets to be at + * least 64 bytes (including FCS) otherwise they will be discarded when + * they enter the switch port logic. When Broadcom tags are enabled, we + * need to make sure that packets are at least 68 bytes + * (including FCS and tag) because the length verification is done after + * the Broadcom tag is stripped off the ingress packet. + */ + if (netdev_uses_dsa(net_dev)) { + if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) + goto err_stats; + } + if (skb->len > BGMAC_DESC_CTL1_LEN) { netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len); goto err_drop; @@ -225,6 +240,7 @@ err_dma_head: err_drop: dev_kfree_skb(skb); +err_stats: net_dev->stats.tx_dropped++; net_dev->stats.tx_errors++; return NETDEV_TX_OK; -- cgit v1.2.3 From 07842561a873e8f25272bcebe7f6c7f1af7c1a2d Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 8 Nov 2017 13:23:23 +0000 Subject: net: realtek: r8169: remove redundant assignment to giga_ctrl The variable giga_ctrl is assigned zero; however, this value is never read and hence the assignment is redundant, so remove it. Cleans up clang warning: drivers/net/ethernet/realtek/r8169.c:1978:3: warning: Value stored to 'giga_ctrl' is never read Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/realtek/r8169.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index fd218fd9ef3c..dcb8c39382e7 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1982,8 +1982,6 @@ static int rtl8169_set_speed_xmii(struct net_device *dev, rtl_writephy(tp, MII_ADVERTISE, auto_nego); rtl_writephy(tp, MII_CTRL1000, giga_ctrl); } else { - giga_ctrl = 0; - if (speed == SPEED_10) bmcr = 0; else if (speed == SPEED_100) -- cgit v1.2.3 From a1b8714593b67d0f783a9d82ed7e5800d02776bb Mon Sep 17 00:00:00 2001 From: Slava Shwartsman Date: Fri, 10 Nov 2017 09:10:29 +0200 Subject: net/mlx4: Use Kconfig flag to remove support of old gen2 Mellanox devices Since Mellanox's focus is on newer adapters, we would like to have the ability to disable the support for old gen2 adapters. This can be done by turning off the MLX4_CORE_GEN2 Kconfig flag. We keep it turned on by default. Signed-off-by: Slava Shwartsman Signed-off-by: Tariq Toukan Signed-off-by: David S.
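Miller

A usage sketch: with the new option, gen2 support can be compiled out via a .config fragment like the following (illustrative; the option defaults to y, so existing configurations keep the old behavior):

CONFIG_MLX4_CORE=m
# CONFIG_MLX4_CORE_GEN2 is not set
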
--- drivers/net/ethernet/mellanox/mlx4/Kconfig | 8 ++++++++ drivers/net/ethernet/mellanox/mlx4/main.c | 2 ++ 2 files changed, 10 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig index 22b1cc012bc9..36054e6fb9d3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig @@ -38,3 +38,11 @@ config MLX4_DEBUG mlx4_core driver. The output can be turned on via the debug_level module parameter (which can also be set after the driver is loaded through sysfs). + +config MLX4_CORE_GEN2 + bool "Support for old gen2 Mellanox PCI IDs" if (MLX4_CORE) + depends on MLX4_CORE + default y + ---help--- + Say Y here if you want to use old gen2 Mellanox devices in the + driver. diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index e61c99ef741d..4d84cab77105 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -4066,6 +4066,7 @@ int mlx4_restart_one(struct pci_dev *pdev) #define MLX_GN(id) { PCI_VDEVICE(MELLANOX, id), 0 } static const struct pci_device_id mlx4_pci_table[] = { +#ifdef CONFIG_MLX4_CORE_GEN2 /* MT25408 "Hermon" */ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_SDR), /* SDR */ MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR), /* DDR */ @@ -4085,6 +4086,7 @@ static const struct pci_device_id mlx4_pci_table[] = { MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX2), /* MT25400 Family [ConnectX-2] */ MLX_VF(0x1002), /* Virtual Function */ +#endif /* CONFIG_MLX4_CORE_GEN2 */ /* MT27500 Family [ConnectX-3] */ MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3), MLX_VF(0x1004), /* Virtual Function */ -- cgit v1.2.3 From 0d728b844c2dd8dd3875ed304eee43967c5d14f6 Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Fri, 10 Nov 2017 21:10:00 -0500 Subject: forcedeth: remove redundant assignments in xmit In the xmit path, prev_tx and prev_tx_ctx are reassigned on every loop iteration even though only their final values are used. It is enough to set these variables once, after the loops finish. After long-running tests, throughput performance is better than before. CC: Srinivas Eeda CC: Joe Jin CC: Junxiao Bi Signed-off-by: Zhu Yanjun Signed-off-by: David S. Miller --- drivers/net/ethernet/nvidia/forcedeth.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 31a943860f32..ac8439ceea10 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -2226,8 +2226,6 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) /* setup the header buffer */ do { - prev_tx = put_tx; - prev_tx_ctx = np->put_tx_ctx; bcnt = (size > NV_TX2_TSO_MAX_SIZE) ?
NV_TX2_TSO_MAX_SIZE : size; np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev, skb->data + offset, bcnt, @@ -2262,8 +2260,6 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) offset = 0; do { - prev_tx = put_tx; - prev_tx_ctx = np->put_tx_ctx; if (!start_tx_ctx) start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx; @@ -2304,6 +2300,16 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev) } while (frag_size); } + if (unlikely(put_tx == np->first_tx.orig)) + prev_tx = np->last_tx.orig; + else + prev_tx = put_tx - 1; + + if (unlikely(np->put_tx_ctx == np->first_tx_ctx)) + prev_tx_ctx = np->last_tx_ctx; + else + prev_tx_ctx = np->put_tx_ctx - 1; + /* set last fragment flag */ prev_tx->flaglen |= cpu_to_le32(tx_flags_extra); @@ -2377,8 +2383,6 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, /* setup the header buffer */ do { - prev_tx = put_tx; - prev_tx_ctx = np->put_tx_ctx; bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size; np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev, skb->data + offset, bcnt, @@ -2414,8 +2418,6 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, offset = 0; do { - prev_tx = put_tx; - prev_tx_ctx = np->put_tx_ctx; bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size; if (!start_tx_ctx) start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx; @@ -2456,6 +2458,16 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb, } while (frag_size); } + if (unlikely(put_tx == np->first_tx.ex)) + prev_tx = np->last_tx.ex; + else + prev_tx = put_tx - 1; + + if (unlikely(np->put_tx_ctx == np->first_tx_ctx)) + prev_tx_ctx = np->last_tx_ctx; + else + prev_tx_ctx = np->put_tx_ctx - 1; + /* set last fragment flag */ prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET); -- cgit v1.2.3 From 03e98b9118bed1960993466f4d64f9f5a9146b66 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Sat, 11 Nov 2017 19:48:15 +0530 Subject: cxgb4: collect LE-TCAM dump Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 30 ++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 1 + drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 175 ++++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h | 7 + drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 7 + drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 41 +++++ 6 files changed, 261 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index 1de1d811fde3..f99db7b283fc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -185,6 +185,36 @@ struct cudbg_vpd_data { u32 vpd_vers; }; +#define CUDBG_MAX_TCAM_TID 0x800 + +enum cudbg_le_entry_types { + LE_ET_UNKNOWN = 0, + LE_ET_TCAM_CON = 1, + LE_ET_TCAM_SERVER = 2, + LE_ET_TCAM_FILTER = 3, + LE_ET_TCAM_CLIP = 4, + LE_ET_TCAM_ROUTING = 5, + LE_ET_HASH_CON = 6, + LE_ET_INVALID_TID = 8, +}; + +struct cudbg_tcam { + u32 filter_start; + u32 server_start; + u32 clip_start; + u32 routing_start; + u32 tid_hash_base; + u32 max_tid; +}; + +struct cudbg_tid_data { + u32 tid; + u32 dbig_cmd; + u32 dbig_conf; + u32 dbig_rsp_stat; + u32 data[NUM_LE_DB_DBGI_RSP_DATA_INSTANCES]; +}; + #define CUDBG_NUM_ULPTX 11 #define CUDBG_NUM_ULPTX_READ 512 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index e484c514e9ae..4e5d189eae62 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -65,6 +65,7 @@ enum cudbg_dbg_entity_type { CUDBG_TID_INFO = 54, CUDBG_MPS_TCAM = 57, CUDBG_VPD_DATA = 58, + CUDBG_LE_TCAM = 59, CUDBG_CCTRL = 60, CUDBG_MA_INDIRECT = 61, CUDBG_ULPTX_LA = 62, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index 32c9858da110..dd7e26be98cf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -1367,6 +1367,181 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init, return rc; } +static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid, + struct cudbg_tid_data *tid_data) +{ + struct adapter *padap = pdbg_init->adap; + int i, cmd_retry = 8; + u32 val; + + /* Fill REQ_DATA regs with 0's */ + for (i = 0; i < NUM_LE_DB_DBGI_REQ_DATA_INSTANCES; i++) + t4_write_reg(padap, LE_DB_DBGI_REQ_DATA_A + (i << 2), 0); + + /* Write DBIG command */ + val = DBGICMD_V(4) | DBGITID_V(tid); + t4_write_reg(padap, LE_DB_DBGI_REQ_TCAM_CMD_A, val); + tid_data->dbig_cmd = val; + + val = DBGICMDSTRT_F | DBGICMDMODE_V(1); /* LE mode */ + t4_write_reg(padap, LE_DB_DBGI_CONFIG_A, val); + tid_data->dbig_conf = val; + + /* Poll the DBGICMDBUSY bit */ + val = 1; + while (val) { + val = t4_read_reg(padap, LE_DB_DBGI_CONFIG_A); + val = val & DBGICMDBUSY_F; + cmd_retry--; + if (!cmd_retry) + return CUDBG_SYSTEM_ERROR; + } + + /* Check RESP status */ + val = t4_read_reg(padap, LE_DB_DBGI_RSP_STATUS_A); + tid_data->dbig_rsp_stat = val; + if (!(val & 1)) + return CUDBG_SYSTEM_ERROR; + + /* Read RESP data */ + for (i = 0; i < NUM_LE_DB_DBGI_RSP_DATA_INSTANCES; i++) + tid_data->data[i] = t4_read_reg(padap, + LE_DB_DBGI_RSP_DATA_A + + (i << 2)); + tid_data->tid = tid; + return 0; +} + +static int cudbg_get_le_type(u32 tid, struct cudbg_tcam tcam_region) +{ + int type = LE_ET_UNKNOWN; + + if (tid < tcam_region.server_start) + type = LE_ET_TCAM_CON; + else if (tid < 
tcam_region.filter_start) + type = LE_ET_TCAM_SERVER; + else if (tid < tcam_region.clip_start) + type = LE_ET_TCAM_FILTER; + else if (tid < tcam_region.routing_start) + type = LE_ET_TCAM_CLIP; + else if (tid < tcam_region.tid_hash_base) + type = LE_ET_TCAM_ROUTING; + else if (tid < tcam_region.max_tid) + type = LE_ET_HASH_CON; + else + type = LE_ET_INVALID_TID; + + return type; +} + +static int cudbg_is_ipv6_entry(struct cudbg_tid_data *tid_data, + struct cudbg_tcam tcam_region) +{ + int ipv6 = 0; + int le_type; + + le_type = cudbg_get_le_type(tid_data->tid, tcam_region); + if (tid_data->tid & 1) + return 0; + + if (le_type == LE_ET_HASH_CON) { + ipv6 = tid_data->data[16] & 0x8000; + } else if (le_type == LE_ET_TCAM_CON) { + ipv6 = tid_data->data[16] & 0x8000; + if (ipv6) + ipv6 = tid_data->data[9] == 0x00C00000; + } else { + ipv6 = 0; + } + return ipv6; +} + +void cudbg_fill_le_tcam_info(struct adapter *padap, + struct cudbg_tcam *tcam_region) +{ + u32 value; + + /* Get the LE regions */ + value = t4_read_reg(padap, LE_DB_TID_HASHBASE_A); /* hash base index */ + tcam_region->tid_hash_base = value; + + /* Get routing table index */ + value = t4_read_reg(padap, LE_DB_ROUTING_TABLE_INDEX_A); + tcam_region->routing_start = value; + + /*Get clip table index */ + value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A); + tcam_region->clip_start = value; + + /* Get filter table index */ + value = t4_read_reg(padap, LE_DB_FILTER_TABLE_INDEX_A); + tcam_region->filter_start = value; + + /* Get server table index */ + value = t4_read_reg(padap, LE_DB_SERVER_INDEX_A); + tcam_region->server_start = value; + + /* Check whether hash is enabled and calculate the max tids */ + value = t4_read_reg(padap, LE_DB_CONFIG_A); + if ((value >> HASHEN_S) & 1) { + value = t4_read_reg(padap, LE_DB_HASH_CONFIG_A); + if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) { + tcam_region->max_tid = (value & 0xFFFFF) + + tcam_region->tid_hash_base; + } else { + value = HASHTIDSIZE_G(value); + value = 1 << value; + tcam_region->max_tid = value + + tcam_region->tid_hash_base; + } + } else { /* hash not enabled */ + tcam_region->max_tid = CUDBG_MAX_TCAM_TID; + } +} + +int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct cudbg_tcam tcam_region = { 0 }; + struct cudbg_tid_data *tid_data; + u32 bytes = 0; + int rc, size; + u32 i; + + cudbg_fill_le_tcam_info(padap, &tcam_region); + + size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid; + size += sizeof(struct cudbg_tcam); + rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + if (rc) + return rc; + + memcpy(temp_buff.data, &tcam_region, sizeof(struct cudbg_tcam)); + bytes = sizeof(struct cudbg_tcam); + tid_data = (struct cudbg_tid_data *)(temp_buff.data + bytes); + /* read all tid */ + for (i = 0; i < tcam_region.max_tid; ) { + rc = cudbg_read_tid(pdbg_init, i, tid_data); + if (rc) { + cudbg_err->sys_err = rc; + cudbg_put_buff(&temp_buff, dbg_buff); + return rc; + } + + /* ipv6 takes two tids */ + cudbg_is_ipv6_entry(tid_data, tcam_region) ? 
i += 2 : i++; + + tid_data++; + bytes += sizeof(struct cudbg_tid_data); + } + + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + int cudbg_collect_cctrl(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index 230ba88a6a81..ebb2d9907fc9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -129,6 +129,9 @@ int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init, int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int cudbg_collect_cctrl(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); @@ -155,4 +158,8 @@ struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i); void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff, struct cudbg_entity_hdr *entity_hdr); u32 cudbg_cim_obq_size(struct adapter *padap, int qid); + +struct cudbg_tcam; +void cudbg_fill_le_tcam_info(struct adapter *padap, + struct cudbg_tcam *tcam_region); #endif /* __CUDBG_LIB_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 7373617da635..05eb2d2ef592 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -62,6 +62,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { { CUDBG_TID_INFO, cudbg_collect_tid }, { CUDBG_MPS_TCAM, cudbg_collect_mps_tcam }, { CUDBG_VPD_DATA, cudbg_collect_vpd_data }, + { CUDBG_LE_TCAM, cudbg_collect_le_tcam }, { CUDBG_CCTRL, cudbg_collect_cctrl }, { CUDBG_MA_INDIRECT, cudbg_collect_ma_indirect }, { CUDBG_ULPTX_LA, cudbg_collect_ulptx_la }, @@ -72,6 +73,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) { + struct cudbg_tcam tcam_region = { 0 }; u32 value, n = 0, len = 0; switch (entity) { @@ -223,6 +225,11 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) case CUDBG_VPD_DATA: len = sizeof(struct cudbg_vpd_data); break; + case CUDBG_LE_TCAM: + cudbg_fill_le_tcam_info(adap, &tcam_region); + len = sizeof(struct cudbg_tcam) + + sizeof(struct cudbg_tid_data) * tcam_region.max_tid; + break; case CUDBG_CCTRL: len = sizeof(u16) * NMTUS * NCCTRL_WIN; break; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index 623f453bd327..f5576ce004fa 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -65,6 +65,9 @@ #define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) +#define NUM_LE_DB_DBGI_REQ_DATA_INSTANCES 17 +#define NUM_LE_DB_DBGI_RSP_DATA_INSTANCES 17 + #define SGE_PF_KDOORBELL_A 0x0 #define QID_S 15 @@ -2273,6 +2276,35 @@ #define CHNENABLE_V(x) ((x) << CHNENABLE_S) #define CHNENABLE_F CHNENABLE_V(1U) +#define LE_DB_DBGI_CONFIG_A 0x19cf0 + +#define DBGICMDBUSY_S 3 +#define DBGICMDBUSY_V(x) ((x) << DBGICMDBUSY_S) +#define DBGICMDBUSY_F DBGICMDBUSY_V(1U) + +#define DBGICMDSTRT_S 2 +#define DBGICMDSTRT_V(x) ((x) << DBGICMDSTRT_S) +#define DBGICMDSTRT_F DBGICMDSTRT_V(1U) + +#define DBGICMDMODE_S 0 +#define DBGICMDMODE_M 0x3U +#define DBGICMDMODE_V(x) ((x) 
<< DBGICMDMODE_S) + +#define LE_DB_DBGI_REQ_TCAM_CMD_A 0x19cf4 + +#define DBGICMD_S 20 +#define DBGICMD_M 0xfU +#define DBGICMD_V(x) ((x) << DBGICMD_S) + +#define DBGITID_S 0 +#define DBGITID_M 0xfffffU +#define DBGITID_V(x) ((x) << DBGITID_S) + +#define LE_DB_DBGI_REQ_DATA_A 0x19d00 +#define LE_DB_DBGI_RSP_STATUS_A 0x19d94 + +#define LE_DB_DBGI_RSP_DATA_A 0x19da0 + #define PRTENABLE_S 29 #define PRTENABLE_V(x) ((x) << PRTENABLE_S) #define PRTENABLE_F PRTENABLE_V(1U) @@ -2882,11 +2914,20 @@ #define T6_LIPMISS_F T6_LIPMISS_V(1U) #define LE_DB_CONFIG_A 0x19c04 +#define LE_DB_ROUTING_TABLE_INDEX_A 0x19c10 #define LE_DB_ACTIVE_TABLE_START_INDEX_A 0x19c10 +#define LE_DB_FILTER_TABLE_INDEX_A 0x19c14 #define LE_DB_SERVER_INDEX_A 0x19c18 #define LE_DB_SRVR_START_INDEX_A 0x19c18 +#define LE_DB_CLIP_TABLE_INDEX_A 0x19c1c #define LE_DB_ACT_CNT_IPV4_A 0x19c20 #define LE_DB_ACT_CNT_IPV6_A 0x19c24 +#define LE_DB_HASH_CONFIG_A 0x19c28 + +#define HASHTIDSIZE_S 16 +#define HASHTIDSIZE_M 0x3fU +#define HASHTIDSIZE_G(x) (((x) >> HASHTIDSIZE_S) & HASHTIDSIZE_M) + #define LE_DB_HASH_TID_BASE_A 0x19c30 #define LE_DB_HASH_TBL_BASE_ADDR_A 0x19c30 #define LE_DB_INT_CAUSE_A 0x19c3c -- cgit v1.2.3 From 9e5c598c720792e210f83964441ee1c99451e8d1 Mon Sep 17 00:00:00 2001 From: Rahul Lakkireddy Date: Sat, 11 Nov 2017 19:48:16 +0530 Subject: cxgb4: collect SGE queue context dump Collect SGE freelist queue and congestion manager contexts. Signed-off-by: Rahul Lakkireddy Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h | 8 +++ drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h | 1 + drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 78 +++++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h | 4 ++ drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 4 ++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 4 ++ drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 62 ++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 7 ++ drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 27 ++++++++ 9 files changed, 195 insertions(+) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h index f99db7b283fc..605689957496 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h @@ -145,6 +145,14 @@ struct cudbg_tid_info_region_rev1 { u32 reserved[16]; }; +#define CUDBG_MAX_FL_QIDS 1024 + +struct cudbg_ch_cntxt { + u32 cntxt_type; + u32 cntxt_id; + u32 data[SGE_CTXT_SIZE / 4]; +}; + #define CUDBG_MAX_RPLC_SIZE 128 struct cudbg_mps_tcam { diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h index 4e5d189eae62..e10ff1ee62c5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h @@ -63,6 +63,7 @@ enum cudbg_dbg_entity_type { CUDBG_PCIE_INDIRECT = 50, CUDBG_PM_INDIRECT = 51, CUDBG_TID_INFO = 54, + CUDBG_DUMP_CONTEXT = 56, CUDBG_MPS_TCAM = 57, CUDBG_VPD_DATA = 58, CUDBG_LE_TCAM = 59, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c index dd7e26be98cf..d699bf88d18f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c @@ -1115,6 +1115,84 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init, return rc; } +int cudbg_dump_context_size(struct adapter *padap) +{ + u32 value, size; + u8 flq; + + value = 
t4_read_reg(padap, SGE_FLM_CFG_A); + + /* Get number of data freelist queues */ + flq = HDRSTARTFLQ_G(value); + size = CUDBG_MAX_FL_QIDS >> flq; + + /* Add extra space for congestion manager contexts. + * The number of CONM contexts is the same as the number of + * freelist queues. + */ + size += size; + return size * sizeof(struct cudbg_ch_cntxt); +} + +static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid, + enum ctxt_type ctype, u32 *data) +{ + struct adapter *padap = pdbg_init->adap; + int rc = -1; + + /* Under heavy traffic, the SGE queue context registers will be + * frequently accessed by firmware. + * + * To avoid conflicts with firmware, always ask firmware to fetch + * the SGE queue contexts via mailbox. On failure, fall back to + * accessing hardware registers directly. + */ + if (is_fw_attached(pdbg_init)) + rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype, data); + if (rc) + t4_sge_ctxt_rd_bd(padap, cid, ctype, data); +} + +int cudbg_collect_dump_context(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err) +{ + struct adapter *padap = pdbg_init->adap; + struct cudbg_buffer temp_buff = { 0 }; + struct cudbg_ch_cntxt *buff; + u32 size, i = 0; + int rc; + + rc = cudbg_dump_context_size(padap); + if (rc <= 0) + return CUDBG_STATUS_ENTITY_NOT_FOUND; + + size = rc; + rc = cudbg_get_buff(dbg_buff, size, &temp_buff); + if (rc) + return rc; + + buff = (struct cudbg_ch_cntxt *)temp_buff.data; + while (size > 0) { + buff->cntxt_type = CTXT_FLM; + buff->cntxt_id = i; + cudbg_read_sge_ctxt(pdbg_init, i, CTXT_FLM, buff->data); + buff++; + size -= sizeof(struct cudbg_ch_cntxt); + + buff->cntxt_type = CTXT_CNM; + buff->cntxt_id = i; + cudbg_read_sge_ctxt(pdbg_init, i, CTXT_CNM, buff->data); + buff++; + size -= sizeof(struct cudbg_ch_cntxt); + + i++; + } + + cudbg_write_and_release_buff(&temp_buff, dbg_buff); + return rc; +} + static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask) { *mask = x | y;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h index ebb2d9907fc9..caeee8e33e86 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h @@ -123,6 +123,9 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init, int cudbg_collect_tid(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); +int cudbg_collect_dump_context(struct cudbg_init *pdbg_init, + struct cudbg_buffer *dbg_buff, + struct cudbg_error *cudbg_err); int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init, struct cudbg_buffer *dbg_buff, struct cudbg_error *cudbg_err); @@ -158,6 +161,7 @@ struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i); void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff, struct cudbg_entity_hdr *entity_hdr); u32 cudbg_cim_obq_size(struct adapter *padap, int qid); +int cudbg_dump_context_size(struct adapter *padap); struct cudbg_tcam; void cudbg_fill_le_tcam_info(struct adapter *padap,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 0de1a4b2223e..6f9fa6e3c42a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1670,6 +1670,10 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]); void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int
*kbps, unsigned int *ipg, bool sleep_ok); +int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid, + enum ctxt_type ctype, u32 *data); +int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, + enum ctxt_type ctype, u32 *data); int t4_sched_params(struct adapter *adapter, int type, int level, int mode, int rateunit, int ratemode, int channel, int class, int minrate, int maxrate, int weight, int pktsize); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c index 05eb2d2ef592..29cc625e9833 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c @@ -60,6 +60,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = { { CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect }, { CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect }, { CUDBG_TID_INFO, cudbg_collect_tid }, + { CUDBG_DUMP_CONTEXT, cudbg_collect_dump_context }, { CUDBG_MPS_TCAM, cudbg_collect_mps_tcam }, { CUDBG_VPD_DATA, cudbg_collect_vpd_data }, { CUDBG_LE_TCAM, cudbg_collect_le_tcam }, @@ -218,6 +219,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity) case CUDBG_TID_INFO: len = sizeof(struct cudbg_tid_info_region_rev1); break; + case CUDBG_DUMP_CONTEXT: + len = cudbg_dump_context_size(adap); + break; case CUDBG_MPS_TCAM: len = sizeof(struct cudbg_mps_tcam) * adap->params.arch.mps_tcam_size; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index b4fad081ac78..f63210f15579 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -9647,6 +9647,68 @@ void t4_get_tx_sched(struct adapter *adap, unsigned int sched, } } +/* t4_sge_ctxt_rd - read an SGE context through FW + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @cid: the context id + * @ctype: the context type + * @data: where to store the context data + * + * Issues a FW command through the given mailbox to read an SGE context. + */ +int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid, + enum ctxt_type ctype, u32 *data) +{ + struct fw_ldst_cmd c; + int ret; + + if (ctype == CTXT_FLM) + ret = FW_LDST_ADDRSPC_SGE_FLMC; + else + ret = FW_LDST_ADDRSPC_SGE_CONMC; + + memset(&c, 0, sizeof(c)); + c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F | + FW_LDST_CMD_ADDRSPACE_V(ret)); + c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c)); + c.u.idctxt.physid = cpu_to_be32(cid); + + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret == 0) { + data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0); + data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1); + data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2); + data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3); + data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4); + data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5); + } + return ret; +} + +/** + * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW + * @adap: the adapter + * @cid: the context id + * @ctype: the context type + * @data: where to store the context data + * + * Reads an SGE context directly, bypassing FW. This is only for + * debugging when FW is unavailable. 
+ */ +int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, + enum ctxt_type ctype, u32 *data) +{ + int i, ret; + + t4_write_reg(adap, SGE_CTXT_CMD_A, CTXTQID_V(cid) | CTXTTYPE_V(ctype)); + ret = t4_wait_op_done(adap, SGE_CTXT_CMD_A, BUSY_F, 0, 3, 1); + if (!ret) + for (i = SGE_CTXT_DATA0_A; i <= SGE_CTXT_DATA5_A; i += 4) + *data++ = t4_read_reg(adap, i); + return ret; +} + int t4_sched_params(struct adapter *adapter, int type, int level, int mode, int rateunit, int ratemode, int channel, int class, int minrate, int maxrate, int weight, int pktsize)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h index 7c6af14905c2..a964ed184356 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h @@ -68,6 +68,12 @@ enum { ULPRX_LA_SIZE = 512, /* # of 256-bit words in ULP_RX LA */ }; +/* SGE context types */ +enum ctxt_type { + CTXT_FLM = 2, + CTXT_CNM, +}; + enum { SF_PAGE_SIZE = 256, /* serial flash page size */ SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */ @@ -79,6 +85,7 @@ enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV }; /* mailbox owners */ enum { SGE_MAX_WR_LEN = 512, /* max WR size in bytes */ + SGE_CTXT_SIZE = 24, /* size of SGE context */ SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */ SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */ SGE_MAX_IQ_SIZE = 65520,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index f5576ce004fa..a7cfece72828 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -153,6 +153,23 @@ #define T6_DBVFIFO_SIZE_M 0x1fffU #define T6_DBVFIFO_SIZE_G(x) (((x) >> T6_DBVFIFO_SIZE_S) & T6_DBVFIFO_SIZE_M) +#define SGE_CTXT_CMD_A 0x11fc + +#define BUSY_S 31 +#define BUSY_V(x) ((x) << BUSY_S) +#define BUSY_F BUSY_V(1U) + +#define CTXTTYPE_S 24 +#define CTXTTYPE_M 0x3U +#define CTXTTYPE_V(x) ((x) << CTXTTYPE_S) + +#define CTXTQID_S 0 +#define CTXTQID_M 0x1ffffU +#define CTXTQID_V(x) ((x) << CTXTQID_S) + +#define SGE_CTXT_DATA0_A 0x1200 +#define SGE_CTXT_DATA5_A 0x1214 + #define GLOBALENABLE_S 0 #define GLOBALENABLE_V(x) ((x) << GLOBALENABLE_S) #define GLOBALENABLE_F GLOBALENABLE_V(1U) @@ -322,6 +339,16 @@ #define SGE_IMSG_CTXT_BADDR_A 0x1088 #define SGE_FLM_CACHE_BADDR_A 0x108c +#define SGE_FLM_CFG_A 0x1090 + +#define NOHDR_S 18 +#define NOHDR_V(x) ((x) << NOHDR_S) +#define NOHDR_F NOHDR_V(1U) + +#define HDRSTARTFLQ_S 11 +#define HDRSTARTFLQ_M 0x7U +#define HDRSTARTFLQ_G(x) (((x) >> HDRSTARTFLQ_S) & HDRSTARTFLQ_M) + #define SGE_INGRESS_RX_THRESHOLD_A 0x10a0 #define THRESHOLD_0_S 24 -- cgit v1.2.3
From 442866ff9743d51957685cecaa722a7fd47b02e2 Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Sat, 11 Nov 2017 10:42:03 -0500 Subject: bnx2x: fix slowpath null crash When "NETDEV WATCHDOG: em4 (bnx2x): transmit queue 2 timed out" occurs, BNX2X_SP_RTNL_TX_TIMEOUT is set. In the function bnx2x_sp_rtnl_task, bnx2x_nic_unload and bnx2x_nic_load are executed to shut down and reopen the NIC. In the function bnx2x_nic_load, bnx2x_alloc_mem fails to allocate DMA memory, and the message "bnx2x: [bnx2x_alloc_mem:8399(em4)]Can't allocate memory" is printed. The variable slowpath is set to NULL. When the NIC is then shut down, the function bnx2x_nic_unload is called, which executes the following functions:
bnx2x_chip_cleanup bnx2x_set_storm_rx_mode bnx2x_set_q_rx_mode bnx2x_set_q_rx_mode bnx2x_config_rx_mode bnx2x_set_rx_mode_e2 In the function bnx2x_set_rx_mode_e2, the variable slowpath is dereferenced, and the crash occurs. To fix this crash, the variable slowpath is checked before use. In addition, in the function bnx2x_sp_rtnl_task, the NIC is shut down and opened once more after a DMA memory allocation failure. CC: Joe Jin CC: Junxiao Bi Signed-off-by: Zhu Yanjun Acked-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet')
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 54d1571384a0..be9fd7d184d0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -9332,7 +9332,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) /* Schedule the rx_mode command */ if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state); - else + else if (bp->slowpath) bnx2x_set_storm_rx_mode(bp); /* Cleanup multicast configuration */ @@ -10271,8 +10271,15 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work) smp_mb(); bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); - bnx2x_nic_load(bp, LOAD_NORMAL); - + /* When the return value indicates a memory allocation failure, + * the nic is rebooted again. If open still fails, an error + * message notifies the user. + */ + if (bnx2x_nic_load(bp, LOAD_NORMAL) == -ENOMEM) { + bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); + if (bnx2x_nic_load(bp, LOAD_NORMAL)) + BNX2X_ERR("Open the NIC fails again!\n"); + } rtnl_unlock(); return; } -- cgit v1.2.3
From 2f53fbd521825078c0b801d416702cdd877af262 Mon Sep 17 00:00:00 2001 From: Shalom Toledo Date: Sun, 12 Nov 2017 09:01:24 +0100 Subject: mlxsw: spectrum: Update minimum firmware version to 13.1530.152 This new firmware contains: - Support Spectrum A1 revision - Batch deletion of IPv6 neighbours - Remove incorrect VPD capability Signed-off-by: Shalom Toledo Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet')
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index b2cd1ebf4e36..2d46ec84ebdf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -74,8 +74,8 @@ #include "../mlxfw/mlxfw.h" #define MLXSW_FWREV_MAJOR 13 -#define MLXSW_FWREV_MINOR 1420 -#define MLXSW_FWREV_SUBMINOR 122 +#define MLXSW_FWREV_MINOR 1530 +#define MLXSW_FWREV_SUBMINOR 152 static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = { .major = MLXSW_FWREV_MAJOR, -- cgit v1.2.3
From 63dd00fa3e524c27cc0509190084ab147ecc8ae2 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sun, 12 Nov 2017 09:02:56 +0100 Subject: mlxsw: spectrum_router: Add batch neighbour deletion In commit 4a3c67a6e7cd ("mlxsw: spectrum_router: Don't batch neighbour deletion") I removed the support for batch deletion of neighbours on a router interface (RIF) since at that time the firmware did not support it for IPv6 neighbours. This is now supported by the version enforced by the driver, so there is no reason to delete neighbours one by one anymore.
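For illustration, a minimal editor's sketch of the flush-then-free pattern this patch adopts (not driver code: device_flush_all_neighbours() is a hypothetical stand-in for the single RAUHT delete-all register write, and the list is a simplified model of the RIF's neighbour list):

    #include <stdlib.h>

    struct neigh_entry {
            struct neigh_entry *next;
    };

    /* Hypothetical stand-in for one batched device-side delete-all */
    static void device_flush_all_neighbours(void)
    {
    }

    /* Ask the device once to drop every neighbour on the interface,
     * then release only the software state, entry by entry.
     */
    static void rif_neigh_gone_sync(struct neigh_entry **list)
    {
            struct neigh_entry *entry = *list;

            device_flush_all_neighbours();
            while (entry) {
                    struct neigh_entry *next = entry->next;

                    free(entry);
                    entry = next;
            }
            *list = NULL;
    }

The batched operation replaces one device update per neighbour with a single write, which is why the per-entry update call can be dropped from the loop in the diff below.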
Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) (limited to 'drivers/net/ethernet')
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index e9187841d82a..632c7b229054 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2416,16 +2416,25 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp) rhashtable_destroy(&mlxsw_sp->router->neigh_ht); } +static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_rif *rif) +{ + char rauht_pl[MLXSW_REG_RAUHT_LEN]; + + mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL, + rif->rif_index, rif->addr); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); +} + static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif) { struct mlxsw_sp_neigh_entry *neigh_entry, *tmp; + mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif); list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list, - rif_list_node) { - mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false); + rif_list_node) mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); - } } enum mlxsw_sp_nexthop_type { -- cgit v1.2.3
From 887c3820a3801a117a494aeca147ec52f95e1566 Mon Sep 17 00:00:00 2001 From: Salil Mehta Date: Mon, 13 Nov 2017 11:39:38 +0000 Subject: net: hns3: Update MSI/MSI-X alloc/free APIs (deprecated) to new APIs This patch migrates the HNS3 driver code from use of the deprecated PCI MSI/MSI-X interrupt vector allocation/free APIs to new common APIs. Signed-off-by: Salil Mehta Suggested-by: Christoph Hellwig Signed-off-by: David S. Miller --- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 107 +++++++-------------- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 17 ++-- 2 files changed, 43 insertions(+), 81 deletions(-) (limited to 'drivers/net/ethernet')
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 781d5a8cbb6a..59ed806a52c3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -891,14 +891,14 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; if (hnae3_dev_roce_supported(hdev)) { - hdev->num_roce_msix = + hdev->num_roce_msi = hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); /* PF should have NIC vectors and Roce vectors, * NIC vectors are queued before Roce vectors.
*/ - hdev->num_msi = hdev->num_roce_msix + HCLGE_ROCE_VECTOR_OFFSET; + hdev->num_msi = hdev->num_roce_msi + HCLGE_ROCE_VECTOR_OFFSET; } else { hdev->num_msi = hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), @@ -1950,7 +1950,7 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport) struct hnae3_handle *roce = &vport->roce; struct hnae3_handle *nic = &vport->nic; - roce->rinfo.num_vectors = vport->back->num_roce_msix; + roce->rinfo.num_vectors = vport->back->num_roce_msi; if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors || vport->back->num_msi_left == 0) @@ -1968,67 +1968,47 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport) return 0; } -static int hclge_init_msix(struct hclge_dev *hdev) +static int hclge_init_msi(struct hclge_dev *hdev) { struct pci_dev *pdev = hdev->pdev; - int ret, i; - - hdev->msix_entries = devm_kcalloc(&pdev->dev, hdev->num_msi, - sizeof(struct msix_entry), - GFP_KERNEL); - if (!hdev->msix_entries) - return -ENOMEM; - - hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, - sizeof(u16), GFP_KERNEL); - if (!hdev->vector_status) - return -ENOMEM; + int vectors; + int i; - for (i = 0; i < hdev->num_msi; i++) { - hdev->msix_entries[i].entry = i; - hdev->vector_status[i] = HCLGE_INVALID_VPORT; + vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, + PCI_IRQ_MSI | PCI_IRQ_MSIX); + if (vectors < 0) { + dev_err(&pdev->dev, + "failed(%d) to allocate MSI/MSI-X vectors\n", + vectors); + return vectors; } + if (vectors < hdev->num_msi) + dev_warn(&hdev->pdev->dev, + "requested %d MSI/MSI-X, but allocated %d MSI/MSI-X\n", + hdev->num_msi, vectors); - hdev->num_msi_left = hdev->num_msi; - hdev->base_msi_vector = hdev->pdev->irq; + hdev->num_msi = vectors; + hdev->num_msi_left = vectors; + hdev->base_msi_vector = pdev->irq; hdev->roce_base_vector = hdev->base_msi_vector + HCLGE_ROCE_VECTOR_OFFSET; - ret = pci_enable_msix_range(hdev->pdev, hdev->msix_entries, - hdev->num_msi, hdev->num_msi); - if (ret < 0) { - dev_info(&hdev->pdev->dev, - "MSI-X vector alloc failed: %d\n", ret); - return ret; - } - - return 0; -} - -static int hclge_init_msi(struct hclge_dev *hdev) -{ - struct pci_dev *pdev = hdev->pdev; - int vectors; - int i; - hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, sizeof(u16), GFP_KERNEL); - if (!hdev->vector_status) + if (!hdev->vector_status) { + pci_free_irq_vectors(pdev); return -ENOMEM; + } for (i = 0; i < hdev->num_msi; i++) hdev->vector_status[i] = HCLGE_INVALID_VPORT; - vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, PCI_IRQ_MSI); - if (vectors < 0) { - dev_err(&pdev->dev, "MSI vectors enable failed %d\n", vectors); - return -EINVAL; + hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, + sizeof(int), GFP_KERNEL); + if (!hdev->vector_irq) { + pci_free_irq_vectors(pdev); + return -ENOMEM; } - hdev->num_msi = vectors; - hdev->num_msi_left = vectors; - hdev->base_msi_vector = pdev->irq; - hdev->roce_base_vector = hdev->base_msi_vector + - HCLGE_ROCE_VECTOR_OFFSET; return 0; } @@ -2704,6 +2684,7 @@ static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, vport->vport_id * HCLGE_VECTOR_VF_OFFSET; hdev->vector_status[i] = vport->vport_id; + hdev->vector_irq[i] = vector->vector; vector++; alloc++; @@ -2722,15 +2703,10 @@ static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) { int i; - for (i = 0; i < hdev->num_msi; i++) { - if (hdev->msix_entries) { - if (vector == hdev->msix_entries[i].vector) - return i; - } else { - if (vector == 
(hdev->base_msi_vector + i)) - return i; - } - } + for (i = 0; i < hdev->num_msi; i++) + if (vector == hdev->vector_irq[i]) + return i; + + return -EINVAL; } @@ -4664,14 +4640,7 @@ static void hclge_pci_uninit(struct hclge_dev *hdev) { struct pci_dev *pdev = hdev->pdev; - if (hdev->flag & HCLGE_FLAG_USE_MSIX) { - pci_disable_msix(pdev); - devm_kfree(&pdev->dev, hdev->msix_entries); - hdev->msix_entries = NULL; - } else { - pci_disable_msi(pdev); - } - + pci_free_irq_vectors(pdev); pci_clear_master(pdev); pci_release_mem_regions(pdev); pci_disable_device(pdev); @@ -4689,7 +4658,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) goto err_hclge_dev; } - hdev->flag |= HCLGE_FLAG_USE_MSIX; hdev->pdev = pdev; hdev->ae_dev = ae_dev; hdev->reset_type = HNAE3_NONE_RESET; @@ -4726,12 +4694,9 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } - if (hdev->flag & HCLGE_FLAG_USE_MSIX) - ret = hclge_init_msix(hdev); - else - ret = hclge_init_msi(hdev); + ret = hclge_init_msi(hdev); if (ret) { - dev_err(&pdev->dev, "Init msix/msi error, ret = %d.\n", ret); + dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret); return ret; }
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 742e6ee9efaf..7027814ea5d7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -425,9 +425,6 @@ struct hclge_dev { u16 num_tqps; /* Num task queue pairs of this PF */ u16 num_req_vfs; /* Num VFs requested for this PF */ - u16 num_roce_msix; /* Num of roce vectors for this PF */ - int roce_base_vector; - /* Base task tqp physical id of this PF */ u16 base_tqp_pid; u16 alloc_rss_size; /* Allocated RSS task queue */ @@ -457,8 +454,10 @@ struct hclge_dev { u16 num_msi_left; u16 num_msi_used; u32 base_msi_vector; - struct msix_entry *msix_entries; u16 *vector_status; + int *vector_irq; + u16 num_roce_msi; /* Num of roce vectors for this PF */ + int roce_base_vector; u16 pending_udp_bitmap; @@ -482,12 +481,10 @@ struct hclge_dev { struct hnae3_client *nic_client; struct hnae3_client *roce_client; -#define HCLGE_FLAG_USE_MSI 0x00000001 -#define HCLGE_FLAG_USE_MSIX 0x00000002 -#define HCLGE_FLAG_MAIN 0x00000004 -#define HCLGE_FLAG_DCB_CAPABLE 0x00000008 -#define HCLGE_FLAG_DCB_ENABLE 0x00000010 -#define HCLGE_FLAG_MQPRIO_ENABLE 0x00000020 +#define HCLGE_FLAG_MAIN BIT(0) +#define HCLGE_FLAG_DCB_CAPABLE BIT(1) +#define HCLGE_FLAG_DCB_ENABLE BIT(2) +#define HCLGE_FLAG_MQPRIO_ENABLE BIT(3) u32 flag; u32 pkt_buf_size; /* Total pf buf size for tx/rx */ -- cgit v1.2.3
From 0d63785c6b94b5d2f095f90755825f90eea791f5 Mon Sep 17 00:00:00 2001 From: Simon Guinot Date: Mon, 13 Nov 2017 16:27:02 +0100 Subject: net: mvneta: fix handling of the Tx descriptor counter The mvneta controller provides an 8-bit register to update the pending Tx descriptor counter. Thus, a maximum of 255 Tx descriptors can be added at once. In the current code the mvneta_txq_pend_desc_add function assumes the caller takes care of this limit. But that is not the case. In some situations (xmit_more flag), more than 255 descriptors are added. When this happens, the Tx descriptor counter register is updated with a wrong value, which breaks the whole Tx queue management. This patch fixes the issue by allowing the mvneta_txq_pend_desc_add function to process more than 255 Tx descriptors.
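As a quick illustration of the chunking described above, here is a minimal editor's sketch assuming only the 8-bit register limit; txq_update_write() is a hypothetical stand-in for the mvreg_write() call in the diff below:

    #include <stdint.h>

    /* Hypothetical stand-in for the 8-bit pending-descriptor register write */
    static void txq_update_write(uint32_t val)
    {
            (void)val;
    }

    /* Add an arbitrary number of pending Tx descriptors in chunks of at
     * most 255, since the register only accepts 8-bit values.
     */
    static void txq_pend_desc_add(uint32_t pend_desc)
    {
            do {
                    uint32_t val = pend_desc > 255 ? 255 : pend_desc;

                    txq_update_write(val);
                    pend_desc -= val;
            } while (pend_desc > 0);
    }

For counts up to 255 the do/while loop still performs a single register write, so behaviour in the previously supported range is unchanged.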
Fixes: 2a90f7e1d5d0 ("net: mvneta: add xmit_more support") Cc: stable@vger.kernel.org # 4.11+ Signed-off-by: Simon Guinot Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvneta.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'drivers/net/ethernet')
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 64a04975bcf8..bc93b69cfd1e 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -816,11 +816,14 @@ static void mvneta_txq_pend_desc_add(struct mvneta_port *pp, { u32 val; - /* Only 255 descriptors can be added at once ; Assume caller - * process TX desriptors in quanta less than 256 - */ - val = pend_desc + txq->pending; - mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); + pend_desc += txq->pending; + + /* Only 255 Tx descriptors can be added at once */ + do { + val = min(pend_desc, 255); + mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); + pend_desc -= val; + } while (pend_desc > 0); txq->pending = 0; } -- cgit v1.2.3
From 4e6759be28e4e69c397ab58c1e780b0a15d8a6fd Mon Sep 17 00:00:00 2001 From: Desnes Augusto Nunes do Rosario Date: Mon, 13 Nov 2017 15:59:19 -0200 Subject: ibmvnic: Feature implementation of Vital Product Data (VPD) for the ibmvnic driver This patch implements and enables VPD support for the ibmvnic driver. Moreover, it includes the implementation of suitable structs, signal transmission/handling and functions which allow the retrieval of firmware information from the ibmvnic card through the ethtool command. Signed-off-by: Desnes A. Nunes do Rosario Signed-off-by: Thomas Falcon Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 153 ++++++++++++++++++++++++++++++++++++- drivers/net/ethernet/ibm/ibmvnic.h | 27 ++++++- 2 files changed, 176 insertions(+), 4 deletions(-) (limited to 'drivers/net/ethernet')
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index b918bc2f2e4f..04aaacbc3d45 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -574,6 +574,15 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter) return 0; } +static void release_vpd_data(struct ibmvnic_adapter *adapter) +{ + if (!adapter->vpd) + return; + + kfree(adapter->vpd->buff); + kfree(adapter->vpd); +} + static void release_tx_pools(struct ibmvnic_adapter *adapter) { struct ibmvnic_tx_pool *tx_pool; @@ -754,6 +763,8 @@ static void release_resources(struct ibmvnic_adapter *adapter) { int i; + release_vpd_data(adapter); + release_tx_pools(adapter); release_rx_pools(adapter); @@ -834,6 +845,57 @@ static int set_real_num_queues(struct net_device *netdev) return rc; } +static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + union ibmvnic_crq crq; + dma_addr_t dma_addr; + int len = 0; + + if (adapter->vpd->buff) + len = adapter->vpd->len; + + reinit_completion(&adapter->fw_done); + crq.get_vpd_size.first = IBMVNIC_CRQ_CMD; + crq.get_vpd_size.cmd = GET_VPD_SIZE; + ibmvnic_send_crq(adapter, &crq); + wait_for_completion(&adapter->fw_done); + + if (!adapter->vpd->len) + return -ENODATA; + + if (!adapter->vpd->buff) + adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL); + else if (adapter->vpd->len != len) + adapter->vpd->buff = + krealloc(adapter->vpd->buff, + adapter->vpd->len, GFP_KERNEL); + + if (!adapter->vpd->buff) { + dev_err(dev, "Could not allocate VPD buffer\n"); + return -ENOMEM; + } + + adapter->vpd->dma_addr =
+ dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len, + DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dma_addr)) { + dev_err(dev, "Could not map VPD buffer\n"); + kfree(adapter->vpd->buff); + return -ENOMEM; + } + + reinit_completion(&adapter->fw_done); + crq.get_vpd.first = IBMVNIC_CRQ_CMD; + crq.get_vpd.cmd = GET_VPD; + crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr); + crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len); + ibmvnic_send_crq(adapter, &crq); + wait_for_completion(&adapter->fw_done); + + return 0; +} + static int init_resources(struct ibmvnic_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -851,6 +913,10 @@ static int init_resources(struct ibmvnic_adapter *adapter) if (rc) return rc; + adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL); + if (!adapter->vpd) + return -ENOMEM; + adapter->map_id = 1; adapter->napi = kcalloc(adapter->req_rx_queues, sizeof(struct napi_struct), GFP_KERNEL); @@ -924,7 +990,7 @@ static int __ibmvnic_open(struct net_device *netdev) static int ibmvnic_open(struct net_device *netdev) { struct ibmvnic_adapter *adapter = netdev_priv(netdev); - int rc; + int rc, vpd; mutex_lock(&adapter->reset_lock); @@ -951,6 +1017,12 @@ static int ibmvnic_open(struct net_device *netdev) rc = __ibmvnic_open(netdev); netif_carrier_on(netdev); + + /* Vital Product Data (VPD) */ + vpd = ibmvnic_get_vpd(adapter); + if (vpd) + netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n"); + mutex_unlock(&adapter->reset_lock); return rc; @@ -1879,11 +1951,15 @@ static int ibmvnic_get_link_ksettings(struct net_device *netdev, return 0; } -static void ibmvnic_get_drvinfo(struct net_device *dev, +static void ibmvnic_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver)); strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version)); + strlcpy(info->fw_version, adapter->fw_version, + sizeof(info->fw_version)); } static u32 ibmvnic_get_msglevel(struct net_device *netdev) @@ -3140,6 +3216,73 @@ static void send_cap_queries(struct ibmvnic_adapter *adapter) ibmvnic_send_crq(adapter, &crq); } +static void handle_vpd_size_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + + if (crq->get_vpd_size_rsp.rc.code) { + dev_err(dev, "Error retrieving VPD size, rc=%x\n", + crq->get_vpd_size_rsp.rc.code); + complete(&adapter->fw_done); + return; + } + + adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len); + complete(&adapter->fw_done); +} + +static void handle_vpd_rsp(union ibmvnic_crq *crq, + struct ibmvnic_adapter *adapter) +{ + struct device *dev = &adapter->vdev->dev; + unsigned char *substr = NULL, *ptr = NULL; + u8 fw_level_len = 0; + + memset(adapter->fw_version, 0, 32); + + dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len, + DMA_FROM_DEVICE); + + if (crq->get_vpd_rsp.rc.code) { + dev_err(dev, "Error retrieving VPD from device, rc=%x\n", + crq->get_vpd_rsp.rc.code); + goto complete; + } + + /* get the position of the firmware version info + * located after the ASCII 'RM' substring in the buffer + */ + substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len); + if (!substr) { + dev_info(dev, "No FW level provided by VPD\n"); + goto complete; + } + + /* get length of firmware level ASCII substring */ + if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) { + fw_level_len = *(substr + 2); + } else { + 
dev_info(dev, "Length of FW substr extrapolated VDP buff\n"); + goto complete; + } + + /* copy firmware version string from vpd into adapter */ + if ((substr + 3 + fw_level_len) < + (adapter->vpd->buff + adapter->vpd->len)) { + ptr = strncpy((char *)adapter->fw_version, + substr + 3, fw_level_len); + + if (!ptr) + dev_err(dev, "Failed to isolate FW level string\n"); + } else { + dev_info(dev, "FW substr extrapolated VPD buff\n"); + } + +complete: + complete(&adapter->fw_done); +} + static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter) { struct device *dev = &adapter->vdev->dev; @@ -3871,6 +4014,12 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, netdev_dbg(netdev, "Got Collect firmware trace Response\n"); complete(&adapter->fw_done); break; + case GET_VPD_SIZE_RSP: + handle_vpd_size_rsp(crq, adapter); + break; + case GET_VPD_RSP: + handle_vpd_rsp(crq, adapter); + break; default: netdev_err(netdev, "Got an invalid cmd type 0x%02x\n", gen_crq->cmd); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 8ed829c5b026..4487f1e2c266 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -560,6 +560,12 @@ struct ibmvnic_multicast_ctrl { struct ibmvnic_rc rc; } __packed __aligned(8); +struct ibmvnic_get_vpd_size { + u8 first; + u8 cmd; + u8 reserved[14]; +} __packed __aligned(8); + struct ibmvnic_get_vpd_size_rsp { u8 first; u8 cmd; @@ -577,6 +583,13 @@ struct ibmvnic_get_vpd { u8 reserved[4]; } __packed __aligned(8); +struct ibmvnic_get_vpd_rsp { + u8 first; + u8 cmd; + u8 reserved[10]; + struct ibmvnic_rc rc; +} __packed __aligned(8); + struct ibmvnic_acl_change_indication { u8 first; u8 cmd; @@ -702,10 +715,10 @@ union ibmvnic_crq { struct ibmvnic_change_mac_addr change_mac_addr_rsp; struct ibmvnic_multicast_ctrl multicast_ctrl; struct ibmvnic_multicast_ctrl multicast_ctrl_rsp; - struct ibmvnic_generic_crq get_vpd_size; + struct ibmvnic_get_vpd_size get_vpd_size; struct ibmvnic_get_vpd_size_rsp get_vpd_size_rsp; struct ibmvnic_get_vpd get_vpd; - struct ibmvnic_generic_crq get_vpd_rsp; + struct ibmvnic_get_vpd_rsp get_vpd_rsp; struct ibmvnic_acl_change_indication acl_change_indication; struct ibmvnic_acl_query acl_query; struct ibmvnic_generic_crq acl_query_rsp; @@ -939,6 +952,12 @@ struct ibmvnic_error_buff { __be32 error_id; }; +struct ibmvnic_vpd { + unsigned char *buff; + dma_addr_t dma_addr; + u64 len; +}; + enum vnic_state {VNIC_PROBING = 1, VNIC_PROBED, VNIC_OPENING, @@ -980,6 +999,10 @@ struct ibmvnic_adapter { dma_addr_t ip_offload_ctrl_tok; u32 msg_enable; + /* Vital Product Data (VPD) */ + struct ibmvnic_vpd *vpd; + char fw_version[32]; + /* Statistics */ struct ibmvnic_statistics stats; dma_addr_t stats_token; -- cgit v1.2.3 From 228aa0121c8897d97fd0864d4ee4851545f7ebac Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 14 Nov 2017 09:14:17 +0300 Subject: liquidio: Missing error code in liquidio_init_nic_module() We accidentally return success if lio_vf_rep_modinit() fails instead of propogating the error code. Fixes: e20f469660ad ("liquidio: synchronize VF representor names with NIC firmware") Signed-off-by: Dan Carpenter Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index f05045a69dcc..6aa0eee88ea5 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -4038,7 +4038,8 @@ static int liquidio_init_nic_module(struct octeon_device *oct) */ if (!oct->octeon_id && oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) { - if (lio_vf_rep_modinit()) { + retval = lio_vf_rep_modinit(); + if (retval) { liquidio_stop_nic_module(oct); goto octnet_init_failure; } -- cgit v1.2.3 From 4497478c60c04d2bf37082e27fc98f4f835db96b Mon Sep 17 00:00:00 2001 From: Niklas Cassel Date: Tue, 14 Nov 2017 11:15:54 +0100 Subject: net: stmmac: fix LPI transitioning for dwmac4 The LPI transitioning logic in stmmac_main uses priv->tx_path_in_lpi_mode to enter/exit LPI. However, priv->tx_path_in_lpi_mode is assigned using the return value from host_irq_status(). So for dwmac4, priv->tx_path_in_lpi_mode was always false, so stmmac_tx_clean() would always try to put us in eee mode, and stmmac_xmit() would never take us out of eee mode. To fix this, make host_irq_status() read and return the LPI irq status also for dwmac4. This also increments the existing LPI counters, so that ethtool --statistics shows LPI transitions also for dwmac4. For dwmac1000, irqs are enabled/disabled using the register named "Interrupt Mask Register", and thus setting a bit disables that specific irq. For dwmac4 the matching register is named "MAC_Interrupt_Enable", and thus setting a bit enables that specific irq. Looking at dwmac1000_core.c, the irqs that are always enabled are: LPI and PMT. Looking at dwmac4_core.c, the irqs that are always enabled are: PMT. To be able to read the LPI irq status, we need to enable the LPI irq also for dwmac4. Signed-off-by: Niklas Cassel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 7 ++++++- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 19 +++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) (limited to 'drivers/net/ethernet') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h index aeda3ab2d761..789dad8a07b5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h @@ -98,7 +98,7 @@ #define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \ GMAC_INT_PCS_ANE) -#define GMAC_INT_DEFAULT_MASK GMAC_INT_PMT_EN +#define GMAC_INT_DEFAULT_MASK (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN) enum dwmac4_irq_status { time_stamp_irq = 0x00001000, @@ -106,6 +106,7 @@ enum dwmac4_irq_status { mmc_tx_irq = 0x00000400, mmc_rx_irq = 0x00000200, mmc_irq = 0x00000100, + lpi_irq = 0x00000020, pmt_irq = 0x00000010, }; @@ -132,6 +133,10 @@ enum power_event { #define GMAC4_LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */ #define GMAC4_LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */ #define GMAC4_LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */ +#define GMAC4_LPI_CTRL_STATUS_RLPIEX BIT(3) /* Receive LPI Exit */ +#define GMAC4_LPI_CTRL_STATUS_RLPIEN BIT(2) /* Receive LPI Entry */ +#define GMAC4_LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */ +#define GMAC4_LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */ /* MAC Debug bitmap */ #define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 2f7d7ec59962..f3ed8f7853eb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -580,6 +580,25 @@ static int dwmac4_irq_status(struct mac_device_info *hw, x->irq_receive_pmt_irq_n++; } + /* MAC tx/rx EEE LPI entry/exit interrupts */ + if (intr_status & lpi_irq) { + /* Clear LPI interrupt by reading MAC_LPI_Control_Status */ + u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS); + + if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) { + ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE; + x->irq_tx_path_in_lpi_mode_n++; + } + if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) { + ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE; + x->irq_tx_path_exit_lpi_mode_n++; + } + if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN) + x->irq_rx_path_in_lpi_mode_n++; + if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX) + x->irq_rx_path_exit_lpi_mode_n++; + } + dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x); if (intr_status & PCS_RGSMIIIS_IRQ) dwmac4_phystatus(ioaddr, x); -- cgit v1.2.3
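To close, a minimal editor's sketch of the LPI status decode added in the patch above. The bit positions follow the GMAC4_LPI_CTRL_STATUS_* defines from the diff; lpi_decode() and struct lpi_stats are hypothetical names, and the sketch assumes only that reading the status register clears the interrupt, so all four transition bits must be accounted for from the single value read:

    #include <stdint.h>

    /* Bit layout as in the GMAC4_LPI_CTRL_STATUS_* defines above */
    #define LPI_TLPIEN (1u << 0)    /* Transmit LPI Entry */
    #define LPI_TLPIEX (1u << 1)    /* Transmit LPI Exit */
    #define LPI_RLPIEN (1u << 2)    /* Receive LPI Entry */
    #define LPI_RLPIEX (1u << 3)    /* Receive LPI Exit */

    struct lpi_stats {
            unsigned long tx_enter, tx_exit, rx_enter, rx_exit;
    };

    /* Count every LPI transition reported by one clear-on-read status value */
    static void lpi_decode(uint32_t status, struct lpi_stats *s)
    {
            if (status & LPI_TLPIEN)
                    s->tx_enter++;
            if (status & LPI_TLPIEX)
                    s->tx_exit++;
            if (status & LPI_RLPIEN)
                    s->rx_enter++;
            if (status & LPI_RLPIEX)
                    s->rx_exit++;
    }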