From 424eb834a9be49273c4b32d0d6395dfdbe768a1a Mon Sep 17 00:00:00 2001 From: Salil Mehta Date: Thu, 14 Dec 2017 18:03:06 +0000 Subject: net: hns3: Unified HNS3 {VF|PF} Ethernet Driver for hip08 SoC Most of the NAPI handling interface, skb buffer management, management of the RX/TX descriptors, ethool interface etc. has quite a bit of code which is common to VF and PF driver. This patch makes the exisitng PF's HNS3 ENET driver as the common ENET driver for both Virtual & Physical Function. This will help in reduction of redundancy and better management of code. Signed-off-by: Salil Mehta Signed-off-by: lipeng Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/Makefile | 5 + drivers/net/ethernet/hisilicon/hns3/hnae3.c | 14 +- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 7 +- drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c | 100 + drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 3216 ++++++++++++++++++++ drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 613 ++++ drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 896 ++++++ .../net/ethernet/hisilicon/hns3/hns3pf/Makefile | 5 - .../ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c | 100 - .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 3214 ------------------- .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h | 613 ---- .../ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c | 876 ------ .../ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 1 + 13 files changed, 4847 insertions(+), 4813 deletions(-) create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c delete mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c delete mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c delete mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h delete mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c (limited to 'drivers/net/ethernet/hisilicon') diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile index c45094513c87..002534f12b66 100644 --- a/drivers/net/ethernet/hisilicon/hns3/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/Makefile @@ -7,3 +7,8 @@ obj-$(CONFIG_HNS3) += hns3pf/ obj-$(CONFIG_HNS3) += hns3vf/ obj-$(CONFIG_HNS3) += hnae3.o + +obj-$(CONFIG_HNS3_ENET) += hns3.o +hns3-objs = hns3_enet.o hns3_ethtool.o + +hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 5bcb2238acb2..02145f2de820 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -196,9 +196,18 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) const struct pci_device_id *id; struct hnae3_ae_algo *ae_algo; struct hnae3_client *client; - int ret = 0; + int ret = 0, lock_acquired; + + /* we can get deadlocked if SRIOV is being enabled in context to probe + * and probe gets called again in same context. This can happen when + * pci_enable_sriov() is called to create VFs from PF probes context. + * Therefore, for simplicity uniformly defering further probing in all + * cases where we detect contention. 
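The comment above describes the deadlock this hunk avoids: the PF probe takes hnae3_common_lock, calls pci_enable_sriov(), and the VF probe then runs in the same context and would block on the same lock. Below is a minimal, stand-alone sketch of the trylock-and-defer pattern, under the assumption of a hypothetical example_reg_lock/example_register(); the real change in hnae3_register_ae_dev() follows in the hunk.

	#include <linux/errno.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_reg_lock);	/* hypothetical registration lock */

	static int example_register(void)
	{
		/* If the lock is already held (e.g. by the PF probe that called
		 * pci_enable_sriov() and re-entered probe for its VFs), do not
		 * block; ask the driver core to retry this probe later instead.
		 */
		if (!mutex_trylock(&example_reg_lock))
			return -EPROBE_DEFER;

		/* ... match the new ae_dev against registered algos/clients ... */

		mutex_unlock(&example_reg_lock);
		return 0;
	}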
+ */ + lock_acquired = mutex_trylock(&hnae3_common_lock); + if (!lock_acquired) + return -EPROBE_DEFER; - mutex_lock(&hnae3_common_lock); list_add_tail(&ae_dev->node, &hnae3_ae_dev_list); /* Check if there are matched ae_algo */ @@ -211,6 +220,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) if (!ae_dev->ops) { dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n"); + ret = -EOPNOTSUPP; goto out_err; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 67c59e1039f2..a9e2b32834c5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -452,9 +452,10 @@ struct hnae3_unic_private_info { struct hnae3_queue **tqp; /* array base of all TQPs of this instance */ }; -#define HNAE3_SUPPORT_MAC_LOOPBACK 1 -#define HNAE3_SUPPORT_PHY_LOOPBACK 2 -#define HNAE3_SUPPORT_SERDES_LOOPBACK 4 +#define HNAE3_SUPPORT_MAC_LOOPBACK BIT(0) +#define HNAE3_SUPPORT_PHY_LOOPBACK BIT(1) +#define HNAE3_SUPPORT_SERDES_LOOPBACK BIT(2) +#define HNAE3_SUPPORT_VF BIT(3) struct hnae3_handle { struct hnae3_client *client; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c new file mode 100644 index 000000000000..eb82700da7d0 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c @@ -0,0 +1,100 @@ +/* + * Copyright (c) 2016-2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include "hnae3.h" +#include "hns3_enet.h" + +static +int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (h->kinfo.dcb_ops->ieee_getets) + return h->kinfo.dcb_ops->ieee_getets(h, ets); + + return -EOPNOTSUPP; +} + +static +int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (h->kinfo.dcb_ops->ieee_setets) + return h->kinfo.dcb_ops->ieee_setets(h, ets); + + return -EOPNOTSUPP; +} + +static +int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (h->kinfo.dcb_ops->ieee_getpfc) + return h->kinfo.dcb_ops->ieee_getpfc(h, pfc); + + return -EOPNOTSUPP; +} + +static +int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (h->kinfo.dcb_ops->ieee_setpfc) + return h->kinfo.dcb_ops->ieee_setpfc(h, pfc); + + return -EOPNOTSUPP; +} + +/* DCBX configuration */ +static u8 hns3_dcbnl_getdcbx(struct net_device *ndev) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (h->kinfo.dcb_ops->getdcbx) + return h->kinfo.dcb_ops->getdcbx(h); + + return 0; +} + +/* return 0 if successful, otherwise fail */ +static u8 hns3_dcbnl_setdcbx(struct net_device *ndev, u8 mode) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + + if (h->kinfo.dcb_ops->setdcbx) + return h->kinfo.dcb_ops->setdcbx(h, mode); + + return 1; +} + +static const struct dcbnl_rtnl_ops hns3_dcbnl_ops = { + .ieee_getets = hns3_dcbnl_ieee_getets, + .ieee_setets = hns3_dcbnl_ieee_setets, + .ieee_getpfc = hns3_dcbnl_ieee_getpfc, + .ieee_setpfc = hns3_dcbnl_ieee_setpfc, + .getdcbx = hns3_dcbnl_getdcbx, + .setdcbx = hns3_dcbnl_setdcbx, +}; + +/* hclge_dcbnl_setup - 
DCBNL setup + * @handle: the corresponding vport handle + * Set up DCBNL + */ +void hns3_dcbnl_setup(struct hnae3_handle *handle) +{ + struct net_device *dev = handle->kinfo.netdev; + + if ((!handle->kinfo.dcb_ops) || (handle->flags & HNAE3_SUPPORT_VF)) + return; + + dev->dcbnl_ops = &hns3_dcbnl_ops; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c new file mode 100644 index 000000000000..c2c13238db52 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -0,0 +1,3216 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hnae3.h" +#include "hns3_enet.h" + +static const char hns3_driver_name[] = "hns3"; +const char hns3_driver_version[] = VERMAGIC_STRING; +static const char hns3_driver_string[] = + "Hisilicon Ethernet Network Driver for Hip08 Family"; +static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; +static struct hnae3_client client; + +/* hns3_pci_tbl - PCI Device ID Table + * + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id hns3_pci_tbl[] = { + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), + HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0}, + /* required last entry */ + {0, } +}; +MODULE_DEVICE_TABLE(pci, hns3_pci_tbl); + +static irqreturn_t hns3_irq_handle(int irq, void *dev) +{ + struct hns3_enet_tqp_vector *tqp_vector = dev; + + napi_schedule(&tqp_vector->napi); + + return IRQ_HANDLED; +} + +static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) +{ + struct hns3_enet_tqp_vector *tqp_vectors; + unsigned int i; + + for (i = 0; i < priv->vector_num; i++) { + tqp_vectors = &priv->tqp_vector[i]; + + if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) + continue; + + /* release the irq resource */ + free_irq(tqp_vectors->vector_irq, tqp_vectors); + tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED; + } +} + +static int hns3_nic_init_irq(struct hns3_nic_priv *priv) +{ + struct hns3_enet_tqp_vector *tqp_vectors; + int txrx_int_idx = 0; + int rx_int_idx = 0; + int tx_int_idx = 0; + unsigned int i; + int ret; + + for (i = 0; i < priv->vector_num; i++) { + tqp_vectors = &priv->tqp_vector[i]; + + if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED) + continue; + + if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) { + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, + "%s-%s-%d", priv->netdev->name, "TxRx", + txrx_int_idx++); + txrx_int_idx++; + } else if (tqp_vectors->rx_group.ring) { + 
snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, + "%s-%s-%d", priv->netdev->name, "Rx", + rx_int_idx++); + } else if (tqp_vectors->tx_group.ring) { + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, + "%s-%s-%d", priv->netdev->name, "Tx", + tx_int_idx++); + } else { + /* Skip this unused q_vector */ + continue; + } + + tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0'; + + ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0, + tqp_vectors->name, + tqp_vectors); + if (ret) { + netdev_err(priv->netdev, "request irq(%d) fail\n", + tqp_vectors->vector_irq); + return ret; + } + + tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; + } + + return 0; +} + +static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector, + u32 mask_en) +{ + writel(mask_en, tqp_vector->mask_addr); +} + +static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector) +{ + napi_enable(&tqp_vector->napi); + + /* enable vector */ + hns3_mask_vector_irq(tqp_vector, 1); +} + +static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector) +{ + /* disable vector */ + hns3_mask_vector_irq(tqp_vector, 0); + + disable_irq(tqp_vector->vector_irq); + napi_disable(&tqp_vector->napi); +} + +static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector, + u32 gl_value) +{ + /* this defines the configuration for GL (Interrupt Gap Limiter) + * GL defines inter interrupt gap. + * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing + */ + writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); + writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); + writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET); +} + +static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector, + u32 rl_value) +{ + /* this defines the configuration for RL (Interrupt Rate Limiter). + * Rl defines rate of interrupts i.e. number of interrupts-per-second + * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing + */ + writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET); +} + +static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector) +{ + /* initialize the configuration for interrupt coalescing. + * 1. GL (Interrupt Gap Limiter) + * 2. 
RL (Interrupt Rate Limiter) + */ + + /* Default :enable interrupt coalesce */ + tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K; + tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K; + hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K); + /* for now we are disabling Interrupt RL - we + * will re-enable later + */ + hns3_set_vector_coalesc_rl(tqp_vector, 0); + tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW; + tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW; +} + +static int hns3_nic_set_real_num_queue(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_knic_private_info *kinfo = &h->kinfo; + unsigned int queue_size = kinfo->rss_size * kinfo->num_tc; + int ret; + + ret = netif_set_real_num_tx_queues(netdev, queue_size); + if (ret) { + netdev_err(netdev, + "netif_set_real_num_tx_queues fail, ret=%d!\n", + ret); + return ret; + } + + ret = netif_set_real_num_rx_queues(netdev, queue_size); + if (ret) { + netdev_err(netdev, + "netif_set_real_num_rx_queues fail, ret=%d!\n", ret); + return ret; + } + + return 0; +} + +static int hns3_nic_net_up(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int i, j; + int ret; + + /* get irq resource for all vectors */ + ret = hns3_nic_init_irq(priv); + if (ret) { + netdev_err(netdev, "hns init irq failed! ret=%d\n", ret); + return ret; + } + + /* enable the vectors */ + for (i = 0; i < priv->vector_num; i++) + hns3_vector_enable(&priv->tqp_vector[i]); + + /* start the ae_dev */ + ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; + if (ret) + goto out_start_err; + + return 0; + +out_start_err: + for (j = i - 1; j >= 0; j--) + hns3_vector_disable(&priv->tqp_vector[j]); + + hns3_nic_uninit_irq(priv); + + return ret; +} + +static int hns3_nic_net_open(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + int ret; + + netif_carrier_off(netdev); + + ret = hns3_nic_set_real_num_queue(netdev); + if (ret) + return ret; + + ret = hns3_nic_net_up(netdev); + if (ret) { + netdev_err(netdev, + "hns net up fail, ret=%d!\n", ret); + return ret; + } + + priv->last_reset_time = jiffies; + return 0; +} + +static void hns3_nic_net_down(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + const struct hnae3_ae_ops *ops; + int i; + + /* stop ae_dev */ + ops = priv->ae_handle->ae_algo->ops; + if (ops->stop) + ops->stop(priv->ae_handle); + + /* disable vectors */ + for (i = 0; i < priv->vector_num; i++) + hns3_vector_disable(&priv->tqp_vector[i]); + + /* free irq resources */ + hns3_nic_uninit_irq(priv); +} + +static int hns3_nic_net_stop(struct net_device *netdev) +{ + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + + hns3_nic_net_down(netdev); + + return 0; +} + +static int hns3_nic_uc_sync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->add_uc_addr) + return h->ae_algo->ops->add_uc_addr(h, addr); + + return 0; +} + +static int hns3_nic_uc_unsync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->rm_uc_addr) + return h->ae_algo->ops->rm_uc_addr(h, addr); + + return 0; +} + +static int hns3_nic_mc_sync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->add_mc_addr) + return h->ae_algo->ops->add_mc_addr(h, addr); + + 
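hns3_nic_net_up() above enables the TQP vectors one by one and, if ops->start() then fails, unwinds from j = i - 1 downwards so only the vectors that were actually enabled get disabled before the IRQs are released. A small stand-alone illustration of that unwind idiom; enable_one()/disable_one() are placeholders, not driver functions:

	#include <stdio.h>

	static void enable_one(int i)  { printf("enable vector %d\n", i); }
	static void disable_one(int i) { printf("disable vector %d\n", i); }

	/* Enable 'num' vectors, then pretend the ae_dev start step fails and
	 * unwind in reverse order, as the out_start_err label above does.
	 */
	static int bring_up(int num, int start_fails)
	{
		int i, j;

		for (i = 0; i < num; i++)
			enable_one(i);

		if (start_fails) {
			for (j = i - 1; j >= 0; j--)
				disable_one(j);
			return -1;
		}

		return 0;
	}

	int main(void)
	{
		bring_up(4, 1);	/* prints enable 0..3, then disable 3..0 */
		return 0;
	}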
return 0; +} + +static int hns3_nic_mc_unsync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->rm_mc_addr) + return h->ae_algo->ops->rm_mc_addr(h, addr); + + return 0; +} + +static void hns3_nic_set_rx_mode(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo->ops->set_promisc_mode) { + if (netdev->flags & IFF_PROMISC) + h->ae_algo->ops->set_promisc_mode(h, 1); + else + h->ae_algo->ops->set_promisc_mode(h, 0); + } + if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync)) + netdev_err(netdev, "sync uc address fail\n"); + if (netdev->flags & IFF_MULTICAST) + if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync)) + netdev_err(netdev, "sync mc address fail\n"); +} + +static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, + u16 *mss, u32 *type_cs_vlan_tso) +{ + u32 l4_offset, hdr_len; + union l3_hdr_info l3; + union l4_hdr_info l4; + u32 l4_paylen; + int ret; + + if (!skb_is_gso(skb)) + return 0; + + ret = skb_cow_head(skb, 0); + if (ret) + return ret; + + l3.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* Software should clear the IPv4's checksum field when tso is + * needed. + */ + if (l3.v4->version == 4) + l3.v4->check = 0; + + /* tunnel packet.*/ + if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | + SKB_GSO_GRE_CSUM | + SKB_GSO_UDP_TUNNEL | + SKB_GSO_UDP_TUNNEL_CSUM)) { + if ((!(skb_shinfo(skb)->gso_type & + SKB_GSO_PARTIAL)) && + (skb_shinfo(skb)->gso_type & + SKB_GSO_UDP_TUNNEL_CSUM)) { + /* Software should clear the udp's checksum + * field when tso is needed. + */ + l4.udp->check = 0; + } + /* reset l3&l4 pointers from outer to inner headers */ + l3.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + + /* Software should clear the IPv4's checksum field when + * tso is needed. 
+ */ + if (l3.v4->version == 4) + l3.v4->check = 0; + } + + /* normal or tunnel packet*/ + l4_offset = l4.hdr - skb->data; + hdr_len = (l4.tcp->doff * 4) + l4_offset; + + /* remove payload length from inner pseudo checksum when tso*/ + l4_paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(l4_paylen)); + + /* find the txbd field values */ + *paylen = skb->len - hdr_len; + hnae_set_bit(*type_cs_vlan_tso, + HNS3_TXD_TSO_B, 1); + + /* get MSS for TSO */ + *mss = skb_shinfo(skb)->gso_size; + + return 0; +} + +static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, + u8 *il4_proto) +{ + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } l3; + unsigned char *l4_hdr; + unsigned char *exthdr; + u8 l4_proto_tmp; + __be16 frag_off; + + /* find outer header point */ + l3.hdr = skb_network_header(skb); + l4_hdr = skb_inner_transport_header(skb); + + if (skb->protocol == htons(ETH_P_IPV6)) { + exthdr = l3.hdr + sizeof(*l3.v6); + l4_proto_tmp = l3.v6->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto_tmp, &frag_off); + } else if (skb->protocol == htons(ETH_P_IP)) { + l4_proto_tmp = l3.v4->protocol; + } else { + return -EINVAL; + } + + *ol4_proto = l4_proto_tmp; + + /* tunnel packet */ + if (!skb->encapsulation) { + *il4_proto = 0; + return 0; + } + + /* find inner header point */ + l3.hdr = skb_inner_network_header(skb); + l4_hdr = skb_inner_transport_header(skb); + + if (l3.v6->version == 6) { + exthdr = l3.hdr + sizeof(*l3.v6); + l4_proto_tmp = l3.v6->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto_tmp, &frag_off); + } else if (l3.v4->version == 4) { + l4_proto_tmp = l3.v4->protocol; + } + + *il4_proto = l4_proto_tmp; + + return 0; +} + +static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, + u8 il4_proto, u32 *type_cs_vlan_tso, + u32 *ol_type_vlan_len_msec) +{ + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } l3; + union { + struct tcphdr *tcp; + struct udphdr *udp; + struct gre_base_hdr *gre; + unsigned char *hdr; + } l4; + unsigned char *l2_hdr; + u8 l4_proto = ol4_proto; + u32 ol2_len; + u32 ol3_len; + u32 ol4_len; + u32 l2_len; + u32 l3_len; + + l3.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* compute L2 header size for normal packet, defined in 2 Bytes */ + l2_len = l3.hdr - skb->data; + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, l2_len >> 1); + + /* tunnel packet*/ + if (skb->encapsulation) { + /* compute OL2 header size, defined in 2 Bytes */ + ol2_len = l2_len; + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, ol2_len >> 1); + + /* compute OL3 header size, defined in 4 Bytes */ + ol3_len = l4.hdr - l3.hdr; + hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M, + HNS3_TXD_L3LEN_S, ol3_len >> 2); + + /* MAC in UDP, MAC in GRE (0x6558)*/ + if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) { + /* switch MAC header ptr from outer to inner header.*/ + l2_hdr = skb_inner_mac_header(skb); + + /* compute OL4 header size, defined in 4 Bytes. */ + ol4_len = l2_hdr - l4.hdr; + hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, ol4_len >> 2); + + /* switch IP header ptr from outer to inner header */ + l3.hdr = skb_inner_network_header(skb); + + /* compute inner l2 header size, defined in 2 Bytes. 
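hns3_set_tso() above derives the TX descriptor fields from the skb geometry: the header length is the L4 offset plus the TCP header (doff counts 4-byte words), the paylen written to the descriptor is everything after that header, and the MSS comes from gso_size. A stand-alone arithmetic sketch of that bookkeeping, with made-up sizes; the pseudo-checksum adjustment done by csum_replace_by_diff() is deliberately left out:

	#include <stdio.h>

	int main(void)
	{
		unsigned int skb_len   = 64000;	/* total bytes queued for this skb      */
		unsigned int l4_offset = 14 + 20;	/* Ethernet + IPv4, no options          */
		unsigned int tcp_doff  = 5;	/* TCP header length in 4-byte words    */
		unsigned int gso_size  = 1448;	/* MSS requested by the stack           */

		unsigned int hdr_len = l4_offset + tcp_doff * 4;
		unsigned int paylen  = skb_len - hdr_len;	/* -> desc->tx.paylen */

		printf("hdr_len=%u paylen=%u mss=%u segs=%u\n",
		       hdr_len, paylen, gso_size,
		       (paylen + gso_size - 1) / gso_size);
		return 0;
	}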
*/ + l2_len = l3.hdr - l2_hdr; + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, l2_len >> 1); + } else { + /* skb packet types not supported by hardware, + * txbd len fild doesn't be filled. + */ + return; + } + + /* switch L4 header pointer from outer to inner */ + l4.hdr = skb_inner_transport_header(skb); + + l4_proto = il4_proto; + } + + /* compute inner(/normal) L3 header size, defined in 4 Bytes */ + l3_len = l4.hdr - l3.hdr; + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M, + HNS3_TXD_L3LEN_S, l3_len >> 2); + + /* compute inner(/normal) L4 header size, defined in 4 Bytes */ + switch (l4_proto) { + case IPPROTO_TCP: + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, l4.tcp->doff); + break; + case IPPROTO_SCTP: + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2)); + break; + case IPPROTO_UDP: + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2)); + break; + default: + /* skb packet types not supported by hardware, + * txbd len fild doesn't be filled. + */ + return; + } +} + +static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, + u8 il4_proto, u32 *type_cs_vlan_tso, + u32 *ol_type_vlan_len_msec) +{ + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } l3; + u32 l4_proto = ol4_proto; + + l3.hdr = skb_network_header(skb); + + /* define OL3 type and tunnel type(OL4).*/ + if (skb->encapsulation) { + /* define outer network header type.*/ + if (skb->protocol == htons(ETH_P_IP)) { + if (skb_is_gso(skb)) + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_CSUM); + else + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_NO_CSUM); + + } else if (skb->protocol == htons(ETH_P_IPV6)) { + hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M, + HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); + } + + /* define tunnel type(OL4).*/ + switch (l4_proto) { + case IPPROTO_UDP: + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_M, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_MAC_IN_UDP); + break; + case IPPROTO_GRE: + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_M, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_NVGRE); + break; + default: + /* drop the skb tunnel packet if hardware don't support, + * because hardware can't calculate csum when TSO. + */ + if (skb_is_gso(skb)) + return -EDOM; + + /* the stack computes the IP header already, + * driver calculate l4 checksum when not TSO. + */ + skb_checksum_help(skb); + return 0; + } + + l3.hdr = skb_inner_network_header(skb); + l4_proto = il4_proto; + } + + if (l3.v4->version == 4) { + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, + HNS3_TXD_L3T_S, HNS3_L3T_IPV4); + + /* the stack computes the IP header already, the only time we + * need the hardware to recompute it is in the case of TSO. 
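hns3_set_l2l3l4_len() above stores the L2 length in 2-byte units and the L3/L4 lengths in 4-byte units, hence the ">> 1" and ">> 2" before the values are written into the descriptor fields. A stand-alone sketch of that conversion for a plain, non-tunnelled TCP frame; the byte counts are illustrative only:

	#include <stdio.h>

	int main(void)
	{
		unsigned int l2_bytes = 14;	/* Ethernet header          */
		unsigned int l3_bytes = 20;	/* IPv4 header, no options  */
		unsigned int l4_bytes = 20;	/* TCP header (doff = 5)    */

		/* The descriptor stores L2LEN in 2-byte units and L3LEN/L4LEN
		 * in 4-byte units, so the driver shifts before writing them.
		 */
		printf("L2LEN=%u L3LEN=%u L4LEN=%u\n",
		       l2_bytes >> 1, l3_bytes >> 2, l4_bytes >> 2);
		return 0;
	}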
+ */ + if (skb_is_gso(skb)) + hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); + + hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + } else if (l3.v6->version == 6) { + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, + HNS3_TXD_L3T_S, HNS3_L3T_IPV6); + hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + } + + switch (l4_proto) { + case IPPROTO_TCP: + hnae_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_TCP); + break; + case IPPROTO_UDP: + hnae_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_UDP); + break; + case IPPROTO_SCTP: + hnae_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_SCTP); + break; + default: + /* drop the skb tunnel packet if hardware don't support, + * because hardware can't calculate csum when TSO. + */ + if (skb_is_gso(skb)) + return -EDOM; + + /* the stack computes the IP header already, + * driver calculate l4 checksum when not TSO. + */ + skb_checksum_help(skb); + return 0; + } + + return 0; +} + +static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) +{ + /* Config bd buffer end */ + hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, + HNS3_TXD_BDTYPE_M, 0); + hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); + hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); + hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); +} + +static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + enum hns_desc_type type) +{ + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; + struct hns3_desc *desc = &ring->desc[ring->next_to_use]; + u32 ol_type_vlan_len_msec = 0; + u16 bdtp_fe_sc_vld_ra_ri = 0; + u32 type_cs_vlan_tso = 0; + struct sk_buff *skb; + u32 paylen = 0; + u16 mss = 0; + __be16 protocol; + u8 ol4_proto; + u8 il4_proto; + int ret; + + /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ + desc_cb->priv = priv; + desc_cb->length = size; + desc_cb->dma = dma; + desc_cb->type = type; + + /* now, fill the descriptor */ + desc->addr = cpu_to_le64(dma); + desc->tx.send_size = cpu_to_le16((u16)size); + hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end); + desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri); + + if (type == DESC_TYPE_SKB) { + skb = (struct sk_buff *)priv; + paylen = skb->len; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + skb_reset_mac_len(skb); + protocol = skb->protocol; + + /* vlan packet*/ + if (protocol == htons(ETH_P_8021Q)) { + protocol = vlan_get_protocol(skb); + skb->protocol = protocol; + } + ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); + if (ret) + return ret; + hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto, + &type_cs_vlan_tso, + &ol_type_vlan_len_msec); + ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto, + &type_cs_vlan_tso, + &ol_type_vlan_len_msec); + if (ret) + return ret; + + ret = hns3_set_tso(skb, &paylen, &mss, + &type_cs_vlan_tso); + if (ret) + return ret; + } + + /* Set txbd */ + desc->tx.ol_type_vlan_len_msec = + cpu_to_le32(ol_type_vlan_len_msec); + desc->tx.type_cs_vlan_tso_len = + cpu_to_le32(type_cs_vlan_tso); + desc->tx.paylen = cpu_to_le32(paylen); + desc->tx.mss = cpu_to_le16(mss); + } + + /* move ring pointer to next.*/ + ring_ptr_move_fw(ring, next_to_use); + + return 0; +} + +static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + enum hns_desc_type type) +{ + unsigned int frag_buf_num; + unsigned int k; + 
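The descriptor words above are assembled with hnae_set_field()/hnae_set_bit(); their definitions are not part of this patch, but they follow the usual mask-and-shift idiom. A stand-alone sketch of that idiom; set_field()/get_field() and FIELD_M/FIELD_S below are illustrative helpers, not the driver's macros:

	#include <stdint.h>
	#include <stdio.h>

	#define FIELD_M	0x00000f00u	/* example 4-bit field mask */
	#define FIELD_S	8		/* ... at bit offset 8      */

	static void set_field(uint32_t *word, uint32_t mask, int shift, uint32_t val)
	{
		*word = (*word & ~mask) | ((val << shift) & mask);
	}

	static uint32_t get_field(uint32_t word, uint32_t mask, int shift)
	{
		return (word & mask) >> shift;
	}

	int main(void)
	{
		uint32_t type_cs_vlan_tso = 0;

		set_field(&type_cs_vlan_tso, FIELD_M, FIELD_S, 5);
		printf("word=0x%08x field=%u\n", type_cs_vlan_tso,
		       get_field(type_cs_vlan_tso, FIELD_M, FIELD_S));
		return 0;
	}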
int sizeoflast; + int ret; + + frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + sizeoflast = size % HNS3_MAX_BD_SIZE; + sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; + + /* When the frag size is bigger than hardware, split this frag */ + for (k = 0; k < frag_buf_num; k++) { + ret = hns3_fill_desc(ring, priv, + (k == frag_buf_num - 1) ? + sizeoflast : HNS3_MAX_BD_SIZE, + dma + HNS3_MAX_BD_SIZE * k, + frag_end && (k == frag_buf_num - 1) ? 1 : 0, + (type == DESC_TYPE_SKB && !k) ? + DESC_TYPE_SKB : DESC_TYPE_PAGE); + if (ret) + return ret; + } + + return 0; +} + +static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, + struct hns3_enet_ring *ring) +{ + struct sk_buff *skb = *out_skb; + struct skb_frag_struct *frag; + int bdnum_for_frag; + int frag_num; + int buf_num; + int size; + int i; + + size = skb_headlen(skb); + buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + + frag_num = skb_shinfo(skb)->nr_frags; + for (i = 0; i < frag_num; i++) { + frag = &skb_shinfo(skb)->frags[i]; + size = skb_frag_size(frag); + bdnum_for_frag = + (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG) + return -ENOMEM; + + buf_num += bdnum_for_frag; + } + + if (buf_num > ring_space(ring)) + return -EBUSY; + + *bnum = buf_num; + return 0; +} + +static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, + struct hns3_enet_ring *ring) +{ + struct sk_buff *skb = *out_skb; + int buf_num; + + /* No. of segments (plus a header) */ + buf_num = skb_shinfo(skb)->nr_frags + 1; + + if (buf_num > ring_space(ring)) + return -EBUSY; + + *bnum = buf_num; + + return 0; +} + +static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig) +{ + struct device *dev = ring_to_dev(ring); + unsigned int i; + + for (i = 0; i < ring->desc_num; i++) { + /* check if this is where we started */ + if (ring->next_to_use == next_to_use_orig) + break; + + /* unmap the descriptor dma address */ + if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB) + dma_unmap_single(dev, + ring->desc_cb[ring->next_to_use].dma, + ring->desc_cb[ring->next_to_use].length, + DMA_TO_DEVICE); + else + dma_unmap_page(dev, + ring->desc_cb[ring->next_to_use].dma, + ring->desc_cb[ring->next_to_use].length, + DMA_TO_DEVICE); + + /* rollback one */ + ring_ptr_move_bw(ring, next_to_use); + } +} + +netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hns3_nic_ring_data *ring_data = + &tx_ring_data(priv, skb->queue_mapping); + struct hns3_enet_ring *ring = ring_data->ring; + struct device *dev = priv->dev; + struct netdev_queue *dev_queue; + struct skb_frag_struct *frag; + int next_to_use_head; + int next_to_use_frag; + dma_addr_t dma; + int buf_num; + int seg_num; + int size; + int ret; + int i; + + /* Prefetch the data used later */ + prefetch(skb->data); + + switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) { + case -EBUSY: + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_busy++; + u64_stats_update_end(&ring->syncp); + + goto out_net_tx_busy; + case -ENOMEM: + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + netdev_err(netdev, "no memory to xmit!\n"); + + goto out_err_tx_ok; + default: + break; + } + + /* No. 
of segments (plus a header) */ + seg_num = skb_shinfo(skb)->nr_frags + 1; + /* Fill the first part */ + size = skb_headlen(skb); + + next_to_use_head = ring->next_to_use; + + dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma)) { + netdev_err(netdev, "TX head DMA map failed\n"); + ring->stats.sw_err_cnt++; + goto out_err_tx_ok; + } + + ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0, + DESC_TYPE_SKB); + if (ret) + goto head_dma_map_err; + + next_to_use_frag = ring->next_to_use; + /* Fill the fragments */ + for (i = 1; i < seg_num; i++) { + frag = &skb_shinfo(skb)->frags[i - 1]; + size = skb_frag_size(frag); + dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma)) { + netdev_err(netdev, "TX frag(%d) DMA map failed\n", i); + ring->stats.sw_err_cnt++; + goto frag_dma_map_err; + } + ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma, + seg_num - 1 == i ? 1 : 0, + DESC_TYPE_PAGE); + + if (ret) + goto frag_dma_map_err; + } + + /* Complete translate all packets */ + dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index); + netdev_tx_sent_queue(dev_queue, skb->len); + + wmb(); /* Commit all data before submit */ + + hnae_queue_xmit(ring->tqp, buf_num); + + return NETDEV_TX_OK; + +frag_dma_map_err: + hns_nic_dma_unmap(ring, next_to_use_frag); + +head_dma_map_err: + hns_nic_dma_unmap(ring, next_to_use_head); + +out_err_tx_ok: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + +out_net_tx_busy: + netif_stop_subqueue(netdev, ring_data->queue_index); + smp_mb(); /* Commit all data before submit */ + + return NETDEV_TX_BUSY; +} + +static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + struct sockaddr *mac_addr = p; + int ret; + + if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) + return -EADDRNOTAVAIL; + + ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data); + if (ret) { + netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); + return ret; + } + + ether_addr_copy(netdev->dev_addr, mac_addr->sa_data); + + return 0; +} + +static int hns3_nic_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + + if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { + priv->ops.fill_desc = hns3_fill_desc_tso; + priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; + } else { + priv->ops.fill_desc = hns3_fill_desc; + priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; + } + + netdev->features = features; + return 0; +} + +static void +hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + int queue_num = priv->ae_handle->kinfo.num_tqps; + struct hns3_enet_ring *ring; + unsigned int start; + unsigned int idx; + u64 tx_bytes = 0; + u64 rx_bytes = 0; + u64 tx_pkts = 0; + u64 rx_pkts = 0; + + for (idx = 0; idx < queue_num; idx++) { + /* fetch the tx stats */ + ring = priv->ring_data[idx].ring; + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + tx_bytes += ring->stats.tx_bytes; + tx_pkts += ring->stats.tx_pkts; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + + /* fetch the rx stats */ + ring = priv->ring_data[idx + queue_num].ring; + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); + rx_bytes += ring->stats.rx_bytes; + rx_pkts += ring->stats.rx_pkts; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + } + + 
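Before mapping anything, the xmit path above asks ops.maybe_stop_tx() how many buffer descriptors the skb needs: without TSO that is simply nr_frags plus one for the linear part, with TSO every chunk larger than HNS3_MAX_BD_SIZE is split into ceil(size / HNS3_MAX_BD_SIZE) descriptors, and the total is checked against ring_space() (-EBUSY stops the queue). A stand-alone sketch of that counting; the 16K chunk size and the fragment sizes are assumptions, not the driver's values:

	#include <stdio.h>

	#define MAX_BD_SIZE 16384u	/* illustrative chunk size only */

	static unsigned int bds_for(unsigned int size)
	{
		return (size + MAX_BD_SIZE - 1) / MAX_BD_SIZE;	/* ceil division */
	}

	int main(void)
	{
		unsigned int frags[] = { 1200, 40000, 9000 };	/* made-up fragment sizes */
		unsigned int headlen = 242;			/* made-up linear length  */
		unsigned int i, buf_num = bds_for(headlen);

		for (i = 0; i < sizeof(frags) / sizeof(frags[0]); i++)
			buf_num += bds_for(frags[i]);

		/* The driver compares this count with ring_space(ring) and
		 * returns -EBUSY when the ring cannot hold the whole skb.
		 */
		printf("descriptors needed: %u\n", buf_num);
		return 0;
	}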
stats->tx_bytes = tx_bytes; + stats->tx_packets = tx_pkts; + stats->rx_bytes = rx_bytes; + stats->rx_packets = rx_pkts; + + stats->rx_errors = netdev->stats.rx_errors; + stats->multicast = netdev->stats.multicast; + stats->rx_length_errors = netdev->stats.rx_length_errors; + stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_missed_errors = netdev->stats.rx_missed_errors; + + stats->tx_errors = netdev->stats.tx_errors; + stats->rx_dropped = netdev->stats.rx_dropped; + stats->tx_dropped = netdev->stats.tx_dropped; + stats->collisions = netdev->stats.collisions; + stats->rx_over_errors = netdev->stats.rx_over_errors; + stats->rx_frame_errors = netdev->stats.rx_frame_errors; + stats->rx_fifo_errors = netdev->stats.rx_fifo_errors; + stats->tx_aborted_errors = netdev->stats.tx_aborted_errors; + stats->tx_carrier_errors = netdev->stats.tx_carrier_errors; + stats->tx_fifo_errors = netdev->stats.tx_fifo_errors; + stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors; + stats->tx_window_errors = netdev->stats.tx_window_errors; + stats->rx_compressed = netdev->stats.rx_compressed; + stats->tx_compressed = netdev->stats.tx_compressed; +} + +static void hns3_add_tunnel_port(struct net_device *netdev, u16 port, + enum hns3_udp_tnl_type type) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type]; + struct hnae3_handle *h = priv->ae_handle; + + if (udp_tnl->used && udp_tnl->dst_port == port) { + udp_tnl->used++; + return; + } + + if (udp_tnl->used) { + netdev_warn(netdev, + "UDP tunnel [%d], port [%d] offload\n", type, port); + return; + } + + udp_tnl->dst_port = port; + udp_tnl->used = 1; + /* TBD send command to hardware to add port */ + if (h->ae_algo->ops->add_tunnel_udp) + h->ae_algo->ops->add_tunnel_udp(h, port); +} + +static void hns3_del_tunnel_port(struct net_device *netdev, u16 port, + enum hns3_udp_tnl_type type) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type]; + struct hnae3_handle *h = priv->ae_handle; + + if (!udp_tnl->used || udp_tnl->dst_port != port) { + netdev_warn(netdev, + "Invalid UDP tunnel port %d\n", port); + return; + } + + udp_tnl->used--; + if (udp_tnl->used) + return; + + udp_tnl->dst_port = 0; + /* TBD send command to hardware to del port */ + if (h->ae_algo->ops->del_tunnel_udp) + h->ae_algo->ops->del_tunnel_udp(h, port); +} + +/* hns3_nic_udp_tunnel_add - Get notifiacetion about UDP tunnel ports + * @netdev: This physical ports's netdev + * @ti: Tunnel information + */ +static void hns3_nic_udp_tunnel_add(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + u16 port_n = ntohs(ti->port); + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN); + break; + case UDP_TUNNEL_TYPE_GENEVE: + hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE); + break; + default: + netdev_err(netdev, "unsupported tunnel type %d\n", ti->type); + break; + } +} + +static void hns3_nic_udp_tunnel_del(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + u16 port_n = ntohs(ti->port); + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN); + break; + case UDP_TUNNEL_TYPE_GENEVE: + hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE); + break; + default: + break; + } +} + +static int hns3_setup_tc(struct net_device *netdev, void *type_data) +{ + struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; + struct hnae3_handle 
*h = hns3_get_handle(netdev); + struct hnae3_knic_private_info *kinfo = &h->kinfo; + u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map; + u8 tc = mqprio_qopt->qopt.num_tc; + u16 mode = mqprio_qopt->mode; + u8 hw = mqprio_qopt->qopt.hw; + bool if_running; + unsigned int i; + int ret; + + if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && + mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0))) + return -EOPNOTSUPP; + + if (tc > HNAE3_MAX_TC) + return -EINVAL; + + if (!netdev) + return -EINVAL; + + if_running = netif_running(netdev); + if (if_running) { + hns3_nic_net_stop(netdev); + msleep(100); + } + + ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? + kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP; + if (ret) + goto out; + + if (tc <= 1) { + netdev_reset_tc(netdev); + } else { + ret = netdev_set_num_tc(netdev, tc); + if (ret) + goto out; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (!kinfo->tc_info[i].enable) + continue; + + netdev_set_tc_queue(netdev, + kinfo->tc_info[i].tc, + kinfo->tc_info[i].tqp_count, + kinfo->tc_info[i].tqp_offset); + } + } + + ret = hns3_nic_set_real_num_queue(netdev); + +out: + if (if_running) + hns3_nic_net_open(netdev); + + return ret; +} + +static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + if (type != TC_SETUP_QDISC_MQPRIO) + return -EOPNOTSUPP; + + return hns3_setup_tc(dev, type_data); +} + +static int hns3_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + int ret = -EIO; + + if (h->ae_algo->ops->set_vlan_filter) + ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); + + return ret; +} + +static int hns3_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + int ret = -EIO; + + if (h->ae_algo->ops->set_vlan_filter) + ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); + + return ret; +} + +static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, + u8 qos, __be16 vlan_proto) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + int ret = -EIO; + + if (h->ae_algo->ops->set_vf_vlan_filter) + ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, + qos, vlan_proto); + + return ret; +} + +static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + bool if_running = netif_running(netdev); + int ret; + + if (!h->ae_algo->ops->set_mtu) + return -EOPNOTSUPP; + + /* if this was called with netdev up then bring netdevice down */ + if (if_running) { + (void)hns3_nic_net_stop(netdev); + msleep(100); + } + + ret = h->ae_algo->ops->set_mtu(h, new_mtu); + if (ret) { + netdev_err(netdev, "failed to change MTU in hardware %d\n", + ret); + return ret; + } + + /* if the netdev was running earlier, bring it up again */ + if (if_running && hns3_nic_net_open(netdev)) + ret = -EINVAL; + + return ret; +} + +static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hns3_enet_ring *tx_ring = NULL; + int timeout_queue = 0; + int hw_head, hw_tail; + int i; + + /* Find the stopped queue the same way the stack does */ + for (i = 0; i < ndev->real_num_tx_queues; i++) { + struct netdev_queue *q; + unsigned long trans_start; + + q = netdev_get_tx_queue(ndev, i); + trans_start = q->trans_start; + if (netif_xmit_stopped(q) && + time_after(jiffies, + (trans_start + ndev->watchdog_timeo))) { + timeout_queue = i; + break; + } + } + 
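The scan above flags a queue as timed out once jiffies has moved past trans_start + watchdog_timeo; time_after() performs this with a signed difference so the comparison stays correct when the jiffies counter wraps. A stand-alone sketch of that wrap-safe test, using a 32-bit counter purely for illustration (the kernel's time_after() is the real interface):

	#include <stdint.h>
	#include <stdio.h>

	/* Wrap-safe "a is after b": true iff a is ahead of b by less than 2^31. */
	static int after(uint32_t a, uint32_t b)
	{
		return (int32_t)(a - b) > 0;
	}

	int main(void)
	{
		uint32_t trans_start = 0xfffffff0u;	/* last restart just before wrap */
		uint32_t timeo       = 0x40;		/* watchdog timeout in ticks     */

		printf("%d\n", after(0xfffffff8u, trans_start + timeo));	/* 0: not yet   */
		printf("%d\n", after(0x00000050u, trans_start + timeo));	/* 1: timed out */
		return 0;
	}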
+ if (i == ndev->num_tx_queues) { + netdev_info(ndev, + "no netdev TX timeout queue found, timeout count: %llu\n", + priv->tx_timeout_count); + return false; + } + + tx_ring = priv->ring_data[timeout_queue].ring; + + hw_head = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_HEAD_REG); + hw_tail = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_TAIL_REG); + netdev_info(ndev, + "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n", + priv->tx_timeout_count, + timeout_queue, + tx_ring->next_to_use, + tx_ring->next_to_clean, + hw_head, + hw_tail, + readl(tx_ring->tqp_vector->mask_addr)); + + return true; +} + +static void hns3_nic_net_timeout(struct net_device *ndev) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + unsigned long last_reset_time = priv->last_reset_time; + struct hnae3_handle *h = priv->ae_handle; + + if (!hns3_get_tx_timeo_queue_info(ndev)) + return; + + priv->tx_timeout_count++; + + /* This timeout is far away enough from last timeout, + * if timeout again,set the reset type to PF reset + */ + if (time_after(jiffies, (last_reset_time + 20 * HZ))) + priv->reset_level = HNAE3_FUNC_RESET; + + /* Don't do any new action before the next timeout */ + else if (time_before(jiffies, (last_reset_time + ndev->watchdog_timeo))) + return; + + priv->last_reset_time = jiffies; + + if (h->ae_algo->ops->reset_event) + h->ae_algo->ops->reset_event(h, priv->reset_level); + + priv->reset_level++; + if (priv->reset_level > HNAE3_GLOBAL_RESET) + priv->reset_level = HNAE3_GLOBAL_RESET; +} + +static const struct net_device_ops hns3_nic_netdev_ops = { + .ndo_open = hns3_nic_net_open, + .ndo_stop = hns3_nic_net_stop, + .ndo_start_xmit = hns3_nic_net_xmit, + .ndo_tx_timeout = hns3_nic_net_timeout, + .ndo_set_mac_address = hns3_nic_net_set_mac_address, + .ndo_change_mtu = hns3_nic_change_mtu, + .ndo_set_features = hns3_nic_set_features, + .ndo_get_stats64 = hns3_nic_get_stats64, + .ndo_setup_tc = hns3_nic_setup_tc, + .ndo_set_rx_mode = hns3_nic_set_rx_mode, + .ndo_udp_tunnel_add = hns3_nic_udp_tunnel_add, + .ndo_udp_tunnel_del = hns3_nic_udp_tunnel_del, + .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, + .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, +}; + +/* hns3_probe - Device initialization routine + * @pdev: PCI device information struct + * @ent: entry in hns3_pci_tbl + * + * hns3_probe initializes a PF identified by a pci_dev structure. + * The OS initialization, configuring of the PF private structure, + * and a hardware reset occur. 
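hns3_nic_net_timeout() above rate-limits its reaction (nothing new until watchdog_timeo has passed again, and it falls back to a function-level reset if the last event was more than 20 seconds ago), otherwise it escalates reset_level one step per timeout and clamps at HNAE3_GLOBAL_RESET. A stand-alone sketch of that escalate-and-clamp step; the numeric values are placeholders, only the FUNC and GLOBAL names appear in this patch:

	#include <stdio.h>

	#define FUNC_RESET	1	/* illustrative values only */
	#define GLOBAL_RESET	3

	static int escalate(int level)
	{
		/* a reset at 'level' would be requested here, then the next step armed */
		if (++level > GLOBAL_RESET)
			level = GLOBAL_RESET;
		return level;
	}

	int main(void)
	{
		int level = FUNC_RESET;
		int i;

		for (i = 0; i < 4; i++) {
			printf("timeout %d: request level %d\n", i, level);
			level = escalate(level);
		}
		return 0;
	}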
+ * + * Returns 0 on success, negative on failure + */ +static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct hnae3_ae_dev *ae_dev; + int ret; + + ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), + GFP_KERNEL); + if (!ae_dev) { + ret = -ENOMEM; + return ret; + } + + ae_dev->pdev = pdev; + ae_dev->flag = ent->driver_data; + ae_dev->dev_type = HNAE3_DEV_KNIC; + pci_set_drvdata(pdev, ae_dev); + + return hnae3_register_ae_dev(ae_dev); +} + +/* hns3_remove - Device removal routine + * @pdev: PCI device information struct + */ +static void hns3_remove(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + hnae3_unregister_ae_dev(ae_dev); + + devm_kfree(&pdev->dev, ae_dev); + + pci_set_drvdata(pdev, NULL); +} + +static struct pci_driver hns3_driver = { + .name = hns3_driver_name, + .id_table = hns3_pci_tbl, + .probe = hns3_probe, + .remove = hns3_remove, +}; + +/* set default feature to hns3 */ +static void hns3_set_default_feature(struct net_device *netdev) +{ + netdev->priv_flags |= IFF_UNICAST_FLT; + + netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | + NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; + + netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; + + netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | + NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->vlan_features |= + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | + NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | + NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | + NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; +} + +static int hns3_alloc_buffer(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) +{ + unsigned int order = hnae_page_order(ring); + struct page *p; + + p = dev_alloc_pages(order); + if (!p) + return -ENOMEM; + + cb->priv = p; + cb->page_offset = 0; + cb->reuse_flag = 0; + cb->buf = page_address(p); + cb->length = hnae_page_size(ring); + cb->type = DESC_TYPE_PAGE; + + return 0; +} + +static void hns3_free_buffer(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) +{ + if (cb->type == DESC_TYPE_SKB) + dev_kfree_skb_any((struct sk_buff *)cb->priv); + else if (!HNAE3_IS_TX_RING(ring)) + put_page((struct page *)cb->priv); + memset(cb, 0, sizeof(*cb)); +} + +static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) +{ + cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, + cb->length, ring_to_dma_dir(ring)); + + if (dma_mapping_error(ring_to_dev(ring), cb->dma)) + return -EIO; + + return 0; +} + +static void hns3_unmap_buffer(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) +{ + if (cb->type == DESC_TYPE_SKB) + dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, + ring_to_dma_dir(ring)); + else + dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, + ring_to_dma_dir(ring)); +} + +static void 
hns3_buffer_detach(struct hns3_enet_ring *ring, int i) +{ + hns3_unmap_buffer(ring, &ring->desc_cb[i]); + ring->desc[i].addr = 0; +} + +static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i) +{ + struct hns3_desc_cb *cb = &ring->desc_cb[i]; + + if (!ring->desc_cb[i].dma) + return; + + hns3_buffer_detach(ring, i); + hns3_free_buffer(ring, cb); +} + +static void hns3_free_buffers(struct hns3_enet_ring *ring) +{ + int i; + + for (i = 0; i < ring->desc_num; i++) + hns3_free_buffer_detach(ring, i); +} + +/* free desc along with its attached buffer */ +static void hns3_free_desc(struct hns3_enet_ring *ring) +{ + hns3_free_buffers(ring); + + dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, + ring->desc_num * sizeof(ring->desc[0]), + DMA_BIDIRECTIONAL); + ring->desc_dma_addr = 0; + kfree(ring->desc); + ring->desc = NULL; +} + +static int hns3_alloc_desc(struct hns3_enet_ring *ring) +{ + int size = ring->desc_num * sizeof(ring->desc[0]); + + ring->desc = kzalloc(size, GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + + ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc, + size, DMA_BIDIRECTIONAL); + if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) { + ring->desc_dma_addr = 0; + kfree(ring->desc); + ring->desc = NULL; + return -ENOMEM; + } + + return 0; +} + +static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) +{ + int ret; + + ret = hns3_alloc_buffer(ring, cb); + if (ret) + goto out; + + ret = hns3_map_buffer(ring, cb); + if (ret) + goto out_with_buf; + + return 0; + +out_with_buf: + hns3_free_buffer(ring, cb); +out: + return ret; +} + +static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i) +{ + int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]); + + if (ret) + return ret; + + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); + + return 0; +} + +/* Allocate memory for raw pkg, and map with dma */ +static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) +{ + int i, j, ret; + + for (i = 0; i < ring->desc_num; i++) { + ret = hns3_alloc_buffer_attach(ring, i); + if (ret) + goto out_buffer_fail; + } + + return 0; + +out_buffer_fail: + for (j = i - 1; j >= 0; j--) + hns3_free_buffer_detach(ring, j); + return ret; +} + +/* detach a in-used buffer and replace with a reserved one */ +static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, + struct hns3_desc_cb *res_cb) +{ + hns3_unmap_buffer(ring, &ring->desc_cb[i]); + ring->desc_cb[i] = *res_cb; + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); +} + +static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) +{ + ring->desc_cb[i].reuse_flag = 0; + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + + ring->desc_cb[i].page_offset); +} + +static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes, + int *pkts) +{ + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; + + (*pkts) += (desc_cb->type == DESC_TYPE_SKB); + (*bytes) += desc_cb->length; + /* desc_cb will be cleaned, after hnae_free_buffer_detach*/ + hns3_free_buffer_detach(ring, ring->next_to_clean); + + ring_ptr_move_fw(ring, next_to_clean); +} + +static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) +{ + int u = ring->next_to_use; + int c = ring->next_to_clean; + + if (unlikely(h > ring->desc_num)) + return 0; + + return u > c ? 
(h > c && h <= u) : (h > c || h <= u); +} + +bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) +{ + struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + struct netdev_queue *dev_queue; + int bytes, pkts; + int head; + + head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG); + rmb(); /* Make sure head is ready before touch any data */ + + if (is_ring_empty(ring) || head == ring->next_to_clean) + return true; /* no data to poll */ + + if (!is_valid_clean_head(ring, head)) { + netdev_err(netdev, "wrong head (%d, %d-%d)\n", head, + ring->next_to_use, ring->next_to_clean); + + u64_stats_update_begin(&ring->syncp); + ring->stats.io_err_cnt++; + u64_stats_update_end(&ring->syncp); + return true; + } + + bytes = 0; + pkts = 0; + while (head != ring->next_to_clean && budget) { + hns3_nic_reclaim_one_desc(ring, &bytes, &pkts); + /* Issue prefetch for next Tx descriptor */ + prefetch(&ring->desc_cb[ring->next_to_clean]); + budget--; + } + + ring->tqp_vector->tx_group.total_bytes += bytes; + ring->tqp_vector->tx_group.total_packets += pkts; + + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_bytes += bytes; + ring->stats.tx_pkts += pkts; + u64_stats_update_end(&ring->syncp); + + dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); + netdev_tx_completed_queue(dev_queue, pkts, bytes); + + if (unlikely(pkts && netif_carrier_ok(netdev) && + (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (netif_tx_queue_stopped(dev_queue)) { + netif_tx_wake_queue(dev_queue); + ring->stats.restart_queue++; + } + } + + return !!budget; +} + +static int hns3_desc_unused(struct hns3_enet_ring *ring) +{ + int ntc = ring->next_to_clean; + int ntu = ring->next_to_use; + + return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; +} + +static void +hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count) +{ + struct hns3_desc_cb *desc_cb; + struct hns3_desc_cb res_cbs; + int i, ret; + + for (i = 0; i < cleand_count; i++) { + desc_cb = &ring->desc_cb[ring->next_to_use]; + if (desc_cb->reuse_flag) { + u64_stats_update_begin(&ring->syncp); + ring->stats.reuse_pg_cnt++; + u64_stats_update_end(&ring->syncp); + + hns3_reuse_buffer(ring, ring->next_to_use); + } else { + ret = hns3_reserve_buffer_map(ring, &res_cbs); + if (ret) { + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + + netdev_err(ring->tqp->handle->kinfo.netdev, + "hnae reserve buffer map failed.\n"); + break; + } + hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); + } + + ring_ptr_move_fw(ring, next_to_use); + } + + wmb(); /* Make all data has been write before submit */ + writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); +} + +/* hns3_nic_get_headlen - determine size of header for LRO/GRO + * @data: pointer to the start of the headers + * @max: total length of section to find headers in + * + * This function is meant to determine the length of headers that will + * be recognized by hardware for LRO, GRO, and RSC offloads. The main + * motivation of doing this is to only perform one pull for IPv4 TCP + * packets so that we can do basic things like calculating the gso_size + * based on the average data per packet. 
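hns3_clean_tx_ring() above trusts the hardware head pointer only after is_valid_clean_head() confirms it lies between next_to_clean and next_to_use, with both wrap-around orderings handled by the ternary. Below, a stand-alone check of that predicate together with the free-descriptor count from hns3_desc_unused(); valid_head()/desc_unused() mirror the driver's expressions with made-up ring geometry:

	#include <stdio.h>

	#define DESC_NUM 8

	static int valid_head(int h, int c /* next_to_clean */, int u /* next_to_use */)
	{
		if (h > DESC_NUM)
			return 0;
		return u > c ? (h > c && h <= u) : (h > c || h <= u);
	}

	static int desc_unused(int c, int u)
	{
		return ((c >= u) ? 0 : DESC_NUM) + c - u;
	}

	int main(void)
	{
		/* wrapped ring: clean at 6, use at 2 */
		printf("head 7: %d, head 1: %d, head 4: %d\n",
		       valid_head(7, 6, 2), valid_head(1, 6, 2), valid_head(4, 6, 2));
		printf("unused descriptors: %d\n", desc_unused(6, 2));
		return 0;
	}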
+ */ +static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag, + unsigned int max_size) +{ + unsigned char *network; + u8 hlen; + + /* This should never happen, but better safe than sorry */ + if (max_size < ETH_HLEN) + return max_size; + + /* Initialize network frame pointer */ + network = data; + + /* Set first protocol and move network header forward */ + network += ETH_HLEN; + + /* Handle any vlan tag if present */ + if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S) + == HNS3_RX_FLAG_VLAN_PRESENT) { + if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN)) + return max_size; + + network += VLAN_HLEN; + } + + /* Handle L3 protocols */ + if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S) + == HNS3_RX_FLAG_L3ID_IPV4) { + if ((typeof(max_size))(network - data) > + (max_size - sizeof(struct iphdr))) + return max_size; + + /* Access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (network[0] & 0x0F) << 2; + + /* Verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct iphdr)) + return network - data; + + /* Record next protocol if header is present */ + } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S) + == HNS3_RX_FLAG_L3ID_IPV6) { + if ((typeof(max_size))(network - data) > + (max_size - sizeof(struct ipv6hdr))) + return max_size; + + /* Record next protocol */ + hlen = sizeof(struct ipv6hdr); + } else { + return network - data; + } + + /* Relocate pointer to start of L4 header */ + network += hlen; + + /* Finally sort out TCP/UDP */ + if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S) + == HNS3_RX_FLAG_L4ID_TCP) { + if ((typeof(max_size))(network - data) > + (max_size - sizeof(struct tcphdr))) + return max_size; + + /* Access doff as a u8 to avoid unaligned access on ia64 */ + hlen = (network[12] & 0xF0) >> 2; + + /* Verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct tcphdr)) + return network - data; + + network += hlen; + } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S) + == HNS3_RX_FLAG_L4ID_UDP) { + if ((typeof(max_size))(network - data) > + (max_size - sizeof(struct udphdr))) + return max_size; + + network += sizeof(struct udphdr); + } + + /* If everything has gone correctly network should be the + * data section of the packet and will be the end of the header. + * If not then it probably represents the end of the last recognized + * header. 
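hns3_nic_get_headlen() above walks the Ethernet, optional VLAN, IPv4/IPv6 and TCP/UDP headers to report how many bytes of the buffer are headers, using the RX descriptor's L3/L4 type fields rather than re-parsing EtherTypes. Below, a stand-alone parse of the same lengths from a synthetic IPv4/TCP frame, with no descriptor hints and only the obvious bounds implied by the array size:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* 14-byte Ethernet + minimal IPv4 + minimal TCP, payload omitted */
		uint8_t pkt[54] = { 0 };
		unsigned int ihl, doff, headlen;

		pkt[12] = 0x08; pkt[13] = 0x00;	/* EtherType: IPv4             */
		pkt[14] = 0x45;			/* version 4, ihl 5 (20 bytes) */
		pkt[23] = 6;			/* protocol: TCP               */
		pkt[14 + 20 + 12] = 0x50;	/* TCP doff 5 (20 bytes)       */

		ihl  = (pkt[14] & 0x0f) * 4;
		doff = (pkt[14 + ihl + 12] >> 4) * 4;
		headlen = 14 + ihl + doff;

		printf("ihl=%u doff=%u headlen=%u\n", ihl, doff, headlen);	/* 20 20 54 */
		return 0;
	}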
+ */ + if ((typeof(max_size))(network - data) < max_size) + return network - data; + else + return max_size; +} + +static void hns3_nic_reuse_page(struct sk_buff *skb, int i, + struct hns3_enet_ring *ring, int pull_len, + struct hns3_desc_cb *desc_cb) +{ + struct hns3_desc *desc; + int truesize, size; + int last_offset; + bool twobufs; + + twobufs = ((PAGE_SIZE < 8192) && + hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048); + + desc = &ring->desc[ring->next_to_clean]; + size = le16_to_cpu(desc->rx.size); + + if (twobufs) { + truesize = hnae_buf_size(ring); + } else { + truesize = ALIGN(size, L1_CACHE_BYTES); + last_offset = hnae_page_size(ring) - hnae_buf_size(ring); + } + + skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, + size - pull_len, truesize - pull_len); + + /* Avoid re-using remote pages,flag default unreuse */ + if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) + return; + + if (twobufs) { + /* If we are only owner of page we can reuse it */ + if (likely(page_count(desc_cb->priv) == 1)) { + /* Flip page offset to other buffer */ + desc_cb->page_offset ^= truesize; + + desc_cb->reuse_flag = 1; + /* bump ref count on page before it is given*/ + get_page(desc_cb->priv); + } + return; + } + + /* Move offset up to the next cache line */ + desc_cb->page_offset += truesize; + + if (desc_cb->page_offset <= last_offset) { + desc_cb->reuse_flag = 1; + /* Bump ref count on page before it is given*/ + get_page(desc_cb->priv); + } +} + +static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, + struct hns3_desc *desc) +{ + struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + int l3_type, l4_type; + u32 bd_base_info; + int ol4_type; + u32 l234info; + + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + l234info = le32_to_cpu(desc->rx.l234_info); + + skb->ip_summed = CHECKSUM_NONE; + + skb_checksum_none_assert(skb); + + if (!(netdev->features & NETIF_F_RXCSUM)) + return; + + /* check if hardware has done checksum */ + if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B)) + return; + + if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) || + hnae_get_bit(l234info, HNS3_RXD_L4E_B) || + hnae_get_bit(l234info, HNS3_RXD_OL3E_B) || + hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) { + netdev_err(netdev, "L3/L4 error pkt\n"); + u64_stats_update_begin(&ring->syncp); + ring->stats.l3l4_csum_err++; + u64_stats_update_end(&ring->syncp); + + return; + } + + l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M, + HNS3_RXD_L3ID_S); + l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M, + HNS3_RXD_L4ID_S); + + ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S); + switch (ol4_type) { + case HNS3_OL4_TYPE_MAC_IN_UDP: + case HNS3_OL4_TYPE_NVGRE: + skb->csum_level = 1; + case HNS3_OL4_TYPE_NO_TUN: + /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ + if (l3_type == HNS3_L3_TYPE_IPV4 || + (l3_type == HNS3_L3_TYPE_IPV6 && + (l4_type == HNS3_L4_TYPE_UDP || + l4_type == HNS3_L4_TYPE_TCP || + l4_type == HNS3_L4_TYPE_SCTP))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + break; + } +} + +static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) +{ + napi_gro_receive(&ring->tqp_vector->napi, skb); +} + +static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, + struct sk_buff **out_skb, int *out_bnum) +{ + struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + struct hns3_desc_cb *desc_cb; + struct hns3_desc *desc; + struct sk_buff *skb; + unsigned char *va; + u32 bd_base_info; + int pull_len; + u32 l234info; + int length; 
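+ /* bnum counts the buffer descriptors consumed by the current packet */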
+ int bnum; + + desc = &ring->desc[ring->next_to_clean]; + desc_cb = &ring->desc_cb[ring->next_to_clean]; + + prefetch(desc); + + length = le16_to_cpu(desc->rx.pkt_len); + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + l234info = le32_to_cpu(desc->rx.l234_info); + + /* Check valid BD */ + if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B)) + return -EFAULT; + + va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; + + /* Prefetch first cache line of first page + * Idea is to cache few bytes of the header of the packet. Our L1 Cache + * line size is 64B so need to prefetch twice to make it 128B. But in + * actual we can have greater size of caches with 128B Level 1 cache + * lines. In such a case, single fetch would suffice to cache in the + * relevant part of the header. + */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi, + HNS3_RX_HEAD_SIZE); + if (unlikely(!skb)) { + netdev_err(netdev, "alloc rx skb fail\n"); + + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + + return -ENOMEM; + } + + prefetchw(skb->data); + + bnum = 1; + if (length <= HNS3_RX_HEAD_SIZE) { + memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); + + /* We can reuse buffer as-is, just make sure it is local */ + if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) + desc_cb->reuse_flag = 1; + else /* This page cannot be reused so discard it */ + put_page(desc_cb->priv); + + ring_ptr_move_fw(ring, next_to_clean); + } else { + u64_stats_update_begin(&ring->syncp); + ring->stats.seg_pkt_cnt++; + u64_stats_update_end(&ring->syncp); + + pull_len = hns3_nic_get_headlen(va, l234info, + HNS3_RX_HEAD_SIZE); + memcpy(__skb_put(skb, pull_len), va, + ALIGN(pull_len, sizeof(long))); + + hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb); + ring_ptr_move_fw(ring, next_to_clean); + + while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) { + desc = &ring->desc[ring->next_to_clean]; + desc_cb = &ring->desc_cb[ring->next_to_clean]; + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb); + ring_ptr_move_fw(ring, next_to_clean); + bnum++; + } + } + + *out_bnum = bnum; + + if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { + netdev_err(netdev, "no valid bd,%016llx,%016llx\n", + ((u64 *)desc)[0], ((u64 *)desc)[1]); + u64_stats_update_begin(&ring->syncp); + ring->stats.non_vld_descs++; + u64_stats_update_end(&ring->syncp); + + dev_kfree_skb_any(skb); + return -EINVAL; + } + + if (unlikely((!desc->rx.pkt_len) || + hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { + netdev_err(netdev, "truncated pkt\n"); + u64_stats_update_begin(&ring->syncp); + ring->stats.err_pkt_len++; + u64_stats_update_end(&ring->syncp); + + dev_kfree_skb_any(skb); + return -EFAULT; + } + + if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) { + netdev_err(netdev, "L2 error pkt\n"); + u64_stats_update_begin(&ring->syncp); + ring->stats.l2_err++; + u64_stats_update_end(&ring->syncp); + + dev_kfree_skb_any(skb); + return -EFAULT; + } + + u64_stats_update_begin(&ring->syncp); + ring->stats.rx_pkts++; + ring->stats.rx_bytes += skb->len; + u64_stats_update_end(&ring->syncp); + + ring->tqp_vector->rx_group.total_bytes += skb->len; + + hns3_rx_checksum(ring, skb, desc); + return 0; +} + +int hns3_clean_rx_ring( + struct hns3_enet_ring *ring, int budget, + void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) +{ +#define RCB_NOF_ALLOC_RX_BUFF_ONCE 
16 + struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + int recv_pkts, recv_bds, clean_count, err; + int unused_count = hns3_desc_unused(ring); + struct sk_buff *skb = NULL; + int num, bnum = 0; + + num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG); + rmb(); /* Make sure num taken effect before the other data is touched */ + + recv_pkts = 0, recv_bds = 0, clean_count = 0; + num -= unused_count; + + while (recv_pkts < budget && recv_bds < num) { + /* Reuse or realloc buffers */ + if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { + hns3_nic_alloc_rx_buffers(ring, + clean_count + unused_count); + clean_count = 0; + unused_count = hns3_desc_unused(ring); + } + + /* Poll one pkt */ + err = hns3_handle_rx_bd(ring, &skb, &bnum); + if (unlikely(!skb)) /* This fault cannot be repaired */ + goto out; + + recv_bds += bnum; + clean_count += bnum; + if (unlikely(err)) { /* Do jump the err */ + recv_pkts++; + continue; + } + + /* Do update ip stack process */ + skb->protocol = eth_type_trans(skb, netdev); + rx_fn(ring, skb); + + recv_pkts++; + } + +out: + /* Make all data has been write before submit */ + if (clean_count + unused_count > 0) + hns3_nic_alloc_rx_buffers(ring, + clean_count + unused_count); + + return recv_pkts; +} + +static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) +{ +#define HNS3_RX_ULTRA_PACKET_RATE 40000 + enum hns3_flow_level_range new_flow_level; + struct hns3_enet_tqp_vector *tqp_vector; + int packets_per_secs; + int bytes_per_usecs; + u16 new_int_gl; + int usecs; + + if (!ring_group->int_gl) + return false; + + if (ring_group->total_packets == 0) { + ring_group->int_gl = HNS3_INT_GL_50K; + ring_group->flow_level = HNS3_FLOW_LOW; + return true; + } + + /* Simple throttlerate management + * 0-10MB/s lower (50000 ints/s) + * 10-20MB/s middle (20000 ints/s) + * 20-1249MB/s high (18000 ints/s) + * > 40000pps ultra (8000 ints/s) + */ + new_flow_level = ring_group->flow_level; + new_int_gl = ring_group->int_gl; + tqp_vector = ring_group->ring->tqp_vector; + usecs = (ring_group->int_gl << 1); + bytes_per_usecs = ring_group->total_bytes / usecs; + /* 1000000 microseconds */ + packets_per_secs = ring_group->total_packets * 1000000 / usecs; + + switch (new_flow_level) { + case HNS3_FLOW_LOW: + if (bytes_per_usecs > 10) + new_flow_level = HNS3_FLOW_MID; + break; + case HNS3_FLOW_MID: + if (bytes_per_usecs > 20) + new_flow_level = HNS3_FLOW_HIGH; + else if (bytes_per_usecs <= 10) + new_flow_level = HNS3_FLOW_LOW; + break; + case HNS3_FLOW_HIGH: + case HNS3_FLOW_ULTRA: + default: + if (bytes_per_usecs <= 20) + new_flow_level = HNS3_FLOW_MID; + break; + } + + if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) && + (&tqp_vector->rx_group == ring_group)) + new_flow_level = HNS3_FLOW_ULTRA; + + switch (new_flow_level) { + case HNS3_FLOW_LOW: + new_int_gl = HNS3_INT_GL_50K; + break; + case HNS3_FLOW_MID: + new_int_gl = HNS3_INT_GL_20K; + break; + case HNS3_FLOW_HIGH: + new_int_gl = HNS3_INT_GL_18K; + break; + case HNS3_FLOW_ULTRA: + new_int_gl = HNS3_INT_GL_8K; + break; + default: + break; + } + + ring_group->total_bytes = 0; + ring_group->total_packets = 0; + ring_group->flow_level = new_flow_level; + if (new_int_gl != ring_group->int_gl) { + ring_group->int_gl = new_int_gl; + return true; + } + return false; +} + +static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) +{ + u16 rx_int_gl, tx_int_gl; + bool rx, tx; + + rx = hns3_get_new_int_gl(&tqp_vector->rx_group); + tx = 
hns3_get_new_int_gl(&tqp_vector->tx_group); + rx_int_gl = tqp_vector->rx_group.int_gl; + tx_int_gl = tqp_vector->tx_group.int_gl; + if (rx && tx) { + if (rx_int_gl > tx_int_gl) { + tqp_vector->tx_group.int_gl = rx_int_gl; + tqp_vector->tx_group.flow_level = + tqp_vector->rx_group.flow_level; + hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl); + } else { + tqp_vector->rx_group.int_gl = tx_int_gl; + tqp_vector->rx_group.flow_level = + tqp_vector->tx_group.flow_level; + hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl); + } + } +} + +static int hns3_nic_common_poll(struct napi_struct *napi, int budget) +{ + struct hns3_enet_ring *ring; + int rx_pkt_total = 0; + + struct hns3_enet_tqp_vector *tqp_vector = + container_of(napi, struct hns3_enet_tqp_vector, napi); + bool clean_complete = true; + int rx_budget; + + /* Since the actual Tx work is minimal, we can give the Tx a larger + * budget and be more aggressive about cleaning up the Tx descriptors. + */ + hns3_for_each_ring(ring, tqp_vector->tx_group) { + if (!hns3_clean_tx_ring(ring, budget)) + clean_complete = false; + } + + /* make sure rx ring budget not smaller than 1 */ + rx_budget = max(budget / tqp_vector->num_tqps, 1); + + hns3_for_each_ring(ring, tqp_vector->rx_group) { + int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, + hns3_rx_skb); + + if (rx_cleaned >= rx_budget) + clean_complete = false; + + rx_pkt_total += rx_cleaned; + } + + tqp_vector->rx_group.total_packets += rx_pkt_total; + + if (!clean_complete) + return budget; + + napi_complete(napi); + hns3_update_new_int_gl(tqp_vector); + hns3_mask_vector_irq(tqp_vector, 1); + + return rx_pkt_total; +} + +static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, + struct hnae3_ring_chain_node *head) +{ + struct pci_dev *pdev = tqp_vector->handle->pdev; + struct hnae3_ring_chain_node *cur_chain = head; + struct hnae3_ring_chain_node *chain; + struct hns3_enet_ring *tx_ring; + struct hns3_enet_ring *rx_ring; + + tx_ring = tqp_vector->tx_group.ring; + if (tx_ring) { + cur_chain->tqp_index = tx_ring->tqp->tqp_index; + hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_TX); + + cur_chain->next = NULL; + + while (tx_ring->next) { + tx_ring = tx_ring->next; + + chain = devm_kzalloc(&pdev->dev, sizeof(*chain), + GFP_KERNEL); + if (!chain) + return -ENOMEM; + + cur_chain->next = chain; + chain->tqp_index = tx_ring->tqp->tqp_index; + hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_TX); + + cur_chain = chain; + } + } + + rx_ring = tqp_vector->rx_group.ring; + if (!tx_ring && rx_ring) { + cur_chain->next = NULL; + cur_chain->tqp_index = rx_ring->tqp->tqp_index; + hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_RX); + + rx_ring = rx_ring->next; + } + + while (rx_ring) { + chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); + if (!chain) + return -ENOMEM; + + cur_chain->next = chain; + chain->tqp_index = rx_ring->tqp->tqp_index; + hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_RX); + cur_chain = chain; + + rx_ring = rx_ring->next; + } + + return 0; +} + +static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, + struct hnae3_ring_chain_node *head) +{ + struct pci_dev *pdev = tqp_vector->handle->pdev; + struct hnae3_ring_chain_node *chain_tmp, *chain; + + chain = head->next; + + while (chain) { + chain_tmp = chain->next; + devm_kfree(&pdev->dev, chain); + chain = chain_tmp; + } +} + +static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, + struct 
hns3_enet_ring *ring) +{ + ring->next = group->ring; + group->ring = ring; + + group->count++; +} + +static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) +{ + struct hnae3_ring_chain_node vector_ring_chain; + struct hnae3_handle *h = priv->ae_handle; + struct hns3_enet_tqp_vector *tqp_vector; + struct hnae3_vector_info *vector; + struct pci_dev *pdev = h->pdev; + u16 tqp_num = h->kinfo.num_tqps; + u16 vector_num; + int ret = 0; + u16 i; + + /* RSS size, cpu online and vector_num should be the same */ + /* Should consider 2p/4p later */ + vector_num = min_t(u16, num_online_cpus(), tqp_num); + vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), + GFP_KERNEL); + if (!vector) + return -ENOMEM; + + vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); + + priv->vector_num = vector_num; + priv->tqp_vector = (struct hns3_enet_tqp_vector *) + devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), + GFP_KERNEL); + if (!priv->tqp_vector) + return -ENOMEM; + + for (i = 0; i < tqp_num; i++) { + u16 vector_i = i % vector_num; + + tqp_vector = &priv->tqp_vector[vector_i]; + + hns3_add_ring_to_group(&tqp_vector->tx_group, + priv->ring_data[i].ring); + + hns3_add_ring_to_group(&tqp_vector->rx_group, + priv->ring_data[i + tqp_num].ring); + + tqp_vector->idx = vector_i; + tqp_vector->mask_addr = vector[vector_i].io_addr; + tqp_vector->vector_irq = vector[vector_i].vector; + tqp_vector->num_tqps++; + + priv->ring_data[i].ring->tqp_vector = tqp_vector; + priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector; + } + + for (i = 0; i < vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + + tqp_vector->rx_group.total_bytes = 0; + tqp_vector->rx_group.total_packets = 0; + tqp_vector->tx_group.total_bytes = 0; + tqp_vector->tx_group.total_packets = 0; + hns3_vector_gl_rl_init(tqp_vector); + tqp_vector->handle = h; + + ret = hns3_get_vector_ring_chain(tqp_vector, + &vector_ring_chain); + if (ret) + goto out; + + ret = h->ae_algo->ops->map_ring_to_vector(h, + tqp_vector->vector_irq, &vector_ring_chain); + if (ret) + goto out; + + hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); + + netif_napi_add(priv->netdev, &tqp_vector->napi, + hns3_nic_common_poll, NAPI_POLL_WEIGHT); + } + +out: + devm_kfree(&pdev->dev, vector); + return ret; +} + +static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) +{ + struct hnae3_ring_chain_node vector_ring_chain; + struct hnae3_handle *h = priv->ae_handle; + struct hns3_enet_tqp_vector *tqp_vector; + struct pci_dev *pdev = h->pdev; + int i, ret; + + for (i = 0; i < priv->vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + + ret = hns3_get_vector_ring_chain(tqp_vector, + &vector_ring_chain); + if (ret) + return ret; + + ret = h->ae_algo->ops->unmap_ring_from_vector(h, + tqp_vector->vector_irq, &vector_ring_chain); + if (ret) + return ret; + + hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); + + if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) { + (void)irq_set_affinity_hint( + priv->tqp_vector[i].vector_irq, + NULL); + free_irq(priv->tqp_vector[i].vector_irq, + &priv->tqp_vector[i]); + } + + priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED; + + netif_napi_del(&priv->tqp_vector[i].napi); + } + + devm_kfree(&pdev->dev, priv->tqp_vector); + + return 0; +} + +static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, + int ring_type) +{ + struct hns3_nic_ring_data *ring_data = priv->ring_data; + int queue_num = priv->ae_handle->kinfo.num_tqps; + struct 
pci_dev *pdev = priv->ae_handle->pdev; + struct hns3_enet_ring *ring; + + ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL); + if (!ring) + return -ENOMEM; + + if (ring_type == HNAE3_RING_TYPE_TX) { + ring_data[q->tqp_index].ring = ring; + ring_data[q->tqp_index].queue_index = q->tqp_index; + ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; + } else { + ring_data[q->tqp_index + queue_num].ring = ring; + ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index; + ring->io_base = q->io_base; + } + + hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); + + ring->tqp = q; + ring->desc = NULL; + ring->desc_cb = NULL; + ring->dev = priv->dev; + ring->desc_dma_addr = 0; + ring->buf_size = q->buf_size; + ring->desc_num = q->desc_num; + ring->next_to_use = 0; + ring->next_to_clean = 0; + + return 0; +} + +static int hns3_queue_to_ring(struct hnae3_queue *tqp, + struct hns3_nic_priv *priv) +{ + int ret; + + ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); + if (ret) + return ret; + + ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); + if (ret) + return ret; + + return 0; +} + +static int hns3_get_ring_config(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + struct pci_dev *pdev = h->pdev; + int i, ret; + + priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps * + sizeof(*priv->ring_data) * 2, + GFP_KERNEL); + if (!priv->ring_data) + return -ENOMEM; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv); + if (ret) + goto err; + } + + return 0; +err: + devm_kfree(&pdev->dev, priv->ring_data); + return ret; +} + +static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) +{ + int ret; + + if (ring->desc_num <= 0 || ring->buf_size <= 0) + return -EINVAL; + + ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]), + GFP_KERNEL); + if (!ring->desc_cb) { + ret = -ENOMEM; + goto out; + } + + ret = hns3_alloc_desc(ring); + if (ret) + goto out_with_desc_cb; + + if (!HNAE3_IS_TX_RING(ring)) { + ret = hns3_alloc_ring_buffers(ring); + if (ret) + goto out_with_desc; + } + + return 0; + +out_with_desc: + hns3_free_desc(ring); +out_with_desc_cb: + kfree(ring->desc_cb); + ring->desc_cb = NULL; +out: + return ret; +} + +static void hns3_fini_ring(struct hns3_enet_ring *ring) +{ + hns3_free_desc(ring); + kfree(ring->desc_cb); + ring->desc_cb = NULL; + ring->next_to_clean = 0; + ring->next_to_use = 0; +} + +static int hns3_buf_size2type(u32 buf_size) +{ + int bd_size_type; + + switch (buf_size) { + case 512: + bd_size_type = HNS3_BD_SIZE_512_TYPE; + break; + case 1024: + bd_size_type = HNS3_BD_SIZE_1024_TYPE; + break; + case 2048: + bd_size_type = HNS3_BD_SIZE_2048_TYPE; + break; + case 4096: + bd_size_type = HNS3_BD_SIZE_4096_TYPE; + break; + default: + bd_size_type = HNS3_BD_SIZE_2048_TYPE; + } + + return bd_size_type; +} + +static void hns3_init_ring_hw(struct hns3_enet_ring *ring) +{ + dma_addr_t dma = ring->desc_dma_addr; + struct hnae3_queue *q = ring->tqp; + + if (!HNAE3_IS_TX_RING(ring)) { + hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, + (u32)dma); + hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG, + (u32)((dma >> 31) >> 1)); + + hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG, + hns3_buf_size2type(ring->buf_size)); + hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG, + ring->desc_num / 8 - 1); + + } else { + hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG, + (u32)dma); + hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG, + (u32)((dma >> 31) >> 1)); + + 
hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG, + hns3_buf_size2type(ring->buf_size)); + hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG, + ring->desc_num / 8 - 1); + } +} + +int hns3_init_all_ring(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + int ring_num = h->kinfo.num_tqps * 2; + int i, j; + int ret; + + for (i = 0; i < ring_num; i++) { + ret = hns3_alloc_ring_memory(priv->ring_data[i].ring); + if (ret) { + dev_err(priv->dev, + "Alloc ring memory fail! ret=%d\n", ret); + goto out_when_alloc_ring_memory; + } + + hns3_init_ring_hw(priv->ring_data[i].ring); + + u64_stats_init(&priv->ring_data[i].ring->syncp); + } + + return 0; + +out_when_alloc_ring_memory: + for (j = i - 1; j >= 0; j--) + hns3_fini_ring(priv->ring_data[j].ring); + + return -ENOMEM; +} + +int hns3_uninit_all_ring(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + int i; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + if (h->ae_algo->ops->reset_queue) + h->ae_algo->ops->reset_queue(h, i); + + hns3_fini_ring(priv->ring_data[i].ring); + hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring); + } + + return 0; +} + +/* Set mac addr if it is configured. or leave it to the AE driver */ +static void hns3_init_mac_addr(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + u8 mac_addr_temp[ETH_ALEN]; + + if (h->ae_algo->ops->get_mac_addr) { + h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); + ether_addr_copy(netdev->dev_addr, mac_addr_temp); + } + + /* Check if the MAC address is valid, if not get a random one */ + if (!is_valid_ether_addr(netdev->dev_addr)) { + eth_hw_addr_random(netdev); + dev_warn(priv->dev, "using random MAC address %pM\n", + netdev->dev_addr); + } + + if (h->ae_algo->ops->set_mac_addr) + h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr); + +} + +static void hns3_nic_set_priv_ops(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + + if ((netdev->features & NETIF_F_TSO) || + (netdev->features & NETIF_F_TSO6)) { + priv->ops.fill_desc = hns3_fill_desc_tso; + priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; + } else { + priv->ops.fill_desc = hns3_fill_desc; + priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; + } +} + +static int hns3_client_init(struct hnae3_handle *handle) +{ + struct pci_dev *pdev = handle->pdev; + struct hns3_nic_priv *priv; + struct net_device *netdev; + int ret; + + netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), + handle->kinfo.num_tqps); + if (!netdev) + return -ENOMEM; + + priv = netdev_priv(netdev); + priv->dev = &pdev->dev; + priv->netdev = netdev; + priv->ae_handle = handle; + priv->last_reset_time = jiffies; + priv->reset_level = HNAE3_FUNC_RESET; + priv->tx_timeout_count = 0; + + handle->kinfo.netdev = netdev; + handle->priv = (void *)priv; + + hns3_init_mac_addr(netdev); + + hns3_set_default_feature(netdev); + + netdev->watchdog_timeo = HNS3_TX_TIMEOUT; + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->netdev_ops = &hns3_nic_netdev_ops; + SET_NETDEV_DEV(netdev, &pdev->dev); + hns3_ethtool_set_ops(netdev); + hns3_nic_set_priv_ops(netdev); + + /* Carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + ret = hns3_get_ring_config(priv); + if (ret) { + ret = -ENOMEM; + goto out_get_ring_cfg; + } + + ret = hns3_nic_init_vector_data(priv); + if (ret) { + ret = -ENOMEM; + goto out_init_vector_data; + } + + ret = hns3_init_all_ring(priv); + if (ret) { + ret = -ENOMEM; + goto 
out_init_ring_data; + } + + ret = register_netdev(netdev); + if (ret) { + dev_err(priv->dev, "probe register netdev fail!\n"); + goto out_reg_netdev_fail; + } + + hns3_dcbnl_setup(handle); + + /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */ + netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); + + return ret; + +out_reg_netdev_fail: +out_init_ring_data: + (void)hns3_nic_uninit_vector_data(priv); + priv->ring_data = NULL; +out_init_vector_data: +out_get_ring_cfg: + priv->ae_handle = NULL; + free_netdev(netdev); + return ret; +} + +static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) +{ + struct net_device *netdev = handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); + int ret; + + if (netdev->reg_state != NETREG_UNINITIALIZED) + unregister_netdev(netdev); + + ret = hns3_nic_uninit_vector_data(priv); + if (ret) + netdev_err(netdev, "uninit vector error\n"); + + ret = hns3_uninit_all_ring(priv); + if (ret) + netdev_err(netdev, "uninit ring error\n"); + + priv->ring_data = NULL; + + free_netdev(netdev); +} + +static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) +{ + struct net_device *netdev = handle->kinfo.netdev; + + if (!netdev) + return; + + if (linkup) { + netif_carrier_on(netdev); + netif_tx_wake_all_queues(netdev); + netdev_info(netdev, "link up\n"); + } else { + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + netdev_info(netdev, "link down\n"); + } +} + +static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct net_device *ndev = kinfo->netdev; + bool if_running; + int ret; + u8 i; + + if (tc > HNAE3_MAX_TC) + return -EINVAL; + + if (!ndev) + return -ENODEV; + + if_running = netif_running(ndev); + + ret = netdev_set_num_tc(ndev, tc); + if (ret) + return ret; + + if (if_running) { + (void)hns3_nic_net_stop(ndev); + msleep(100); + } + + ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ? 
+ kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP; + if (ret) + goto err_out; + + if (tc <= 1) { + netdev_reset_tc(ndev); + goto out; + } + + for (i = 0; i < HNAE3_MAX_TC; i++) { + struct hnae3_tc_info *tc_info = &kinfo->tc_info[i]; + + if (tc_info->enable) + netdev_set_tc_queue(ndev, + tc_info->tc, + tc_info->tqp_count, + tc_info->tqp_offset); + } + + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { + netdev_set_prio_tc_map(ndev, i, + kinfo->prio_tc[i]); + } + +out: + ret = hns3_nic_set_real_num_queue(ndev); + +err_out: + if (if_running) + (void)hns3_nic_net_open(ndev); + + return ret; +} + +static void hns3_recover_hw_addr(struct net_device *ndev) +{ + struct netdev_hw_addr_list *list; + struct netdev_hw_addr *ha, *tmp; + + /* go through and sync uc_addr entries to the device */ + list = &ndev->uc; + list_for_each_entry_safe(ha, tmp, &list->list, list) + hns3_nic_uc_sync(ndev, ha->addr); + + /* go through and sync mc_addr entries to the device */ + list = &ndev->mc; + list_for_each_entry_safe(ha, tmp, &list->list, list) + hns3_nic_mc_sync(ndev, ha->addr); +} + +static void hns3_drop_skb_data(struct hns3_enet_ring *ring, struct sk_buff *skb) +{ + dev_kfree_skb_any(skb); +} + +static void hns3_clear_all_ring(struct hnae3_handle *h) +{ + struct net_device *ndev = h->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(ndev); + u32 i; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + struct netdev_queue *dev_queue; + struct hns3_enet_ring *ring; + + ring = priv->ring_data[i].ring; + hns3_clean_tx_ring(ring, ring->desc_num); + dev_queue = netdev_get_tx_queue(ndev, + priv->ring_data[i].queue_index); + netdev_tx_reset_queue(dev_queue); + + ring = priv->ring_data[i + h->kinfo.num_tqps].ring; + hns3_clean_rx_ring(ring, ring->desc_num, hns3_drop_skb_data); + } +} + +static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct net_device *ndev = kinfo->netdev; + + if (!netif_running(ndev)) + return -EIO; + + return hns3_nic_net_stop(ndev); +} + +static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); + int ret = 0; + + if (netif_running(kinfo->netdev)) { + ret = hns3_nic_net_up(kinfo->netdev); + if (ret) { + netdev_err(kinfo->netdev, + "hns net up fail, ret=%d!\n", ret); + return ret; + } + + priv->last_reset_time = jiffies; + } + + return ret; +} + +static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) +{ + struct net_device *netdev = handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); + int ret; + + priv->reset_level = 1; + hns3_init_mac_addr(netdev); + hns3_nic_set_rx_mode(netdev); + hns3_recover_hw_addr(netdev); + + /* Carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + ret = hns3_get_ring_config(priv); + if (ret) + return ret; + + ret = hns3_nic_init_vector_data(priv); + if (ret) + return ret; + + ret = hns3_init_all_ring(priv); + if (ret) { + hns3_nic_uninit_vector_data(priv); + priv->ring_data = NULL; + } + + return ret; +} + +static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) +{ + struct net_device *netdev = handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); + int ret; + + hns3_clear_all_ring(handle); + + ret = hns3_nic_uninit_vector_data(priv); + if (ret) { + netdev_err(netdev, "uninit vector error\n"); + return ret; + } + + ret = hns3_uninit_all_ring(priv); + if 
(ret) + netdev_err(netdev, "uninit ring error\n"); + + priv->ring_data = NULL; + + return ret; +} + +static int hns3_reset_notify(struct hnae3_handle *handle, + enum hnae3_reset_notify_type type) +{ + int ret = 0; + + switch (type) { + case HNAE3_UP_CLIENT: + ret = hns3_reset_notify_up_enet(handle); + break; + case HNAE3_DOWN_CLIENT: + ret = hns3_reset_notify_down_enet(handle); + break; + case HNAE3_INIT_CLIENT: + ret = hns3_reset_notify_init_enet(handle); + break; + case HNAE3_UNINIT_CLIENT: + ret = hns3_reset_notify_uninit_enet(handle); + break; + default: + break; + } + + return ret; +} + +static const struct hnae3_client_ops client_ops = { + .init_instance = hns3_client_init, + .uninit_instance = hns3_client_uninit, + .link_status_change = hns3_link_status_change, + .setup_tc = hns3_client_setup_tc, + .reset_notify = hns3_reset_notify, +}; + +/* hns3_init_module - Driver registration routine + * hns3_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + */ +static int __init hns3_init_module(void) +{ + int ret; + + pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); + pr_info("%s: %s\n", hns3_driver_name, hns3_copyright); + + client.type = HNAE3_CLIENT_KNIC; + snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s", + hns3_driver_name); + + client.ops = &client_ops; + + ret = hnae3_register_client(&client); + if (ret) + return ret; + + ret = pci_register_driver(&hns3_driver); + if (ret) + hnae3_unregister_client(&client); + + return ret; +} +module_init(hns3_init_module); + +/* hns3_exit_module - Driver exit cleanup routine + * hns3_exit_module is called just before the driver is removed + * from memory. + */ +static void __exit hns3_exit_module(void) +{ + pci_unregister_driver(&hns3_driver); + hnae3_unregister_client(&client); +} +module_exit(hns3_exit_module); + +MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver"); +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("pci:hns-nic"); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h new file mode 100644 index 000000000000..8a9de759957b --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -0,0 +1,613 @@ +/* + * Copyright (c) 2016 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __HNS3_ENET_H +#define __HNS3_ENET_H + +#include "hnae3.h" + +extern const char hns3_driver_version[]; + +enum hns3_nic_state { + HNS3_NIC_STATE_TESTING, + HNS3_NIC_STATE_RESETTING, + HNS3_NIC_STATE_REINITING, + HNS3_NIC_STATE_DOWN, + HNS3_NIC_STATE_DISABLED, + HNS3_NIC_STATE_REMOVING, + HNS3_NIC_STATE_SERVICE_INITED, + HNS3_NIC_STATE_SERVICE_SCHED, + HNS3_NIC_STATE2_RESET_REQUESTED, + HNS3_NIC_STATE_MAX +}; + +#define HNS3_RING_RX_RING_BASEADDR_L_REG 0x00000 +#define HNS3_RING_RX_RING_BASEADDR_H_REG 0x00004 +#define HNS3_RING_RX_RING_BD_NUM_REG 0x00008 +#define HNS3_RING_RX_RING_BD_LEN_REG 0x0000C +#define HNS3_RING_RX_RING_TAIL_REG 0x00018 +#define HNS3_RING_RX_RING_HEAD_REG 0x0001C +#define HNS3_RING_RX_RING_FBDNUM_REG 0x00020 +#define HNS3_RING_RX_RING_PKTNUM_RECORD_REG 0x0002C + +#define HNS3_RING_TX_RING_BASEADDR_L_REG 0x00040 +#define HNS3_RING_TX_RING_BASEADDR_H_REG 0x00044 +#define HNS3_RING_TX_RING_BD_NUM_REG 0x00048 +#define HNS3_RING_TX_RING_BD_LEN_REG 0x0004C +#define HNS3_RING_TX_RING_TAIL_REG 0x00058 +#define HNS3_RING_TX_RING_HEAD_REG 0x0005C +#define HNS3_RING_TX_RING_FBDNUM_REG 0x00060 +#define HNS3_RING_TX_RING_OFFSET_REG 0x00064 +#define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C + +#define HNS3_RING_PREFETCH_EN_REG 0x0007C +#define HNS3_RING_CFG_VF_NUM_REG 0x00080 +#define HNS3_RING_ASID_REG 0x0008C +#define HNS3_RING_RX_VM_REG 0x00090 +#define HNS3_RING_T0_BE_RST 0x00094 +#define HNS3_RING_COULD_BE_RST 0x00098 +#define HNS3_RING_WRR_WEIGHT_REG 0x0009c + +#define HNS3_RING_INTMSK_RXWL_REG 0x000A0 +#define HNS3_RING_INTSTS_RX_RING_REG 0x000A4 +#define HNS3_RX_RING_INT_STS_REG 0x000A8 +#define HNS3_RING_INTMSK_TXWL_REG 0x000AC +#define HNS3_RING_INTSTS_TX_RING_REG 0x000B0 +#define HNS3_TX_RING_INT_STS_REG 0x000B4 +#define HNS3_RING_INTMSK_RX_OVERTIME_REG 0x000B8 +#define HNS3_RING_INTSTS_RX_OVERTIME_REG 0x000BC +#define HNS3_RING_INTMSK_TX_OVERTIME_REG 0x000C4 +#define HNS3_RING_INTSTS_TX_OVERTIME_REG 0x000C8 + +#define HNS3_RING_MB_CTRL_REG 0x00100 +#define HNS3_RING_MB_DATA_BASE_REG 0x00200 + +#define HNS3_TX_REG_OFFSET 0x40 + +#define HNS3_RX_HEAD_SIZE 256 + +#define HNS3_TX_TIMEOUT (5 * HZ) +#define HNS3_RING_NAME_LEN 16 +#define HNS3_BUFFER_SIZE_2048 2048 +#define HNS3_RING_MAX_PENDING 32768 +#define HNS3_RING_MIN_PENDING 8 +#define HNS3_RING_BD_MULTIPLE 8 +#define HNS3_MAX_MTU 9728 + +#define HNS3_BD_SIZE_512_TYPE 0 +#define HNS3_BD_SIZE_1024_TYPE 1 +#define HNS3_BD_SIZE_2048_TYPE 2 +#define HNS3_BD_SIZE_4096_TYPE 3 + +#define HNS3_RX_FLAG_VLAN_PRESENT 0x1 +#define HNS3_RX_FLAG_L3ID_IPV4 0x0 +#define HNS3_RX_FLAG_L3ID_IPV6 0x1 +#define HNS3_RX_FLAG_L4ID_UDP 0x0 +#define HNS3_RX_FLAG_L4ID_TCP 0x1 + +#define HNS3_RXD_DMAC_S 0 +#define HNS3_RXD_DMAC_M (0x3 << HNS3_RXD_DMAC_S) +#define HNS3_RXD_VLAN_S 2 +#define HNS3_RXD_VLAN_M (0x3 << HNS3_RXD_VLAN_S) +#define HNS3_RXD_L3ID_S 4 +#define HNS3_RXD_L3ID_M (0xf << HNS3_RXD_L3ID_S) +#define HNS3_RXD_L4ID_S 8 +#define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S) +#define HNS3_RXD_FRAG_B 12 +#define HNS3_RXD_L2E_B 16 +#define HNS3_RXD_L3E_B 17 +#define HNS3_RXD_L4E_B 18 +#define HNS3_RXD_TRUNCAT_B 19 +#define HNS3_RXD_HOI_B 20 +#define HNS3_RXD_DOI_B 21 +#define HNS3_RXD_OL3E_B 22 +#define HNS3_RXD_OL4E_B 23 + +#define HNS3_RXD_ODMAC_S 0 +#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S) +#define HNS3_RXD_OVLAN_S 2 +#define HNS3_RXD_OVLAN_M (0x3 << HNS3_RXD_OVLAN_S) +#define HNS3_RXD_OL3ID_S 4 +#define HNS3_RXD_OL3ID_M (0xf << HNS3_RXD_OL3ID_S) +#define HNS3_RXD_OL4ID_S 8 +#define HNS3_RXD_OL4ID_M (0xf << 
HNS3_RXD_OL4ID_S) +#define HNS3_RXD_FBHI_S 12 +#define HNS3_RXD_FBHI_M (0x3 << HNS3_RXD_FBHI_S) +#define HNS3_RXD_FBLI_S 14 +#define HNS3_RXD_FBLI_M (0x3 << HNS3_RXD_FBLI_S) + +#define HNS3_RXD_BDTYPE_S 0 +#define HNS3_RXD_BDTYPE_M (0xf << HNS3_RXD_BDTYPE_S) +#define HNS3_RXD_VLD_B 4 +#define HNS3_RXD_UDP0_B 5 +#define HNS3_RXD_EXTEND_B 7 +#define HNS3_RXD_FE_B 8 +#define HNS3_RXD_LUM_B 9 +#define HNS3_RXD_CRCP_B 10 +#define HNS3_RXD_L3L4P_B 11 +#define HNS3_RXD_TSIND_S 12 +#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S) +#define HNS3_RXD_LKBK_B 15 +#define HNS3_RXD_HDL_S 16 +#define HNS3_RXD_HDL_M (0x7ff << HNS3_RXD_HDL_S) +#define HNS3_RXD_HSIND_B 31 + +#define HNS3_TXD_L3T_S 0 +#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S) +#define HNS3_TXD_L4T_S 2 +#define HNS3_TXD_L4T_M (0x3 << HNS3_TXD_L4T_S) +#define HNS3_TXD_L3CS_B 4 +#define HNS3_TXD_L4CS_B 5 +#define HNS3_TXD_VLAN_B 6 +#define HNS3_TXD_TSO_B 7 + +#define HNS3_TXD_L2LEN_S 8 +#define HNS3_TXD_L2LEN_M (0xff << HNS3_TXD_L2LEN_S) +#define HNS3_TXD_L3LEN_S 16 +#define HNS3_TXD_L3LEN_M (0xff << HNS3_TXD_L3LEN_S) +#define HNS3_TXD_L4LEN_S 24 +#define HNS3_TXD_L4LEN_M (0xff << HNS3_TXD_L4LEN_S) + +#define HNS3_TXD_OL3T_S 0 +#define HNS3_TXD_OL3T_M (0x3 << HNS3_TXD_OL3T_S) +#define HNS3_TXD_OVLAN_B 2 +#define HNS3_TXD_MACSEC_B 3 +#define HNS3_TXD_TUNTYPE_S 4 +#define HNS3_TXD_TUNTYPE_M (0xf << HNS3_TXD_TUNTYPE_S) + +#define HNS3_TXD_BDTYPE_S 0 +#define HNS3_TXD_BDTYPE_M (0xf << HNS3_TXD_BDTYPE_S) +#define HNS3_TXD_FE_B 4 +#define HNS3_TXD_SC_S 5 +#define HNS3_TXD_SC_M (0x3 << HNS3_TXD_SC_S) +#define HNS3_TXD_EXTEND_B 7 +#define HNS3_TXD_VLD_B 8 +#define HNS3_TXD_RI_B 9 +#define HNS3_TXD_RA_B 10 +#define HNS3_TXD_TSYN_B 11 +#define HNS3_TXD_DECTTL_S 12 +#define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S) + +#define HNS3_TXD_MSS_S 0 +#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S) + +#define HNS3_VECTOR_TX_IRQ BIT_ULL(0) +#define HNS3_VECTOR_RX_IRQ BIT_ULL(1) + +#define HNS3_VECTOR_NOT_INITED 0 +#define HNS3_VECTOR_INITED 1 + +#define HNS3_MAX_BD_SIZE 65535 +#define HNS3_MAX_BD_PER_FRAG 8 +#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS + +#define HNS3_VECTOR_GL0_OFFSET 0x100 +#define HNS3_VECTOR_GL1_OFFSET 0x200 +#define HNS3_VECTOR_GL2_OFFSET 0x300 +#define HNS3_VECTOR_RL_OFFSET 0x900 +#define HNS3_VECTOR_RL_EN_B 6 + +enum hns3_pkt_l3t_type { + HNS3_L3T_NONE, + HNS3_L3T_IPV6, + HNS3_L3T_IPV4, + HNS3_L3T_RESERVED +}; + +enum hns3_pkt_l4t_type { + HNS3_L4T_UNKNOWN, + HNS3_L4T_TCP, + HNS3_L4T_UDP, + HNS3_L4T_SCTP +}; + +enum hns3_pkt_ol3t_type { + HNS3_OL3T_NONE, + HNS3_OL3T_IPV6, + HNS3_OL3T_IPV4_NO_CSUM, + HNS3_OL3T_IPV4_CSUM +}; + +enum hns3_pkt_tun_type { + HNS3_TUN_NONE, + HNS3_TUN_MAC_IN_UDP, + HNS3_TUN_NVGRE, + HNS3_TUN_OTHER +}; + +/* hardware spec ring buffer format */ +struct __packed hns3_desc { + __le64 addr; + union { + struct { + __le16 vlan_tag; + __le16 send_size; + union { + __le32 type_cs_vlan_tso_len; + struct { + __u8 type_cs_vlan_tso; + __u8 l2_len; + __u8 l3_len; + __u8 l4_len; + }; + }; + __le16 outer_vlan_tag; + __le16 tv; + + union { + __le32 ol_type_vlan_len_msec; + struct { + __u8 ol_type_vlan_msec; + __u8 ol2_len; + __u8 ol3_len; + __u8 ol4_len; + }; + }; + + __le32 paylen; + __le16 bdtp_fe_sc_vld_ra_ri; + __le16 mss; + } tx; + + struct { + __le32 l234_info; + __le16 pkt_len; + __le16 size; + + __le32 rss_hash; + __le16 fd_id; + __le16 vlan_tag; + + union { + __le32 ol_info; + struct { + __le16 o_dm_vlan_id_fb; + __le16 ot_vlan_tag; + }; + }; + + __le32 bd_base_info; + } rx; + }; +}; + +struct hns3_desc_cb { + 
dma_addr_t dma; /* dma address of this desc */ + void *buf; /* cpu addr for a desc */ + + /* priv data for the desc, e.g. skb when use with ip stack*/ + void *priv; + u16 page_offset; + u16 reuse_flag; + + u16 length; /* length of the buffer */ + + /* desc type, used by the ring user to mark the type of the priv data */ + u16 type; +}; + +enum hns3_pkt_l3type { + HNS3_L3_TYPE_IPV4, + HNS3_L3_TYPE_IPV6, + HNS3_L3_TYPE_ARP, + HNS3_L3_TYPE_RARP, + HNS3_L3_TYPE_IPV4_OPT, + HNS3_L3_TYPE_IPV6_EXT, + HNS3_L3_TYPE_LLDP, + HNS3_L3_TYPE_BPDU, + HNS3_L3_TYPE_MAC_PAUSE, + HNS3_L3_TYPE_PFC_PAUSE,/* 0x9*/ + + /* reserved for 0xA~0xB*/ + + HNS3_L3_TYPE_CNM = 0xc, + + /* reserved for 0xD~0xE*/ + + HNS3_L3_TYPE_PARSE_FAIL = 0xf /* must be last */ +}; + +enum hns3_pkt_l4type { + HNS3_L4_TYPE_UDP, + HNS3_L4_TYPE_TCP, + HNS3_L4_TYPE_GRE, + HNS3_L4_TYPE_SCTP, + HNS3_L4_TYPE_IGMP, + HNS3_L4_TYPE_ICMP, + + /* reserved for 0x6~0xE */ + + HNS3_L4_TYPE_PARSE_FAIL = 0xf /* must be last */ +}; + +enum hns3_pkt_ol3type { + HNS3_OL3_TYPE_IPV4 = 0, + HNS3_OL3_TYPE_IPV6, + /* reserved for 0x2~0x3 */ + HNS3_OL3_TYPE_IPV4_OPT = 4, + HNS3_OL3_TYPE_IPV6_EXT, + + /* reserved for 0x6~0xE*/ + + HNS3_OL3_TYPE_PARSE_FAIL = 0xf /* must be last */ +}; + +enum hns3_pkt_ol4type { + HNS3_OL4_TYPE_NO_TUN, + HNS3_OL4_TYPE_MAC_IN_UDP, + HNS3_OL4_TYPE_NVGRE, + HNS3_OL4_TYPE_UNKNOWN +}; + +struct ring_stats { + u64 io_err_cnt; + u64 sw_err_cnt; + u64 seg_pkt_cnt; + union { + struct { + u64 tx_pkts; + u64 tx_bytes; + u64 tx_err_cnt; + u64 restart_queue; + u64 tx_busy; + }; + struct { + u64 rx_pkts; + u64 rx_bytes; + u64 rx_err_cnt; + u64 reuse_pg_cnt; + u64 err_pkt_len; + u64 non_vld_descs; + u64 err_bd_num; + u64 l2_err; + u64 l3l4_csum_err; + }; + }; +}; + +struct hns3_enet_ring { + u8 __iomem *io_base; /* base io address for the ring */ + struct hns3_desc *desc; /* dma map address space */ + struct hns3_desc_cb *desc_cb; + struct hns3_enet_ring *next; + struct hns3_enet_tqp_vector *tqp_vector; + struct hnae3_queue *tqp; + char ring_name[HNS3_RING_NAME_LEN]; + struct device *dev; /* will be used for DMA mapping of descriptors */ + + /* statistic */ + struct ring_stats stats; + struct u64_stats_sync syncp; + + dma_addr_t desc_dma_addr; + u32 buf_size; /* size for hnae_desc->addr, preset by AE */ + u16 desc_num; /* total number of desc */ + u16 max_desc_num_per_pkt; + u16 max_raw_data_sz_per_desc; + u16 max_pkt_size; + int next_to_use; /* idx of next spare desc */ + + /* idx of lastest sent desc, the ring is empty when equal to + * next_to_use + */ + int next_to_clean; + + u32 flag; /* ring attribute */ + int irq_init_flag; + + int numa_node; + cpumask_t affinity_mask; +}; + +struct hns_queue; + +struct hns3_nic_ring_data { + struct hns3_enet_ring *ring; + struct napi_struct napi; + int queue_index; + int (*poll_one)(struct hns3_nic_ring_data *, int, void *); + void (*ex_process)(struct hns3_nic_ring_data *, struct sk_buff *); + void (*fini_process)(struct hns3_nic_ring_data *); +}; + +struct hns3_nic_ops { + int (*fill_desc)(struct hns3_enet_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + enum hns_desc_type type); + int (*maybe_stop_tx)(struct sk_buff **out_skb, + int *bnum, struct hns3_enet_ring *ring); + void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum); +}; + +enum hns3_flow_level_range { + HNS3_FLOW_LOW = 0, + HNS3_FLOW_MID = 1, + HNS3_FLOW_HIGH = 2, + HNS3_FLOW_ULTRA = 3, +}; + +enum hns3_link_mode_bits { + HNS3_LM_FIBRE_BIT = BIT(0), + HNS3_LM_AUTONEG_BIT = BIT(1), + HNS3_LM_TP_BIT = BIT(2), + HNS3_LM_PAUSE_BIT = 
BIT(3), + HNS3_LM_BACKPLANE_BIT = BIT(4), + HNS3_LM_10BASET_HALF_BIT = BIT(5), + HNS3_LM_10BASET_FULL_BIT = BIT(6), + HNS3_LM_100BASET_HALF_BIT = BIT(7), + HNS3_LM_100BASET_FULL_BIT = BIT(8), + HNS3_LM_1000BASET_FULL_BIT = BIT(9), + HNS3_LM_10000BASEKR_FULL_BIT = BIT(10), + HNS3_LM_25000BASEKR_FULL_BIT = BIT(11), + HNS3_LM_40000BASELR4_FULL_BIT = BIT(12), + HNS3_LM_50000BASEKR2_FULL_BIT = BIT(13), + HNS3_LM_100000BASEKR4_FULL_BIT = BIT(14), + HNS3_LM_COUNT = 15 +}; + +#define HNS3_INT_GL_50K 0x000A +#define HNS3_INT_GL_20K 0x0019 +#define HNS3_INT_GL_18K 0x001B +#define HNS3_INT_GL_8K 0x003E + +struct hns3_enet_ring_group { + /* array of pointers to rings */ + struct hns3_enet_ring *ring; + u64 total_bytes; /* total bytes processed this group */ + u64 total_packets; /* total packets processed this group */ + u16 count; + enum hns3_flow_level_range flow_level; + u16 int_gl; +}; + +struct hns3_enet_tqp_vector { + struct hnae3_handle *handle; + u8 __iomem *mask_addr; + int vector_irq; + int irq_init_flag; + + u16 idx; /* index in the TQP vector array per handle. */ + + struct napi_struct napi; + + struct hns3_enet_ring_group rx_group; + struct hns3_enet_ring_group tx_group; + + u16 num_tqps; /* total number of tqps in TQP vector */ + + cpumask_t affinity_mask; + char name[HNAE3_INT_NAME_LEN]; + + /* when 0 should adjust interrupt coalesce parameter */ + u8 int_adapt_down; +} ____cacheline_internodealigned_in_smp; + +enum hns3_udp_tnl_type { + HNS3_UDP_TNL_VXLAN, + HNS3_UDP_TNL_GENEVE, + HNS3_UDP_TNL_MAX, +}; + +struct hns3_udp_tunnel { + u16 dst_port; + int used; +}; + +struct hns3_nic_priv { + struct hnae3_handle *ae_handle; + u32 enet_ver; + u32 port_id; + struct net_device *netdev; + struct device *dev; + struct hns3_nic_ops ops; + + /** + * the cb for nic to manage the ring buffer, the first half of the + * array is for tx_ring and vice versa for the second half + */ + struct hns3_nic_ring_data *ring_data; + struct hns3_enet_tqp_vector *tqp_vector; + u16 vector_num; + + /* The most recently read link state */ + int link; + u64 tx_timeout_count; + enum hnae3_reset_type reset_level; + unsigned long last_reset_time; + + unsigned long state; + + struct timer_list service_timer; + + struct work_struct service_task; + + struct notifier_block notifier_block; + /* Vxlan/Geneve information */ + struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX]; +}; + +union l3_hdr_info { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; +}; + +union l4_hdr_info { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; +}; + +/* the distance between [begin, end) in a ring buffer + * note: there is a unuse slot between the begin and the end + */ +static inline int ring_dist(struct hns3_enet_ring *ring, int begin, int end) +{ + return (end - begin + ring->desc_num) % ring->desc_num; +} + +static inline int ring_space(struct hns3_enet_ring *ring) +{ + return ring->desc_num - + ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1; +} + +static inline int is_ring_empty(struct hns3_enet_ring *ring) +{ + return ring->next_to_use == ring->next_to_clean; +} + +static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) +{ + u8 __iomem *reg_addr = READ_ONCE(base); + + writel(value, reg_addr + reg); +} + +#define hns3_write_dev(a, reg, value) \ + hns3_write_reg((a)->io_base, (reg), (value)) + +#define hnae_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \ + (tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG) + +#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev) + +#define 
ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \ + DMA_TO_DEVICE : DMA_FROM_DEVICE) + +#define tx_ring_data(priv, idx) ((priv)->ring_data[idx]) + +#define hnae_buf_size(_ring) ((_ring)->buf_size) +#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring))) +#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring)) + +/* iterator for handling rings in ring group */ +#define hns3_for_each_ring(pos, head) \ + for (pos = (head).ring; pos; pos = pos->next) + +#define hns3_get_handle(ndev) \ + (((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle) + +void hns3_ethtool_set_ops(struct net_device *netdev); + +bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); +int hns3_init_all_ring(struct hns3_nic_priv *priv); +int hns3_uninit_all_ring(struct hns3_nic_priv *priv); +netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev); +int hns3_clean_rx_ring( + struct hns3_enet_ring *ring, int budget, + void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)); + +#ifdef CONFIG_HNS3_DCB +void hns3_dcbnl_setup(struct hnae3_handle *handle); +#else +static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {} +#endif + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c new file mode 100644 index 000000000000..65a69b439457 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -0,0 +1,896 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include + +#include "hns3_enet.h" + +struct hns3_stats { + char stats_string[ETH_GSTRING_LEN]; + int stats_size; + int stats_offset; +}; + +/* tqp related stats */ +#define HNS3_TQP_STAT(_string, _member) { \ + .stats_string = _string, \ + .stats_size = FIELD_SIZEOF(struct ring_stats, _member), \ + .stats_offset = offsetof(struct hns3_enet_ring, stats), \ +} \ + +static const struct hns3_stats hns3_txq_stats[] = { + /* Tx per-queue statistics */ + HNS3_TQP_STAT("tx_io_err_cnt", io_err_cnt), + HNS3_TQP_STAT("tx_sw_err_cnt", sw_err_cnt), + HNS3_TQP_STAT("tx_seg_pkt_cnt", seg_pkt_cnt), + HNS3_TQP_STAT("tx_pkts", tx_pkts), + HNS3_TQP_STAT("tx_bytes", tx_bytes), + HNS3_TQP_STAT("tx_err_cnt", tx_err_cnt), + HNS3_TQP_STAT("tx_restart_queue", restart_queue), + HNS3_TQP_STAT("tx_busy", tx_busy), +}; + +#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats) + +static const struct hns3_stats hns3_rxq_stats[] = { + /* Rx per-queue statistics */ + HNS3_TQP_STAT("rx_io_err_cnt", io_err_cnt), + HNS3_TQP_STAT("rx_sw_err_cnt", sw_err_cnt), + HNS3_TQP_STAT("rx_seg_pkt_cnt", seg_pkt_cnt), + HNS3_TQP_STAT("rx_pkts", rx_pkts), + HNS3_TQP_STAT("rx_bytes", rx_bytes), + HNS3_TQP_STAT("rx_err_cnt", rx_err_cnt), + HNS3_TQP_STAT("rx_reuse_pg_cnt", reuse_pg_cnt), + HNS3_TQP_STAT("rx_err_pkt_len", err_pkt_len), + HNS3_TQP_STAT("rx_non_vld_descs", non_vld_descs), + HNS3_TQP_STAT("rx_err_bd_num", err_bd_num), + HNS3_TQP_STAT("rx_l2_err", l2_err), + HNS3_TQP_STAT("rx_l3l4_csum_err", l3l4_csum_err), +}; + +#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats) + +#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT) + +#define HNS3_SELF_TEST_TPYE_NUM 1 +#define HNS3_NIC_LB_TEST_PKT_NUM 1 +#define HNS3_NIC_LB_TEST_RING_ID 0 +#define 
HNS3_NIC_LB_TEST_PACKET_SIZE 128 + +/* Nic loopback test err */ +#define HNS3_NIC_LB_TEST_NO_MEM_ERR 1 +#define HNS3_NIC_LB_TEST_TX_CNT_ERR 2 +#define HNS3_NIC_LB_TEST_RX_CNT_ERR 3 + +struct hns3_link_mode_mapping { + u32 hns3_link_mode; + u32 ethtool_link_mode; +}; + +static const struct hns3_link_mode_mapping hns3_lm_map[] = { + {HNS3_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT}, + {HNS3_LM_AUTONEG_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT}, + {HNS3_LM_TP_BIT, ETHTOOL_LINK_MODE_TP_BIT}, + {HNS3_LM_PAUSE_BIT, ETHTOOL_LINK_MODE_Pause_BIT}, + {HNS3_LM_BACKPLANE_BIT, ETHTOOL_LINK_MODE_Backplane_BIT}, + {HNS3_LM_10BASET_HALF_BIT, ETHTOOL_LINK_MODE_10baseT_Half_BIT}, + {HNS3_LM_10BASET_FULL_BIT, ETHTOOL_LINK_MODE_10baseT_Full_BIT}, + {HNS3_LM_100BASET_HALF_BIT, ETHTOOL_LINK_MODE_100baseT_Half_BIT}, + {HNS3_LM_100BASET_FULL_BIT, ETHTOOL_LINK_MODE_100baseT_Full_BIT}, + {HNS3_LM_1000BASET_FULL_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT}, +}; + +static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + int ret; + + if (!h->ae_algo->ops->set_loopback || + !h->ae_algo->ops->set_promisc_mode) + return -EOPNOTSUPP; + + switch (loop) { + case HNAE3_MAC_INTER_LOOP_MAC: + ret = h->ae_algo->ops->set_loopback(h, loop, true); + break; + case HNAE3_MAC_LOOP_NONE: + ret = h->ae_algo->ops->set_loopback(h, + HNAE3_MAC_INTER_LOOP_MAC, false); + break; + default: + ret = -ENOTSUPP; + break; + } + + if (ret) + return ret; + + if (loop == HNAE3_MAC_LOOP_NONE) + h->ae_algo->ops->set_promisc_mode(h, ndev->flags & IFF_PROMISC); + else + h->ae_algo->ops->set_promisc_mode(h, 1); + + return ret; +} + +static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + int ret; + + if (!h->ae_algo->ops->start) + return -EOPNOTSUPP; + + ret = h->ae_algo->ops->start(h); + if (ret) { + netdev_err(ndev, + "hns3_lb_up ae start return error: %d\n", ret); + return ret; + } + + ret = hns3_lp_setup(ndev, loop_mode); + usleep_range(10000, 20000); + + return ret; +} + +static int hns3_lp_down(struct net_device *ndev) +{ + struct hnae3_handle *h = hns3_get_handle(ndev); + int ret; + + if (!h->ae_algo->ops->stop) + return -EOPNOTSUPP; + + ret = hns3_lp_setup(ndev, HNAE3_MAC_LOOP_NONE); + if (ret) { + netdev_err(ndev, "lb_setup return error: %d\n", ret); + return ret; + } + + h->ae_algo->ops->stop(h); + usleep_range(10000, 20000); + + return 0; +} + +static void hns3_lp_setup_skb(struct sk_buff *skb) +{ + struct net_device *ndev = skb->dev; + unsigned char *packet; + struct ethhdr *ethh; + unsigned int i; + + skb_reserve(skb, NET_IP_ALIGN); + ethh = skb_put(skb, sizeof(struct ethhdr)); + packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE); + + memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN); + eth_zero_addr(ethh->h_source); + ethh->h_proto = htons(ETH_P_ARP); + skb_reset_mac_header(skb); + + for (i = 0; i < HNS3_NIC_LB_TEST_PACKET_SIZE; i++) + packet[i] = (unsigned char)(i & 0xff); +} + +static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring, + struct sk_buff *skb) +{ + struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector; + unsigned char *packet = skb->data; + u32 i; + + for (i = 0; i < skb->len; i++) + if (packet[i] != (unsigned char)(i & 0xff)) + break; + + /* The packet is correctly received */ + if (i == skb->len) + tqp_vector->rx_group.total_packets++; + else + print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1, + skb->data, skb->len, true); + + dev_kfree_skb_any(skb); +} + +static u32 
hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget) +{ + struct hnae3_handle *h = priv->ae_handle; + struct hnae3_knic_private_info *kinfo; + u32 i, rcv_good_pkt_total = 0; + + kinfo = &h->kinfo; + for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) { + struct hns3_enet_ring *ring = priv->ring_data[i].ring; + struct hns3_enet_ring_group *rx_group; + u64 pre_rx_pkt; + + rx_group = &ring->tqp_vector->rx_group; + pre_rx_pkt = rx_group->total_packets; + + hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data); + + rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt); + rx_group->total_packets = pre_rx_pkt; + } + return rcv_good_pkt_total; +} + +static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid, + u32 end_ringid, u32 budget) +{ + u32 i; + + for (i = start_ringid; i <= end_ringid; i++) { + struct hns3_enet_ring *ring = priv->ring_data[i].ring; + + hns3_clean_tx_ring(ring, budget); + } +} + +/** + * hns3_lp_run_test - run loopback test + * @ndev: net device + * @mode: loopback type + */ +static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct sk_buff *skb; + u32 i, good_cnt; + int ret_val = 0; + + skb = alloc_skb(HNS3_NIC_LB_TEST_PACKET_SIZE + ETH_HLEN + NET_IP_ALIGN, + GFP_KERNEL); + if (!skb) + return HNS3_NIC_LB_TEST_NO_MEM_ERR; + + skb->dev = ndev; + hns3_lp_setup_skb(skb); + skb->queue_mapping = HNS3_NIC_LB_TEST_RING_ID; + + good_cnt = 0; + for (i = 0; i < HNS3_NIC_LB_TEST_PKT_NUM; i++) { + netdev_tx_t tx_ret; + + skb_get(skb); + tx_ret = hns3_nic_net_xmit(skb, ndev); + if (tx_ret == NETDEV_TX_OK) + good_cnt++; + else + netdev_err(ndev, "hns3_lb_run_test xmit failed: %d\n", + tx_ret); + } + if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) { + ret_val = HNS3_NIC_LB_TEST_TX_CNT_ERR; + netdev_err(ndev, "mode %d sent fail, cnt=0x%x, budget=0x%x\n", + mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM); + goto out; + } + + /* Allow 200 milliseconds for packets to go from Tx to Rx */ + msleep(200); + + good_cnt = hns3_lb_check_rx_ring(priv, HNS3_NIC_LB_TEST_PKT_NUM); + if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) { + ret_val = HNS3_NIC_LB_TEST_RX_CNT_ERR; + netdev_err(ndev, "mode %d recv fail, cnt=0x%x, budget=0x%x\n", + mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM); + } + +out: + hns3_lb_clear_tx_ring(priv, HNS3_NIC_LB_TEST_RING_ID, + HNS3_NIC_LB_TEST_RING_ID, + HNS3_NIC_LB_TEST_PKT_NUM); + + kfree_skb(skb); + return ret_val; +} + +/** + * hns3_nic_self_test - self test + * @ndev: net device + * @eth_test: test cmd + * @data: test result + */ +static void hns3_self_test(struct net_device *ndev, + struct ethtool_test *eth_test, u64 *data) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + int st_param[HNS3_SELF_TEST_TPYE_NUM][2]; + bool if_running = netif_running(ndev); + int test_index = 0; + u32 i; + + /* Only do offline selftest, or pass by default */ + if (eth_test->flags != ETH_TEST_FL_OFFLINE) + return; + + st_param[HNAE3_MAC_INTER_LOOP_MAC][0] = HNAE3_MAC_INTER_LOOP_MAC; + st_param[HNAE3_MAC_INTER_LOOP_MAC][1] = + h->flags & HNAE3_SUPPORT_MAC_LOOPBACK; + + if (if_running) + dev_close(ndev); + + set_bit(HNS3_NIC_STATE_TESTING, &priv->state); + + for (i = 0; i < HNS3_SELF_TEST_TPYE_NUM; i++) { + enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0]; + + if (!st_param[i][1]) + continue; + + data[test_index] = hns3_lp_up(ndev, loop_type); + if (!data[test_index]) { + data[test_index] = hns3_lp_run_test(ndev, loop_type); + 
hns3_lp_down(ndev); + } + + if (data[test_index]) + eth_test->flags |= ETH_TEST_FL_FAILED; + + test_index++; + } + + clear_bit(HNS3_NIC_STATE_TESTING, &priv->state); + + if (if_running) + dev_open(ndev); +} + +static void hns3_driv_to_eth_caps(u32 caps, struct ethtool_link_ksettings *cmd, + bool is_advertised) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(hns3_lm_map); i++) { + if (!(caps & hns3_lm_map[i].hns3_link_mode)) + continue; + + if (is_advertised) + __set_bit(hns3_lm_map[i].ethtool_link_mode, + cmd->link_modes.advertising); + else + __set_bit(hns3_lm_map[i].ethtool_link_mode, + cmd->link_modes.supported); + } +} + +static int hns3_get_sset_count(struct net_device *netdev, int stringset) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + const struct hnae3_ae_ops *ops = h->ae_algo->ops; + + if (!ops->get_sset_count) + return -EOPNOTSUPP; + + switch (stringset) { + case ETH_SS_STATS: + return ((HNS3_TQP_STATS_COUNT * h->kinfo.num_tqps) + + ops->get_sset_count(h, stringset)); + + case ETH_SS_TEST: + return ops->get_sset_count(h, stringset); + } + + return 0; +} + +static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, + u32 stat_count, u32 num_tqps) +{ +#define MAX_PREFIX_SIZE (8 + 4) + u32 size_left; + u32 i, j; + u32 n1; + + for (i = 0; i < num_tqps; i++) { + for (j = 0; j < stat_count; j++) { + data[ETH_GSTRING_LEN - 1] = '\0'; + + /* first, prepend the prefix string */ + n1 = snprintf(data, MAX_PREFIX_SIZE, "rcb_q%d_", i); + n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1); + size_left = (ETH_GSTRING_LEN - 1) - n1; + + /* now, concatenate the stats string to it */ + strncat(data, stats[j].stats_string, size_left); + data += ETH_GSTRING_LEN; + } + } + + return data; +} + +static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + + /* get strings for Tx */ + data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT, + kinfo->num_tqps); + + /* get strings for Rx */ + data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT, + kinfo->num_tqps); + + return data; +} + +static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + const struct hnae3_ae_ops *ops = h->ae_algo->ops; + char *buff = (char *)data; + + if (!ops->get_strings) + return; + + switch (stringset) { + case ETH_SS_STATS: + buff = hns3_get_strings_tqps(h, buff); + h->ae_algo->ops->get_strings(h, stringset, (u8 *)buff); + break; + case ETH_SS_TEST: + ops->get_strings(h, stringset, data); + break; + } +} + +static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) +{ + struct hns3_nic_priv *nic_priv = (struct hns3_nic_priv *)handle->priv; + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hns3_enet_ring *ring; + u8 *stat; + u32 i; + + /* get stats for Tx */ + for (i = 0; i < kinfo->num_tqps; i++) { + ring = nic_priv->ring_data[i].ring; + for (i = 0; i < HNS3_TXQ_STATS_COUNT; i++) { + stat = (u8 *)ring + hns3_txq_stats[i].stats_offset; + *data++ = *(u64 *)stat; + } + } + + /* get stats for Rx */ + for (i = 0; i < kinfo->num_tqps; i++) { + ring = nic_priv->ring_data[i + kinfo->num_tqps].ring; + for (i = 0; i < HNS3_RXQ_STATS_COUNT; i++) { + stat = (u8 *)ring + hns3_rxq_stats[i].stats_offset; + *data++ = *(u64 *)stat; + } + } + + return data; +} + +/* hns3_get_stats - get detail statistics. + * @netdev: net device + * @stats: statistics info. + * @data: statistics data. 
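As an aside on the string handling in hns3_update_strings() above: every per-queue statistic name is built from a "rcb_q<N>_" prefix plus the stat's own name, and the result must fit ethtool's fixed-size string slots. A small stand-alone sketch of that prefix-and-truncate formatting is below; it is illustrative user-space C only, the 32-byte slot matches ETH_GSTRING_LEN, and the stat name used is just an example.

#include <stdio.h>
#include <string.h>

#define GSTRING_LEN 32		/* same size as ETH_GSTRING_LEN */
#define MAX_PREFIX_SIZE (8 + 4)	/* room for "rcb_q<N>_" */

/* Compose "rcb_q<queue>_<stat>" into a fixed-size slot without overflow. */
static void format_stat_name(char *slot, unsigned int queue, const char *stat)
{
	int n;

	slot[GSTRING_LEN - 1] = '\0';

	/* first, the queue prefix, clamped to the prefix budget */
	n = snprintf(slot, MAX_PREFIX_SIZE, "rcb_q%u_", queue);
	if (n > MAX_PREFIX_SIZE - 1)
		n = MAX_PREFIX_SIZE - 1;

	/* then append the stat name into whatever space is left */
	strncat(slot, stat, (GSTRING_LEN - 1) - n);
}

int main(void)
{
	char slot[GSTRING_LEN];

	format_stat_name(slot, 3, "io_err_cnt");
	printf("%s\n", slot);	/* prints "rcb_q3_io_err_cnt" */
	return 0;
}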
+ */ +static void hns3_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + u64 *p = data; + + if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) { + netdev_err(netdev, "could not get any statistics\n"); + return; + } + + h->ae_algo->ops->update_stats(h, &netdev->stats); + + /* get per-queue stats */ + p = hns3_get_stats_tqps(h, p); + + /* get MAC & other misc hardware stats */ + h->ae_algo->ops->get_stats(h, p); +} + +static void hns3_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + strncpy(drvinfo->version, hns3_driver_version, + sizeof(drvinfo->version)); + drvinfo->version[sizeof(drvinfo->version) - 1] = '\0'; + + strncpy(drvinfo->driver, h->pdev->driver->name, + sizeof(drvinfo->driver)); + drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0'; + + strncpy(drvinfo->bus_info, pci_name(h->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0'; + + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", + priv->ae_handle->ae_algo->ops->get_fw_version(h)); +} + +static u32 hns3_get_link(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_status) + return h->ae_algo->ops->get_status(h); + else + return 0; +} + +static void hns3_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *param) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int queue_num = h->kinfo.num_tqps; + + param->tx_max_pending = HNS3_RING_MAX_PENDING; + param->rx_max_pending = HNS3_RING_MAX_PENDING; + + param->tx_pending = priv->ring_data[0].ring->desc_num; + param->rx_pending = priv->ring_data[queue_num].ring->desc_num; +} + +static void hns3_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *param) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_pauseparam) + h->ae_algo->ops->get_pauseparam(h, ¶m->autoneg, + ¶m->rx_pause, ¶m->tx_pause); +} + +static int hns3_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + u32 supported_caps; + u32 advertised_caps; + u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN; + u8 link_stat; + + if (!h->ae_algo || !h->ae_algo->ops) + return -EOPNOTSUPP; + + /* 1.auto_neg & speed & duplex from cmd */ + if (netdev->phydev) + phy_ethtool_ksettings_get(netdev->phydev, cmd); + else if (h->ae_algo->ops->get_ksettings_an_result) + h->ae_algo->ops->get_ksettings_an_result(h, + &cmd->base.autoneg, + &cmd->base.speed, + &cmd->base.duplex); + else + return -EOPNOTSUPP; + + link_stat = hns3_get_link(netdev); + if (!link_stat) { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + + /* 2.media_type get from bios parameter block */ + if (h->ae_algo->ops->get_media_type) { + h->ae_algo->ops->get_media_type(h, &media_type); + + switch (media_type) { + case HNAE3_MEDIA_TYPE_FIBER: + cmd->base.port = PORT_FIBRE; + supported_caps = HNS3_LM_FIBRE_BIT | + HNS3_LM_AUTONEG_BIT | + HNS3_LM_PAUSE_BIT | + HNS3_LM_1000BASET_FULL_BIT; + + advertised_caps = supported_caps; + break; + case HNAE3_MEDIA_TYPE_COPPER: + cmd->base.port = PORT_TP; + supported_caps = HNS3_LM_TP_BIT | + HNS3_LM_AUTONEG_BIT | + 
HNS3_LM_PAUSE_BIT | + HNS3_LM_1000BASET_FULL_BIT | + HNS3_LM_100BASET_FULL_BIT | + HNS3_LM_100BASET_HALF_BIT | + HNS3_LM_10BASET_FULL_BIT | + HNS3_LM_10BASET_HALF_BIT; + advertised_caps = supported_caps; + break; + case HNAE3_MEDIA_TYPE_BACKPLANE: + cmd->base.port = PORT_NONE; + supported_caps = HNS3_LM_BACKPLANE_BIT | + HNS3_LM_PAUSE_BIT | + HNS3_LM_AUTONEG_BIT | + HNS3_LM_1000BASET_FULL_BIT | + HNS3_LM_100BASET_FULL_BIT | + HNS3_LM_100BASET_HALF_BIT | + HNS3_LM_10BASET_FULL_BIT | + HNS3_LM_10BASET_HALF_BIT; + + advertised_caps = supported_caps; + break; + case HNAE3_MEDIA_TYPE_UNKNOWN: + default: + cmd->base.port = PORT_OTHER; + supported_caps = 0; + advertised_caps = 0; + break; + } + + if (!cmd->base.autoneg) + advertised_caps &= ~HNS3_LM_AUTONEG_BIT; + + /* now, map driver link modes to ethtool link modes */ + hns3_driv_to_eth_caps(supported_caps, cmd, false); + hns3_driv_to_eth_caps(advertised_caps, cmd, true); + } + + /* 3.mdix_ctrl&mdix get from phy reg */ + if (h->ae_algo->ops->get_mdix_mode) + h->ae_algo->ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl, + &cmd->base.eth_tp_mdix); + /* 4.mdio_support */ + cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; + + return 0; +} + +static int hns3_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + /* Only support ksettings_set for netdev with phy attached for now */ + if (netdev->phydev) + return phy_ethtool_ksettings_set(netdev->phydev, cmd); + + return -EOPNOTSUPP; +} + +static u32 hns3_get_rss_key_size(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!h->ae_algo || !h->ae_algo->ops || + !h->ae_algo->ops->get_rss_key_size) + return -EOPNOTSUPP; + + return h->ae_algo->ops->get_rss_key_size(h); +} + +static u32 hns3_get_rss_indir_size(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!h->ae_algo || !h->ae_algo->ops || + !h->ae_algo->ops->get_rss_indir_size) + return -EOPNOTSUPP; + + return h->ae_algo->ops->get_rss_indir_size(h); +} + +static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss) + return -EOPNOTSUPP; + + return h->ae_algo->ops->get_rss(h, indir, key, hfunc); +} + +static int hns3_set_rss(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss) + return -EOPNOTSUPP; + + /* currently we only support Toeplitz hash */ + if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) { + netdev_err(netdev, + "hash func not supported (only Toeplitz hash)\n"); + return -EOPNOTSUPP; + } + if (!indir) { + netdev_err(netdev, + "set rss failed for indir is empty\n"); + return -EOPNOTSUPP; + } + + return h->ae_algo->ops->set_rss(h, indir, key, hfunc); +} + +static int hns3_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss_tuple) + return -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = h->kinfo.num_tc * h->kinfo.rss_size; + break; + case ETHTOOL_GRXFH: + return h->ae_algo->ops->get_rss_tuple(h, cmd); + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, + u32 
new_desc_num) +{ + struct hnae3_handle *h = priv->ae_handle; + int i; + + h->kinfo.num_desc = new_desc_num; + + for (i = 0; i < h->kinfo.num_tqps * 2; i++) + priv->ring_data[i].ring->desc_num = new_desc_num; + + return hns3_init_all_ring(priv); +} + +static int hns3_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *param) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + bool if_running = netif_running(ndev); + u32 old_desc_num, new_desc_num; + int ret; + + if (param->rx_mini_pending || param->rx_jumbo_pending) + return -EINVAL; + + if (param->tx_pending != param->rx_pending) { + netdev_err(ndev, + "Descriptors of tx and rx must be equal"); + return -EINVAL; + } + + if (param->tx_pending > HNS3_RING_MAX_PENDING || + param->tx_pending < HNS3_RING_MIN_PENDING) { + netdev_err(ndev, + "Descriptors requested (Tx/Rx: %d) out of range [%d-%d]\n", + param->tx_pending, HNS3_RING_MIN_PENDING, + HNS3_RING_MAX_PENDING); + return -EINVAL; + } + + new_desc_num = param->tx_pending; + + /* Hardware requires that its descriptors must be multiple of eight */ + new_desc_num = ALIGN(new_desc_num, HNS3_RING_BD_MULTIPLE); + old_desc_num = h->kinfo.num_desc; + if (old_desc_num == new_desc_num) + return 0; + + netdev_info(ndev, + "Changing descriptor count from %d to %d.\n", + old_desc_num, new_desc_num); + + if (if_running) + dev_close(ndev); + + ret = hns3_uninit_all_ring(priv); + if (ret) + return ret; + + ret = hns3_change_all_ring_bd_num(priv, new_desc_num); + if (ret) { + ret = hns3_change_all_ring_bd_num(priv, old_desc_num); + if (ret) { + netdev_err(ndev, + "Revert to old bd num fail, ret=%d.\n", ret); + return ret; + } + } + + if (if_running) + ret = dev_open(ndev); + + return ret; +} + +static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss_tuple) + return -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXFH: + return h->ae_algo->ops->set_rss_tuple(h, cmd); + default: + return -EOPNOTSUPP; + } +} + +static int hns3_nway_reset(struct net_device *netdev) +{ + struct phy_device *phy = netdev->phydev; + + if (!netif_running(netdev)) + return 0; + + /* Only support nway_reset for netdev with phy attached for now */ + if (!phy) + return -EOPNOTSUPP; + + if (phy->autoneg != AUTONEG_ENABLE) + return -EINVAL; + + return genphy_restart_aneg(phy); +} + +static const struct ethtool_ops hns3vf_ethtool_ops = { + .get_drvinfo = hns3_get_drvinfo, + .get_ringparam = hns3_get_ringparam, + .set_ringparam = hns3_set_ringparam, + .get_strings = hns3_get_strings, + .get_ethtool_stats = hns3_get_stats, + .get_sset_count = hns3_get_sset_count, + .get_rxnfc = hns3_get_rxnfc, + .get_rxfh_key_size = hns3_get_rss_key_size, + .get_rxfh_indir_size = hns3_get_rss_indir_size, + .get_rxfh = hns3_get_rss, + .set_rxfh = hns3_set_rss, + .get_link_ksettings = hns3_get_link_ksettings, +}; + +static const struct ethtool_ops hns3_ethtool_ops = { + .self_test = hns3_self_test, + .get_drvinfo = hns3_get_drvinfo, + .get_link = hns3_get_link, + .get_ringparam = hns3_get_ringparam, + .set_ringparam = hns3_set_ringparam, + .get_pauseparam = hns3_get_pauseparam, + .get_strings = hns3_get_strings, + .get_ethtool_stats = hns3_get_stats, + .get_sset_count = hns3_get_sset_count, + .get_rxnfc = hns3_get_rxnfc, + .set_rxnfc = hns3_set_rxnfc, + .get_rxfh_key_size = hns3_get_rss_key_size, + .get_rxfh_indir_size = hns3_get_rss_indir_size, + 
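The get/set_ringparam handlers above are what a user-space "ethtool -g / -G <dev>" eventually reaches. A hedged sketch of the read side using the classic SIOCETHTOOL ioctl is below; this is generic ethtool UAPI usage rather than anything specific to this driver, and the interface name is taken from the command line.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	struct ethtool_ringparam ring = { .cmd = ETHTOOL_GRINGPARAM };
	struct ifreq ifr;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <netdev>\n", argv[0]);
		return 1;
	}

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ring;

	/* ask the driver's get_ringparam handler for current/max BD counts */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		close(fd);
		return 1;
	}

	printf("tx_pending=%u (max %u), rx_pending=%u (max %u)\n",
	       ring.tx_pending, ring.tx_max_pending,
	       ring.rx_pending, ring.rx_max_pending);
	close(fd);
	return 0;
}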
.get_rxfh = hns3_get_rss, + .set_rxfh = hns3_set_rss, + .get_link_ksettings = hns3_get_link_ksettings, + .set_link_ksettings = hns3_set_link_ksettings, + .nway_reset = hns3_nway_reset, +}; + +void hns3_ethtool_set_ops(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (h->flags & HNAE3_SUPPORT_VF) + netdev->ethtool_ops = &hns3vf_ethtool_ops; + else + netdev->ethtool_ops = &hns3_ethtool_ops; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile index d2b20d01a58c..d077fa02b66e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile @@ -8,8 +8,3 @@ obj-$(CONFIG_HNS3_HCLGE) += hclge.o hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o - -obj-$(CONFIG_HNS3_ENET) += hns3.o -hns3-objs = hns3_enet.o hns3_ethtool.o - -hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c deleted file mode 100644 index 925619a7c50a..000000000000 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright (c) 2016-2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include "hnae3.h" -#include "hns3_enet.h" - -static -int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets) -{ - struct hnae3_handle *h = hns3_get_handle(ndev); - - if (h->kinfo.dcb_ops->ieee_getets) - return h->kinfo.dcb_ops->ieee_getets(h, ets); - - return -EOPNOTSUPP; -} - -static -int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets) -{ - struct hnae3_handle *h = hns3_get_handle(ndev); - - if (h->kinfo.dcb_ops->ieee_setets) - return h->kinfo.dcb_ops->ieee_setets(h, ets); - - return -EOPNOTSUPP; -} - -static -int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc) -{ - struct hnae3_handle *h = hns3_get_handle(ndev); - - if (h->kinfo.dcb_ops->ieee_getpfc) - return h->kinfo.dcb_ops->ieee_getpfc(h, pfc); - - return -EOPNOTSUPP; -} - -static -int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc) -{ - struct hnae3_handle *h = hns3_get_handle(ndev); - - if (h->kinfo.dcb_ops->ieee_setpfc) - return h->kinfo.dcb_ops->ieee_setpfc(h, pfc); - - return -EOPNOTSUPP; -} - -/* DCBX configuration */ -static u8 hns3_dcbnl_getdcbx(struct net_device *ndev) -{ - struct hnae3_handle *h = hns3_get_handle(ndev); - - if (h->kinfo.dcb_ops->getdcbx) - return h->kinfo.dcb_ops->getdcbx(h); - - return 0; -} - -/* return 0 if successful, otherwise fail */ -static u8 hns3_dcbnl_setdcbx(struct net_device *ndev, u8 mode) -{ - struct hnae3_handle *h = hns3_get_handle(ndev); - - if (h->kinfo.dcb_ops->setdcbx) - return h->kinfo.dcb_ops->setdcbx(h, mode); - - return 1; -} - -static const struct dcbnl_rtnl_ops hns3_dcbnl_ops = { - .ieee_getets = hns3_dcbnl_ieee_getets, - .ieee_setets = hns3_dcbnl_ieee_setets, - .ieee_getpfc = hns3_dcbnl_ieee_getpfc, - .ieee_setpfc = hns3_dcbnl_ieee_setpfc, - .getdcbx = hns3_dcbnl_getdcbx, - .setdcbx = hns3_dcbnl_setdcbx, -}; - -/* hclge_dcbnl_setup - DCBNL setup - * @handle: the corresponding vport handle - * Set up DCBNL - */ -void hns3_dcbnl_setup(struct 
hnae3_handle *handle) -{ - struct net_device *dev = handle->kinfo.netdev; - - if (!handle->kinfo.dcb_ops) - return; - - dev->dcbnl_ops = &hns3_dcbnl_ops; -} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c deleted file mode 100644 index 59415090ff0f..000000000000 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ /dev/null @@ -1,3214 +0,0 @@ -/* - * Copyright (c) 2016~2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "hnae3.h" -#include "hns3_enet.h" - -static const char hns3_driver_name[] = "hns3"; -const char hns3_driver_version[] = VERMAGIC_STRING; -static const char hns3_driver_string[] = - "Hisilicon Ethernet Network Driver for Hip08 Family"; -static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; -static struct hnae3_client client; - -/* hns3_pci_tbl - PCI Device ID Table - * - * Last entry must be all 0s - * - * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, - * Class, Class Mask, private data (not used) } - */ -static const struct pci_device_id hns3_pci_tbl[] = { - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), - HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), - HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), - HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), - HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, - {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), - HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, - /* required last entry */ - {0, } -}; -MODULE_DEVICE_TABLE(pci, hns3_pci_tbl); - -static irqreturn_t hns3_irq_handle(int irq, void *dev) -{ - struct hns3_enet_tqp_vector *tqp_vector = dev; - - napi_schedule(&tqp_vector->napi); - - return IRQ_HANDLED; -} - -static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) -{ - struct hns3_enet_tqp_vector *tqp_vectors; - unsigned int i; - - for (i = 0; i < priv->vector_num; i++) { - tqp_vectors = &priv->tqp_vector[i]; - - if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) - continue; - - /* release the irq resource */ - free_irq(tqp_vectors->vector_irq, tqp_vectors); - tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED; - } -} - -static int hns3_nic_init_irq(struct hns3_nic_priv *priv) -{ - struct hns3_enet_tqp_vector *tqp_vectors; - int txrx_int_idx = 0; - int rx_int_idx = 0; - int tx_int_idx = 0; - unsigned int i; - int ret; - - for (i = 0; i < priv->vector_num; i++) { - tqp_vectors = &priv->tqp_vector[i]; - - if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED) - continue; - - if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) { - snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, - "%s-%s-%d", priv->netdev->name, "TxRx", - txrx_int_idx++); - txrx_int_idx++; - } else if (tqp_vectors->rx_group.ring) { - snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, - "%s-%s-%d", priv->netdev->name, "Rx", - rx_int_idx++); - } else if (tqp_vectors->tx_group.ring) { - snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, - "%s-%s-%d", 
priv->netdev->name, "Tx", - tx_int_idx++); - } else { - /* Skip this unused q_vector */ - continue; - } - - tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0'; - - ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0, - tqp_vectors->name, - tqp_vectors); - if (ret) { - netdev_err(priv->netdev, "request irq(%d) fail\n", - tqp_vectors->vector_irq); - return ret; - } - - tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; - } - - return 0; -} - -static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector, - u32 mask_en) -{ - writel(mask_en, tqp_vector->mask_addr); -} - -static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector) -{ - napi_enable(&tqp_vector->napi); - - /* enable vector */ - hns3_mask_vector_irq(tqp_vector, 1); -} - -static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector) -{ - /* disable vector */ - hns3_mask_vector_irq(tqp_vector, 0); - - disable_irq(tqp_vector->vector_irq); - napi_disable(&tqp_vector->napi); -} - -static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector, - u32 gl_value) -{ - /* this defines the configuration for GL (Interrupt Gap Limiter) - * GL defines inter interrupt gap. - * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing - */ - writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); - writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); - writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET); -} - -static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector, - u32 rl_value) -{ - /* this defines the configuration for RL (Interrupt Rate Limiter). - * Rl defines rate of interrupts i.e. number of interrupts-per-second - * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing - */ - writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET); -} - -static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector) -{ - /* initialize the configuration for interrupt coalescing. - * 1. GL (Interrupt Gap Limiter) - * 2. RL (Interrupt Rate Limiter) - */ - - /* Default :enable interrupt coalesce */ - tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K; - tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K; - hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K); - /* for now we are disabling Interrupt RL - we - * will re-enable later - */ - hns3_set_vector_coalesc_rl(tqp_vector, 0); - tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW; - tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW; -} - -static int hns3_nic_set_real_num_queue(struct net_device *netdev) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - struct hnae3_knic_private_info *kinfo = &h->kinfo; - unsigned int queue_size = kinfo->rss_size * kinfo->num_tc; - int ret; - - ret = netif_set_real_num_tx_queues(netdev, queue_size); - if (ret) { - netdev_err(netdev, - "netif_set_real_num_tx_queues fail, ret=%d!\n", - ret); - return ret; - } - - ret = netif_set_real_num_rx_queues(netdev, queue_size); - if (ret) { - netdev_err(netdev, - "netif_set_real_num_rx_queues fail, ret=%d!\n", ret); - return ret; - } - - return 0; -} - -static int hns3_nic_net_up(struct net_device *netdev) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; - int i, j; - int ret; - - /* get irq resource for all vectors */ - ret = hns3_nic_init_irq(priv); - if (ret) { - netdev_err(netdev, "hns init irq failed! 
ret=%d\n", ret); - return ret; - } - - /* enable the vectors */ - for (i = 0; i < priv->vector_num; i++) - hns3_vector_enable(&priv->tqp_vector[i]); - - /* start the ae_dev */ - ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; - if (ret) - goto out_start_err; - - return 0; - -out_start_err: - for (j = i - 1; j >= 0; j--) - hns3_vector_disable(&priv->tqp_vector[j]); - - hns3_nic_uninit_irq(priv); - - return ret; -} - -static int hns3_nic_net_open(struct net_device *netdev) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - int ret; - - netif_carrier_off(netdev); - - ret = hns3_nic_set_real_num_queue(netdev); - if (ret) - return ret; - - ret = hns3_nic_net_up(netdev); - if (ret) { - netdev_err(netdev, - "hns net up fail, ret=%d!\n", ret); - return ret; - } - - priv->last_reset_time = jiffies; - return 0; -} - -static void hns3_nic_net_down(struct net_device *netdev) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - const struct hnae3_ae_ops *ops; - int i; - - /* stop ae_dev */ - ops = priv->ae_handle->ae_algo->ops; - if (ops->stop) - ops->stop(priv->ae_handle); - - /* disable vectors */ - for (i = 0; i < priv->vector_num; i++) - hns3_vector_disable(&priv->tqp_vector[i]); - - /* free irq resources */ - hns3_nic_uninit_irq(priv); -} - -static int hns3_nic_net_stop(struct net_device *netdev) -{ - netif_tx_stop_all_queues(netdev); - netif_carrier_off(netdev); - - hns3_nic_net_down(netdev); - - return 0; -} - -static int hns3_nic_uc_sync(struct net_device *netdev, - const unsigned char *addr) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (h->ae_algo->ops->add_uc_addr) - return h->ae_algo->ops->add_uc_addr(h, addr); - - return 0; -} - -static int hns3_nic_uc_unsync(struct net_device *netdev, - const unsigned char *addr) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (h->ae_algo->ops->rm_uc_addr) - return h->ae_algo->ops->rm_uc_addr(h, addr); - - return 0; -} - -static int hns3_nic_mc_sync(struct net_device *netdev, - const unsigned char *addr) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (h->ae_algo->ops->add_mc_addr) - return h->ae_algo->ops->add_mc_addr(h, addr); - - return 0; -} - -static int hns3_nic_mc_unsync(struct net_device *netdev, - const unsigned char *addr) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (h->ae_algo->ops->rm_mc_addr) - return h->ae_algo->ops->rm_mc_addr(h, addr); - - return 0; -} - -static void hns3_nic_set_rx_mode(struct net_device *netdev) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (h->ae_algo->ops->set_promisc_mode) { - if (netdev->flags & IFF_PROMISC) - h->ae_algo->ops->set_promisc_mode(h, 1); - else - h->ae_algo->ops->set_promisc_mode(h, 0); - } - if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync)) - netdev_err(netdev, "sync uc address fail\n"); - if (netdev->flags & IFF_MULTICAST) - if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync)) - netdev_err(netdev, "sync mc address fail\n"); -} - -static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, - u16 *mss, u32 *type_cs_vlan_tso) -{ - u32 l4_offset, hdr_len; - union l3_hdr_info l3; - union l4_hdr_info l4; - u32 l4_paylen; - int ret; - - if (!skb_is_gso(skb)) - return 0; - - ret = skb_cow_head(skb, 0); - if (ret) - return ret; - - l3.hdr = skb_network_header(skb); - l4.hdr = skb_transport_header(skb); - - /* Software should clear the IPv4's checksum field when tso is - * needed. 
- */ - if (l3.v4->version == 4) - l3.v4->check = 0; - - /* tunnel packet.*/ - if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | - SKB_GSO_GRE_CSUM | - SKB_GSO_UDP_TUNNEL | - SKB_GSO_UDP_TUNNEL_CSUM)) { - if ((!(skb_shinfo(skb)->gso_type & - SKB_GSO_PARTIAL)) && - (skb_shinfo(skb)->gso_type & - SKB_GSO_UDP_TUNNEL_CSUM)) { - /* Software should clear the udp's checksum - * field when tso is needed. - */ - l4.udp->check = 0; - } - /* reset l3&l4 pointers from outer to inner headers */ - l3.hdr = skb_inner_network_header(skb); - l4.hdr = skb_inner_transport_header(skb); - - /* Software should clear the IPv4's checksum field when - * tso is needed. - */ - if (l3.v4->version == 4) - l3.v4->check = 0; - } - - /* normal or tunnel packet*/ - l4_offset = l4.hdr - skb->data; - hdr_len = (l4.tcp->doff * 4) + l4_offset; - - /* remove payload length from inner pseudo checksum when tso*/ - l4_paylen = skb->len - l4_offset; - csum_replace_by_diff(&l4.tcp->check, - (__force __wsum)htonl(l4_paylen)); - - /* find the txbd field values */ - *paylen = skb->len - hdr_len; - hnae_set_bit(*type_cs_vlan_tso, - HNS3_TXD_TSO_B, 1); - - /* get MSS for TSO */ - *mss = skb_shinfo(skb)->gso_size; - - return 0; -} - -static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, - u8 *il4_proto) -{ - union { - struct iphdr *v4; - struct ipv6hdr *v6; - unsigned char *hdr; - } l3; - unsigned char *l4_hdr; - unsigned char *exthdr; - u8 l4_proto_tmp; - __be16 frag_off; - - /* find outer header point */ - l3.hdr = skb_network_header(skb); - l4_hdr = skb_inner_transport_header(skb); - - if (skb->protocol == htons(ETH_P_IPV6)) { - exthdr = l3.hdr + sizeof(*l3.v6); - l4_proto_tmp = l3.v6->nexthdr; - if (l4_hdr != exthdr) - ipv6_skip_exthdr(skb, exthdr - skb->data, - &l4_proto_tmp, &frag_off); - } else if (skb->protocol == htons(ETH_P_IP)) { - l4_proto_tmp = l3.v4->protocol; - } else { - return -EINVAL; - } - - *ol4_proto = l4_proto_tmp; - - /* tunnel packet */ - if (!skb->encapsulation) { - *il4_proto = 0; - return 0; - } - - /* find inner header point */ - l3.hdr = skb_inner_network_header(skb); - l4_hdr = skb_inner_transport_header(skb); - - if (l3.v6->version == 6) { - exthdr = l3.hdr + sizeof(*l3.v6); - l4_proto_tmp = l3.v6->nexthdr; - if (l4_hdr != exthdr) - ipv6_skip_exthdr(skb, exthdr - skb->data, - &l4_proto_tmp, &frag_off); - } else if (l3.v4->version == 4) { - l4_proto_tmp = l3.v4->protocol; - } - - *il4_proto = l4_proto_tmp; - - return 0; -} - -static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, - u8 il4_proto, u32 *type_cs_vlan_tso, - u32 *ol_type_vlan_len_msec) -{ - union { - struct iphdr *v4; - struct ipv6hdr *v6; - unsigned char *hdr; - } l3; - union { - struct tcphdr *tcp; - struct udphdr *udp; - struct gre_base_hdr *gre; - unsigned char *hdr; - } l4; - unsigned char *l2_hdr; - u8 l4_proto = ol4_proto; - u32 ol2_len; - u32 ol3_len; - u32 ol4_len; - u32 l2_len; - u32 l3_len; - - l3.hdr = skb_network_header(skb); - l4.hdr = skb_transport_header(skb); - - /* compute L2 header size for normal packet, defined in 2 Bytes */ - l2_len = l3.hdr - skb->data; - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, l2_len >> 1); - - /* tunnel packet*/ - if (skb->encapsulation) { - /* compute OL2 header size, defined in 2 Bytes */ - ol2_len = l2_len; - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, ol2_len >> 1); - - /* compute OL3 header size, defined in 4 Bytes */ - ol3_len = l4.hdr - l3.hdr; - hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M, - 
HNS3_TXD_L3LEN_S, ol3_len >> 2); - - /* MAC in UDP, MAC in GRE (0x6558)*/ - if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) { - /* switch MAC header ptr from outer to inner header.*/ - l2_hdr = skb_inner_mac_header(skb); - - /* compute OL4 header size, defined in 4 Bytes. */ - ol4_len = l2_hdr - l4.hdr; - hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, ol4_len >> 2); - - /* switch IP header ptr from outer to inner header */ - l3.hdr = skb_inner_network_header(skb); - - /* compute inner l2 header size, defined in 2 Bytes. */ - l2_len = l3.hdr - l2_hdr; - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, - HNS3_TXD_L2LEN_S, l2_len >> 1); - } else { - /* skb packet types not supported by hardware, - * txbd len fild doesn't be filled. - */ - return; - } - - /* switch L4 header pointer from outer to inner */ - l4.hdr = skb_inner_transport_header(skb); - - l4_proto = il4_proto; - } - - /* compute inner(/normal) L3 header size, defined in 4 Bytes */ - l3_len = l4.hdr - l3.hdr; - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M, - HNS3_TXD_L3LEN_S, l3_len >> 2); - - /* compute inner(/normal) L4 header size, defined in 4 Bytes */ - switch (l4_proto) { - case IPPROTO_TCP: - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, l4.tcp->doff); - break; - case IPPROTO_SCTP: - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2)); - break; - case IPPROTO_UDP: - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, - HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2)); - break; - default: - /* skb packet types not supported by hardware, - * txbd len fild doesn't be filled. - */ - return; - } -} - -static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, - u8 il4_proto, u32 *type_cs_vlan_tso, - u32 *ol_type_vlan_len_msec) -{ - union { - struct iphdr *v4; - struct ipv6hdr *v6; - unsigned char *hdr; - } l3; - u32 l4_proto = ol4_proto; - - l3.hdr = skb_network_header(skb); - - /* define OL3 type and tunnel type(OL4).*/ - if (skb->encapsulation) { - /* define outer network header type.*/ - if (skb->protocol == htons(ETH_P_IP)) { - if (skb_is_gso(skb)) - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_CSUM); - else - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_NO_CSUM); - - } else if (skb->protocol == htons(ETH_P_IPV6)) { - hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M, - HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); - } - - /* define tunnel type(OL4).*/ - switch (l4_proto) { - case IPPROTO_UDP: - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_M, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_MAC_IN_UDP); - break; - case IPPROTO_GRE: - hnae_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_M, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_NVGRE); - break; - default: - /* drop the skb tunnel packet if hardware don't support, - * because hardware can't calculate csum when TSO. - */ - if (skb_is_gso(skb)) - return -EDOM; - - /* the stack computes the IP header already, - * driver calculate l4 checksum when not TSO. - */ - skb_checksum_help(skb); - return 0; - } - - l3.hdr = skb_inner_network_header(skb); - l4_proto = il4_proto; - } - - if (l3.v4->version == 4) { - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, - HNS3_TXD_L3T_S, HNS3_L3T_IPV4); - - /* the stack computes the IP header already, the only time we - * need the hardware to recompute it is in the case of TSO. 
- */ - if (skb_is_gso(skb)) - hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); - - hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); - } else if (l3.v6->version == 6) { - hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, - HNS3_TXD_L3T_S, HNS3_L3T_IPV6); - hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); - } - - switch (l4_proto) { - case IPPROTO_TCP: - hnae_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_TCP); - break; - case IPPROTO_UDP: - hnae_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_UDP); - break; - case IPPROTO_SCTP: - hnae_set_field(*type_cs_vlan_tso, - HNS3_TXD_L4T_M, - HNS3_TXD_L4T_S, - HNS3_L4T_SCTP); - break; - default: - /* drop the skb tunnel packet if hardware don't support, - * because hardware can't calculate csum when TSO. - */ - if (skb_is_gso(skb)) - return -EDOM; - - /* the stack computes the IP header already, - * driver calculate l4 checksum when not TSO. - */ - skb_checksum_help(skb); - return 0; - } - - return 0; -} - -static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) -{ - /* Config bd buffer end */ - hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, - HNS3_TXD_BDTYPE_M, 0); - hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); - hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); - hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0); -} - -static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, - int size, dma_addr_t dma, int frag_end, - enum hns_desc_type type) -{ - struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; - struct hns3_desc *desc = &ring->desc[ring->next_to_use]; - u32 ol_type_vlan_len_msec = 0; - u16 bdtp_fe_sc_vld_ra_ri = 0; - u32 type_cs_vlan_tso = 0; - struct sk_buff *skb; - u32 paylen = 0; - u16 mss = 0; - __be16 protocol; - u8 ol4_proto; - u8 il4_proto; - int ret; - - /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ - desc_cb->priv = priv; - desc_cb->length = size; - desc_cb->dma = dma; - desc_cb->type = type; - - /* now, fill the descriptor */ - desc->addr = cpu_to_le64(dma); - desc->tx.send_size = cpu_to_le16((u16)size); - hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end); - desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri); - - if (type == DESC_TYPE_SKB) { - skb = (struct sk_buff *)priv; - paylen = skb->len; - - if (skb->ip_summed == CHECKSUM_PARTIAL) { - skb_reset_mac_len(skb); - protocol = skb->protocol; - - /* vlan packet*/ - if (protocol == htons(ETH_P_8021Q)) { - protocol = vlan_get_protocol(skb); - skb->protocol = protocol; - } - ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); - if (ret) - return ret; - hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto, - &type_cs_vlan_tso, - &ol_type_vlan_len_msec); - ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto, - &type_cs_vlan_tso, - &ol_type_vlan_len_msec); - if (ret) - return ret; - - ret = hns3_set_tso(skb, &paylen, &mss, - &type_cs_vlan_tso); - if (ret) - return ret; - } - - /* Set txbd */ - desc->tx.ol_type_vlan_len_msec = - cpu_to_le32(ol_type_vlan_len_msec); - desc->tx.type_cs_vlan_tso_len = - cpu_to_le32(type_cs_vlan_tso); - desc->tx.paylen = cpu_to_le32(paylen); - desc->tx.mss = cpu_to_le16(mss); - } - - /* move ring pointer to next.*/ - ring_ptr_move_fw(ring, next_to_use); - - return 0; -} - -static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv, - int size, dma_addr_t dma, int frag_end, - enum hns_desc_type type) -{ - unsigned int frag_buf_num; - unsigned int k; - 
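hns3_fill_desc_tso() here splits any buffer larger than one hardware BD into HNS3_MAX_BD_SIZE-sized pieces, with the final descriptor carrying the remainder. A quick stand-alone sketch of that size arithmetic follows; the 65535-byte per-BD limit is an assumption used purely for illustration and stands in for the driver's HNS3_MAX_BD_SIZE.

#include <stdio.h>

#define MAX_BD_SIZE 65535U	/* assumed per-BD byte limit, cf. HNS3_MAX_BD_SIZE */

/* Show how a 'size'-byte fragment would be carved into BDs. */
static void split_frag(unsigned int size)
{
	unsigned int frag_buf_num = (size + MAX_BD_SIZE - 1) / MAX_BD_SIZE;
	unsigned int sizeoflast = size % MAX_BD_SIZE;
	unsigned int k;

	/* an exact multiple means the last BD is a full one */
	if (!sizeoflast)
		sizeoflast = MAX_BD_SIZE;

	for (k = 0; k < frag_buf_num; k++)
		printf("BD %u: %u bytes\n", k,
		       (k == frag_buf_num - 1) ? sizeoflast : MAX_BD_SIZE);
}

int main(void)
{
	split_frag(150000);	/* -> 65535 + 65535 + 18930 */
	return 0;
}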
int sizeoflast; - int ret; - - frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; - sizeoflast = size % HNS3_MAX_BD_SIZE; - sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; - - /* When the frag size is bigger than hardware, split this frag */ - for (k = 0; k < frag_buf_num; k++) { - ret = hns3_fill_desc(ring, priv, - (k == frag_buf_num - 1) ? - sizeoflast : HNS3_MAX_BD_SIZE, - dma + HNS3_MAX_BD_SIZE * k, - frag_end && (k == frag_buf_num - 1) ? 1 : 0, - (type == DESC_TYPE_SKB && !k) ? - DESC_TYPE_SKB : DESC_TYPE_PAGE); - if (ret) - return ret; - } - - return 0; -} - -static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, - struct hns3_enet_ring *ring) -{ - struct sk_buff *skb = *out_skb; - struct skb_frag_struct *frag; - int bdnum_for_frag; - int frag_num; - int buf_num; - int size; - int i; - - size = skb_headlen(skb); - buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; - - frag_num = skb_shinfo(skb)->nr_frags; - for (i = 0; i < frag_num; i++) { - frag = &skb_shinfo(skb)->frags[i]; - size = skb_frag_size(frag); - bdnum_for_frag = - (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; - if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG) - return -ENOMEM; - - buf_num += bdnum_for_frag; - } - - if (buf_num > ring_space(ring)) - return -EBUSY; - - *bnum = buf_num; - return 0; -} - -static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, - struct hns3_enet_ring *ring) -{ - struct sk_buff *skb = *out_skb; - int buf_num; - - /* No. of segments (plus a header) */ - buf_num = skb_shinfo(skb)->nr_frags + 1; - - if (buf_num > ring_space(ring)) - return -EBUSY; - - *bnum = buf_num; - - return 0; -} - -static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig) -{ - struct device *dev = ring_to_dev(ring); - unsigned int i; - - for (i = 0; i < ring->desc_num; i++) { - /* check if this is where we started */ - if (ring->next_to_use == next_to_use_orig) - break; - - /* unmap the descriptor dma address */ - if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB) - dma_unmap_single(dev, - ring->desc_cb[ring->next_to_use].dma, - ring->desc_cb[ring->next_to_use].length, - DMA_TO_DEVICE); - else - dma_unmap_page(dev, - ring->desc_cb[ring->next_to_use].dma, - ring->desc_cb[ring->next_to_use].length, - DMA_TO_DEVICE); - - /* rollback one */ - ring_ptr_move_bw(ring, next_to_use); - } -} - -netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hns3_nic_ring_data *ring_data = - &tx_ring_data(priv, skb->queue_mapping); - struct hns3_enet_ring *ring = ring_data->ring; - struct device *dev = priv->dev; - struct netdev_queue *dev_queue; - struct skb_frag_struct *frag; - int next_to_use_head; - int next_to_use_frag; - dma_addr_t dma; - int buf_num; - int seg_num; - int size; - int ret; - int i; - - /* Prefetch the data used later */ - prefetch(skb->data); - - switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) { - case -EBUSY: - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_busy++; - u64_stats_update_end(&ring->syncp); - - goto out_net_tx_busy; - case -ENOMEM: - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); - netdev_err(netdev, "no memory to xmit!\n"); - - goto out_err_tx_ok; - default: - break; - } - - /* No. 
of segments (plus a header) */ - seg_num = skb_shinfo(skb)->nr_frags + 1; - /* Fill the first part */ - size = skb_headlen(skb); - - next_to_use_head = ring->next_to_use; - - dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma)) { - netdev_err(netdev, "TX head DMA map failed\n"); - ring->stats.sw_err_cnt++; - goto out_err_tx_ok; - } - - ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0, - DESC_TYPE_SKB); - if (ret) - goto head_dma_map_err; - - next_to_use_frag = ring->next_to_use; - /* Fill the fragments */ - for (i = 1; i < seg_num; i++) { - frag = &skb_shinfo(skb)->frags[i - 1]; - size = skb_frag_size(frag); - dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma)) { - netdev_err(netdev, "TX frag(%d) DMA map failed\n", i); - ring->stats.sw_err_cnt++; - goto frag_dma_map_err; - } - ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma, - seg_num - 1 == i ? 1 : 0, - DESC_TYPE_PAGE); - - if (ret) - goto frag_dma_map_err; - } - - /* Complete translate all packets */ - dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index); - netdev_tx_sent_queue(dev_queue, skb->len); - - wmb(); /* Commit all data before submit */ - - hnae_queue_xmit(ring->tqp, buf_num); - - return NETDEV_TX_OK; - -frag_dma_map_err: - hns_nic_dma_unmap(ring, next_to_use_frag); - -head_dma_map_err: - hns_nic_dma_unmap(ring, next_to_use_head); - -out_err_tx_ok: - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - -out_net_tx_busy: - netif_stop_subqueue(netdev, ring_data->queue_index); - smp_mb(); /* Commit all data before submit */ - - return NETDEV_TX_BUSY; -} - -static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - struct sockaddr *mac_addr = p; - int ret; - - if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) - return -EADDRNOTAVAIL; - - ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data); - if (ret) { - netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); - return ret; - } - - ether_addr_copy(netdev->dev_addr, mac_addr->sa_data); - - return 0; -} - -static int hns3_nic_set_features(struct net_device *netdev, - netdev_features_t features) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - - if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { - priv->ops.fill_desc = hns3_fill_desc_tso; - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; - } else { - priv->ops.fill_desc = hns3_fill_desc; - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; - } - - netdev->features = features; - return 0; -} - -static void -hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - int queue_num = priv->ae_handle->kinfo.num_tqps; - struct hns3_enet_ring *ring; - unsigned int start; - unsigned int idx; - u64 tx_bytes = 0; - u64 rx_bytes = 0; - u64 tx_pkts = 0; - u64 rx_pkts = 0; - - for (idx = 0; idx < queue_num; idx++) { - /* fetch the tx stats */ - ring = priv->ring_data[idx].ring; - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - tx_bytes += ring->stats.tx_bytes; - tx_pkts += ring->stats.tx_pkts; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); - - /* fetch the rx stats */ - ring = priv->ring_data[idx + queue_num].ring; - do { - start = u64_stats_fetch_begin_irq(&ring->syncp); - rx_bytes += ring->stats.rx_bytes; - rx_pkts += ring->stats.rx_pkts; - } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); - } - - 
stats->tx_bytes = tx_bytes; - stats->tx_packets = tx_pkts; - stats->rx_bytes = rx_bytes; - stats->rx_packets = rx_pkts; - - stats->rx_errors = netdev->stats.rx_errors; - stats->multicast = netdev->stats.multicast; - stats->rx_length_errors = netdev->stats.rx_length_errors; - stats->rx_crc_errors = netdev->stats.rx_crc_errors; - stats->rx_missed_errors = netdev->stats.rx_missed_errors; - - stats->tx_errors = netdev->stats.tx_errors; - stats->rx_dropped = netdev->stats.rx_dropped; - stats->tx_dropped = netdev->stats.tx_dropped; - stats->collisions = netdev->stats.collisions; - stats->rx_over_errors = netdev->stats.rx_over_errors; - stats->rx_frame_errors = netdev->stats.rx_frame_errors; - stats->rx_fifo_errors = netdev->stats.rx_fifo_errors; - stats->tx_aborted_errors = netdev->stats.tx_aborted_errors; - stats->tx_carrier_errors = netdev->stats.tx_carrier_errors; - stats->tx_fifo_errors = netdev->stats.tx_fifo_errors; - stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors; - stats->tx_window_errors = netdev->stats.tx_window_errors; - stats->rx_compressed = netdev->stats.rx_compressed; - stats->tx_compressed = netdev->stats.tx_compressed; -} - -static void hns3_add_tunnel_port(struct net_device *netdev, u16 port, - enum hns3_udp_tnl_type type) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type]; - struct hnae3_handle *h = priv->ae_handle; - - if (udp_tnl->used && udp_tnl->dst_port == port) { - udp_tnl->used++; - return; - } - - if (udp_tnl->used) { - netdev_warn(netdev, - "UDP tunnel [%d], port [%d] offload\n", type, port); - return; - } - - udp_tnl->dst_port = port; - udp_tnl->used = 1; - /* TBD send command to hardware to add port */ - if (h->ae_algo->ops->add_tunnel_udp) - h->ae_algo->ops->add_tunnel_udp(h, port); -} - -static void hns3_del_tunnel_port(struct net_device *netdev, u16 port, - enum hns3_udp_tnl_type type) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type]; - struct hnae3_handle *h = priv->ae_handle; - - if (!udp_tnl->used || udp_tnl->dst_port != port) { - netdev_warn(netdev, - "Invalid UDP tunnel port %d\n", port); - return; - } - - udp_tnl->used--; - if (udp_tnl->used) - return; - - udp_tnl->dst_port = 0; - /* TBD send command to hardware to del port */ - if (h->ae_algo->ops->del_tunnel_udp) - h->ae_algo->ops->del_tunnel_udp(h, port); -} - -/* hns3_nic_udp_tunnel_add - Get notifiacetion about UDP tunnel ports - * @netdev: This physical ports's netdev - * @ti: Tunnel information - */ -static void hns3_nic_udp_tunnel_add(struct net_device *netdev, - struct udp_tunnel_info *ti) -{ - u16 port_n = ntohs(ti->port); - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN); - break; - case UDP_TUNNEL_TYPE_GENEVE: - hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE); - break; - default: - netdev_err(netdev, "unsupported tunnel type %d\n", ti->type); - break; - } -} - -static void hns3_nic_udp_tunnel_del(struct net_device *netdev, - struct udp_tunnel_info *ti) -{ - u16 port_n = ntohs(ti->port); - - switch (ti->type) { - case UDP_TUNNEL_TYPE_VXLAN: - hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN); - break; - case UDP_TUNNEL_TYPE_GENEVE: - hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE); - break; - default: - break; - } -} - -static int hns3_setup_tc(struct net_device *netdev, void *type_data) -{ - struct tc_mqprio_qopt_offload *mqprio_qopt = type_data; - struct hnae3_handle 
*h = hns3_get_handle(netdev); - struct hnae3_knic_private_info *kinfo = &h->kinfo; - u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map; - u8 tc = mqprio_qopt->qopt.num_tc; - u16 mode = mqprio_qopt->mode; - u8 hw = mqprio_qopt->qopt.hw; - bool if_running; - unsigned int i; - int ret; - - if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && - mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0))) - return -EOPNOTSUPP; - - if (tc > HNAE3_MAX_TC) - return -EINVAL; - - if (!netdev) - return -EINVAL; - - if_running = netif_running(netdev); - if (if_running) { - hns3_nic_net_stop(netdev); - msleep(100); - } - - ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? - kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP; - if (ret) - goto out; - - if (tc <= 1) { - netdev_reset_tc(netdev); - } else { - ret = netdev_set_num_tc(netdev, tc); - if (ret) - goto out; - - for (i = 0; i < HNAE3_MAX_TC; i++) { - if (!kinfo->tc_info[i].enable) - continue; - - netdev_set_tc_queue(netdev, - kinfo->tc_info[i].tc, - kinfo->tc_info[i].tqp_count, - kinfo->tc_info[i].tqp_offset); - } - } - - ret = hns3_nic_set_real_num_queue(netdev); - -out: - if (if_running) - hns3_nic_net_open(netdev); - - return ret; -} - -static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, - void *type_data) -{ - if (type != TC_SETUP_QDISC_MQPRIO) - return -EOPNOTSUPP; - - return hns3_setup_tc(dev, type_data); -} - -static int hns3_vlan_rx_add_vid(struct net_device *netdev, - __be16 proto, u16 vid) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - int ret = -EIO; - - if (h->ae_algo->ops->set_vlan_filter) - ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); - - return ret; -} - -static int hns3_vlan_rx_kill_vid(struct net_device *netdev, - __be16 proto, u16 vid) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - int ret = -EIO; - - if (h->ae_algo->ops->set_vlan_filter) - ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); - - return ret; -} - -static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, - u8 qos, __be16 vlan_proto) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - int ret = -EIO; - - if (h->ae_algo->ops->set_vf_vlan_filter) - ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, - qos, vlan_proto); - - return ret; -} - -static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - bool if_running = netif_running(netdev); - int ret; - - if (!h->ae_algo->ops->set_mtu) - return -EOPNOTSUPP; - - /* if this was called with netdev up then bring netdevice down */ - if (if_running) { - (void)hns3_nic_net_stop(netdev); - msleep(100); - } - - ret = h->ae_algo->ops->set_mtu(h, new_mtu); - if (ret) { - netdev_err(netdev, "failed to change MTU in hardware %d\n", - ret); - return ret; - } - - /* if the netdev was running earlier, bring it up again */ - if (if_running && hns3_nic_net_open(netdev)) - ret = -EINVAL; - - return ret; -} - -static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) -{ - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hns3_enet_ring *tx_ring = NULL; - int timeout_queue = 0; - int hw_head, hw_tail; - int i; - - /* Find the stopped queue the same way the stack does */ - for (i = 0; i < ndev->real_num_tx_queues; i++) { - struct netdev_queue *q; - unsigned long trans_start; - - q = netdev_get_tx_queue(ndev, i); - trans_start = q->trans_start; - if (netif_xmit_stopped(q) && - time_after(jiffies, - (trans_start + ndev->watchdog_timeo))) { - timeout_queue = i; - break; - } - } - 
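The stall scan above compares q->trans_start against the watchdog timeout with time_after(), which stays correct even when the jiffies counter wraps because it compares via signed subtraction of unsigned values. A tiny user-space demonstration of that trick is below; the macro reimplements time_after()'s logic for illustration and is not the kernel header.

#include <stdio.h>

/* Same idea as the kernel's time_after(a, b): true when a is after b,
 * even across an unsigned wrap-around of the counter.
 */
#define ticks_after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long before_wrap = ~0UL - 16;	/* just before the counter wraps */
	unsigned long after_wrap  = 16;		/* 33 ticks later, after the wrap */

	/* a plain '>' compare is fooled by the wrap ... */
	printf("naive compare: %d\n", after_wrap > before_wrap);
	/* ... while the signed-difference form still says "after" */
	printf("ticks_after(): %d\n", ticks_after(after_wrap, before_wrap));
	return 0;
}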
- if (i == ndev->num_tx_queues) { - netdev_info(ndev, - "no netdev TX timeout queue found, timeout count: %llu\n", - priv->tx_timeout_count); - return false; - } - - tx_ring = priv->ring_data[timeout_queue].ring; - - hw_head = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_HEAD_REG); - hw_tail = readl_relaxed(tx_ring->tqp->io_base + - HNS3_RING_TX_RING_TAIL_REG); - netdev_info(ndev, - "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n", - priv->tx_timeout_count, - timeout_queue, - tx_ring->next_to_use, - tx_ring->next_to_clean, - hw_head, - hw_tail, - readl(tx_ring->tqp_vector->mask_addr)); - - return true; -} - -static void hns3_nic_net_timeout(struct net_device *ndev) -{ - struct hns3_nic_priv *priv = netdev_priv(ndev); - unsigned long last_reset_time = priv->last_reset_time; - struct hnae3_handle *h = priv->ae_handle; - - if (!hns3_get_tx_timeo_queue_info(ndev)) - return; - - priv->tx_timeout_count++; - - /* This timeout is far away enough from last timeout, - * if timeout again,set the reset type to PF reset - */ - if (time_after(jiffies, (last_reset_time + 20 * HZ))) - priv->reset_level = HNAE3_FUNC_RESET; - - /* Don't do any new action before the next timeout */ - else if (time_before(jiffies, (last_reset_time + ndev->watchdog_timeo))) - return; - - priv->last_reset_time = jiffies; - - if (h->ae_algo->ops->reset_event) - h->ae_algo->ops->reset_event(h, priv->reset_level); - - priv->reset_level++; - if (priv->reset_level > HNAE3_GLOBAL_RESET) - priv->reset_level = HNAE3_GLOBAL_RESET; -} - -static const struct net_device_ops hns3_nic_netdev_ops = { - .ndo_open = hns3_nic_net_open, - .ndo_stop = hns3_nic_net_stop, - .ndo_start_xmit = hns3_nic_net_xmit, - .ndo_tx_timeout = hns3_nic_net_timeout, - .ndo_set_mac_address = hns3_nic_net_set_mac_address, - .ndo_change_mtu = hns3_nic_change_mtu, - .ndo_set_features = hns3_nic_set_features, - .ndo_get_stats64 = hns3_nic_get_stats64, - .ndo_setup_tc = hns3_nic_setup_tc, - .ndo_set_rx_mode = hns3_nic_set_rx_mode, - .ndo_udp_tunnel_add = hns3_nic_udp_tunnel_add, - .ndo_udp_tunnel_del = hns3_nic_udp_tunnel_del, - .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, - .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, -}; - -/* hns3_probe - Device initialization routine - * @pdev: PCI device information struct - * @ent: entry in hns3_pci_tbl - * - * hns3_probe initializes a PF identified by a pci_dev structure. - * The OS initialization, configuring of the PF private structure, - * and a hardware reset occur. 
- * - * Returns 0 on success, negative on failure - */ -static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) -{ - struct hnae3_ae_dev *ae_dev; - int ret; - - ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), - GFP_KERNEL); - if (!ae_dev) { - ret = -ENOMEM; - return ret; - } - - ae_dev->pdev = pdev; - ae_dev->flag = ent->driver_data; - ae_dev->dev_type = HNAE3_DEV_KNIC; - pci_set_drvdata(pdev, ae_dev); - - return hnae3_register_ae_dev(ae_dev); -} - -/* hns3_remove - Device removal routine - * @pdev: PCI device information struct - */ -static void hns3_remove(struct pci_dev *pdev) -{ - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); - - hnae3_unregister_ae_dev(ae_dev); - - devm_kfree(&pdev->dev, ae_dev); - - pci_set_drvdata(pdev, NULL); -} - -static struct pci_driver hns3_driver = { - .name = hns3_driver_name, - .id_table = hns3_pci_tbl, - .probe = hns3_probe, - .remove = hns3_remove, -}; - -/* set default feature to hns3 */ -static void hns3_set_default_feature(struct net_device *netdev) -{ - netdev->priv_flags |= IFF_UNICAST_FLT; - - netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | - NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; - - netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; - - netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; - - netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | - NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; - - netdev->vlan_features |= - NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | - NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | - NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; - - netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | - NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_UDP_TUNNEL_CSUM; -} - -static int hns3_alloc_buffer(struct hns3_enet_ring *ring, - struct hns3_desc_cb *cb) -{ - unsigned int order = hnae_page_order(ring); - struct page *p; - - p = dev_alloc_pages(order); - if (!p) - return -ENOMEM; - - cb->priv = p; - cb->page_offset = 0; - cb->reuse_flag = 0; - cb->buf = page_address(p); - cb->length = hnae_page_size(ring); - cb->type = DESC_TYPE_PAGE; - - return 0; -} - -static void hns3_free_buffer(struct hns3_enet_ring *ring, - struct hns3_desc_cb *cb) -{ - if (cb->type == DESC_TYPE_SKB) - dev_kfree_skb_any((struct sk_buff *)cb->priv); - else if (!HNAE3_IS_TX_RING(ring)) - put_page((struct page *)cb->priv); - memset(cb, 0, sizeof(*cb)); -} - -static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) -{ - cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, - cb->length, ring_to_dma_dir(ring)); - - if (dma_mapping_error(ring_to_dev(ring), cb->dma)) - return -EIO; - - return 0; -} - -static void hns3_unmap_buffer(struct hns3_enet_ring *ring, - struct hns3_desc_cb *cb) -{ - if (cb->type == DESC_TYPE_SKB) - dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, - ring_to_dma_dir(ring)); - else - dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, - ring_to_dma_dir(ring)); -} - -static void 
hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
-{
- hns3_unmap_buffer(ring, &ring->desc_cb[i]);
- ring->desc[i].addr = 0;
-}
-
-static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
-{
- struct hns3_desc_cb *cb = &ring->desc_cb[i];
-
- if (!ring->desc_cb[i].dma)
- return;
-
- hns3_buffer_detach(ring, i);
- hns3_free_buffer(ring, cb);
-}
-
-static void hns3_free_buffers(struct hns3_enet_ring *ring)
-{
- int i;
-
- for (i = 0; i < ring->desc_num; i++)
- hns3_free_buffer_detach(ring, i);
-}
-
-/* free desc along with its attached buffer */
-static void hns3_free_desc(struct hns3_enet_ring *ring)
-{
- hns3_free_buffers(ring);
-
- dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
- ring->desc_num * sizeof(ring->desc[0]),
- DMA_BIDIRECTIONAL);
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
-}
-
-static int hns3_alloc_desc(struct hns3_enet_ring *ring)
-{
- int size = ring->desc_num * sizeof(ring->desc[0]);
-
- ring->desc = kzalloc(size, GFP_KERNEL);
- if (!ring->desc)
- return -ENOMEM;
-
- ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
- size, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
- struct hns3_desc_cb *cb)
-{
- int ret;
-
- ret = hns3_alloc_buffer(ring, cb);
- if (ret)
- goto out;
-
- ret = hns3_map_buffer(ring, cb);
- if (ret)
- goto out_with_buf;
-
- return 0;
-
-out_with_buf:
- hns3_free_buffer(ring, cb);
-out:
- return ret;
-}
-
-static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
-{
- int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
-
- if (ret)
- return ret;
-
- ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
-
- return 0;
-}
-
-/* Allocate memory for the raw packet buffers, and map them for DMA */
-static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
-{
- int i, j, ret;
-
- for (i = 0; i < ring->desc_num; i++) {
- ret = hns3_alloc_buffer_attach(ring, i);
- if (ret)
- goto out_buffer_fail;
- }
-
- return 0;
-
-out_buffer_fail:
- for (j = i - 1; j >= 0; j--)
- hns3_free_buffer_detach(ring, j);
- return ret;
-}
-
-/* detach an in-use buffer and replace it with a reserved one */
-static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
- struct hns3_desc_cb *res_cb)
-{
- hns3_unmap_buffer(ring, &ring->desc_cb[i]);
- ring->desc_cb[i] = *res_cb;
- ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
-}
-
-static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
-{
- ring->desc_cb[i].reuse_flag = 0;
- ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
- + ring->desc_cb[i].page_offset);
-}
-
-static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
- int *pkts)
-{
- struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
-
- (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
- (*bytes) += desc_cb->length;
- /* desc_cb will be cleaned after hns3_free_buffer_detach */
- hns3_free_buffer_detach(ring, ring->next_to_clean);
-
- ring_ptr_move_fw(ring, next_to_clean);
-}
-
-static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
-{
- int u = ring->next_to_use;
- int c = ring->next_to_clean;
-
- if (unlikely(h > ring->desc_num))
- return 0;
-
- return u > c ?
(h > c && h <= u) : (h > c || h <= u); -} - -bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) -{ - struct net_device *netdev = ring->tqp->handle->kinfo.netdev; - struct netdev_queue *dev_queue; - int bytes, pkts; - int head; - - head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG); - rmb(); /* Make sure head is ready before touch any data */ - - if (is_ring_empty(ring) || head == ring->next_to_clean) - return true; /* no data to poll */ - - if (!is_valid_clean_head(ring, head)) { - netdev_err(netdev, "wrong head (%d, %d-%d)\n", head, - ring->next_to_use, ring->next_to_clean); - - u64_stats_update_begin(&ring->syncp); - ring->stats.io_err_cnt++; - u64_stats_update_end(&ring->syncp); - return true; - } - - bytes = 0; - pkts = 0; - while (head != ring->next_to_clean && budget) { - hns3_nic_reclaim_one_desc(ring, &bytes, &pkts); - /* Issue prefetch for next Tx descriptor */ - prefetch(&ring->desc_cb[ring->next_to_clean]); - budget--; - } - - ring->tqp_vector->tx_group.total_bytes += bytes; - ring->tqp_vector->tx_group.total_packets += pkts; - - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_bytes += bytes; - ring->stats.tx_pkts += pkts; - u64_stats_update_end(&ring->syncp); - - dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); - netdev_tx_completed_queue(dev_queue, pkts, bytes); - - if (unlikely(pkts && netif_carrier_ok(netdev) && - (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) { - /* Make sure that anybody stopping the queue after this - * sees the new next_to_clean. - */ - smp_mb(); - if (netif_tx_queue_stopped(dev_queue)) { - netif_tx_wake_queue(dev_queue); - ring->stats.restart_queue++; - } - } - - return !!budget; -} - -static int hns3_desc_unused(struct hns3_enet_ring *ring) -{ - int ntc = ring->next_to_clean; - int ntu = ring->next_to_use; - - return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; -} - -static void -hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count) -{ - struct hns3_desc_cb *desc_cb; - struct hns3_desc_cb res_cbs; - int i, ret; - - for (i = 0; i < cleand_count; i++) { - desc_cb = &ring->desc_cb[ring->next_to_use]; - if (desc_cb->reuse_flag) { - u64_stats_update_begin(&ring->syncp); - ring->stats.reuse_pg_cnt++; - u64_stats_update_end(&ring->syncp); - - hns3_reuse_buffer(ring, ring->next_to_use); - } else { - ret = hns3_reserve_buffer_map(ring, &res_cbs); - if (ret) { - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); - - netdev_err(ring->tqp->handle->kinfo.netdev, - "hnae reserve buffer map failed.\n"); - break; - } - hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); - } - - ring_ptr_move_fw(ring, next_to_use); - } - - wmb(); /* Make all data has been write before submit */ - writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); -} - -/* hns3_nic_get_headlen - determine size of header for LRO/GRO - * @data: pointer to the start of the headers - * @max: total length of section to find headers in - * - * This function is meant to determine the length of headers that will - * be recognized by hardware for LRO, GRO, and RSC offloads. The main - * motivation of doing this is to only perform one pull for IPv4 TCP - * packets so that we can do basic things like calculating the gso_size - * based on the average data per packet. 
- */ -static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag, - unsigned int max_size) -{ - unsigned char *network; - u8 hlen; - - /* This should never happen, but better safe than sorry */ - if (max_size < ETH_HLEN) - return max_size; - - /* Initialize network frame pointer */ - network = data; - - /* Set first protocol and move network header forward */ - network += ETH_HLEN; - - /* Handle any vlan tag if present */ - if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S) - == HNS3_RX_FLAG_VLAN_PRESENT) { - if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN)) - return max_size; - - network += VLAN_HLEN; - } - - /* Handle L3 protocols */ - if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S) - == HNS3_RX_FLAG_L3ID_IPV4) { - if ((typeof(max_size))(network - data) > - (max_size - sizeof(struct iphdr))) - return max_size; - - /* Access ihl as a u8 to avoid unaligned access on ia64 */ - hlen = (network[0] & 0x0F) << 2; - - /* Verify hlen meets minimum size requirements */ - if (hlen < sizeof(struct iphdr)) - return network - data; - - /* Record next protocol if header is present */ - } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S) - == HNS3_RX_FLAG_L3ID_IPV6) { - if ((typeof(max_size))(network - data) > - (max_size - sizeof(struct ipv6hdr))) - return max_size; - - /* Record next protocol */ - hlen = sizeof(struct ipv6hdr); - } else { - return network - data; - } - - /* Relocate pointer to start of L4 header */ - network += hlen; - - /* Finally sort out TCP/UDP */ - if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S) - == HNS3_RX_FLAG_L4ID_TCP) { - if ((typeof(max_size))(network - data) > - (max_size - sizeof(struct tcphdr))) - return max_size; - - /* Access doff as a u8 to avoid unaligned access on ia64 */ - hlen = (network[12] & 0xF0) >> 2; - - /* Verify hlen meets minimum size requirements */ - if (hlen < sizeof(struct tcphdr)) - return network - data; - - network += hlen; - } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S) - == HNS3_RX_FLAG_L4ID_UDP) { - if ((typeof(max_size))(network - data) > - (max_size - sizeof(struct udphdr))) - return max_size; - - network += sizeof(struct udphdr); - } - - /* If everything has gone correctly network should be the - * data section of the packet and will be the end of the header. - * If not then it probably represents the end of the last recognized - * header. 
- */ - if ((typeof(max_size))(network - data) < max_size) - return network - data; - else - return max_size; -} - -static void hns3_nic_reuse_page(struct sk_buff *skb, int i, - struct hns3_enet_ring *ring, int pull_len, - struct hns3_desc_cb *desc_cb) -{ - struct hns3_desc *desc; - int truesize, size; - int last_offset; - bool twobufs; - - twobufs = ((PAGE_SIZE < 8192) && - hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048); - - desc = &ring->desc[ring->next_to_clean]; - size = le16_to_cpu(desc->rx.size); - - if (twobufs) { - truesize = hnae_buf_size(ring); - } else { - truesize = ALIGN(size, L1_CACHE_BYTES); - last_offset = hnae_page_size(ring) - hnae_buf_size(ring); - } - - skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, - size - pull_len, truesize - pull_len); - - /* Avoid re-using remote pages,flag default unreuse */ - if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) - return; - - if (twobufs) { - /* If we are only owner of page we can reuse it */ - if (likely(page_count(desc_cb->priv) == 1)) { - /* Flip page offset to other buffer */ - desc_cb->page_offset ^= truesize; - - desc_cb->reuse_flag = 1; - /* bump ref count on page before it is given*/ - get_page(desc_cb->priv); - } - return; - } - - /* Move offset up to the next cache line */ - desc_cb->page_offset += truesize; - - if (desc_cb->page_offset <= last_offset) { - desc_cb->reuse_flag = 1; - /* Bump ref count on page before it is given*/ - get_page(desc_cb->priv); - } -} - -static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, - struct hns3_desc *desc) -{ - struct net_device *netdev = ring->tqp->handle->kinfo.netdev; - int l3_type, l4_type; - u32 bd_base_info; - int ol4_type; - u32 l234info; - - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - l234info = le32_to_cpu(desc->rx.l234_info); - - skb->ip_summed = CHECKSUM_NONE; - - skb_checksum_none_assert(skb); - - if (!(netdev->features & NETIF_F_RXCSUM)) - return; - - /* check if hardware has done checksum */ - if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B)) - return; - - if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) || - hnae_get_bit(l234info, HNS3_RXD_L4E_B) || - hnae_get_bit(l234info, HNS3_RXD_OL3E_B) || - hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) { - netdev_err(netdev, "L3/L4 error pkt\n"); - u64_stats_update_begin(&ring->syncp); - ring->stats.l3l4_csum_err++; - u64_stats_update_end(&ring->syncp); - - return; - } - - l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M, - HNS3_RXD_L3ID_S); - l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M, - HNS3_RXD_L4ID_S); - - ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S); - switch (ol4_type) { - case HNS3_OL4_TYPE_MAC_IN_UDP: - case HNS3_OL4_TYPE_NVGRE: - skb->csum_level = 1; - case HNS3_OL4_TYPE_NO_TUN: - /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ - if (l3_type == HNS3_L3_TYPE_IPV4 || - (l3_type == HNS3_L3_TYPE_IPV6 && - (l4_type == HNS3_L4_TYPE_UDP || - l4_type == HNS3_L4_TYPE_TCP || - l4_type == HNS3_L4_TYPE_SCTP))) - skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - } -} - -static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) -{ - napi_gro_receive(&ring->tqp_vector->napi, skb); -} - -static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, - struct sk_buff **out_skb, int *out_bnum) -{ - struct net_device *netdev = ring->tqp->handle->kinfo.netdev; - struct hns3_desc_cb *desc_cb; - struct hns3_desc *desc; - struct sk_buff *skb; - unsigned char *va; - u32 bd_base_info; - int pull_len; - u32 l234info; - int length; 
- int bnum; - - desc = &ring->desc[ring->next_to_clean]; - desc_cb = &ring->desc_cb[ring->next_to_clean]; - - prefetch(desc); - - length = le16_to_cpu(desc->rx.pkt_len); - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - l234info = le32_to_cpu(desc->rx.l234_info); - - /* Check valid BD */ - if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B)) - return -EFAULT; - - va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; - - /* Prefetch first cache line of first page - * Idea is to cache few bytes of the header of the packet. Our L1 Cache - * line size is 64B so need to prefetch twice to make it 128B. But in - * actual we can have greater size of caches with 128B Level 1 cache - * lines. In such a case, single fetch would suffice to cache in the - * relevant part of the header. - */ - prefetch(va); -#if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); -#endif - - skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi, - HNS3_RX_HEAD_SIZE); - if (unlikely(!skb)) { - netdev_err(netdev, "alloc rx skb fail\n"); - - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); - - return -ENOMEM; - } - - prefetchw(skb->data); - - bnum = 1; - if (length <= HNS3_RX_HEAD_SIZE) { - memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); - - /* We can reuse buffer as-is, just make sure it is local */ - if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) - desc_cb->reuse_flag = 1; - else /* This page cannot be reused so discard it */ - put_page(desc_cb->priv); - - ring_ptr_move_fw(ring, next_to_clean); - } else { - u64_stats_update_begin(&ring->syncp); - ring->stats.seg_pkt_cnt++; - u64_stats_update_end(&ring->syncp); - - pull_len = hns3_nic_get_headlen(va, l234info, - HNS3_RX_HEAD_SIZE); - memcpy(__skb_put(skb, pull_len), va, - ALIGN(pull_len, sizeof(long))); - - hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb); - ring_ptr_move_fw(ring, next_to_clean); - - while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) { - desc = &ring->desc[ring->next_to_clean]; - desc_cb = &ring->desc_cb[ring->next_to_clean]; - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb); - ring_ptr_move_fw(ring, next_to_clean); - bnum++; - } - } - - *out_bnum = bnum; - - if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { - netdev_err(netdev, "no valid bd,%016llx,%016llx\n", - ((u64 *)desc)[0], ((u64 *)desc)[1]); - u64_stats_update_begin(&ring->syncp); - ring->stats.non_vld_descs++; - u64_stats_update_end(&ring->syncp); - - dev_kfree_skb_any(skb); - return -EINVAL; - } - - if (unlikely((!desc->rx.pkt_len) || - hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { - netdev_err(netdev, "truncated pkt\n"); - u64_stats_update_begin(&ring->syncp); - ring->stats.err_pkt_len++; - u64_stats_update_end(&ring->syncp); - - dev_kfree_skb_any(skb); - return -EFAULT; - } - - if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) { - netdev_err(netdev, "L2 error pkt\n"); - u64_stats_update_begin(&ring->syncp); - ring->stats.l2_err++; - u64_stats_update_end(&ring->syncp); - - dev_kfree_skb_any(skb); - return -EFAULT; - } - - u64_stats_update_begin(&ring->syncp); - ring->stats.rx_pkts++; - ring->stats.rx_bytes += skb->len; - u64_stats_update_end(&ring->syncp); - - ring->tqp_vector->rx_group.total_bytes += skb->len; - - hns3_rx_checksum(ring, skb, desc); - return 0; -} - -int hns3_clean_rx_ring( - struct hns3_enet_ring *ring, int budget, - void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) -{ -#define RCB_NOF_ALLOC_RX_BUFF_ONCE 
16
- struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
- int recv_pkts, recv_bds, clean_count, err;
- int unused_count = hns3_desc_unused(ring);
- struct sk_buff *skb = NULL;
- int num, bnum = 0;
-
- num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
- rmb(); /* Make sure num is read before any other data is touched */
-
- recv_pkts = 0, recv_bds = 0, clean_count = 0;
- num -= unused_count;
-
- while (recv_pkts < budget && recv_bds < num) {
- /* Reuse or realloc buffers */
- if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
- hns3_nic_alloc_rx_buffers(ring,
- clean_count + unused_count);
- clean_count = 0;
- unused_count = hns3_desc_unused(ring);
- }
-
- /* Poll one pkt */
- err = hns3_handle_rx_bd(ring, &skb, &bnum);
- if (unlikely(!skb)) /* This fault cannot be repaired */
- goto out;
-
- recv_bds += bnum;
- clean_count += bnum;
- if (unlikely(err)) { /* Skip the erroneous packet */
- recv_pkts++;
- continue;
- }
-
- /* Pass the packet up to the IP stack */
- skb->protocol = eth_type_trans(skb, netdev);
- rx_fn(ring, skb);
-
- recv_pkts++;
- }
-
-out:
- /* Make sure all data has been written before submit */
- if (clean_count + unused_count > 0)
- hns3_nic_alloc_rx_buffers(ring,
- clean_count + unused_count);
-
- return recv_pkts;
-}
-
-static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
-{
-#define HNS3_RX_ULTRA_PACKET_RATE 40000
- enum hns3_flow_level_range new_flow_level;
- struct hns3_enet_tqp_vector *tqp_vector;
- int packets_per_secs;
- int bytes_per_usecs;
- u16 new_int_gl;
- int usecs;
-
- if (!ring_group->int_gl)
- return false;
-
- if (ring_group->total_packets == 0) {
- ring_group->int_gl = HNS3_INT_GL_50K;
- ring_group->flow_level = HNS3_FLOW_LOW;
- return true;
- }
-
- /* Simple throttle rate management
- * 0-10MB/s lower (50000 ints/s)
- * 10-20MB/s middle (20000 ints/s)
- * 20-1249MB/s high (18000 ints/s)
- * > 40000pps ultra (8000 ints/s)
- */
- new_flow_level = ring_group->flow_level;
- new_int_gl = ring_group->int_gl;
- tqp_vector = ring_group->ring->tqp_vector;
- usecs = (ring_group->int_gl << 1);
- bytes_per_usecs = ring_group->total_bytes / usecs;
- /* 1000000 microseconds per second */
- packets_per_secs = ring_group->total_packets * 1000000 / usecs;
-
- switch (new_flow_level) {
- case HNS3_FLOW_LOW:
- if (bytes_per_usecs > 10)
- new_flow_level = HNS3_FLOW_MID;
- break;
- case HNS3_FLOW_MID:
- if (bytes_per_usecs > 20)
- new_flow_level = HNS3_FLOW_HIGH;
- else if (bytes_per_usecs <= 10)
- new_flow_level = HNS3_FLOW_LOW;
- break;
- case HNS3_FLOW_HIGH:
- case HNS3_FLOW_ULTRA:
- default:
- if (bytes_per_usecs <= 20)
- new_flow_level = HNS3_FLOW_MID;
- break;
- }
-
- if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) &&
- (&tqp_vector->rx_group == ring_group))
- new_flow_level = HNS3_FLOW_ULTRA;
-
- switch (new_flow_level) {
- case HNS3_FLOW_LOW:
- new_int_gl = HNS3_INT_GL_50K;
- break;
- case HNS3_FLOW_MID:
- new_int_gl = HNS3_INT_GL_20K;
- break;
- case HNS3_FLOW_HIGH:
- new_int_gl = HNS3_INT_GL_18K;
- break;
- case HNS3_FLOW_ULTRA:
- new_int_gl = HNS3_INT_GL_8K;
- break;
- default:
- break;
- }
-
- ring_group->total_bytes = 0;
- ring_group->total_packets = 0;
- ring_group->flow_level = new_flow_level;
- if (new_int_gl != ring_group->int_gl) {
- ring_group->int_gl = new_int_gl;
- return true;
- }
- return false;
-}
-
-static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
-{
- u16 rx_int_gl, tx_int_gl;
- bool rx, tx;
-
- rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
- tx =
hns3_get_new_int_gl(&tqp_vector->tx_group); - rx_int_gl = tqp_vector->rx_group.int_gl; - tx_int_gl = tqp_vector->tx_group.int_gl; - if (rx && tx) { - if (rx_int_gl > tx_int_gl) { - tqp_vector->tx_group.int_gl = rx_int_gl; - tqp_vector->tx_group.flow_level = - tqp_vector->rx_group.flow_level; - hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl); - } else { - tqp_vector->rx_group.int_gl = tx_int_gl; - tqp_vector->rx_group.flow_level = - tqp_vector->tx_group.flow_level; - hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl); - } - } -} - -static int hns3_nic_common_poll(struct napi_struct *napi, int budget) -{ - struct hns3_enet_ring *ring; - int rx_pkt_total = 0; - - struct hns3_enet_tqp_vector *tqp_vector = - container_of(napi, struct hns3_enet_tqp_vector, napi); - bool clean_complete = true; - int rx_budget; - - /* Since the actual Tx work is minimal, we can give the Tx a larger - * budget and be more aggressive about cleaning up the Tx descriptors. - */ - hns3_for_each_ring(ring, tqp_vector->tx_group) { - if (!hns3_clean_tx_ring(ring, budget)) - clean_complete = false; - } - - /* make sure rx ring budget not smaller than 1 */ - rx_budget = max(budget / tqp_vector->num_tqps, 1); - - hns3_for_each_ring(ring, tqp_vector->rx_group) { - int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, - hns3_rx_skb); - - if (rx_cleaned >= rx_budget) - clean_complete = false; - - rx_pkt_total += rx_cleaned; - } - - tqp_vector->rx_group.total_packets += rx_pkt_total; - - if (!clean_complete) - return budget; - - napi_complete(napi); - hns3_update_new_int_gl(tqp_vector); - hns3_mask_vector_irq(tqp_vector, 1); - - return rx_pkt_total; -} - -static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, - struct hnae3_ring_chain_node *head) -{ - struct pci_dev *pdev = tqp_vector->handle->pdev; - struct hnae3_ring_chain_node *cur_chain = head; - struct hnae3_ring_chain_node *chain; - struct hns3_enet_ring *tx_ring; - struct hns3_enet_ring *rx_ring; - - tx_ring = tqp_vector->tx_group.ring; - if (tx_ring) { - cur_chain->tqp_index = tx_ring->tqp->tqp_index; - hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_TX); - - cur_chain->next = NULL; - - while (tx_ring->next) { - tx_ring = tx_ring->next; - - chain = devm_kzalloc(&pdev->dev, sizeof(*chain), - GFP_KERNEL); - if (!chain) - return -ENOMEM; - - cur_chain->next = chain; - chain->tqp_index = tx_ring->tqp->tqp_index; - hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_TX); - - cur_chain = chain; - } - } - - rx_ring = tqp_vector->rx_group.ring; - if (!tx_ring && rx_ring) { - cur_chain->next = NULL; - cur_chain->tqp_index = rx_ring->tqp->tqp_index; - hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_RX); - - rx_ring = rx_ring->next; - } - - while (rx_ring) { - chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); - if (!chain) - return -ENOMEM; - - cur_chain->next = chain; - chain->tqp_index = rx_ring->tqp->tqp_index; - hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, - HNAE3_RING_TYPE_RX); - cur_chain = chain; - - rx_ring = rx_ring->next; - } - - return 0; -} - -static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, - struct hnae3_ring_chain_node *head) -{ - struct pci_dev *pdev = tqp_vector->handle->pdev; - struct hnae3_ring_chain_node *chain_tmp, *chain; - - chain = head->next; - - while (chain) { - chain_tmp = chain->next; - devm_kfree(&pdev->dev, chain); - chain = chain_tmp; - } -} - -static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, - struct 
hns3_enet_ring *ring) -{ - ring->next = group->ring; - group->ring = ring; - - group->count++; -} - -static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) -{ - struct hnae3_ring_chain_node vector_ring_chain; - struct hnae3_handle *h = priv->ae_handle; - struct hns3_enet_tqp_vector *tqp_vector; - struct hnae3_vector_info *vector; - struct pci_dev *pdev = h->pdev; - u16 tqp_num = h->kinfo.num_tqps; - u16 vector_num; - int ret = 0; - u16 i; - - /* RSS size, cpu online and vector_num should be the same */ - /* Should consider 2p/4p later */ - vector_num = min_t(u16, num_online_cpus(), tqp_num); - vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), - GFP_KERNEL); - if (!vector) - return -ENOMEM; - - vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); - - priv->vector_num = vector_num; - priv->tqp_vector = (struct hns3_enet_tqp_vector *) - devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), - GFP_KERNEL); - if (!priv->tqp_vector) - return -ENOMEM; - - for (i = 0; i < tqp_num; i++) { - u16 vector_i = i % vector_num; - - tqp_vector = &priv->tqp_vector[vector_i]; - - hns3_add_ring_to_group(&tqp_vector->tx_group, - priv->ring_data[i].ring); - - hns3_add_ring_to_group(&tqp_vector->rx_group, - priv->ring_data[i + tqp_num].ring); - - tqp_vector->idx = vector_i; - tqp_vector->mask_addr = vector[vector_i].io_addr; - tqp_vector->vector_irq = vector[vector_i].vector; - tqp_vector->num_tqps++; - - priv->ring_data[i].ring->tqp_vector = tqp_vector; - priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector; - } - - for (i = 0; i < vector_num; i++) { - tqp_vector = &priv->tqp_vector[i]; - - tqp_vector->rx_group.total_bytes = 0; - tqp_vector->rx_group.total_packets = 0; - tqp_vector->tx_group.total_bytes = 0; - tqp_vector->tx_group.total_packets = 0; - hns3_vector_gl_rl_init(tqp_vector); - tqp_vector->handle = h; - - ret = hns3_get_vector_ring_chain(tqp_vector, - &vector_ring_chain); - if (ret) - goto out; - - ret = h->ae_algo->ops->map_ring_to_vector(h, - tqp_vector->vector_irq, &vector_ring_chain); - if (ret) - goto out; - - hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); - - netif_napi_add(priv->netdev, &tqp_vector->napi, - hns3_nic_common_poll, NAPI_POLL_WEIGHT); - } - -out: - devm_kfree(&pdev->dev, vector); - return ret; -} - -static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) -{ - struct hnae3_ring_chain_node vector_ring_chain; - struct hnae3_handle *h = priv->ae_handle; - struct hns3_enet_tqp_vector *tqp_vector; - struct pci_dev *pdev = h->pdev; - int i, ret; - - for (i = 0; i < priv->vector_num; i++) { - tqp_vector = &priv->tqp_vector[i]; - - ret = hns3_get_vector_ring_chain(tqp_vector, - &vector_ring_chain); - if (ret) - return ret; - - ret = h->ae_algo->ops->unmap_ring_from_vector(h, - tqp_vector->vector_irq, &vector_ring_chain); - if (ret) - return ret; - - hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); - - if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) { - (void)irq_set_affinity_hint( - priv->tqp_vector[i].vector_irq, - NULL); - free_irq(priv->tqp_vector[i].vector_irq, - &priv->tqp_vector[i]); - } - - priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED; - - netif_napi_del(&priv->tqp_vector[i].napi); - } - - devm_kfree(&pdev->dev, priv->tqp_vector); - - return 0; -} - -static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, - int ring_type) -{ - struct hns3_nic_ring_data *ring_data = priv->ring_data; - int queue_num = priv->ae_handle->kinfo.num_tqps; - struct 
pci_dev *pdev = priv->ae_handle->pdev; - struct hns3_enet_ring *ring; - - ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL); - if (!ring) - return -ENOMEM; - - if (ring_type == HNAE3_RING_TYPE_TX) { - ring_data[q->tqp_index].ring = ring; - ring_data[q->tqp_index].queue_index = q->tqp_index; - ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; - } else { - ring_data[q->tqp_index + queue_num].ring = ring; - ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index; - ring->io_base = q->io_base; - } - - hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); - - ring->tqp = q; - ring->desc = NULL; - ring->desc_cb = NULL; - ring->dev = priv->dev; - ring->desc_dma_addr = 0; - ring->buf_size = q->buf_size; - ring->desc_num = q->desc_num; - ring->next_to_use = 0; - ring->next_to_clean = 0; - - return 0; -} - -static int hns3_queue_to_ring(struct hnae3_queue *tqp, - struct hns3_nic_priv *priv) -{ - int ret; - - ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); - if (ret) - return ret; - - ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); - if (ret) - return ret; - - return 0; -} - -static int hns3_get_ring_config(struct hns3_nic_priv *priv) -{ - struct hnae3_handle *h = priv->ae_handle; - struct pci_dev *pdev = h->pdev; - int i, ret; - - priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps * - sizeof(*priv->ring_data) * 2, - GFP_KERNEL); - if (!priv->ring_data) - return -ENOMEM; - - for (i = 0; i < h->kinfo.num_tqps; i++) { - ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv); - if (ret) - goto err; - } - - return 0; -err: - devm_kfree(&pdev->dev, priv->ring_data); - return ret; -} - -static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) -{ - int ret; - - if (ring->desc_num <= 0 || ring->buf_size <= 0) - return -EINVAL; - - ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]), - GFP_KERNEL); - if (!ring->desc_cb) { - ret = -ENOMEM; - goto out; - } - - ret = hns3_alloc_desc(ring); - if (ret) - goto out_with_desc_cb; - - if (!HNAE3_IS_TX_RING(ring)) { - ret = hns3_alloc_ring_buffers(ring); - if (ret) - goto out_with_desc; - } - - return 0; - -out_with_desc: - hns3_free_desc(ring); -out_with_desc_cb: - kfree(ring->desc_cb); - ring->desc_cb = NULL; -out: - return ret; -} - -static void hns3_fini_ring(struct hns3_enet_ring *ring) -{ - hns3_free_desc(ring); - kfree(ring->desc_cb); - ring->desc_cb = NULL; - ring->next_to_clean = 0; - ring->next_to_use = 0; -} - -static int hns3_buf_size2type(u32 buf_size) -{ - int bd_size_type; - - switch (buf_size) { - case 512: - bd_size_type = HNS3_BD_SIZE_512_TYPE; - break; - case 1024: - bd_size_type = HNS3_BD_SIZE_1024_TYPE; - break; - case 2048: - bd_size_type = HNS3_BD_SIZE_2048_TYPE; - break; - case 4096: - bd_size_type = HNS3_BD_SIZE_4096_TYPE; - break; - default: - bd_size_type = HNS3_BD_SIZE_2048_TYPE; - } - - return bd_size_type; -} - -static void hns3_init_ring_hw(struct hns3_enet_ring *ring) -{ - dma_addr_t dma = ring->desc_dma_addr; - struct hnae3_queue *q = ring->tqp; - - if (!HNAE3_IS_TX_RING(ring)) { - hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, - (u32)dma); - hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG, - (u32)((dma >> 31) >> 1)); - - hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG, - hns3_buf_size2type(ring->buf_size)); - hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG, - ring->desc_num / 8 - 1); - - } else { - hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG, - (u32)dma); - hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG, - (u32)((dma >> 31) >> 1)); - - 
hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG, - hns3_buf_size2type(ring->buf_size)); - hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG, - ring->desc_num / 8 - 1); - } -} - -int hns3_init_all_ring(struct hns3_nic_priv *priv) -{ - struct hnae3_handle *h = priv->ae_handle; - int ring_num = h->kinfo.num_tqps * 2; - int i, j; - int ret; - - for (i = 0; i < ring_num; i++) { - ret = hns3_alloc_ring_memory(priv->ring_data[i].ring); - if (ret) { - dev_err(priv->dev, - "Alloc ring memory fail! ret=%d\n", ret); - goto out_when_alloc_ring_memory; - } - - hns3_init_ring_hw(priv->ring_data[i].ring); - - u64_stats_init(&priv->ring_data[i].ring->syncp); - } - - return 0; - -out_when_alloc_ring_memory: - for (j = i - 1; j >= 0; j--) - hns3_fini_ring(priv->ring_data[j].ring); - - return -ENOMEM; -} - -int hns3_uninit_all_ring(struct hns3_nic_priv *priv) -{ - struct hnae3_handle *h = priv->ae_handle; - int i; - - for (i = 0; i < h->kinfo.num_tqps; i++) { - if (h->ae_algo->ops->reset_queue) - h->ae_algo->ops->reset_queue(h, i); - - hns3_fini_ring(priv->ring_data[i].ring); - hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring); - } - - return 0; -} - -/* Set mac addr if it is configured. or leave it to the AE driver */ -static void hns3_init_mac_addr(struct net_device *netdev) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; - u8 mac_addr_temp[ETH_ALEN]; - - if (h->ae_algo->ops->get_mac_addr) { - h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); - ether_addr_copy(netdev->dev_addr, mac_addr_temp); - } - - /* Check if the MAC address is valid, if not get a random one */ - if (!is_valid_ether_addr(netdev->dev_addr)) { - eth_hw_addr_random(netdev); - dev_warn(priv->dev, "using random MAC address %pM\n", - netdev->dev_addr); - } - - if (h->ae_algo->ops->set_mac_addr) - h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr); - -} - -static void hns3_nic_set_priv_ops(struct net_device *netdev) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - - if ((netdev->features & NETIF_F_TSO) || - (netdev->features & NETIF_F_TSO6)) { - priv->ops.fill_desc = hns3_fill_desc_tso; - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; - } else { - priv->ops.fill_desc = hns3_fill_desc; - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; - } -} - -static int hns3_client_init(struct hnae3_handle *handle) -{ - struct pci_dev *pdev = handle->pdev; - struct hns3_nic_priv *priv; - struct net_device *netdev; - int ret; - - netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), - handle->kinfo.num_tqps); - if (!netdev) - return -ENOMEM; - - priv = netdev_priv(netdev); - priv->dev = &pdev->dev; - priv->netdev = netdev; - priv->ae_handle = handle; - priv->last_reset_time = jiffies; - priv->reset_level = HNAE3_FUNC_RESET; - priv->tx_timeout_count = 0; - - handle->kinfo.netdev = netdev; - handle->priv = (void *)priv; - - hns3_init_mac_addr(netdev); - - hns3_set_default_feature(netdev); - - netdev->watchdog_timeo = HNS3_TX_TIMEOUT; - netdev->priv_flags |= IFF_UNICAST_FLT; - netdev->netdev_ops = &hns3_nic_netdev_ops; - SET_NETDEV_DEV(netdev, &pdev->dev); - hns3_ethtool_set_ops(netdev); - hns3_nic_set_priv_ops(netdev); - - /* Carrier off reporting is important to ethtool even BEFORE open */ - netif_carrier_off(netdev); - - ret = hns3_get_ring_config(priv); - if (ret) { - ret = -ENOMEM; - goto out_get_ring_cfg; - } - - ret = hns3_nic_init_vector_data(priv); - if (ret) { - ret = -ENOMEM; - goto out_init_vector_data; - } - - ret = hns3_init_all_ring(priv); - if (ret) { - ret = -ENOMEM; - goto 
out_init_ring_data; - } - - ret = register_netdev(netdev); - if (ret) { - dev_err(priv->dev, "probe register netdev fail!\n"); - goto out_reg_netdev_fail; - } - - hns3_dcbnl_setup(handle); - - /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */ - netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); - - return ret; - -out_reg_netdev_fail: -out_init_ring_data: - (void)hns3_nic_uninit_vector_data(priv); - priv->ring_data = NULL; -out_init_vector_data: -out_get_ring_cfg: - priv->ae_handle = NULL; - free_netdev(netdev); - return ret; -} - -static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) -{ - struct net_device *netdev = handle->kinfo.netdev; - struct hns3_nic_priv *priv = netdev_priv(netdev); - int ret; - - if (netdev->reg_state != NETREG_UNINITIALIZED) - unregister_netdev(netdev); - - ret = hns3_nic_uninit_vector_data(priv); - if (ret) - netdev_err(netdev, "uninit vector error\n"); - - ret = hns3_uninit_all_ring(priv); - if (ret) - netdev_err(netdev, "uninit ring error\n"); - - priv->ring_data = NULL; - - free_netdev(netdev); -} - -static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) -{ - struct net_device *netdev = handle->kinfo.netdev; - - if (!netdev) - return; - - if (linkup) { - netif_carrier_on(netdev); - netif_tx_wake_all_queues(netdev); - netdev_info(netdev, "link up\n"); - } else { - netif_carrier_off(netdev); - netif_tx_stop_all_queues(netdev); - netdev_info(netdev, "link down\n"); - } -} - -static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc) -{ - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct net_device *ndev = kinfo->netdev; - bool if_running; - int ret; - u8 i; - - if (tc > HNAE3_MAX_TC) - return -EINVAL; - - if (!ndev) - return -ENODEV; - - if_running = netif_running(ndev); - - ret = netdev_set_num_tc(ndev, tc); - if (ret) - return ret; - - if (if_running) { - (void)hns3_nic_net_stop(ndev); - msleep(100); - } - - ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ? 
- kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP; - if (ret) - goto err_out; - - if (tc <= 1) { - netdev_reset_tc(ndev); - goto out; - } - - for (i = 0; i < HNAE3_MAX_TC; i++) { - struct hnae3_tc_info *tc_info = &kinfo->tc_info[i]; - - if (tc_info->enable) - netdev_set_tc_queue(ndev, - tc_info->tc, - tc_info->tqp_count, - tc_info->tqp_offset); - } - - for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { - netdev_set_prio_tc_map(ndev, i, - kinfo->prio_tc[i]); - } - -out: - ret = hns3_nic_set_real_num_queue(ndev); - -err_out: - if (if_running) - (void)hns3_nic_net_open(ndev); - - return ret; -} - -static void hns3_recover_hw_addr(struct net_device *ndev) -{ - struct netdev_hw_addr_list *list; - struct netdev_hw_addr *ha, *tmp; - - /* go through and sync uc_addr entries to the device */ - list = &ndev->uc; - list_for_each_entry_safe(ha, tmp, &list->list, list) - hns3_nic_uc_sync(ndev, ha->addr); - - /* go through and sync mc_addr entries to the device */ - list = &ndev->mc; - list_for_each_entry_safe(ha, tmp, &list->list, list) - hns3_nic_mc_sync(ndev, ha->addr); -} - -static void hns3_drop_skb_data(struct hns3_enet_ring *ring, struct sk_buff *skb) -{ - dev_kfree_skb_any(skb); -} - -static void hns3_clear_all_ring(struct hnae3_handle *h) -{ - struct net_device *ndev = h->kinfo.netdev; - struct hns3_nic_priv *priv = netdev_priv(ndev); - u32 i; - - for (i = 0; i < h->kinfo.num_tqps; i++) { - struct netdev_queue *dev_queue; - struct hns3_enet_ring *ring; - - ring = priv->ring_data[i].ring; - hns3_clean_tx_ring(ring, ring->desc_num); - dev_queue = netdev_get_tx_queue(ndev, - priv->ring_data[i].queue_index); - netdev_tx_reset_queue(dev_queue); - - ring = priv->ring_data[i + h->kinfo.num_tqps].ring; - hns3_clean_rx_ring(ring, ring->desc_num, hns3_drop_skb_data); - } -} - -static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) -{ - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct net_device *ndev = kinfo->netdev; - - if (!netif_running(ndev)) - return -EIO; - - return hns3_nic_net_stop(ndev); -} - -static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) -{ - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); - int ret = 0; - - if (netif_running(kinfo->netdev)) { - ret = hns3_nic_net_up(kinfo->netdev); - if (ret) { - netdev_err(kinfo->netdev, - "hns net up fail, ret=%d!\n", ret); - return ret; - } - - priv->last_reset_time = jiffies; - } - - return ret; -} - -static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) -{ - struct net_device *netdev = handle->kinfo.netdev; - struct hns3_nic_priv *priv = netdev_priv(netdev); - int ret; - - priv->reset_level = 1; - hns3_init_mac_addr(netdev); - hns3_nic_set_rx_mode(netdev); - hns3_recover_hw_addr(netdev); - - /* Carrier off reporting is important to ethtool even BEFORE open */ - netif_carrier_off(netdev); - - ret = hns3_get_ring_config(priv); - if (ret) - return ret; - - ret = hns3_nic_init_vector_data(priv); - if (ret) - return ret; - - ret = hns3_init_all_ring(priv); - if (ret) { - hns3_nic_uninit_vector_data(priv); - priv->ring_data = NULL; - } - - return ret; -} - -static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) -{ - struct net_device *netdev = handle->kinfo.netdev; - struct hns3_nic_priv *priv = netdev_priv(netdev); - int ret; - - hns3_clear_all_ring(handle); - - ret = hns3_nic_uninit_vector_data(priv); - if (ret) { - netdev_err(netdev, "uninit vector error\n"); - return ret; - } - - ret = hns3_uninit_all_ring(priv); - if 
(ret) - netdev_err(netdev, "uninit ring error\n"); - - priv->ring_data = NULL; - - return ret; -} - -static int hns3_reset_notify(struct hnae3_handle *handle, - enum hnae3_reset_notify_type type) -{ - int ret = 0; - - switch (type) { - case HNAE3_UP_CLIENT: - ret = hns3_reset_notify_up_enet(handle); - break; - case HNAE3_DOWN_CLIENT: - ret = hns3_reset_notify_down_enet(handle); - break; - case HNAE3_INIT_CLIENT: - ret = hns3_reset_notify_init_enet(handle); - break; - case HNAE3_UNINIT_CLIENT: - ret = hns3_reset_notify_uninit_enet(handle); - break; - default: - break; - } - - return ret; -} - -static const struct hnae3_client_ops client_ops = { - .init_instance = hns3_client_init, - .uninit_instance = hns3_client_uninit, - .link_status_change = hns3_link_status_change, - .setup_tc = hns3_client_setup_tc, - .reset_notify = hns3_reset_notify, -}; - -/* hns3_init_module - Driver registration routine - * hns3_init_module is the first routine called when the driver is - * loaded. All it does is register with the PCI subsystem. - */ -static int __init hns3_init_module(void) -{ - int ret; - - pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); - pr_info("%s: %s\n", hns3_driver_name, hns3_copyright); - - client.type = HNAE3_CLIENT_KNIC; - snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s", - hns3_driver_name); - - client.ops = &client_ops; - - ret = hnae3_register_client(&client); - if (ret) - return ret; - - ret = pci_register_driver(&hns3_driver); - if (ret) - hnae3_unregister_client(&client); - - return ret; -} -module_init(hns3_init_module); - -/* hns3_exit_module - Driver exit cleanup routine - * hns3_exit_module is called just before the driver is removed - * from memory. - */ -static void __exit hns3_exit_module(void) -{ - pci_unregister_driver(&hns3_driver); - hnae3_unregister_client(&client); -} -module_exit(hns3_exit_module); - -MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver"); -MODULE_AUTHOR("Huawei Tech. Co., Ltd."); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("pci:hns-nic"); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h deleted file mode 100644 index 8a9de759957b..000000000000 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h +++ /dev/null @@ -1,613 +0,0 @@ -/* - * Copyright (c) 2016 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#ifndef __HNS3_ENET_H -#define __HNS3_ENET_H - -#include "hnae3.h" - -extern const char hns3_driver_version[]; - -enum hns3_nic_state { - HNS3_NIC_STATE_TESTING, - HNS3_NIC_STATE_RESETTING, - HNS3_NIC_STATE_REINITING, - HNS3_NIC_STATE_DOWN, - HNS3_NIC_STATE_DISABLED, - HNS3_NIC_STATE_REMOVING, - HNS3_NIC_STATE_SERVICE_INITED, - HNS3_NIC_STATE_SERVICE_SCHED, - HNS3_NIC_STATE2_RESET_REQUESTED, - HNS3_NIC_STATE_MAX -}; - -#define HNS3_RING_RX_RING_BASEADDR_L_REG 0x00000 -#define HNS3_RING_RX_RING_BASEADDR_H_REG 0x00004 -#define HNS3_RING_RX_RING_BD_NUM_REG 0x00008 -#define HNS3_RING_RX_RING_BD_LEN_REG 0x0000C -#define HNS3_RING_RX_RING_TAIL_REG 0x00018 -#define HNS3_RING_RX_RING_HEAD_REG 0x0001C -#define HNS3_RING_RX_RING_FBDNUM_REG 0x00020 -#define HNS3_RING_RX_RING_PKTNUM_RECORD_REG 0x0002C - -#define HNS3_RING_TX_RING_BASEADDR_L_REG 0x00040 -#define HNS3_RING_TX_RING_BASEADDR_H_REG 0x00044 -#define HNS3_RING_TX_RING_BD_NUM_REG 0x00048 -#define HNS3_RING_TX_RING_BD_LEN_REG 0x0004C -#define HNS3_RING_TX_RING_TAIL_REG 0x00058 -#define HNS3_RING_TX_RING_HEAD_REG 0x0005C -#define HNS3_RING_TX_RING_FBDNUM_REG 0x00060 -#define HNS3_RING_TX_RING_OFFSET_REG 0x00064 -#define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C - -#define HNS3_RING_PREFETCH_EN_REG 0x0007C -#define HNS3_RING_CFG_VF_NUM_REG 0x00080 -#define HNS3_RING_ASID_REG 0x0008C -#define HNS3_RING_RX_VM_REG 0x00090 -#define HNS3_RING_T0_BE_RST 0x00094 -#define HNS3_RING_COULD_BE_RST 0x00098 -#define HNS3_RING_WRR_WEIGHT_REG 0x0009c - -#define HNS3_RING_INTMSK_RXWL_REG 0x000A0 -#define HNS3_RING_INTSTS_RX_RING_REG 0x000A4 -#define HNS3_RX_RING_INT_STS_REG 0x000A8 -#define HNS3_RING_INTMSK_TXWL_REG 0x000AC -#define HNS3_RING_INTSTS_TX_RING_REG 0x000B0 -#define HNS3_TX_RING_INT_STS_REG 0x000B4 -#define HNS3_RING_INTMSK_RX_OVERTIME_REG 0x000B8 -#define HNS3_RING_INTSTS_RX_OVERTIME_REG 0x000BC -#define HNS3_RING_INTMSK_TX_OVERTIME_REG 0x000C4 -#define HNS3_RING_INTSTS_TX_OVERTIME_REG 0x000C8 - -#define HNS3_RING_MB_CTRL_REG 0x00100 -#define HNS3_RING_MB_DATA_BASE_REG 0x00200 - -#define HNS3_TX_REG_OFFSET 0x40 - -#define HNS3_RX_HEAD_SIZE 256 - -#define HNS3_TX_TIMEOUT (5 * HZ) -#define HNS3_RING_NAME_LEN 16 -#define HNS3_BUFFER_SIZE_2048 2048 -#define HNS3_RING_MAX_PENDING 32768 -#define HNS3_RING_MIN_PENDING 8 -#define HNS3_RING_BD_MULTIPLE 8 -#define HNS3_MAX_MTU 9728 - -#define HNS3_BD_SIZE_512_TYPE 0 -#define HNS3_BD_SIZE_1024_TYPE 1 -#define HNS3_BD_SIZE_2048_TYPE 2 -#define HNS3_BD_SIZE_4096_TYPE 3 - -#define HNS3_RX_FLAG_VLAN_PRESENT 0x1 -#define HNS3_RX_FLAG_L3ID_IPV4 0x0 -#define HNS3_RX_FLAG_L3ID_IPV6 0x1 -#define HNS3_RX_FLAG_L4ID_UDP 0x0 -#define HNS3_RX_FLAG_L4ID_TCP 0x1 - -#define HNS3_RXD_DMAC_S 0 -#define HNS3_RXD_DMAC_M (0x3 << HNS3_RXD_DMAC_S) -#define HNS3_RXD_VLAN_S 2 -#define HNS3_RXD_VLAN_M (0x3 << HNS3_RXD_VLAN_S) -#define HNS3_RXD_L3ID_S 4 -#define HNS3_RXD_L3ID_M (0xf << HNS3_RXD_L3ID_S) -#define HNS3_RXD_L4ID_S 8 -#define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S) -#define HNS3_RXD_FRAG_B 12 -#define HNS3_RXD_L2E_B 16 -#define HNS3_RXD_L3E_B 17 -#define HNS3_RXD_L4E_B 18 -#define HNS3_RXD_TRUNCAT_B 19 -#define HNS3_RXD_HOI_B 20 -#define HNS3_RXD_DOI_B 21 -#define HNS3_RXD_OL3E_B 22 -#define HNS3_RXD_OL4E_B 23 - -#define HNS3_RXD_ODMAC_S 0 -#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S) -#define HNS3_RXD_OVLAN_S 2 -#define HNS3_RXD_OVLAN_M (0x3 << HNS3_RXD_OVLAN_S) -#define HNS3_RXD_OL3ID_S 4 -#define HNS3_RXD_OL3ID_M (0xf << HNS3_RXD_OL3ID_S) -#define HNS3_RXD_OL4ID_S 8 -#define HNS3_RXD_OL4ID_M (0xf << 
HNS3_RXD_OL4ID_S) -#define HNS3_RXD_FBHI_S 12 -#define HNS3_RXD_FBHI_M (0x3 << HNS3_RXD_FBHI_S) -#define HNS3_RXD_FBLI_S 14 -#define HNS3_RXD_FBLI_M (0x3 << HNS3_RXD_FBLI_S) - -#define HNS3_RXD_BDTYPE_S 0 -#define HNS3_RXD_BDTYPE_M (0xf << HNS3_RXD_BDTYPE_S) -#define HNS3_RXD_VLD_B 4 -#define HNS3_RXD_UDP0_B 5 -#define HNS3_RXD_EXTEND_B 7 -#define HNS3_RXD_FE_B 8 -#define HNS3_RXD_LUM_B 9 -#define HNS3_RXD_CRCP_B 10 -#define HNS3_RXD_L3L4P_B 11 -#define HNS3_RXD_TSIND_S 12 -#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S) -#define HNS3_RXD_LKBK_B 15 -#define HNS3_RXD_HDL_S 16 -#define HNS3_RXD_HDL_M (0x7ff << HNS3_RXD_HDL_S) -#define HNS3_RXD_HSIND_B 31 - -#define HNS3_TXD_L3T_S 0 -#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S) -#define HNS3_TXD_L4T_S 2 -#define HNS3_TXD_L4T_M (0x3 << HNS3_TXD_L4T_S) -#define HNS3_TXD_L3CS_B 4 -#define HNS3_TXD_L4CS_B 5 -#define HNS3_TXD_VLAN_B 6 -#define HNS3_TXD_TSO_B 7 - -#define HNS3_TXD_L2LEN_S 8 -#define HNS3_TXD_L2LEN_M (0xff << HNS3_TXD_L2LEN_S) -#define HNS3_TXD_L3LEN_S 16 -#define HNS3_TXD_L3LEN_M (0xff << HNS3_TXD_L3LEN_S) -#define HNS3_TXD_L4LEN_S 24 -#define HNS3_TXD_L4LEN_M (0xff << HNS3_TXD_L4LEN_S) - -#define HNS3_TXD_OL3T_S 0 -#define HNS3_TXD_OL3T_M (0x3 << HNS3_TXD_OL3T_S) -#define HNS3_TXD_OVLAN_B 2 -#define HNS3_TXD_MACSEC_B 3 -#define HNS3_TXD_TUNTYPE_S 4 -#define HNS3_TXD_TUNTYPE_M (0xf << HNS3_TXD_TUNTYPE_S) - -#define HNS3_TXD_BDTYPE_S 0 -#define HNS3_TXD_BDTYPE_M (0xf << HNS3_TXD_BDTYPE_S) -#define HNS3_TXD_FE_B 4 -#define HNS3_TXD_SC_S 5 -#define HNS3_TXD_SC_M (0x3 << HNS3_TXD_SC_S) -#define HNS3_TXD_EXTEND_B 7 -#define HNS3_TXD_VLD_B 8 -#define HNS3_TXD_RI_B 9 -#define HNS3_TXD_RA_B 10 -#define HNS3_TXD_TSYN_B 11 -#define HNS3_TXD_DECTTL_S 12 -#define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S) - -#define HNS3_TXD_MSS_S 0 -#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S) - -#define HNS3_VECTOR_TX_IRQ BIT_ULL(0) -#define HNS3_VECTOR_RX_IRQ BIT_ULL(1) - -#define HNS3_VECTOR_NOT_INITED 0 -#define HNS3_VECTOR_INITED 1 - -#define HNS3_MAX_BD_SIZE 65535 -#define HNS3_MAX_BD_PER_FRAG 8 -#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS - -#define HNS3_VECTOR_GL0_OFFSET 0x100 -#define HNS3_VECTOR_GL1_OFFSET 0x200 -#define HNS3_VECTOR_GL2_OFFSET 0x300 -#define HNS3_VECTOR_RL_OFFSET 0x900 -#define HNS3_VECTOR_RL_EN_B 6 - -enum hns3_pkt_l3t_type { - HNS3_L3T_NONE, - HNS3_L3T_IPV6, - HNS3_L3T_IPV4, - HNS3_L3T_RESERVED -}; - -enum hns3_pkt_l4t_type { - HNS3_L4T_UNKNOWN, - HNS3_L4T_TCP, - HNS3_L4T_UDP, - HNS3_L4T_SCTP -}; - -enum hns3_pkt_ol3t_type { - HNS3_OL3T_NONE, - HNS3_OL3T_IPV6, - HNS3_OL3T_IPV4_NO_CSUM, - HNS3_OL3T_IPV4_CSUM -}; - -enum hns3_pkt_tun_type { - HNS3_TUN_NONE, - HNS3_TUN_MAC_IN_UDP, - HNS3_TUN_NVGRE, - HNS3_TUN_OTHER -}; - -/* hardware spec ring buffer format */ -struct __packed hns3_desc { - __le64 addr; - union { - struct { - __le16 vlan_tag; - __le16 send_size; - union { - __le32 type_cs_vlan_tso_len; - struct { - __u8 type_cs_vlan_tso; - __u8 l2_len; - __u8 l3_len; - __u8 l4_len; - }; - }; - __le16 outer_vlan_tag; - __le16 tv; - - union { - __le32 ol_type_vlan_len_msec; - struct { - __u8 ol_type_vlan_msec; - __u8 ol2_len; - __u8 ol3_len; - __u8 ol4_len; - }; - }; - - __le32 paylen; - __le16 bdtp_fe_sc_vld_ra_ri; - __le16 mss; - } tx; - - struct { - __le32 l234_info; - __le16 pkt_len; - __le16 size; - - __le32 rss_hash; - __le16 fd_id; - __le16 vlan_tag; - - union { - __le32 ol_info; - struct { - __le16 o_dm_vlan_id_fb; - __le16 ot_vlan_tag; - }; - }; - - __le32 bd_base_info; - } rx; - }; -}; - -struct hns3_desc_cb { - 
dma_addr_t dma; /* dma address of this desc */ - void *buf; /* cpu addr for a desc */ - - /* priv data for the desc, e.g. skb when use with ip stack*/ - void *priv; - u16 page_offset; - u16 reuse_flag; - - u16 length; /* length of the buffer */ - - /* desc type, used by the ring user to mark the type of the priv data */ - u16 type; -}; - -enum hns3_pkt_l3type { - HNS3_L3_TYPE_IPV4, - HNS3_L3_TYPE_IPV6, - HNS3_L3_TYPE_ARP, - HNS3_L3_TYPE_RARP, - HNS3_L3_TYPE_IPV4_OPT, - HNS3_L3_TYPE_IPV6_EXT, - HNS3_L3_TYPE_LLDP, - HNS3_L3_TYPE_BPDU, - HNS3_L3_TYPE_MAC_PAUSE, - HNS3_L3_TYPE_PFC_PAUSE,/* 0x9*/ - - /* reserved for 0xA~0xB*/ - - HNS3_L3_TYPE_CNM = 0xc, - - /* reserved for 0xD~0xE*/ - - HNS3_L3_TYPE_PARSE_FAIL = 0xf /* must be last */ -}; - -enum hns3_pkt_l4type { - HNS3_L4_TYPE_UDP, - HNS3_L4_TYPE_TCP, - HNS3_L4_TYPE_GRE, - HNS3_L4_TYPE_SCTP, - HNS3_L4_TYPE_IGMP, - HNS3_L4_TYPE_ICMP, - - /* reserved for 0x6~0xE */ - - HNS3_L4_TYPE_PARSE_FAIL = 0xf /* must be last */ -}; - -enum hns3_pkt_ol3type { - HNS3_OL3_TYPE_IPV4 = 0, - HNS3_OL3_TYPE_IPV6, - /* reserved for 0x2~0x3 */ - HNS3_OL3_TYPE_IPV4_OPT = 4, - HNS3_OL3_TYPE_IPV6_EXT, - - /* reserved for 0x6~0xE*/ - - HNS3_OL3_TYPE_PARSE_FAIL = 0xf /* must be last */ -}; - -enum hns3_pkt_ol4type { - HNS3_OL4_TYPE_NO_TUN, - HNS3_OL4_TYPE_MAC_IN_UDP, - HNS3_OL4_TYPE_NVGRE, - HNS3_OL4_TYPE_UNKNOWN -}; - -struct ring_stats { - u64 io_err_cnt; - u64 sw_err_cnt; - u64 seg_pkt_cnt; - union { - struct { - u64 tx_pkts; - u64 tx_bytes; - u64 tx_err_cnt; - u64 restart_queue; - u64 tx_busy; - }; - struct { - u64 rx_pkts; - u64 rx_bytes; - u64 rx_err_cnt; - u64 reuse_pg_cnt; - u64 err_pkt_len; - u64 non_vld_descs; - u64 err_bd_num; - u64 l2_err; - u64 l3l4_csum_err; - }; - }; -}; - -struct hns3_enet_ring { - u8 __iomem *io_base; /* base io address for the ring */ - struct hns3_desc *desc; /* dma map address space */ - struct hns3_desc_cb *desc_cb; - struct hns3_enet_ring *next; - struct hns3_enet_tqp_vector *tqp_vector; - struct hnae3_queue *tqp; - char ring_name[HNS3_RING_NAME_LEN]; - struct device *dev; /* will be used for DMA mapping of descriptors */ - - /* statistic */ - struct ring_stats stats; - struct u64_stats_sync syncp; - - dma_addr_t desc_dma_addr; - u32 buf_size; /* size for hnae_desc->addr, preset by AE */ - u16 desc_num; /* total number of desc */ - u16 max_desc_num_per_pkt; - u16 max_raw_data_sz_per_desc; - u16 max_pkt_size; - int next_to_use; /* idx of next spare desc */ - - /* idx of lastest sent desc, the ring is empty when equal to - * next_to_use - */ - int next_to_clean; - - u32 flag; /* ring attribute */ - int irq_init_flag; - - int numa_node; - cpumask_t affinity_mask; -}; - -struct hns_queue; - -struct hns3_nic_ring_data { - struct hns3_enet_ring *ring; - struct napi_struct napi; - int queue_index; - int (*poll_one)(struct hns3_nic_ring_data *, int, void *); - void (*ex_process)(struct hns3_nic_ring_data *, struct sk_buff *); - void (*fini_process)(struct hns3_nic_ring_data *); -}; - -struct hns3_nic_ops { - int (*fill_desc)(struct hns3_enet_ring *ring, void *priv, - int size, dma_addr_t dma, int frag_end, - enum hns_desc_type type); - int (*maybe_stop_tx)(struct sk_buff **out_skb, - int *bnum, struct hns3_enet_ring *ring); - void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum); -}; - -enum hns3_flow_level_range { - HNS3_FLOW_LOW = 0, - HNS3_FLOW_MID = 1, - HNS3_FLOW_HIGH = 2, - HNS3_FLOW_ULTRA = 3, -}; - -enum hns3_link_mode_bits { - HNS3_LM_FIBRE_BIT = BIT(0), - HNS3_LM_AUTONEG_BIT = BIT(1), - HNS3_LM_TP_BIT = BIT(2), - HNS3_LM_PAUSE_BIT = 
BIT(3), - HNS3_LM_BACKPLANE_BIT = BIT(4), - HNS3_LM_10BASET_HALF_BIT = BIT(5), - HNS3_LM_10BASET_FULL_BIT = BIT(6), - HNS3_LM_100BASET_HALF_BIT = BIT(7), - HNS3_LM_100BASET_FULL_BIT = BIT(8), - HNS3_LM_1000BASET_FULL_BIT = BIT(9), - HNS3_LM_10000BASEKR_FULL_BIT = BIT(10), - HNS3_LM_25000BASEKR_FULL_BIT = BIT(11), - HNS3_LM_40000BASELR4_FULL_BIT = BIT(12), - HNS3_LM_50000BASEKR2_FULL_BIT = BIT(13), - HNS3_LM_100000BASEKR4_FULL_BIT = BIT(14), - HNS3_LM_COUNT = 15 -}; - -#define HNS3_INT_GL_50K 0x000A -#define HNS3_INT_GL_20K 0x0019 -#define HNS3_INT_GL_18K 0x001B -#define HNS3_INT_GL_8K 0x003E - -struct hns3_enet_ring_group { - /* array of pointers to rings */ - struct hns3_enet_ring *ring; - u64 total_bytes; /* total bytes processed this group */ - u64 total_packets; /* total packets processed this group */ - u16 count; - enum hns3_flow_level_range flow_level; - u16 int_gl; -}; - -struct hns3_enet_tqp_vector { - struct hnae3_handle *handle; - u8 __iomem *mask_addr; - int vector_irq; - int irq_init_flag; - - u16 idx; /* index in the TQP vector array per handle. */ - - struct napi_struct napi; - - struct hns3_enet_ring_group rx_group; - struct hns3_enet_ring_group tx_group; - - u16 num_tqps; /* total number of tqps in TQP vector */ - - cpumask_t affinity_mask; - char name[HNAE3_INT_NAME_LEN]; - - /* when 0 should adjust interrupt coalesce parameter */ - u8 int_adapt_down; -} ____cacheline_internodealigned_in_smp; - -enum hns3_udp_tnl_type { - HNS3_UDP_TNL_VXLAN, - HNS3_UDP_TNL_GENEVE, - HNS3_UDP_TNL_MAX, -}; - -struct hns3_udp_tunnel { - u16 dst_port; - int used; -}; - -struct hns3_nic_priv { - struct hnae3_handle *ae_handle; - u32 enet_ver; - u32 port_id; - struct net_device *netdev; - struct device *dev; - struct hns3_nic_ops ops; - - /** - * the cb for nic to manage the ring buffer, the first half of the - * array is for tx_ring and vice versa for the second half - */ - struct hns3_nic_ring_data *ring_data; - struct hns3_enet_tqp_vector *tqp_vector; - u16 vector_num; - - /* The most recently read link state */ - int link; - u64 tx_timeout_count; - enum hnae3_reset_type reset_level; - unsigned long last_reset_time; - - unsigned long state; - - struct timer_list service_timer; - - struct work_struct service_task; - - struct notifier_block notifier_block; - /* Vxlan/Geneve information */ - struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX]; -}; - -union l3_hdr_info { - struct iphdr *v4; - struct ipv6hdr *v6; - unsigned char *hdr; -}; - -union l4_hdr_info { - struct tcphdr *tcp; - struct udphdr *udp; - unsigned char *hdr; -}; - -/* the distance between [begin, end) in a ring buffer - * note: there is a unuse slot between the begin and the end - */ -static inline int ring_dist(struct hns3_enet_ring *ring, int begin, int end) -{ - return (end - begin + ring->desc_num) % ring->desc_num; -} - -static inline int ring_space(struct hns3_enet_ring *ring) -{ - return ring->desc_num - - ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1; -} - -static inline int is_ring_empty(struct hns3_enet_ring *ring) -{ - return ring->next_to_use == ring->next_to_clean; -} - -static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) -{ - u8 __iomem *reg_addr = READ_ONCE(base); - - writel(value, reg_addr + reg); -} - -#define hns3_write_dev(a, reg, value) \ - hns3_write_reg((a)->io_base, (reg), (value)) - -#define hnae_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \ - (tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG) - -#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev) - -#define 
ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \ - DMA_TO_DEVICE : DMA_FROM_DEVICE) - -#define tx_ring_data(priv, idx) ((priv)->ring_data[idx]) - -#define hnae_buf_size(_ring) ((_ring)->buf_size) -#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring))) -#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring)) - -/* iterator for handling rings in ring group */ -#define hns3_for_each_ring(pos, head) \ - for (pos = (head).ring; pos; pos = pos->next) - -#define hns3_get_handle(ndev) \ - (((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle) - -void hns3_ethtool_set_ops(struct net_device *netdev); - -bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); -int hns3_init_all_ring(struct hns3_nic_priv *priv); -int hns3_uninit_all_ring(struct hns3_nic_priv *priv); -netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev); -int hns3_clean_rx_ring( - struct hns3_enet_ring *ring, int budget, - void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)); - -#ifdef CONFIG_HNS3_DCB -void hns3_dcbnl_setup(struct hnae3_handle *handle); -#else -static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {} -#endif - -#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c deleted file mode 100644 index a21470c72da3..000000000000 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c +++ /dev/null @@ -1,876 +0,0 @@ -/* - * Copyright (c) 2016~2017 Hisilicon Limited. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include -#include -#include - -#include "hns3_enet.h" - -struct hns3_stats { - char stats_string[ETH_GSTRING_LEN]; - int stats_size; - int stats_offset; -}; - -/* tqp related stats */ -#define HNS3_TQP_STAT(_string, _member) { \ - .stats_string = _string, \ - .stats_size = FIELD_SIZEOF(struct ring_stats, _member), \ - .stats_offset = offsetof(struct hns3_enet_ring, stats), \ -} \ - -static const struct hns3_stats hns3_txq_stats[] = { - /* Tx per-queue statistics */ - HNS3_TQP_STAT("tx_io_err_cnt", io_err_cnt), - HNS3_TQP_STAT("tx_sw_err_cnt", sw_err_cnt), - HNS3_TQP_STAT("tx_seg_pkt_cnt", seg_pkt_cnt), - HNS3_TQP_STAT("tx_pkts", tx_pkts), - HNS3_TQP_STAT("tx_bytes", tx_bytes), - HNS3_TQP_STAT("tx_err_cnt", tx_err_cnt), - HNS3_TQP_STAT("tx_restart_queue", restart_queue), - HNS3_TQP_STAT("tx_busy", tx_busy), -}; - -#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats) - -static const struct hns3_stats hns3_rxq_stats[] = { - /* Rx per-queue statistics */ - HNS3_TQP_STAT("rx_io_err_cnt", io_err_cnt), - HNS3_TQP_STAT("rx_sw_err_cnt", sw_err_cnt), - HNS3_TQP_STAT("rx_seg_pkt_cnt", seg_pkt_cnt), - HNS3_TQP_STAT("rx_pkts", rx_pkts), - HNS3_TQP_STAT("rx_bytes", rx_bytes), - HNS3_TQP_STAT("rx_err_cnt", rx_err_cnt), - HNS3_TQP_STAT("rx_reuse_pg_cnt", reuse_pg_cnt), - HNS3_TQP_STAT("rx_err_pkt_len", err_pkt_len), - HNS3_TQP_STAT("rx_non_vld_descs", non_vld_descs), - HNS3_TQP_STAT("rx_err_bd_num", err_bd_num), - HNS3_TQP_STAT("rx_l2_err", l2_err), - HNS3_TQP_STAT("rx_l3l4_csum_err", l3l4_csum_err), -}; - -#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats) - -#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT) - -#define HNS3_SELF_TEST_TPYE_NUM 1 -#define HNS3_NIC_LB_TEST_PKT_NUM 1 -#define HNS3_NIC_LB_TEST_RING_ID 0 
-#define HNS3_NIC_LB_TEST_PACKET_SIZE 128 - -/* Nic loopback test err */ -#define HNS3_NIC_LB_TEST_NO_MEM_ERR 1 -#define HNS3_NIC_LB_TEST_TX_CNT_ERR 2 -#define HNS3_NIC_LB_TEST_RX_CNT_ERR 3 - -struct hns3_link_mode_mapping { - u32 hns3_link_mode; - u32 ethtool_link_mode; -}; - -static const struct hns3_link_mode_mapping hns3_lm_map[] = { - {HNS3_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT}, - {HNS3_LM_AUTONEG_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT}, - {HNS3_LM_TP_BIT, ETHTOOL_LINK_MODE_TP_BIT}, - {HNS3_LM_PAUSE_BIT, ETHTOOL_LINK_MODE_Pause_BIT}, - {HNS3_LM_BACKPLANE_BIT, ETHTOOL_LINK_MODE_Backplane_BIT}, - {HNS3_LM_10BASET_HALF_BIT, ETHTOOL_LINK_MODE_10baseT_Half_BIT}, - {HNS3_LM_10BASET_FULL_BIT, ETHTOOL_LINK_MODE_10baseT_Full_BIT}, - {HNS3_LM_100BASET_HALF_BIT, ETHTOOL_LINK_MODE_100baseT_Half_BIT}, - {HNS3_LM_100BASET_FULL_BIT, ETHTOOL_LINK_MODE_100baseT_Full_BIT}, - {HNS3_LM_1000BASET_FULL_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT}, -}; - -static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop) -{ - struct hnae3_handle *h = hns3_get_handle(ndev); - int ret; - - if (!h->ae_algo->ops->set_loopback || - !h->ae_algo->ops->set_promisc_mode) - return -EOPNOTSUPP; - - switch (loop) { - case HNAE3_MAC_INTER_LOOP_MAC: - ret = h->ae_algo->ops->set_loopback(h, loop, true); - break; - case HNAE3_MAC_LOOP_NONE: - ret = h->ae_algo->ops->set_loopback(h, - HNAE3_MAC_INTER_LOOP_MAC, false); - break; - default: - ret = -ENOTSUPP; - break; - } - - if (ret) - return ret; - - if (loop == HNAE3_MAC_LOOP_NONE) - h->ae_algo->ops->set_promisc_mode(h, ndev->flags & IFF_PROMISC); - else - h->ae_algo->ops->set_promisc_mode(h, 1); - - return ret; -} - -static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode) -{ - struct hnae3_handle *h = hns3_get_handle(ndev); - int ret; - - if (!h->ae_algo->ops->start) - return -EOPNOTSUPP; - - ret = h->ae_algo->ops->start(h); - if (ret) { - netdev_err(ndev, - "hns3_lb_up ae start return error: %d\n", ret); - return ret; - } - - ret = hns3_lp_setup(ndev, loop_mode); - usleep_range(10000, 20000); - - return ret; -} - -static int hns3_lp_down(struct net_device *ndev) -{ - struct hnae3_handle *h = hns3_get_handle(ndev); - int ret; - - if (!h->ae_algo->ops->stop) - return -EOPNOTSUPP; - - ret = hns3_lp_setup(ndev, HNAE3_MAC_LOOP_NONE); - if (ret) { - netdev_err(ndev, "lb_setup return error: %d\n", ret); - return ret; - } - - h->ae_algo->ops->stop(h); - usleep_range(10000, 20000); - - return 0; -} - -static void hns3_lp_setup_skb(struct sk_buff *skb) -{ - struct net_device *ndev = skb->dev; - unsigned char *packet; - struct ethhdr *ethh; - unsigned int i; - - skb_reserve(skb, NET_IP_ALIGN); - ethh = skb_put(skb, sizeof(struct ethhdr)); - packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE); - - memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN); - eth_zero_addr(ethh->h_source); - ethh->h_proto = htons(ETH_P_ARP); - skb_reset_mac_header(skb); - - for (i = 0; i < HNS3_NIC_LB_TEST_PACKET_SIZE; i++) - packet[i] = (unsigned char)(i & 0xff); -} - -static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring, - struct sk_buff *skb) -{ - struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector; - unsigned char *packet = skb->data; - u32 i; - - for (i = 0; i < skb->len; i++) - if (packet[i] != (unsigned char)(i & 0xff)) - break; - - /* The packet is correctly received */ - if (i == skb->len) - tqp_vector->rx_group.total_packets++; - else - print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1, - skb->data, skb->len, true); - - dev_kfree_skb_any(skb); -} - 
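/* An illustrative, standalone sketch of the fill/verify byte pattern that the
 * loopback self-test above relies on: hns3_lp_setup_skb() writes byte i as
 * (i & 0xff) into the test payload and hns3_lb_check_skb_data() re-checks the
 * same pattern on receive. lb_fill_pattern() and lb_pattern_ok() below are
 * hypothetical helpers for illustration only; the driver open-codes these
 * loops and they are not part of this patch.
 */
static void lb_fill_pattern(unsigned char *buf, unsigned int len)
{
	unsigned int i;

	/* same pattern the transmit side of the self-test generates */
	for (i = 0; i < len; i++)
		buf[i] = (unsigned char)(i & 0xff);
}

static int lb_pattern_ok(const unsigned char *buf, unsigned int len)
{
	unsigned int i;

	/* a single mismatching byte fails the whole looped-back frame */
	for (i = 0; i < len; i++)
		if (buf[i] != (unsigned char)(i & 0xff))
			return 0;
	return 1;
}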
-static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget) -{ - struct hnae3_handle *h = priv->ae_handle; - struct hnae3_knic_private_info *kinfo; - u32 i, rcv_good_pkt_total = 0; - - kinfo = &h->kinfo; - for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) { - struct hns3_enet_ring *ring = priv->ring_data[i].ring; - struct hns3_enet_ring_group *rx_group; - u64 pre_rx_pkt; - - rx_group = &ring->tqp_vector->rx_group; - pre_rx_pkt = rx_group->total_packets; - - hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data); - - rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt); - rx_group->total_packets = pre_rx_pkt; - } - return rcv_good_pkt_total; -} - -static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid, - u32 end_ringid, u32 budget) -{ - u32 i; - - for (i = start_ringid; i <= end_ringid; i++) { - struct hns3_enet_ring *ring = priv->ring_data[i].ring; - - hns3_clean_tx_ring(ring, budget); - } -} - -/** - * hns3_lp_run_test - run loopback test - * @ndev: net device - * @mode: loopback type - */ -static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode) -{ - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct sk_buff *skb; - u32 i, good_cnt; - int ret_val = 0; - - skb = alloc_skb(HNS3_NIC_LB_TEST_PACKET_SIZE + ETH_HLEN + NET_IP_ALIGN, - GFP_KERNEL); - if (!skb) - return HNS3_NIC_LB_TEST_NO_MEM_ERR; - - skb->dev = ndev; - hns3_lp_setup_skb(skb); - skb->queue_mapping = HNS3_NIC_LB_TEST_RING_ID; - - good_cnt = 0; - for (i = 0; i < HNS3_NIC_LB_TEST_PKT_NUM; i++) { - netdev_tx_t tx_ret; - - skb_get(skb); - tx_ret = hns3_nic_net_xmit(skb, ndev); - if (tx_ret == NETDEV_TX_OK) - good_cnt++; - else - netdev_err(ndev, "hns3_lb_run_test xmit failed: %d\n", - tx_ret); - } - if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) { - ret_val = HNS3_NIC_LB_TEST_TX_CNT_ERR; - netdev_err(ndev, "mode %d sent fail, cnt=0x%x, budget=0x%x\n", - mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM); - goto out; - } - - /* Allow 200 milliseconds for packets to go from Tx to Rx */ - msleep(200); - - good_cnt = hns3_lb_check_rx_ring(priv, HNS3_NIC_LB_TEST_PKT_NUM); - if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) { - ret_val = HNS3_NIC_LB_TEST_RX_CNT_ERR; - netdev_err(ndev, "mode %d recv fail, cnt=0x%x, budget=0x%x\n", - mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM); - } - -out: - hns3_lb_clear_tx_ring(priv, HNS3_NIC_LB_TEST_RING_ID, - HNS3_NIC_LB_TEST_RING_ID, - HNS3_NIC_LB_TEST_PKT_NUM); - - kfree_skb(skb); - return ret_val; -} - -/** - * hns3_nic_self_test - self test - * @ndev: net device - * @eth_test: test cmd - * @data: test result - */ -static void hns3_self_test(struct net_device *ndev, - struct ethtool_test *eth_test, u64 *data) -{ - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hnae3_handle *h = priv->ae_handle; - int st_param[HNS3_SELF_TEST_TPYE_NUM][2]; - bool if_running = netif_running(ndev); - int test_index = 0; - u32 i; - - /* Only do offline selftest, or pass by default */ - if (eth_test->flags != ETH_TEST_FL_OFFLINE) - return; - - st_param[HNAE3_MAC_INTER_LOOP_MAC][0] = HNAE3_MAC_INTER_LOOP_MAC; - st_param[HNAE3_MAC_INTER_LOOP_MAC][1] = - h->flags & HNAE3_SUPPORT_MAC_LOOPBACK; - - if (if_running) - dev_close(ndev); - - set_bit(HNS3_NIC_STATE_TESTING, &priv->state); - - for (i = 0; i < HNS3_SELF_TEST_TPYE_NUM; i++) { - enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0]; - - if (!st_param[i][1]) - continue; - - data[test_index] = hns3_lp_up(ndev, loop_type); - if (!data[test_index]) { - data[test_index] = hns3_lp_run_test(ndev, loop_type); - 
hns3_lp_down(ndev); - } - - if (data[test_index]) - eth_test->flags |= ETH_TEST_FL_FAILED; - - test_index++; - } - - clear_bit(HNS3_NIC_STATE_TESTING, &priv->state); - - if (if_running) - dev_open(ndev); -} - -static void hns3_driv_to_eth_caps(u32 caps, struct ethtool_link_ksettings *cmd, - bool is_advertised) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(hns3_lm_map); i++) { - if (!(caps & hns3_lm_map[i].hns3_link_mode)) - continue; - - if (is_advertised) - __set_bit(hns3_lm_map[i].ethtool_link_mode, - cmd->link_modes.advertising); - else - __set_bit(hns3_lm_map[i].ethtool_link_mode, - cmd->link_modes.supported); - } -} - -static int hns3_get_sset_count(struct net_device *netdev, int stringset) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - const struct hnae3_ae_ops *ops = h->ae_algo->ops; - - if (!ops->get_sset_count) - return -EOPNOTSUPP; - - switch (stringset) { - case ETH_SS_STATS: - return ((HNS3_TQP_STATS_COUNT * h->kinfo.num_tqps) + - ops->get_sset_count(h, stringset)); - - case ETH_SS_TEST: - return ops->get_sset_count(h, stringset); - } - - return 0; -} - -static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, - u32 stat_count, u32 num_tqps) -{ -#define MAX_PREFIX_SIZE (8 + 4) - u32 size_left; - u32 i, j; - u32 n1; - - for (i = 0; i < num_tqps; i++) { - for (j = 0; j < stat_count; j++) { - data[ETH_GSTRING_LEN - 1] = '\0'; - - /* first, prepend the prefix string */ - n1 = snprintf(data, MAX_PREFIX_SIZE, "rcb_q%d_", i); - n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1); - size_left = (ETH_GSTRING_LEN - 1) - n1; - - /* now, concatenate the stats string to it */ - strncat(data, stats[j].stats_string, size_left); - data += ETH_GSTRING_LEN; - } - } - - return data; -} - -static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data) -{ - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - - /* get strings for Tx */ - data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT, - kinfo->num_tqps); - - /* get strings for Rx */ - data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT, - kinfo->num_tqps); - - return data; -} - -static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - const struct hnae3_ae_ops *ops = h->ae_algo->ops; - char *buff = (char *)data; - - if (!ops->get_strings) - return; - - switch (stringset) { - case ETH_SS_STATS: - buff = hns3_get_strings_tqps(h, buff); - h->ae_algo->ops->get_strings(h, stringset, (u8 *)buff); - break; - case ETH_SS_TEST: - ops->get_strings(h, stringset, data); - break; - } -} - -static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) -{ - struct hns3_nic_priv *nic_priv = (struct hns3_nic_priv *)handle->priv; - struct hnae3_knic_private_info *kinfo = &handle->kinfo; - struct hns3_enet_ring *ring; - u8 *stat; - u32 i; - - /* get stats for Tx */ - for (i = 0; i < kinfo->num_tqps; i++) { - ring = nic_priv->ring_data[i].ring; - for (i = 0; i < HNS3_TXQ_STATS_COUNT; i++) { - stat = (u8 *)ring + hns3_txq_stats[i].stats_offset; - *data++ = *(u64 *)stat; - } - } - - /* get stats for Rx */ - for (i = 0; i < kinfo->num_tqps; i++) { - ring = nic_priv->ring_data[i + kinfo->num_tqps].ring; - for (i = 0; i < HNS3_RXQ_STATS_COUNT; i++) { - stat = (u8 *)ring + hns3_rxq_stats[i].stats_offset; - *data++ = *(u64 *)stat; - } - } - - return data; -} - -/* hns3_get_stats - get detail statistics. - * @netdev: net device - * @stats: statistics info. - * @data: statistics data. 
- */ -static void hns3_get_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - u64 *p = data; - - if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) { - netdev_err(netdev, "could not get any statistics\n"); - return; - } - - h->ae_algo->ops->update_stats(h, &netdev->stats); - - /* get per-queue stats */ - p = hns3_get_stats_tqps(h, p); - - /* get MAC & other misc hardware stats */ - h->ae_algo->ops->get_stats(h, p); -} - -static void hns3_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; - - strncpy(drvinfo->version, hns3_driver_version, - sizeof(drvinfo->version)); - drvinfo->version[sizeof(drvinfo->version) - 1] = '\0'; - - strncpy(drvinfo->driver, h->pdev->driver->name, - sizeof(drvinfo->driver)); - drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0'; - - strncpy(drvinfo->bus_info, pci_name(h->pdev), - sizeof(drvinfo->bus_info)); - drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0'; - - snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", - priv->ae_handle->ae_algo->ops->get_fw_version(h)); -} - -static u32 hns3_get_link(struct net_device *netdev) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_status) - return h->ae_algo->ops->get_status(h); - else - return 0; -} - -static void hns3_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *param) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = priv->ae_handle; - int queue_num = h->kinfo.num_tqps; - - param->tx_max_pending = HNS3_RING_MAX_PENDING; - param->rx_max_pending = HNS3_RING_MAX_PENDING; - - param->tx_pending = priv->ring_data[0].ring->desc_num; - param->rx_pending = priv->ring_data[queue_num].ring->desc_num; -} - -static void hns3_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *param) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_pauseparam) - h->ae_algo->ops->get_pauseparam(h, ¶m->autoneg, - ¶m->rx_pause, ¶m->tx_pause); -} - -static int hns3_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *cmd) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - u32 supported_caps; - u32 advertised_caps; - u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN; - u8 link_stat; - - if (!h->ae_algo || !h->ae_algo->ops) - return -EOPNOTSUPP; - - /* 1.auto_neg & speed & duplex from cmd */ - if (netdev->phydev) - phy_ethtool_ksettings_get(netdev->phydev, cmd); - else if (h->ae_algo->ops->get_ksettings_an_result) - h->ae_algo->ops->get_ksettings_an_result(h, - &cmd->base.autoneg, - &cmd->base.speed, - &cmd->base.duplex); - else - return -EOPNOTSUPP; - - link_stat = hns3_get_link(netdev); - if (!link_stat) { - cmd->base.speed = SPEED_UNKNOWN; - cmd->base.duplex = DUPLEX_UNKNOWN; - } - - /* 2.media_type get from bios parameter block */ - if (h->ae_algo->ops->get_media_type) { - h->ae_algo->ops->get_media_type(h, &media_type); - - switch (media_type) { - case HNAE3_MEDIA_TYPE_FIBER: - cmd->base.port = PORT_FIBRE; - supported_caps = HNS3_LM_FIBRE_BIT | - HNS3_LM_AUTONEG_BIT | - HNS3_LM_PAUSE_BIT | - HNS3_LM_1000BASET_FULL_BIT; - - advertised_caps = supported_caps; - break; - case HNAE3_MEDIA_TYPE_COPPER: - cmd->base.port = PORT_TP; - supported_caps = HNS3_LM_TP_BIT | - HNS3_LM_AUTONEG_BIT | - 
HNS3_LM_PAUSE_BIT | - HNS3_LM_1000BASET_FULL_BIT | - HNS3_LM_100BASET_FULL_BIT | - HNS3_LM_100BASET_HALF_BIT | - HNS3_LM_10BASET_FULL_BIT | - HNS3_LM_10BASET_HALF_BIT; - advertised_caps = supported_caps; - break; - case HNAE3_MEDIA_TYPE_BACKPLANE: - cmd->base.port = PORT_NONE; - supported_caps = HNS3_LM_BACKPLANE_BIT | - HNS3_LM_PAUSE_BIT | - HNS3_LM_AUTONEG_BIT | - HNS3_LM_1000BASET_FULL_BIT | - HNS3_LM_100BASET_FULL_BIT | - HNS3_LM_100BASET_HALF_BIT | - HNS3_LM_10BASET_FULL_BIT | - HNS3_LM_10BASET_HALF_BIT; - - advertised_caps = supported_caps; - break; - case HNAE3_MEDIA_TYPE_UNKNOWN: - default: - cmd->base.port = PORT_OTHER; - supported_caps = 0; - advertised_caps = 0; - break; - } - - if (!cmd->base.autoneg) - advertised_caps &= ~HNS3_LM_AUTONEG_BIT; - - /* now, map driver link modes to ethtool link modes */ - hns3_driv_to_eth_caps(supported_caps, cmd, false); - hns3_driv_to_eth_caps(advertised_caps, cmd, true); - } - - /* 3.mdix_ctrl&mdix get from phy reg */ - if (h->ae_algo->ops->get_mdix_mode) - h->ae_algo->ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl, - &cmd->base.eth_tp_mdix); - /* 4.mdio_support */ - cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; - - return 0; -} - -static int hns3_set_link_ksettings(struct net_device *netdev, - const struct ethtool_link_ksettings *cmd) -{ - /* Only support ksettings_set for netdev with phy attached for now */ - if (netdev->phydev) - return phy_ethtool_ksettings_set(netdev->phydev, cmd); - - return -EOPNOTSUPP; -} - -static u32 hns3_get_rss_key_size(struct net_device *netdev) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (!h->ae_algo || !h->ae_algo->ops || - !h->ae_algo->ops->get_rss_key_size) - return -EOPNOTSUPP; - - return h->ae_algo->ops->get_rss_key_size(h); -} - -static u32 hns3_get_rss_indir_size(struct net_device *netdev) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (!h->ae_algo || !h->ae_algo->ops || - !h->ae_algo->ops->get_rss_indir_size) - return -EOPNOTSUPP; - - return h->ae_algo->ops->get_rss_indir_size(h); -} - -static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss) - return -EOPNOTSUPP; - - return h->ae_algo->ops->get_rss(h, indir, key, hfunc); -} - -static int hns3_set_rss(struct net_device *netdev, const u32 *indir, - const u8 *key, const u8 hfunc) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss) - return -EOPNOTSUPP; - - /* currently we only support Toeplitz hash */ - if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) { - netdev_err(netdev, - "hash func not supported (only Toeplitz hash)\n"); - return -EOPNOTSUPP; - } - if (!indir) { - netdev_err(netdev, - "set rss failed for indir is empty\n"); - return -EOPNOTSUPP; - } - - return h->ae_algo->ops->set_rss(h, indir, key, hfunc); -} - -static int hns3_get_rxnfc(struct net_device *netdev, - struct ethtool_rxnfc *cmd, - u32 *rule_locs) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss_tuple) - return -EOPNOTSUPP; - - switch (cmd->cmd) { - case ETHTOOL_GRXRINGS: - cmd->data = h->kinfo.num_tc * h->kinfo.rss_size; - break; - case ETHTOOL_GRXFH: - return h->ae_algo->ops->get_rss_tuple(h, cmd); - default: - return -EOPNOTSUPP; - } - - return 0; -} - -static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, - u32 
new_desc_num) -{ - struct hnae3_handle *h = priv->ae_handle; - int i; - - h->kinfo.num_desc = new_desc_num; - - for (i = 0; i < h->kinfo.num_tqps * 2; i++) - priv->ring_data[i].ring->desc_num = new_desc_num; - - return hns3_init_all_ring(priv); -} - -static int hns3_set_ringparam(struct net_device *ndev, - struct ethtool_ringparam *param) -{ - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hnae3_handle *h = priv->ae_handle; - bool if_running = netif_running(ndev); - u32 old_desc_num, new_desc_num; - int ret; - - if (param->rx_mini_pending || param->rx_jumbo_pending) - return -EINVAL; - - if (param->tx_pending != param->rx_pending) { - netdev_err(ndev, - "Descriptors of tx and rx must be equal"); - return -EINVAL; - } - - if (param->tx_pending > HNS3_RING_MAX_PENDING || - param->tx_pending < HNS3_RING_MIN_PENDING) { - netdev_err(ndev, - "Descriptors requested (Tx/Rx: %d) out of range [%d-%d]\n", - param->tx_pending, HNS3_RING_MIN_PENDING, - HNS3_RING_MAX_PENDING); - return -EINVAL; - } - - new_desc_num = param->tx_pending; - - /* Hardware requires that its descriptors must be multiple of eight */ - new_desc_num = ALIGN(new_desc_num, HNS3_RING_BD_MULTIPLE); - old_desc_num = h->kinfo.num_desc; - if (old_desc_num == new_desc_num) - return 0; - - netdev_info(ndev, - "Changing descriptor count from %d to %d.\n", - old_desc_num, new_desc_num); - - if (if_running) - dev_close(ndev); - - ret = hns3_uninit_all_ring(priv); - if (ret) - return ret; - - ret = hns3_change_all_ring_bd_num(priv, new_desc_num); - if (ret) { - ret = hns3_change_all_ring_bd_num(priv, old_desc_num); - if (ret) { - netdev_err(ndev, - "Revert to old bd num fail, ret=%d.\n", ret); - return ret; - } - } - - if (if_running) - ret = dev_open(ndev); - - return ret; -} - -static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) -{ - struct hnae3_handle *h = hns3_get_handle(netdev); - - if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss_tuple) - return -EOPNOTSUPP; - - switch (cmd->cmd) { - case ETHTOOL_SRXFH: - return h->ae_algo->ops->set_rss_tuple(h, cmd); - default: - return -EOPNOTSUPP; - } -} - -static int hns3_nway_reset(struct net_device *netdev) -{ - struct phy_device *phy = netdev->phydev; - - if (!netif_running(netdev)) - return 0; - - /* Only support nway_reset for netdev with phy attached for now */ - if (!phy) - return -EOPNOTSUPP; - - if (phy->autoneg != AUTONEG_ENABLE) - return -EINVAL; - - return genphy_restart_aneg(phy); -} - -static const struct ethtool_ops hns3_ethtool_ops = { - .self_test = hns3_self_test, - .get_drvinfo = hns3_get_drvinfo, - .get_link = hns3_get_link, - .get_ringparam = hns3_get_ringparam, - .set_ringparam = hns3_set_ringparam, - .get_pauseparam = hns3_get_pauseparam, - .get_strings = hns3_get_strings, - .get_ethtool_stats = hns3_get_stats, - .get_sset_count = hns3_get_sset_count, - .get_rxnfc = hns3_get_rxnfc, - .set_rxnfc = hns3_set_rxnfc, - .get_rxfh_key_size = hns3_get_rss_key_size, - .get_rxfh_indir_size = hns3_get_rss_indir_size, - .get_rxfh = hns3_get_rss, - .set_rxfh = hns3_set_rss, - .get_link_ksettings = hns3_get_link_ksettings, - .set_link_ksettings = hns3_set_link_ksettings, - .nway_reset = hns3_nway_reset, -}; - -void hns3_ethtool_set_ops(struct net_device *netdev) -{ - netdev->ethtool_ops = &hns3_ethtool_ops; -} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index f5a2a69f8249..31866057bc7d 100644 --- 
a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -310,6 +310,7 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) nic->ae_algo = &ae_algovf; nic->pdev = hdev->pdev; nic->numa_node_mask = hdev->numa_node_mask; + nic->flags |= HNAE3_SUPPORT_VF; if (hdev->ae_dev->dev_type != HNAE3_DEV_KNIC) { dev_err(&hdev->pdev->dev, "unsupported device type %d\n", -- cgit v1.2.3
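For reference, the HNAE3_SUPPORT_VF flag set by hclgevf_set_handle_info() in the
hunk above is what lets the now-shared ENET/ethtool code tell a VF handle from a
PF handle. A minimal sketch of such a check, assuming a populated struct
hnae3_handle *h (hns3_is_vf() is a hypothetical helper, not part of this patch):

	static bool hns3_is_vf(struct hnae3_handle *h)
	{
		/* flag is set only on handles registered by the VF driver */
		return h->flags & HNAE3_SUPPORT_VF;
	}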