author		Haiyang Zhang <haiyangz@microsoft.com>	2020-01-23 13:52:34 -0800
committer	David S. Miller <davem@davemloft.net>	2020-01-25 10:43:19 +0100
commit		351e1581395fcc7fb952bbd7dda01238f69968fd (patch)
tree		603affba2df25970a5036668b79c1717212e7dba /drivers/net/hyperv/netvsc.c
parent		6ec8b6cd79a4360e375da99d848d63f8d4fb08b3 (diff)
download	linux-351e1581395fcc7fb952bbd7dda01238f69968fd.tar.bz2
hv_netvsc: Add XDP support
This patch adds support for XDP in native mode to the hv_netvsc driver,
and transparently sets the XDP program on the associated VF NIC as well.

Setting or unsetting the XDP program on the synthetic NIC (netvsc)
propagates to the VF NIC automatically. Setting or unsetting the XDP
program directly on the VF NIC is not recommended: it is not propagated
to the synthetic NIC, and it may be overwritten the next time a program
is set on the synthetic NIC.
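The propagation can be pictured as an ndo_bpf-style handler that applies
the program to netvsc first and then mirrors it onto the VF. The following
is a minimal sketch of that behavior, not the patch's code; the helpers
netvsc_xdp_set() and netvsc_vf_setxdp() are hypothetical stand-ins:

/* Minimal sketch of the propagation described above. The two helper
 * calls are hypothetical stand-ins, not functions from this patch.
 */
static int netvsc_bpf_sketch(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
	int ret;

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		/* Attach (or detach, when bpf->prog is NULL) on netvsc. */
		ret = netvsc_xdp_set(dev, bpf->prog, bpf->extack);
		if (ret)
			return ret;

		/* Mirror the same program onto the bound VF, if any. */
		if (vf_netdev)
			ret = netvsc_vf_setxdp(vf_netdev, bpf->prog);
		return ret;

	default:
		return -EINVAL;
	}
}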
The Azure/Hyper-V synthetic NIC receive buffer doesn't provide headroom
for XDP. We considered reusing the RNDIS header space, but it is too
small, so we decided to copy the packets to a page buffer for XDP. Also,
most of our VMs on Azure have Accelerated Networking (SR-IOV) enabled,
so most packets travel over the VF NIC and the synthetic NIC serves only
as a fallback data path. The data copy on netvsc therefore won't impact
performance significantly.
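Concretely, the receive path has to fabricate the headroom that the
receive buffer lacks. Below is a minimal sketch of the copy-then-run
pattern, assuming a helper of this shape; it illustrates the approach
described above rather than quoting the driver, and the freeing or
recycling of the page per verdict is elided:

/* Sketch: copy the received frame into a fresh page so the XDP program
 * sees XDP_PACKET_HEADROOM in front of the data. Illustrative only;
 * the caller is assumed to free or recycle the page per the verdict.
 */
static u32 run_xdp_on_copy(struct bpf_prog *prog, struct xdp_rxq_info *rxq,
			   const void *data, u32 len)
{
	struct xdp_buff xdp = {};
	struct page *page;
	void *buf;

	page = alloc_page(GFP_ATOMIC);
	if (!page)
		return XDP_DROP;

	buf = page_address(page);
	memcpy(buf + XDP_PACKET_HEADROOM, data, len);

	xdp.data_hard_start = buf;
	xdp.data = buf + XDP_PACKET_HEADROOM;
	xdp.data_end = xdp.data + len;
	xdp.data_meta = xdp.data;	/* no metadata */
	xdp.rxq = rxq;

	return bpf_prog_run_xdp(prog, &xdp);
}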
An XDP program cannot run with LRO (RSC) enabled, so you need to disable
LRO before running XDP:
ethtool -K eth0 lro off
XDP actions not yet supported:
XDP_REDIRECT
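Without XDP_REDIRECT, the verdict handling reduces to the usual
three-way pattern. A hedged sketch of that common driver idiom (not a
quote of this patch):

/* Sketch of verdict handling in a driver without XDP_REDIRECT support;
 * unknown or unsupported actions are warned about and treated as drops.
 */
static u32 handle_xdp_act(struct net_device *ndev, struct bpf_prog *prog,
			  u32 act)
{
	switch (act) {
	case XDP_PASS:	/* deliver the frame to the regular stack */
	case XDP_TX:	/* transmit the frame back out of this NIC */
	case XDP_DROP:
		break;

	default:
		bpf_warn_invalid_xdp_action(act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(ndev, prog, act);
		break;
	}

	return act;
}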
Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/hyperv/netvsc.c')
-rw-r--r--	drivers/net/hyperv/netvsc.c	31
1 file changed, 26 insertions, 5 deletions
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index eab83e71567a..ae3f3084c2ed 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -122,8 +122,10 @@ static void free_netvsc_device(struct rcu_head *head)
 	vfree(nvdev->send_buf);
 	kfree(nvdev->send_section_map);
 
-	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
+	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
+		xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
 		vfree(nvdev->chan_table[i].mrc.slots);
+	}
 
 	kfree(nvdev);
 }
@@ -900,7 +902,8 @@ int netvsc_send(struct net_device *ndev,
 		struct hv_netvsc_packet *packet,
 		struct rndis_message *rndis_msg,
 		struct hv_page_buffer *pb,
-		struct sk_buff *skb)
+		struct sk_buff *skb,
+		bool xdp_tx)
 {
 	struct net_device_context *ndev_ctx = netdev_priv(ndev);
 	struct netvsc_device *net_device
@@ -923,10 +926,11 @@ int netvsc_send(struct net_device *ndev,
 	packet->send_buf_index = NETVSC_INVALID_INDEX;
 	packet->cp_partial = false;
 
-	/* Send control message directly without accessing msd (Multi-Send
-	 * Data) field which may be changed during data packet processing.
+	/* Send a control message or XDP packet directly without accessing
+	 * msd (Multi-Send Data) field which may be changed during data packet
+	 * processing.
 	 */
-	if (!skb)
+	if (!skb || xdp_tx)
 		return netvsc_send_pkt(device, packet, net_device, pb, skb);
 
 	/* batch packets in send buffer if possible */
@@ -1392,6 +1396,21 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
 		nvchan->net_device = net_device;
 		u64_stats_init(&nvchan->tx_stats.syncp);
 		u64_stats_init(&nvchan->rx_stats.syncp);
+
+		ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i);
+
+		if (ret) {
+			netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
+			goto cleanup2;
+		}
+
+		ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
+						 MEM_TYPE_PAGE_SHARED, NULL);
+
+		if (ret) {
+			netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
+			goto cleanup2;
+		}
 	}
 
 	/* Enable NAPI handler before init callbacks */
@@ -1437,6 +1456,8 @@ close:
 
 cleanup:
 	netif_napi_del(&net_device->chan_table[0].napi);
+
+cleanup2:
 	free_netvsc_device(&net_device->rcu);
 
 	return ERR_PTR(ret);
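To exercise the new path end to end, disable LRO as shown above and
attach a program in native (driver) mode from userspace. A minimal
sketch against the libbpf API of this kernel's era; the object file name
xdp_prog.o and the interface name eth0 are assumptions:

/* Hedged userspace sketch: load an XDP object and attach it to eth0 in
 * native mode. Assumed build: gcc -o attach attach.c -lbpf
 */
#include <bpf/libbpf.h>
#include <linux/if_link.h>	/* XDP_FLAGS_DRV_MODE */
#include <net/if.h>
#include <stdio.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int ifindex = if_nametoindex("eth0");

	if (!ifindex)
		return 1;

	obj = bpf_object__open_file("xdp_prog.o", NULL);
	if (libbpf_get_error(obj) || bpf_object__load(obj)) {
		fprintf(stderr, "failed to open/load xdp_prog.o\n");
		return 1;
	}

	prog = bpf_program__next(NULL, obj);	/* first program in object */
	if (!prog)
		return 1;

	/* XDP_FLAGS_DRV_MODE requests a native-mode attach; per the commit
	 * message, netvsc then mirrors the program onto the VF NIC.
	 */
	if (bpf_set_link_xdp_fd(ifindex, bpf_program__fd(prog),
				XDP_FLAGS_DRV_MODE) < 0) {
		fprintf(stderr, "attach failed\n");
		return 1;
	}

	return 0;
}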