author	David S. Miller <davem@davemloft.net>	2017-07-17 09:48:07 -0700
committer	David S. Miller <davem@davemloft.net>	2017-07-17 09:48:07 -0700
commit	6093ec2dc313b57e6442c0d92acf137e60b42043 (patch)
tree	68690d84b82c26f35be237fa8a37f345255f51f8 /drivers/net
parent	ff65fa6cd576909e31797bd7243a0d80b76db0b6 (diff)
parent	9d6e005287ee23c7e25b04f4ad007bdbaf4fc438 (diff)
download	linux-6093ec2dc313b57e6442c0d92acf137e60b42043.tar.bz2
Merge branch 'xdp-redirect'
John Fastabend says:

====================
Implement XDP bpf_redirect

This series adds two new XDP helper routines, bpf_redirect() and
bpf_redirect_map(). The first variant, bpf_redirect(), is meant to be
used the same way it is currently used by the cls_bpf classifier: an
xdp packet is redirected immediately when the helper is called.

The other variant, bpf_redirect_map(map, key, flags), uses a new map
type called devmap. A devmap uses integers as keys and net_devices as
values. The user provides key/ifindex pairs to update the map with new
net_devices. This gives two benefits over plain bpf_redirect(). First,
the datapath bpf program is abstracted away from hard-coded ifindex
values, allowing a single bpf program to run in many different
environments. Second, and perhaps more important, the map enables
batching packet transmits. The map, plus small driver changes, allows
all send requests across a NAPI poll loop to be batched, so driver
writers can optimize the driver xmit path and call expensive operations
only once per batch of xdp_buffs. The devmap was designed to support
possible future work for multicast and broadcast as follow-up patches.

To see, in more detail, how to leverage the new helpers and map from
the userspace side, please review these two patches:

  xdp: sample program for new bpf_redirect helper
  xdp: bpf redirect with map sample program

Performance numbers provided by Jesper, tested using the ixgbe driver
with a CPU E5-1650 v4 @ 3.60GHz:

  13,939,674 pkt/s = XDP_DROP without touching memory
  14,290,650 pkt/s = xdp1: XDP_DROP with reading packet data
  13,221,812 pkt/s = xdp2: XDP_TX with swap mac (writes into pkt)
   7,596,576 pkt/s = xdp_redirect: XDP_REDIRECT with swap mac (like XDP_TX)
  13,058,435 pkt/s = xdp_redirect_map: XDP_REDIRECT with swap mac + devmap

A big thanks to everyone who helped with this series. Jesper provided
fixes, debugging, code review, and performance benchmarks. Daniel
provided lots of useful feedback and code review. And last but not
least, Andy provided useful feedback related to supporting additional
drivers, the generic xdp implementation, testing, etc.

Any other feedback is welcome, but I believe at this point these are
ready to be merged. What's left: get the rest of the driver developers
to implement this in all the drivers.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
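For a concrete sense of the new interface, here is a minimal sketch of an
XDP program using bpf_redirect_map(), modeled on the sample patches named
in the commit message. It is illustrative only: the map name tx_port, the
section names, and the single-entry sizing are assumptions, and
bpf_helpers.h refers to the helper stubs shipped under samples/bpf.

#include <linux/bpf.h>
#include "bpf_helpers.h"	/* SEC() and helper stubs from samples/bpf */

/* devmap: integer key -> ifindex of the egress net_device,
 * populated from userspace (see the loader sketch below).
 */
struct bpf_map_def SEC("maps") tx_port = {
	.type = BPF_MAP_TYPE_DEVMAP,
	.key_size = sizeof(int),
	.value_size = sizeof(int),
	.max_entries = 1,
};

SEC("xdp_redirect_map")
int xdp_prog(struct xdp_md *ctx)
{
	int key = 0;

	/* Redirect every packet to the device stored at key 0. The
	 * actual transmit is batched: the driver doorbell is rung once
	 * per NAPI poll via xdp_do_flush_map() -> ndo_xdp_flush().
	 */
	return bpf_redirect_map(&tx_port, key, 0);
}

char _license[] SEC("license") = "GPL";

On a successful redirect the driver accounts the packet like XDP_TX, as
the ixgbe_run_xdp() hunk in the diff below shows.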
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c  |  8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 63
2 files changed, 68 insertions(+), 3 deletions(-)
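The commit message notes that the user provides key/ifindex pairs to
populate the devmap. A hedged userspace sketch of that step, using the
bpf_map_update_elem() syscall wrapper from tools/lib/bpf; how map_fd is
obtained (for example from the samples/bpf loader) is an assumption left
out here.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <net/if.h>
#include <linux/bpf.h>
#include "bpf/bpf.h"	/* bpf(2) syscall wrappers from tools/lib/bpf */

/* Point devmap slot 0 at the egress device. map_fd is the fd of the
 * BPF_MAP_TYPE_DEVMAP created by the XDP program (assumed to come
 * from the loader); ifname is the egress interface, e.g. "eth1".
 */
static int set_egress_port(int map_fd, const char *ifname)
{
	int key = 0;
	int ifindex = if_nametoindex(ifname);

	if (!ifindex) {
		fprintf(stderr, "unknown interface %s: %s\n",
			ifname, strerror(errno));
		return -1;
	}
	/* BPF_ANY: create the entry or update it in place */
	return bpf_map_update_elem(map_fd, &key, &ifindex, BPF_ANY);
}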
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index b45fdc98033d..f1bfae0c41d0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1018,8 +1018,12 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
 	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
 	struct ixgbe_ring *ring;
 
-	ixgbe_for_each_ring(ring, q_vector->tx)
-		adapter->tx_ring[ring->queue_index] = NULL;
+	ixgbe_for_each_ring(ring, q_vector->tx) {
+		if (ring_is_xdp(ring))
+			adapter->xdp_ring[ring->queue_index] = NULL;
+		else
+			adapter->tx_ring[ring->queue_index] = NULL;
+	}
 
 	ixgbe_for_each_ring(ring, q_vector->rx)
 		adapter->rx_ring[ring->queue_index] = NULL;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index f1dbdf26d8e1..0f867dcda65f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2214,7 +2214,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
 				     struct ixgbe_ring *rx_ring,
 				     struct xdp_buff *xdp)
 {
-	int result = IXGBE_XDP_PASS;
+	int err, result = IXGBE_XDP_PASS;
 	struct bpf_prog *xdp_prog;
 	u32 act;
 
@@ -2231,6 +2231,13 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
 	case XDP_TX:
 		result = ixgbe_xmit_xdp_ring(adapter, xdp);
 		break;
+	case XDP_REDIRECT:
+		err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
+		if (!err)
+			result = IXGBE_XDP_TX;
+		else
+			result = IXGBE_XDP_CONSUMED;
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 		/* fallthrough */
@@ -2408,6 +2415,8 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		 */
 		wmb();
 		writel(ring->next_to_use, ring->tail);
+
+		xdp_do_flush_map();
 	}
 
 	u64_stats_update_begin(&rx_ring->syncp);
@@ -5810,6 +5819,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	usleep_range(10000, 20000);
 
+	/* synchronize_sched() needed for pending XDP buffers to drain */
+	if (adapter->xdp_ring[0])
+		synchronize_sched();
 	netif_tx_stop_all_queues(netdev);
 
 	/* call carrier off first to avoid false dev_watchdog timeouts */
 	netif_carrier_off(netdev);
@@ -9823,6 +9835,53 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_xdp *xdp)
 	}
 }
 
+static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	struct ixgbe_ring *ring;
+	int err;
+
+	if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
+		return -EINVAL;
+
+	/* During program transitions it's possible adapter->xdp_prog is
+	 * assigned but the ring has not been configured yet. In this case
+	 * simply abort xmit.
+	 */
+	ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
+	if (unlikely(!ring))
+		return -EINVAL;
+
+	err = ixgbe_xmit_xdp_ring(adapter, xdp);
+	if (err != IXGBE_XDP_TX)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void ixgbe_xdp_flush(struct net_device *dev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(dev);
+	struct ixgbe_ring *ring;
+
+	/* It's possible the device went down between xdp xmit and flush, so
+	 * we need to ensure the device is still up.
+	 */
+	if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
+		return;
+
+	ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
+	if (unlikely(!ring))
+		return;
+
+	/* Force memory writes to complete before letting h/w know there
+	 * are new descriptors to fetch.
+	 */
+	wmb();
+	writel(ring->next_to_use, ring->tail);
+
+	return;
+}
+
 static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_open		= ixgbe_open,
 	.ndo_stop		= ixgbe_close,
@@ -9869,6 +9928,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_udp_tunnel_del	= ixgbe_del_udp_tunnel_port,
 	.ndo_features_check	= ixgbe_features_check,
 	.ndo_xdp		= ixgbe_xdp,
+	.ndo_xdp_xmit		= ixgbe_xdp_xmit,
+	.ndo_xdp_flush		= ixgbe_xdp_flush,
 };
 
/**
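Taken together, the two new ndo hooks define the batching contract the
commit message describes: during a NAPI poll the devmap calls
ndo_xdp_xmit() once per redirected packet, and the xdp_do_flush_map()
added to ixgbe_clean_rx_irq() above triggers a single ndo_xdp_flush()
doorbell for the whole batch. Below is a simplified sketch of that caller
side, with the per-CPU flush bookkeeping reduced to one device as an
assumption; the real logic lives in the devmap implementation
(kernel/bpf/devmap.c), not in this driver diff.

#include <linux/filter.h>
#include <linux/netdevice.h>
#include <linux/percpu-defs.h>

/* Illustrative only: the devmap tracks every device that received
 * packets during this poll; here that is simplified to one pointer.
 */
static DEFINE_PER_CPU(struct net_device *, sketch_flush_dev);

/* Per packet: queue a descriptor on the target device's XDP ring and
 * remember that the device still needs a doorbell.
 */
static int sketch_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp)
{
	int err = dev->netdev_ops->ndo_xdp_xmit(dev, xdp);

	if (!err)
		__this_cpu_write(sketch_flush_dev, dev);
	return err;
}

/* Once per NAPI poll, from xdp_do_flush_map(): the single wmb() +
 * writel() inside ndo_xdp_flush() then covers the whole batch.
 */
static void sketch_xdp_flush(void)
{
	struct net_device *dev = __this_cpu_read(sketch_flush_dev);

	if (dev) {
		dev->netdev_ops->ndo_xdp_flush(dev);
		__this_cpu_write(sketch_flush_dev, NULL);
	}
}

This deferred-doorbell pattern is why xdp_redirect_map reaches
13,058,435 pkt/s where plain xdp_redirect stops at 7,596,576 pkt/s in
the numbers above: the expensive wmb()/writel() pair is paid once per
poll rather than once per packet.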