author | Björn Töpel <bjorn.topel@intel.com> | 2020-11-30 19:52:03 +0100 |
---|---|---|
committer | Daniel Borkmann <daniel@iogearbox.net> | 2020-12-01 00:09:25 +0100 |
commit | 284cbc61f851bf86326b28acfe6d161691d4a4ed (patch) | |
tree | c44687179178a42a5ebaead86c51d41f8489334c /samples | |
parent | f2d2728220ac6482c69c5f018ec09bafd688e7d1 (diff) | |
download | linux-284cbc61f851bf86326b28acfe6d161691d4a4ed.tar.bz2 |
samples/bpf: Use recvfrom() in xdpsock/l2fwd
Start using recvfrom() in the l2fwd scenario instead of poll(), which is
more expensive and needs additional knobs for busy-polling.
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Link: https://lore.kernel.org/bpf/20201130185205.196029-9-bjorn.topel@gmail.com
Diffstat (limited to 'samples')
-rw-r--r-- | samples/bpf/xdpsock_user.c | 26 |
1 file changed, 12 insertions, 14 deletions
```diff
diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
index f90111b95b2e..a1a3d6f02ba9 100644
--- a/samples/bpf/xdpsock_user.c
+++ b/samples/bpf/xdpsock_user.c
@@ -1098,8 +1098,7 @@ static void kick_tx(struct xsk_socket_info *xsk)
 		exit_with_error(errno);
 }
 
-static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk,
-				     struct pollfd *fds)
+static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk)
 {
 	struct xsk_umem_info *umem = xsk->umem;
 	u32 idx_cq = 0, idx_fq = 0;
@@ -1134,7 +1133,8 @@ static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk,
 				exit_with_error(-ret);
 			if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
 				xsk->app_stats.fill_fail_polls++;
-				ret = poll(fds, num_socks, opt_timeout);
+				recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL,
+					 NULL);
 			}
 			ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
 		}
@@ -1331,19 +1331,19 @@ static void tx_only_all(void)
 		complete_tx_only_all();
 }
 
-static void l2fwd(struct xsk_socket_info *xsk, struct pollfd *fds)
+static void l2fwd(struct xsk_socket_info *xsk)
 {
 	unsigned int rcvd, i;
 	u32 idx_rx = 0, idx_tx = 0;
 	int ret;
 
-	complete_tx_l2fwd(xsk, fds);
+	complete_tx_l2fwd(xsk);
 
 	rcvd = xsk_ring_cons__peek(&xsk->rx, opt_batch_size, &idx_rx);
 	if (!rcvd) {
 		if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
 			xsk->app_stats.rx_empty_polls++;
-			ret = poll(fds, num_socks, opt_timeout);
+			recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
 		}
 		return;
 	}
@@ -1353,7 +1353,7 @@ static void l2fwd(struct xsk_socket_info *xsk, struct pollfd *fds)
 	while (ret != rcvd) {
 		if (ret < 0)
 			exit_with_error(-ret);
-		complete_tx_l2fwd(xsk, fds);
+		complete_tx_l2fwd(xsk);
 		if (xsk_ring_prod__needs_wakeup(&xsk->tx)) {
 			xsk->app_stats.tx_wakeup_sendtos++;
 			kick_tx(xsk);
@@ -1388,22 +1388,20 @@ static void l2fwd_all(void)
 	struct pollfd fds[MAX_SOCKS] = {};
 	int i, ret;
 
-	for (i = 0; i < num_socks; i++) {
-		fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
-		fds[i].events = POLLOUT | POLLIN;
-	}
-
 	for (;;) {
 		if (opt_poll) {
-			for (i = 0; i < num_socks; i++)
+			for (i = 0; i < num_socks; i++) {
+				fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
+				fds[i].events = POLLOUT | POLLIN;
 				xsks[i]->app_stats.opt_polls++;
+			}
 			ret = poll(fds, num_socks, opt_timeout);
 			if (ret <= 0)
 				continue;
 		}
 
 		for (i = 0; i < num_socks; i++)
-			l2fwd(xsks[i], fds);
+			l2fwd(xsks[i]);
 
 		if (benchmark_done)
 			break;
```
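To see the pattern the patch relies on in isolation: when an AF_XDP ring has the need-wakeup flag set, a zero-length, non-blocking recvfrom() on the socket is enough to kick the kernel's receive path, with no data copied and no pollfd bookkeeping. Below is a minimal sketch of that idiom, assuming libbpf's xsk API; the helper name kick_rx_if_needed() and the already-initialized xsk/fq arguments are illustrative, not part of the patch.

```c
#include <sys/socket.h>
#include <bpf/xsk.h>	/* xsk_socket__fd(), xsk_ring_prod__needs_wakeup() */

/* Hypothetical helper: wake the kernel RX path for one AF_XDP socket.
 * Assumes @xsk and its fill ring @fq were set up via the usual
 * xsk_socket__create() flow with XDP_USE_NEED_WAKEUP.
 */
static void kick_rx_if_needed(struct xsk_socket *xsk, struct xsk_ring_prod *fq)
{
	/* Only pay for a syscall when the kernel asked to be woken. */
	if (!xsk_ring_prod__needs_wakeup(fq))
		return;

	/* Zero-length, non-blocking dummy receive: transfers no data,
	 * only triggers the driver's RX processing for this socket. */
	recvfrom(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
}
```

Compared with poll(), this wakes exactly the one socket that needs it, requires no timeout tuning, and mirrors the sendto()-based TX kick already used in kick_tx(), which is what makes it a better fit for busy-polling setups.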