author    Jason Wang <jasowang@redhat.com>    2018-02-07 17:14:46 +0800
committer David S. Miller <davem@davemloft.net>    2018-02-08 14:10:30 -0500
commit    762c330d670e3d4b795cf7a8d761866fdd1eef49 (patch)
tree      249dbef044fc3ecf0b3f2d449e63080e9f802359 /drivers
parent    cb9f7a9a5c96a773bbc9c70660dc600cfff82f82 (diff)
tuntap: add missing xdp flush
When using devmap to redirect packets between interfaces, xdp_do_flush() is usually required to flush any batched packets. Unfortunately, this is missing from the current tuntap implementation. Unlike most hardware drivers, which run XDP inside the NAPI loop and call xdp_do_flush() at the end of each poll round, TAP runs it in process context, e.g. in tun_get_user(). Fix this by counting the pending redirected packets and flushing when the count reaches NAPI_POLL_WEIGHT or MSG_MORE was cleared by the sendmsg() caller. With this fix, xdp_redirect_map works again between two TAPs.

Fixes: 761876c857cb ("tap: XDP support")
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
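The flush policy this patch adds can be modelled outside the kernel. The following is an illustrative, runnable userspace sketch, not kernel code: flush() stands in for xdp_do_flush_map(), the counter increment stands in for an XDP_REDIRECT verdict in tun_build_skb(), and the msg_more argument mirrors the sendmsg() path in the diff below.

/* Runnable userspace model of the flush policy this patch adds.
 * Redirected packets are only queued; a flush pushes the whole
 * batch out (xdp_do_flush_map() in the kernel).
 */
#include <stdbool.h>
#include <stdio.h>

#define NAPI_POLL_WEIGHT 64	/* kernel's default per-poll budget */

static int xdp_pending_pkts;

static void flush(void)
{
	printf("flushing %d batched packets\n", xdp_pending_pkts);
	xdp_pending_pkts = 0;
}

/* Mirrors tun_sendmsg(): flush once a NAPI budget's worth of packets
 * is pending, or when the caller cleared MSG_MORE (end of batch). */
static void maybe_flush(bool msg_more)
{
	if (xdp_pending_pkts >= NAPI_POLL_WEIGHT || !msg_more)
		flush();
}

int main(void)
{
	for (int i = 0; i < 100; i++) {
		xdp_pending_pkts++;	/* one XDP_REDIRECT verdict */
		maybe_flush(i < 99);	/* MSG_MORE on all but the last */
	}
	return 0;
}

Run as written, this flushes a batch of 64 when the counter reaches NAPI_POLL_WEIGHT, then flushes the remaining 36 when MSG_MORE is cleared on the final call.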
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/tun.c  15
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 0dc66e4fbb2c..17e496b88f81 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -181,6 +181,7 @@ struct tun_file {
 	struct tun_struct *detached;
 	struct ptr_ring tx_ring;
 	struct xdp_rxq_info xdp_rxq;
+	int xdp_pending_pkts;
 };
 
 struct tun_flow_entry {
@@ -1665,6 +1666,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 	case XDP_REDIRECT:
 		get_page(alloc_frag->page);
 		alloc_frag->offset += buflen;
+		++tfile->xdp_pending_pkts;
 		err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
 		if (err)
 			goto err_redirect;
@@ -1986,6 +1988,11 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	result = tun_get_user(tun, tfile, NULL, from,
 			      file->f_flags & O_NONBLOCK, false);
 
+	if (tfile->xdp_pending_pkts) {
+		tfile->xdp_pending_pkts = 0;
+		xdp_do_flush_map();
+	}
+
 	tun_put(tun);
 	return result;
 }
@@ -2322,6 +2329,13 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 	ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
 			   m->msg_flags & MSG_DONTWAIT,
 			   m->msg_flags & MSG_MORE);
+
+	if (tfile->xdp_pending_pkts >= NAPI_POLL_WEIGHT ||
+	    !(m->msg_flags & MSG_MORE)) {
+		tfile->xdp_pending_pkts = 0;
+		xdp_do_flush_map();
+	}
+
 	tun_put(tun);
 	return ret;
 }
@@ -3153,6 +3167,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
 	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
 	memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
 
+	tfile->xdp_pending_pkts = 0;
 	return 0;
 }
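Note the asymmetry between the two call sites: tun_chr_write_iter(), reached by write(2) on the tun fd, flushes unconditionally whenever packets are pending, while tun_sendmsg(), used by in-kernel callers such as vhost-net, may keep batching while MSG_MORE is set. A minimal userspace writer exercising the write(2) path might look like the sketch below; this is illustrative only ("tap0" is assumed to exist or be created by TUNSETIFF, the frame contents are left to the caller, and error handling is trimmed).

/* Minimal userspace writer for the tun_chr_write_iter() path: after
 * this patch, any packets an attached XDP program redirected during
 * the write are flushed before write() returns.
 */
#include <fcntl.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int open_tap(const char *name)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;	/* raw Ethernet frames */
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

int main(void)
{
	unsigned char frame[64] = { 0 };	/* placeholder frame */
	int fd = open_tap("tap0");

	if (fd < 0)
		return 1;
	/* Each write() runs tun_get_user() and, with this fix, flushes
	 * any batched XDP redirects before returning. */
	if (write(fd, frame, sizeof(frame)) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}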