From 43a08e0f58b3f236165029710a4e3b303815253b Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Thu, 15 Feb 2018 14:47:15 -0800
Subject: tun: fix tun_napi_alloc_frags() frag allocator
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

While fuzzing arm64 v4.16-rc1 with Syzkaller, I've been hitting a
misaligned atomic in __skb_clone:

        atomic_inc(&(skb_shinfo(skb)->dataref));

where dataref doesn't have the required natural alignment, and the
atomic operation faults. e.g. I often see it aligned to a single-byte
boundary rather than a four-byte boundary.

AFAICT, the skb_shared_info is misaligned at the instant it's allocated
in __napi_alloc_skb().

The problem is caused by tun_napi_alloc_frags() using napi_alloc_frag()
with user-provided seg sizes, leading to other users of this API
getting unaligned page fragments.

Since we would rather not add padding or alignment to the frags that
tun_napi_alloc_frags() attaches to the skb, switch to another page frag
allocator.

As a bonus, skb_page_frag_refill() can use GFP_KERNEL allocations,
meaning that we do not deplete memory reserves as easily.

Fixes: 90e33d459407 ("tun: enable napi_gro_frags() for TUN/TAP driver")
Signed-off-by: Eric Dumazet
Reported-by: Mark Rutland
Tested-by: Mark Rutland
Signed-off-by: David S. Miller
---
 drivers/net/tun.c | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)

(limited to 'drivers/net/tun.c')

diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 81e6cc951e7f..b52258c327d2 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1489,27 +1489,23 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
         skb->truesize += skb->data_len;

         for (i = 1; i < it->nr_segs; i++) {
+                struct page_frag *pfrag = &current->task_frag;
                 size_t fragsz = it->iov[i].iov_len;
-                unsigned long offset;
-                struct page *page;
-                void *data;

                 if (fragsz == 0 || fragsz > PAGE_SIZE) {
                         err = -EINVAL;
                         goto free;
                 }

-                local_bh_disable();
-                data = napi_alloc_frag(fragsz);
-                local_bh_enable();
-                if (!data) {
+                if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) {
                         err = -ENOMEM;
                         goto free;
                 }

-                page = virt_to_head_page(data);
-                offset = data - page_address(page);
-                skb_fill_page_desc(skb, i - 1, page, offset, fragsz);
+                skb_fill_page_desc(skb, i - 1, pfrag->page,
+                                   pfrag->offset, fragsz);
+                page_ref_inc(pfrag->page);
+                pfrag->offset += fragsz;
         }

         return skb;
--
cgit v1.2.3
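For reference, the allocation pattern this patch switches to can be condensed
into the sketch below. The helper name tun_attach_user_frag() is invented for
illustration; in mainline the same calls sit inline in the loop of
tun_napi_alloc_frags() shown in the hunk above.

/*
 * Sketch only, not mainline code: attach one user-sized fragment to an
 * skb from the calling task's private page_frag instead of the shared
 * NAPI frag cache, so odd user-supplied sizes cannot leave other
 * napi_alloc_frag() callers with misaligned buffers.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/skbuff.h>

static int tun_attach_user_frag(struct sk_buff *skb, int slot, size_t fragsz)
{
        struct page_frag *pfrag = &current->task_frag;

        if (fragsz == 0 || fragsz > PAGE_SIZE)
                return -EINVAL;

        /* Process context: GFP_KERNEL may reclaim instead of dipping
         * into the atomic reserves napi_alloc_frag() would draw on.
         */
        if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL))
                return -ENOMEM;

        /* The skb takes its own reference on the fragment's page. */
        skb_fill_page_desc(skb, slot, pfrag->page, pfrag->offset, fragsz);
        page_ref_inc(pfrag->page);
        pfrag->offset += fragsz;

        return 0;
}

Because current->task_frag is private to the calling task, arbitrarily sized
user fragments can no longer skew the shared per-CPU frag cache that other
napi_alloc_frag() users rely on for alignment.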
From f249be4d2c275fe2b98e389f471af75f758e5a59 Mon Sep 17 00:00:00 2001
From: Jason Wang
Date: Sat, 24 Feb 2018 11:32:24 +0800
Subject: Revert "tuntap: add missing xdp flush"

This reverts commit 762c330d670e3d4b795cf7a8d761866fdd1eef49.

The reason is that we try to batch packets for the devmap, which ends
up calling xdp_do_flush() in process context. Simply disabling
preemption may not work since the process may move among processors,
which leads xdp_do_flush() to miss some flushes on some processors.

So simply revert the patch; a follow-up patch will add the XDP flush
correctly.

Reported-by: Christoffer Dall
Fixes: 762c330d670e ("tuntap: add missing xdp flush")
Signed-off-by: Jason Wang
Signed-off-by: David S. Miller
---
 drivers/net/tun.c | 15 ---------------
 1 file changed, 15 deletions(-)

(limited to 'drivers/net/tun.c')

diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b52258c327d2..2823a4a6f059 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -181,7 +181,6 @@ struct tun_file {
         struct tun_struct *detached;
         struct ptr_ring tx_ring;
         struct xdp_rxq_info xdp_rxq;
-        int xdp_pending_pkts;
 };

 struct tun_flow_entry {
@@ -1662,7 +1661,6 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
         case XDP_REDIRECT:
                 get_page(alloc_frag->page);
                 alloc_frag->offset += buflen;
-                ++tfile->xdp_pending_pkts;
                 err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
                 if (err)
                         goto err_redirect;
@@ -1984,11 +1982,6 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
         result = tun_get_user(tun, tfile, NULL, from,
                               file->f_flags & O_NONBLOCK, false);

-        if (tfile->xdp_pending_pkts) {
-                tfile->xdp_pending_pkts = 0;
-                xdp_do_flush_map();
-        }
-
         tun_put(tun);
         return result;
 }
@@ -2325,13 +2318,6 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
         ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
                            m->msg_flags & MSG_DONTWAIT,
                            m->msg_flags & MSG_MORE);
-
-        if (tfile->xdp_pending_pkts >= NAPI_POLL_WEIGHT ||
-            !(m->msg_flags & MSG_MORE)) {
-                tfile->xdp_pending_pkts = 0;
-                xdp_do_flush_map();
-        }
-
         tun_put(tun);
         return ret;
 }
@@ -3163,7 +3149,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)
         sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);

         memset(&tfile->tx_ring, 0, sizeof(tfile->tx_ring));
-        tfile->xdp_pending_pkts = 0;

         return 0;
 }
--
cgit v1.2.3

From 23e43f07f896f8578318cfcc9466f1e8b8ab21b6 Mon Sep 17 00:00:00 2001
From: Jason Wang
Date: Sat, 24 Feb 2018 11:32:25 +0800
Subject: tuntap: disable preemption during XDP processing

Except for tuntap, all other drivers implement XDP in the NAPI poll()
routine in a bh. This guarantees that all XDP operations are done on
the same CPU, which is required by e.g. BPF_MAP_TYPE_PERCPU_ARRAY. But
tuntap does it in process context and only protects XDP processing
with the RCU reader lock. This is insufficient since
CONFIG_PREEMPT_RCU can preempt the RCU read-side critical section,
which breaks the assumption that all XDP is processed on the same CPU.

Fix this by simply disabling preemption during XDP processing.

Fixes: 761876c857cb ("tap: XDP support")
Signed-off-by: Jason Wang
Signed-off-by: David S. Miller
---
 drivers/net/tun.c | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'drivers/net/tun.c')

diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 2823a4a6f059..63d39fe67b99 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1642,6 +1642,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
         else
                 *skb_xdp = 0;

+        preempt_disable();
         rcu_read_lock();
         xdp_prog = rcu_dereference(tun->xdp_prog);
         if (xdp_prog && !*skb_xdp) {
@@ -1665,6 +1666,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
                 if (err)
                         goto err_redirect;
                 rcu_read_unlock();
+                preempt_enable();
                 return NULL;
         case XDP_TX:
                 xdp_xmit = true;
@@ -1686,6 +1688,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
         skb = build_skb(buf, buflen);
         if (!skb) {
                 rcu_read_unlock();
+                preempt_enable();
                 return ERR_PTR(-ENOMEM);
         }

@@ -1698,10 +1701,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
                 skb->dev = tun->dev;
                 generic_xdp_tx(skb, xdp_prog);
                 rcu_read_unlock();
+                preempt_enable();
                 return NULL;
         }

         rcu_read_unlock();
+        preempt_enable();

         return skb;

@@ -1709,6 +1714,7 @@ err_redirect:
         put_page(alloc_frag->page);
 err_xdp:
         rcu_read_unlock();
+        preempt_enable();
         this_cpu_inc(tun->pcpu_stats->rx_dropped);
         return NULL;
 }
--
cgit v1.2.3
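The locking shape this patch gives the fast path can be sketched as below.
The helper name tun_run_xdp_pinned() is invented and the body is heavily
simplified (no XDP_TX/XDP_REDIRECT handling, no page accounting), and the
sketch assumes it sits in drivers/net/tun.c where struct tun_struct is
visible. The point is only that the whole XDP run is bracketed by a
preempt_disable()/preempt_enable() pair, so per-CPU constructs such as
BPF_MAP_TYPE_PERCPU_ARRAY always see the same CPU, even when
CONFIG_PREEMPT_RCU would otherwise preempt the read-side critical section.

/* Sketch only, not the mainline function. */
#include <linux/filter.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>

static u32 tun_run_xdp_pinned(struct tun_struct *tun, struct xdp_buff *xdp)
{
        struct bpf_prog *xdp_prog;
        u32 act = XDP_PASS;

        preempt_disable();
        rcu_read_lock();

        xdp_prog = rcu_dereference(tun->xdp_prog);
        if (xdp_prog)
                act = bpf_prog_run_xdp(xdp_prog, xdp);

        rcu_read_unlock();
        preempt_enable();

        return act;
}

Any xdp_do_redirect() issued from such a region must also be flushed before
preempt_enable(); restoring that flush is what the next patch in the series
does.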
From 1bb4f2e868a2891ab8bc668b8173d6ccb8c4ce6f Mon Sep 17 00:00:00 2001
From: Jason Wang
Date: Sat, 24 Feb 2018 11:32:26 +0800
Subject: tuntap: correctly add the missing XDP flush

We don't flush batched XDP packets through xdp_do_flush_map(), which
causes packets to stall in the TX queue. Since we don't do XDP in NAPI
poll(), the only possible fix is to call xdp_do_flush_map() immediately
after xdp_do_redirect(). Note that this in fact does not batch packets
through the devmap; we could address that in the future.

Reported-by: Christoffer Dall
Fixes: 761876c857cb ("tap: XDP support")
Signed-off-by: Jason Wang
Signed-off-by: David S. Miller
---
 drivers/net/tun.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers/net/tun.c')

diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 63d39fe67b99..7433bb2e4451 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1663,6 +1663,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
                 get_page(alloc_frag->page);
                 alloc_frag->offset += buflen;
                 err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
+                xdp_do_flush_map();
                 if (err)
                         goto err_redirect;
                 rcu_read_unlock();
--
cgit v1.2.3
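Taken together with the previous patch, the XDP_REDIRECT arm now follows the
pattern sketched below. The helper name tun_xdp_redirect_pinned() is invented
for illustration; in the driver these calls sit inline in tun_build_skb() as
the hunks above show.

/*
 * Sketch only: the invariant the last two patches establish is that the
 * redirect and its flush run back to back on the same CPU, inside the
 * preemption-disabled RCU section, so the per-CPU devmap bulk queues
 * are drained by the CPU that filled them and no packet is left stalled.
 */
#include <linux/filter.h>
#include <linux/netdevice.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>

static int tun_xdp_redirect_pinned(struct net_device *dev,
                                   struct xdp_buff *xdp,
                                   struct bpf_prog *xdp_prog)
{
        int err;

        preempt_disable();
        rcu_read_lock();

        err = xdp_do_redirect(dev, xdp, xdp_prog);
        /* Flush immediately: deferring to another context could run on
         * a different CPU and miss the packets queued here, which is
         * exactly what the reverted batching attempt ran into.
         */
        xdp_do_flush_map();

        rcu_read_unlock();
        preempt_enable();

        return err;
}

As the commit message notes, this gives up batching through the devmap for
now; doing that properly would need a NAPI-style polling context to flush
from.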