author      Paolo Abeni <pabeni@redhat.com>           2017-08-25 14:31:01 +0200
committer   David S. Miller <davem@davemloft.net>     2017-08-25 20:09:13 -0700
commit      64f0f5d18a47c703c85576375cc010e83dac6a48 (patch)
tree        cf982fdd2ab7757341873711bd9bed8bdb974712
parent      2207d182c14294d78b98142f0b6a16bea5e8b0fb (diff)
download    linux-64f0f5d18a47c703c85576375cc010e83dac6a48.tar.bz2
udp6: set rx_dst_cookie on rx_dst updates
Currently, in the udp6 code, the dst cookie is not initialized/updated
concurrently with the RX dst used by early demux. As a result, the
dst_check() in the early_demux path always fails, the rx dst cache is
always invalidated, and we gain little from the demux lookup.

Fix this by adding a udp6-specific variant of sk_rx_dst_set() and using it
to set the dst cookie whenever the dst entry actually changes.

The issue has been present since the introduction of early demux for ipv6.

Fixes: 5425077d73e0 ("net: ipv6: Add early demux handler for UDP unicast")
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
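For context, the early-demux receive path can only reuse a socket's cached rx
dst if dst_check() accepts the cookie stored alongside it. The sketch below is
a simplified illustration of that consumer side, not the kernel's exact code,
and the helper name udp6_early_demux_dst() is invented for the example; with
rx_dst_cookie never set, as was the case before this patch, the check fails
for every packet and the cache is rebuilt each time.

/* Simplified, hypothetical sketch of the consumer side: the cached rx dst
 * is only reusable when dst_check() accepts the cookie recorded at cache
 * time.  Before this patch, udp6 left rx_dst_cookie at zero, so this
 * validation always failed.
 */
static struct dst_entry *udp6_early_demux_dst(struct sock *sk)
{
        struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

        if (dst)
                dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);

        return dst;     /* NULL: fall back to a full route lookup */
}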
-rw-r--r--  include/net/udp.h   2
-rw-r--r--  net/ipv4/udp.c      4
-rw-r--r--  net/ipv6/udp.c     11
3 files changed, 14 insertions(+), 3 deletions(-)
diff --git a/include/net/udp.h b/include/net/udp.h
index 586de4b811b5..626c2d8a70c5 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -260,7 +260,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
 }
 
 void udp_v4_early_demux(struct sk_buff *skb);
-void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
+bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
 int udp_get_port(struct sock *sk, unsigned short snum,
                  int (*saddr_cmp)(const struct sock *,
                                   const struct sock *));
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index cd1d044a7fa5..a6dc48d76a29 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1929,14 +1929,16 @@ drop:
 /* For TCP sockets, sk_rx_dst is protected by socket lock
  * For UDP, we use xchg() to guard against concurrent changes.
  */
-void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
+bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
 {
         struct dst_entry *old;
 
         if (dst_hold_safe(dst)) {
                 old = xchg(&sk->sk_rx_dst, dst);
                 dst_release(old);
+                return old != dst;
         }
+        return false;
 }
 EXPORT_SYMBOL(udp_sk_rx_dst_set);
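As the comment in the hunk above notes, UDP cannot rely on the socket lock
here, so the cache update is a lock-free swap. The annotated restatement below
spells out the refcount and concurrency reasoning; the calls are the real
kernel primitives, while the function name (with the _annotated suffix) exists
only for illustration.

/* Annotated restatement of udp_sk_rx_dst_set(), for illustration only. */
bool udp_sk_rx_dst_set_annotated(struct sock *sk, struct dst_entry *dst)
{
        struct dst_entry *old;

        /* Take a reference only if the dst is not already being torn down;
         * on failure nothing is cached and the caller is told so.
         */
        if (dst_hold_safe(dst)) {
                /* Atomically publish the new entry; if several CPUs race,
                 * each previous pointer is handed back to exactly one CPU.
                 */
                old = xchg(&sk->sk_rx_dst, dst);

                /* Drop the reference the cache held on the previous entry
                 * (dst_release(NULL) is a no-op on the first packet).
                 */
                dst_release(old);

                /* False when the same dst was already cached, so callers
                 * refresh dst-derived state (e.g. the udp6 cookie) only on
                 * a real change.
                 */
                return old != dst;
        }
        return false;
}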
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 20039c8501eb..d6886228e1d0 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -768,6 +768,15 @@ start_lookup:
         return 0;
 }
 
+static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
+{
+        if (udp_sk_rx_dst_set(sk, dst)) {
+                const struct rt6_info *rt = (const struct rt6_info *)dst;
+
+                inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
+        }
+}
+
 int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                    int proto)
 {
@@ -817,7 +826,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                 int ret;
 
                 if (unlikely(sk->sk_rx_dst != dst))
-                        udp_sk_rx_dst_set(sk, dst);
+                        udp6_sk_rx_dst_set(sk, dst);
 
                 ret = udpv6_queue_rcv_skb(sk, skb);
                 sock_put(sk);