author    Linus Torvalds <torvalds@linux-foundation.org>  2014-06-15 16:37:03 -1000
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-06-15 16:37:03 -1000
commit    a9be22425e767d936105679fdc9f568b97bd47cf
tree      37a63136da83dcf272668462f96eed1e96f37de3
parent    dd1845af24a47b70cf84c29126698884f740ff9c
parent    b58537a1f5629bdc98a8b9dc2051ce0e952f6b4b
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
1) Fix checksumming regressions, from Tom Herbert.
2) Undo unintentional permissions changes for SCTP rto_alpha and
rto_beta sysfs knobs, from Denial Borkmann.
3) VXLAN, like other IP tunnels, should advertise its encapsulation
   size using dev->needed_headroom instead of dev->hard_header_len.
   From Cong Wang. (A short sketch of the distinction follows this list.)
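For context on item 3: dev->hard_header_len describes the link-layer header the device itself prepends, while dev->needed_headroom is only a hint asking the stack to reserve extra skb headroom for outer headers. Below is a minimal sketch of a tunnel setup function; the names MYTUN_HEADROOM and mytun_setup are illustrative and not part of this patch.

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>

    /* Illustrative encapsulation overhead: outer IPv4 + UDP + 8-byte tunnel header. */
    #define MYTUN_HEADROOM (20 + 8 + 8)

    static void mytun_setup(struct net_device *dev)
    {
            ether_setup(dev);       /* sets dev->hard_header_len = ETH_HLEN */

            /* Reserve room for the outer headers as a headroom hint instead of
             * inflating hard_header_len, which would misreport the size of the
             * link-layer header this device itself builds.
             */
            dev->needed_headroom = ETH_HLEN + MYTUN_HEADROOM;
    }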
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
net: sctp: fix permissions for rto_alpha and rto_beta knobs
vxlan: Checksum fixes
net: add skb_pop_rcv_encapsulation
udp: call __skb_checksum_complete when doing full checksum
net: Fix save software checksum complete
net: Fix GSO constants to match NETIF flags
udp: ipv4: do not waste time in __udp4_lib_mcast_demux_lookup
vxlan: use dev->needed_headroom instead of dev->hard_header_len
MAINTAINERS: update cxgb4 maintainer
 MAINTAINERS                     |  2
 drivers/net/vxlan.c             | 18
 include/linux/netdev_features.h |  1
 include/linux/netdevice.h       |  7
 include/linux/skbuff.h          | 23
 include/net/udp.h               |  4
 net/core/datagram.c             | 36
 net/core/skbuff.c               |  3
 net/ipv4/udp.c                  |  4
 net/sctp/sysctl.c               | 32
 10 files changed, 96 insertions(+), 34 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 055f95238d88..134483f206e4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2594,7 +2594,7 @@ S:	Supported
 F:	drivers/infiniband/hw/cxgb3/
 
 CXGB4 ETHERNET DRIVER (CXGB4)
-M:	Dimitris Michailidis <dm@chelsio.com>
+M:	Hariprasad S <hariprasad@chelsio.com>
 L:	netdev@vger.kernel.org
 W:	http://www.chelsio.com
 S:	Supported
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 1610d51dbb5c..ade33ef82823 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1156,15 +1156,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 	if (!vs)
 		goto drop;
 
-	/* If the NIC driver gave us an encapsulated packet
-	 * with the encapsulation mark, the device checksummed it
-	 * for us. Otherwise force the upper layers to verify it.
-	 */
-	if ((skb->ip_summed != CHECKSUM_UNNECESSARY && skb->ip_summed != CHECKSUM_PARTIAL) ||
-	    !skb->encapsulation)
-		skb->ip_summed = CHECKSUM_NONE;
-
-	skb->encapsulation = 0;
+	skb_pop_rcv_encapsulation(skb);
 
 	vs->rcv(vs, skb, vxh->vx_vni);
 	return 0;
@@ -1201,6 +1193,7 @@ static void vxlan_rcv(struct vxlan_sock *vs,
 	skb_reset_mac_header(skb);
 	skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
 	skb->protocol = eth_type_trans(skb, vxlan->dev);
+	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 
 	/* Ignore packet loops (and multicast echo) */
 	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
@@ -2247,9 +2240,9 @@ static void vxlan_setup(struct net_device *dev)
 	eth_hw_addr_random(dev);
 	ether_setup(dev);
 	if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
-		dev->hard_header_len = ETH_HLEN + VXLAN6_HEADROOM;
+		dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
 	else
-		dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;
+		dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
 
 	dev->netdev_ops = &vxlan_netdev_ops;
 	dev->destructor = free_netdev;
@@ -2646,8 +2639,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
 		if (!tb[IFLA_MTU])
 			dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
 
-		/* update header length based on lower device */
-		dev->hard_header_len = lowerdev->hard_header_len +
+		dev->needed_headroom = lowerdev->hard_header_len +
 				       (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
 	} else if (use_ipv6)
 		vxlan->flags |= VXLAN_F_IPV6;
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index e5a589435e2b..d99800cbdcf3 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -117,6 +117,7 @@ enum {
 #define NETIF_F_GSO_IPIP	__NETIF_F(GSO_IPIP)
 #define NETIF_F_GSO_SIT		__NETIF_F(GSO_SIT)
 #define NETIF_F_GSO_UDP_TUNNEL	__NETIF_F(GSO_UDP_TUNNEL)
+#define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM)
 #define NETIF_F_GSO_MPLS	__NETIF_F(GSO_MPLS)
 #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
 #define NETIF_F_HW_VLAN_STAG_RX	__NETIF_F(HW_VLAN_STAG_RX)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index abe3de1db932..66f9a04ec270 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3305,6 +3305,13 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_IPIP != (NETIF_F_GSO_IPIP >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_MPLS != (NETIF_F_GSO_MPLS >> NETIF_F_GSO_SHIFT));
 
 	return (features & feature) == feature;
 }
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5b5cd3189c98..ec89301ada41 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -338,17 +338,18 @@ enum {
 
 	SKB_GSO_GRE = 1 << 6,
 
-	SKB_GSO_IPIP = 1 << 7,
+	SKB_GSO_GRE_CSUM = 1 << 7,
 
-	SKB_GSO_SIT = 1 << 8,
+	SKB_GSO_IPIP = 1 << 8,
 
-	SKB_GSO_UDP_TUNNEL = 1 << 9,
+	SKB_GSO_SIT = 1 << 9,
 
-	SKB_GSO_MPLS = 1 << 10,
+	SKB_GSO_UDP_TUNNEL = 1 << 10,
 
 	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
 
-	SKB_GSO_GRE_CSUM = 1 << 12,
+	SKB_GSO_MPLS = 1 << 12,
+
 };
 
 #if BITS_PER_LONG > 32
@@ -1853,6 +1854,18 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
 	return pskb_may_pull(skb, skb_network_offset(skb) + len);
 }
 
+static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb)
+{
+	/* Only continue with checksum unnecessary if device indicated
+	 * it is valid across encapsulation (skb->encapsulation was set).
+	 */
+	if (skb->ip_summed == CHECKSUM_UNNECESSARY && !skb->encapsulation)
+		skb->ip_summed = CHECKSUM_NONE;
+
+	skb->encapsulation = 0;
+	skb->csum_valid = 0;
+}
+
 /*
  * CPUs often take a performance hit when accessing unaligned memory
  * locations. The actual performance hit varies, it can be small if the
diff --git a/include/net/udp.h b/include/net/udp.h
index 2ecfc6e15609..68a1fefe3dfe 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -111,7 +111,9 @@ struct sk_buff;
  */
 static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
 {
-	return __skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov);
+	return (UDP_SKB_CB(skb)->cscov == skb->len ?
+		__skb_checksum_complete(skb) :
+		__skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
 }
 
 static inline int udp_lib_checksum_complete(struct sk_buff *skb)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 6b1c04ca1d50..488dd1a825c0 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -739,22 +739,38 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
 	__sum16 sum;
 
 	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
-	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && !sum &&
-	    !skb->csum_complete_sw)
-		netdev_rx_csum_fault(skb->dev);
-
-	/* Save checksum complete for later use */
-	skb->csum = sum;
-	skb->ip_summed = CHECKSUM_COMPLETE;
-	skb->csum_complete_sw = 1;
-
+	if (likely(!sum)) {
+		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
+		    !skb->csum_complete_sw)
+			netdev_rx_csum_fault(skb->dev);
+	}
+	skb->csum_valid = !sum;
 	return sum;
 }
 EXPORT_SYMBOL(__skb_checksum_complete_head);
 
 __sum16 __skb_checksum_complete(struct sk_buff *skb)
 {
-	return __skb_checksum_complete_head(skb, skb->len);
+	__wsum csum;
+	__sum16 sum;
+
+	csum = skb_checksum(skb, 0, skb->len, 0);
+
+	/* skb->csum holds pseudo checksum */
+	sum = csum_fold(csum_add(skb->csum, csum));
+	if (likely(!sum)) {
+		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
+		    !skb->csum_complete_sw)
+			netdev_rx_csum_fault(skb->dev);
+	}
+
+	/* Save full packet checksum */
+	skb->csum = csum;
+	skb->ip_summed = CHECKSUM_COMPLETE;
+	skb->csum_complete_sw = 1;
+	skb->csum_valid = !sum;
+
+	return sum;
 }
 EXPORT_SYMBOL(__skb_checksum_complete);
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index bf92824af3f7..9cd5344fad73 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -689,6 +689,9 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->ooo_okay = old->ooo_okay;
 	new->no_fcs = old->no_fcs;
 	new->encapsulation = old->encapsulation;
+	new->encap_hdr_csum = old->encap_hdr_csum;
+	new->csum_valid = old->csum_valid;
+	new->csum_complete_sw = old->csum_complete_sw;
 #ifdef CONFIG_XFRM
 	new->sp = secpath_get(old->sp);
 #endif
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 185ed3e59802..d92f94b7e402 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1861,6 +1861,10 @@ static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
 	unsigned int count, slot = udp_hashfn(net, hnum, udp_table.mask);
 	struct udp_hslot *hslot = &udp_table.hash[slot];
 
+	/* Do not bother scanning a too big list */
+	if (hslot->count > 10)
+		return NULL;
+
 	rcu_read_lock();
begin:
 	count = 0;
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 7e5eb7554990..dcb19592761e 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -34,6 +34,8 @@
  *    Sridhar Samudrala     <sri@us.ibm.com>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <net/sctp/structs.h>
 #include <net/sctp/sctp.h>
 #include <linux/sysctl.h>
@@ -46,6 +48,11 @@ static int sack_timer_min = 1;
 static int sack_timer_max = 500;
 static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
 static int rwnd_scale_max = 16;
+static int rto_alpha_min = 0;
+static int rto_beta_min = 0;
+static int rto_alpha_max = 1000;
+static int rto_beta_max = 1000;
+
 static unsigned long max_autoclose_min = 0;
 static unsigned long max_autoclose_max =
 	(MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
@@ -64,6 +71,9 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
 static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
 				void __user *buffer, size_t *lenp,
 				loff_t *ppos);
+static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write,
+				   void __user *buffer, size_t *lenp,
+				   loff_t *ppos);
 static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
 			     void __user *buffer, size_t *lenp,
 			     loff_t *ppos);
@@ -126,15 +136,19 @@ static struct ctl_table sctp_net_table[] = {
 		.procname = "rto_alpha_exp_divisor",
 		.data = &init_net.sctp.rto_alpha,
 		.maxlen = sizeof(int),
-		.mode = 0444,
-		.proc_handler = proc_dointvec,
+		.mode = 0644,
+		.proc_handler = proc_sctp_do_alpha_beta,
+		.extra1 = &rto_alpha_min,
+		.extra2 = &rto_alpha_max,
 	},
 	{
 		.procname = "rto_beta_exp_divisor",
 		.data = &init_net.sctp.rto_beta,
 		.maxlen = sizeof(int),
-		.mode = 0444,
-		.proc_handler = proc_dointvec,
+		.mode = 0644,
+		.proc_handler = proc_sctp_do_alpha_beta,
+		.extra1 = &rto_beta_min,
+		.extra2 = &rto_beta_max,
 	},
 	{
 		.procname = "max_burst",
@@ -403,6 +417,16 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
 	return ret;
 }
 
+static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write,
+				   void __user *buffer, size_t *lenp,
+				   loff_t *ppos)
+{
+	pr_warn_once("Changing rto_alpha or rto_beta may lead to "
+		     "suboptimal rtt/srtt estimations!\n");
+
+	return proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
+}
+
 static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
 			     void __user *buffer, size_t *lenp,
 			     loff_t *ppos)
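The BUILD_BUG_ON() additions to net_gso_ok() above pin down the invariant that each SKB_GSO_* bit equals its NETIF_F_GSO_* feature bit shifted right by NETIF_F_GSO_SHIFT, which is what allows a gso_type bitmap to be checked directly against device features. A minimal sketch of that check; the helper name gso_types_supported is assumed for illustration and is not part of the patch.

    #include <linux/netdevice.h>

    /* Shifting a SKB_GSO_* bitmap left by NETIF_F_GSO_SHIFT yields the
     * matching NETIF_F_GSO_* feature bits, so the two can be compared
     * directly; the new assertions guarantee the bit positions line up.
     */
    static inline bool gso_types_supported(const struct net_device *dev,
                                           unsigned int gso_types)
    {
            netdev_features_t feature =
                    (netdev_features_t)gso_types << NETIF_F_GSO_SHIFT;

            /* All requested GSO types must be offloadable by the device. */
            return (dev->features & feature) == feature;
    }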