Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c          4
-rw-r--r--  net/core/sock.c         2
-rw-r--r--  net/core/sock_diag.c   27
-rw-r--r--  net/ipv4/af_inet.c     17
-rw-r--r--  net/ipv4/icmp.c        23
-rw-r--r--  net/ipv4/ip_gre.c      25
-rw-r--r--  net/ipv4/ping.c         4
-rw-r--r--  net/ipv4/tcp_output.c  18
8 files changed, 75 insertions, 45 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 17bc535115d3..18d8b5acc343 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1882,8 +1882,10 @@ int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
if (!new_dev_maps)
new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
- if (!new_dev_maps)
+ if (!new_dev_maps) {
+ mutex_unlock(&xps_map_mutex);
return -ENOMEM;
+ }
map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
NULL;
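
Note: the dev.c hunk matters because returning -ENOMEM while xps_map_mutex is still held would leave the mutex locked, blocking every later netif_set_xps_queue() caller. A minimal userspace sketch of the release-before-return pattern, assuming hypothetical names (setup_maps and maps_mutex are illustrative, not kernel symbols):

    #include <errno.h>
    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t maps_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Every return taken while the lock is held needs a matching
     * unlock; the kzalloc() failure path above gained exactly that. */
    static int setup_maps(size_t maps_sz)
    {
        void *new_maps;

        pthread_mutex_lock(&maps_mutex);
        new_maps = calloc(1, maps_sz);
        if (!new_maps) {
            pthread_mutex_unlock(&maps_mutex); /* the previously missing step */
            return -ENOMEM;
        }
        /* ... install the new map while still holding the lock ... */
        free(new_maps);
        pthread_mutex_unlock(&maps_mutex);
        return 0;
    }
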
diff --git a/net/core/sock.c b/net/core/sock.c
index fe96c5d34299..b261a7977746 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -186,8 +186,10 @@ void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];
+#if defined(CONFIG_MEMCG_KMEM)
struct static_key memcg_socket_limit_enabled;
EXPORT_SYMBOL(memcg_socket_limit_enabled);
+#endif
/*
* Make lock validator output more readable. (we pre-construct these
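
The sock.c change compiles memcg_socket_limit_enabled only when CONFIG_MEMCG_KMEM is set, so the symbol exists only in configurations whose memcg socket-pressure code can use it; otherwise every build would emit and export a symbol with no users. A short sketch of keeping declaration and definition under the same guard (the header placement here is illustrative):

    /* hypothetical header: declaration guarded by the config option */
    #if defined(CONFIG_MEMCG_KMEM)
    extern struct static_key memcg_socket_limit_enabled;
    #endif

    /* matching .c file: keep the definition under the identical guard
     * so declaration, definition, and users agree for every config. */
    #if defined(CONFIG_MEMCG_KMEM)
    struct static_key memcg_socket_limit_enabled;
    EXPORT_SYMBOL(memcg_socket_limit_enabled);
    #endif
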
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 602cd637182e..a29e90cf36b7 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -97,21 +97,6 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
}
EXPORT_SYMBOL_GPL(sock_diag_unregister);
-static const inline struct sock_diag_handler *sock_diag_lock_handler(int family)
-{
- if (sock_diag_handlers[family] == NULL)
- request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
- NETLINK_SOCK_DIAG, family);
-
- mutex_lock(&sock_diag_table_mutex);
- return sock_diag_handlers[family];
-}
-
-static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h)
-{
- mutex_unlock(&sock_diag_table_mutex);
-}
-
static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
int err;
@@ -121,12 +106,20 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if (nlmsg_len(nlh) < sizeof(*req))
return -EINVAL;
- hndl = sock_diag_lock_handler(req->sdiag_family);
+ if (req->sdiag_family >= AF_MAX)
+ return -EINVAL;
+
+ if (sock_diag_handlers[req->sdiag_family] == NULL)
+ request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
+ NETLINK_SOCK_DIAG, req->sdiag_family);
+
+ mutex_lock(&sock_diag_table_mutex);
+ hndl = sock_diag_handlers[req->sdiag_family];
if (hndl == NULL)
err = -ENOENT;
else
err = hndl->dump(skb, nlh);
- sock_diag_unlock_handler(hndl);
+ mutex_unlock(&sock_diag_table_mutex);
return err;
}
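
Besides inlining the lock helpers, the sock_diag change adds the crucial req->sdiag_family >= AF_MAX check: the family comes straight from a userspace netlink message, and an unchecked value would index past the end of sock_diag_handlers[]. A userspace sketch of validating an untrusted index before a table lookup (all names hypothetical):

    #include <errno.h>
    #include <stddef.h>

    #define AF_MAX 41 /* illustrative bound; the kernel defines its own */

    typedef int (*diag_dump_fn)(const void *req);

    static diag_dump_fn handlers[AF_MAX];

    /* Reject the untrusted index before it ever touches the array;
     * only then distinguish "no handler registered" from success. */
    static int dispatch(unsigned int family, const void *req)
    {
        if (family >= AF_MAX)
            return -EINVAL;
        if (handlers[family] == NULL)
            return -ENOENT;
        return handlers[family](req);
    }
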
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index e225a4e5b572..68f6a94f7661 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -248,8 +248,12 @@ EXPORT_SYMBOL(inet_listen);
u32 inet_ehash_secret __read_mostly;
EXPORT_SYMBOL(inet_ehash_secret);
+u32 ipv6_hash_secret __read_mostly;
+EXPORT_SYMBOL(ipv6_hash_secret);
+
/*
- * inet_ehash_secret must be set exactly once
+ * inet_ehash_secret must be set exactly once, and to a non-zero value
+ * ipv6_hash_secret must be set exactly once.
*/
void build_ehash_secret(void)
{
@@ -259,7 +263,8 @@ void build_ehash_secret(void)
get_random_bytes(&rnd, sizeof(rnd));
} while (rnd == 0);
- cmpxchg(&inet_ehash_secret, 0, rnd);
+ if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0)
+ get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret));
}
EXPORT_SYMBOL(build_ehash_secret);
@@ -1327,8 +1332,10 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
if (skb->next != NULL)
iph->frag_off |= htons(IP_MF);
offset += (skb->len - skb->mac_len - iph->ihl * 4);
- } else
- iph->id = htons(id++);
+ } else {
+ if (!(iph->frag_off & htons(IP_DF)))
+ iph->id = htons(id++);
+ }
iph->tot_len = htons(skb->len - skb->mac_len);
iph->check = 0;
iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
@@ -1572,7 +1579,7 @@ static const struct net_offload udp_offload = {
static const struct net_protocol icmp_protocol = {
.handler = icmp_rcv,
- .err_handler = ping_err,
+ .err_handler = icmp_err,
.no_policy = 1,
.netns_ok = 1,
};
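
Two things to note in the af_inet.c changes: build_ehash_secret() now lets only the winner of the cmpxchg(), the caller that actually flips inet_ehash_secret from 0, seed ipv6_hash_secret, so both secrets are written at most once under concurrent first use; and inet_gso_segment() stops incrementing the IP ID for DF packets. A C11-atomics userspace sketch of the claim-then-initialize idiom (function and variable names illustrative; rand() is only a stand-in for get_random_bytes()):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdlib.h>

    static _Atomic uint32_t primary_secret; /* 0 means "not yet set" */
    static uint32_t secondary_secret;

    static void build_secrets(void)
    {
        uint32_t rnd, expected = 0;

        do {
            rnd = (uint32_t)rand(); /* weak stand-in for get_random_bytes() */
        } while (rnd == 0);         /* keep 0 reserved as the sentinel */

        /* Only the caller that wins the 0 -> rnd transition initializes
         * the companion secret, so neither value is written twice. */
        if (atomic_compare_exchange_strong(&primary_secret, &expected, rnd))
            secondary_secret = (uint32_t)rand();
    }
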
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 17ff9fd7cdda..3ac5dff79627 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -934,6 +934,29 @@ error:
goto drop;
}
+void icmp_err(struct sk_buff *skb, u32 info)
+{
+ struct iphdr *iph = (struct iphdr *)skb->data;
+ struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
+ int type = icmp_hdr(skb)->type;
+ int code = icmp_hdr(skb)->code;
+ struct net *net = dev_net(skb->dev);
+
+ /*
+ * Use ping_err to handle all icmp errors except those
+ * triggered by ICMP_ECHOREPLY, which is sent from the kernel.
+ */
+ if (icmph->type != ICMP_ECHOREPLY) {
+ ping_err(skb, info);
+ return;
+ }
+
+ if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
+ ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ICMP, 0);
+ else if (type == ICMP_REDIRECT)
+ ipv4_redirect(skb, net, 0, 0, IPPROTO_ICMP, 0);
+}
+
/*
* This table is the definition of how we handle ICMP.
*/
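
icmp_err() reads two different headers: icmph points at the ICMP header inside the quoted packet that caused the error (skb->data is the inner IP header here), while icmp_hdr(skb) returns the outer ICMP error header. Everything not quoting a kernel-sent ICMP_ECHOREPLY is delegated to ping_err() for userspace ping sockets; for kernel echo replies, PMTU and redirect information goes to the routing layer. A hedged sketch of that dispatch (the enum and classify() are illustrative, not kernel API):

    #include <netinet/ip_icmp.h> /* ICMP_ECHOREPLY, ICMP_DEST_UNREACH, ... */

    enum icmp_err_action { TO_PING_ERR, UPDATE_PMTU, DO_REDIRECT, IGNORE };

    /* inner_type: type of the quoted ICMP header (icmph->type);
     * outer_*:    type/code of the ICMP error itself (icmp_hdr(skb)). */
    static enum icmp_err_action classify(int inner_type,
                                         int outer_type, int outer_code)
    {
        if (inner_type != ICMP_ECHOREPLY)
            return TO_PING_ERR; /* error against a userspace ping socket */
        if (outer_type == ICMP_DEST_UNREACH && outer_code == ICMP_FRAG_NEEDED)
            return UPDATE_PMTU; /* 'info' carries the next-hop MTU */
        if (outer_type == ICMP_REDIRECT)
            return DO_REDIRECT;
        return IGNORE;
    }
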
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 5ef4da780ac1..d0ef0e674ec5 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -735,7 +735,7 @@ drop:
return 0;
}
-static struct sk_buff *handle_offloads(struct sk_buff *skb)
+static struct sk_buff *handle_offloads(struct ip_tunnel *tunnel, struct sk_buff *skb)
{
int err;
@@ -745,8 +745,12 @@ static struct sk_buff *handle_offloads(struct sk_buff *skb)
goto error;
skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
return skb;
- }
- if (skb->ip_summed != CHECKSUM_PARTIAL)
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ tunnel->parms.o_flags & GRE_CSUM) {
+ err = skb_checksum_help(skb);
+ if (unlikely(err))
+ goto error;
+ } else if (skb->ip_summed != CHECKSUM_PARTIAL)
skb->ip_summed = CHECKSUM_NONE;
return skb;
@@ -776,7 +780,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
int err;
int pkt_len;
- skb = handle_offloads(skb);
+ skb = handle_offloads(tunnel, skb);
if (IS_ERR(skb)) {
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
@@ -970,7 +974,8 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
iph->daddr = fl4.daddr;
iph->saddr = fl4.saddr;
iph->ttl = ttl;
- iph->id = 0;
+
+ tunnel_ip_select_ident(skb, old_iph, &rt->dst);
if (ttl == 0) {
if (skb->protocol == htons(ETH_P_IP))
@@ -1101,14 +1106,8 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
tunnel->hlen = addend;
/* TCP offload with GRE SEQ is not supported. */
if (!(tunnel->parms.o_flags & GRE_SEQ)) {
- /* device supports enc gso offload*/
- if (tdev->hw_enc_features & NETIF_F_GRE_GSO) {
- dev->features |= NETIF_F_TSO;
- dev->hw_features |= NETIF_F_TSO;
- } else {
- dev->features |= NETIF_F_GSO_SOFTWARE;
- dev->hw_features |= NETIF_F_GSO_SOFTWARE;
- }
+ dev->features |= NETIF_F_GSO_SOFTWARE;
+ dev->hw_features |= NETIF_F_GSO_SOFTWARE;
}
return mtu;
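
The new CHECKSUM_PARTIAL && GRE_CSUM branch calls skb_checksum_help() because a GRE checksum is computed over the entire encapsulated packet, so any checksum the hardware was going to fill in later must be completed in software first. For reference, a minimal userspace version of the Internet checksum (RFC 1071) that the GRE csum field uses:

    #include <stddef.h>
    #include <stdint.h>

    /* One's-complement sum of 16-bit words, end-around carries folded
     * back in, final result complemented (RFC 1071). */
    static uint16_t inet_checksum(const uint8_t *data, size_t len)
    {
        uint32_t sum = 0;

        while (len > 1) {
            sum += ((uint32_t)data[0] << 8) | data[1];
            data += 2;
            len -= 2;
        }
        if (len) /* odd trailing byte, zero-padded on the right */
            sum += (uint32_t)data[0] << 8;
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }
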
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 55c4ee1bba06..2e91006d6076 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -322,8 +322,8 @@ void ping_err(struct sk_buff *skb, u32 info)
struct iphdr *iph = (struct iphdr *)skb->data;
struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
struct inet_sock *inet_sock;
- int type = icmph->type;
- int code = icmph->code;
+ int type = icmp_hdr(skb)->type;
+ int code = icmp_hdr(skb)->code;
struct net *net = dev_net(skb->dev);
struct sock *sk;
int harderr;
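
The ping.c fix is the same inner-versus-outer distinction as in icmp_err() above: icmph points at the quoted probe's own ICMP header, but the error handling must key off icmp_hdr(skb), the outer error's type and code. A byte-level userspace sketch of reaching the inner ICMP header (struct layout truncated to the fields that matter; real code would use <netinet/ip_icmp.h>):

    #include <stddef.h>
    #include <stdint.h>

    struct icmp_min { uint8_t type; uint8_t code; }; /* leading fields only */

    /* Given the quoted inner IP header (what skb->data is in ping_err()),
     * step over its variable-length header: the equivalent of
     * skb->data + (iph->ihl << 2) in the kernel source above. */
    static const struct icmp_min *inner_icmp(const uint8_t *inner_ip)
    {
        size_t ihl = (size_t)(inner_ip[0] & 0x0f) * 4; /* IHL: 32-bit words */
        return (const struct icmp_min *)(const void *)(inner_ip + ihl);
    }
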
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index fd0cea114b5d..e2b4461074da 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1351,8 +1351,8 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
return 0;
}
-/* Calculate MSS. Not accounting for SACKs here. */
-int tcp_mtu_to_mss(struct sock *sk, int pmtu)
+/* Calculate MSS not accounting any TCP options. */
+static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1381,13 +1381,17 @@ int tcp_mtu_to_mss(struct sock *sk, int pmtu)
/* Then reserve room for full set of TCP options and 8 bytes of data */
if (mss_now < 48)
mss_now = 48;
-
- /* Now subtract TCP options size, not including SACKs */
- mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);
-
return mss_now;
}
+/* Calculate MSS. Not accounting for SACKs here. */
+int tcp_mtu_to_mss(struct sock *sk, int pmtu)
+{
+ /* Subtract TCP options size, not including SACKs */
+ return __tcp_mtu_to_mss(sk, pmtu) -
+ (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
+}
+
/* Inverse of above */
int tcp_mss_to_mtu(struct sock *sk, int mss)
{
@@ -2930,7 +2934,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
*/
if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
- space = tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
+ space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
MAX_TCP_OPTION_SPACE;
syn_data = skb_copy_expand(syn, skb_headroom(syn), space,
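
The tcp_output.c refactor means option accounting happens exactly once: __tcp_mtu_to_mss() yields the header-only MSS, tcp_mtu_to_mss() subtracts the currently negotiated options, and the Fast Open SYN-data path instead reserves the full MAX_TCP_OPTION_SPACE. Worked numbers, assuming IPv4 with no IP options and 12 bytes of timestamp options (a sketch of the arithmetic, not kernel code):

    #include <stdio.h>

    #define IP_HDR               20
    #define TCP_HDR              20 /* sizeof(struct tcphdr) */
    #define TS_OPTS              12 /* tcp_header_len minus the bare header */
    #define MAX_TCP_OPTION_SPACE 40

    static int mtu_to_mss_no_opts(int pmtu) /* models __tcp_mtu_to_mss() */
    {
        return pmtu - IP_HDR - TCP_HDR;
    }

    int main(void)
    {
        int pmtu = 1500;

        /* header-only MSS: 1460; after timestamp options: 1448;
         * Fast Open SYN payload budget: 1460 - 40 = 1420 bytes. */
        printf("%d %d %d\n",
               mtu_to_mss_no_opts(pmtu),                        /* 1460 */
               mtu_to_mss_no_opts(pmtu) - TS_OPTS,              /* 1448 */
               mtu_to_mss_no_opts(pmtu) - MAX_TCP_OPTION_SPACE);/* 1420 */
        return 0;
    }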