Diffstat (limited to 'net/core')
 -rw-r--r--   net/core/dev.c            |  47
 -rw-r--r--   net/core/dev_ioctl.c      |   7
 -rw-r--r--   net/core/devlink.c        |  62
 -rw-r--r--   net/core/ethtool.c        |   5
 -rw-r--r--   net/core/filter.c         |  66
 -rw-r--r--   net/core/gen_estimator.c  |   1
 -rw-r--r--   net/core/skbuff.c         |  59
 -rw-r--r--   net/core/sock.c           |  21
 -rw-r--r--   net/core/sock_diag.c      |  12
 9 files changed, 189 insertions(+), 91 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index dda9d7b9a840..12be20535714 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2382,8 +2382,11 @@ EXPORT_SYMBOL(netdev_set_num_tc);
*/
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
+ bool disabling;
int rc;
+ disabling = txq < dev->real_num_tx_queues;
+
if (txq < 1 || txq > dev->num_tx_queues)
return -EINVAL;
@@ -2399,15 +2402,19 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
if (dev->num_tc)
netif_setup_tc(dev, txq);
- if (txq < dev->real_num_tx_queues) {
+ dev->real_num_tx_queues = txq;
+
+ if (disabling) {
+ synchronize_net();
qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
netif_reset_xps_queues_gt(dev, txq);
#endif
}
+ } else {
+ dev->real_num_tx_queues = txq;
}
- dev->real_num_tx_queues = txq;
return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
@@ -3271,15 +3278,23 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
- struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
+ const struct netprio_map *map;
+ const struct sock *sk;
+ unsigned int prioidx;
- if (!skb->priority && skb->sk && map) {
- unsigned int prioidx =
- sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);
+ if (skb->priority)
+ return;
+ map = rcu_dereference_bh(skb->dev->priomap);
+ if (!map)
+ return;
+ sk = skb_to_full_sk(skb);
+ if (!sk)
+ return;
- if (prioidx < map->priomap_len)
- skb->priority = map->priomap[prioidx];
- }
+ prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);
+
+ if (prioidx < map->priomap_len)
+ skb->priority = map->priomap[prioidx];
}
#else
#define skb_update_prio(skb)
@@ -6389,6 +6404,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
.linking = true,
.upper_info = upper_info,
};
+ struct net_device *master_dev;
int ret = 0;
ASSERT_RTNL();
@@ -6400,11 +6416,14 @@ static int __netdev_upper_dev_link(struct net_device *dev,
if (netdev_has_upper_dev(upper_dev, dev))
return -EBUSY;
- if (netdev_has_upper_dev(dev, upper_dev))
- return -EEXIST;
-
- if (master && netdev_master_upper_dev_get(dev))
- return -EBUSY;
+ if (!master) {
+ if (netdev_has_upper_dev(dev, upper_dev))
+ return -EEXIST;
+ } else {
+ master_dev = netdev_master_upper_dev_get(dev);
+ if (master_dev)
+ return master_dev == upper_dev ? -EEXIST : -EBUSY;
+ }
ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER,
&changeupper_info.info);
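The netif_set_real_num_tx_queues() change above publishes the smaller queue count before tearing down the queues that go away, with a synchronize_net() in between so transmitters still running against the old count drain out first. Condensed, the shrink path now reads (a sketch; RTNL is assumed held as before):

	bool disabling = txq < dev->real_num_tx_queues;
	...
	dev->real_num_tx_queues = txq;		/* publish the new count first */
	if (disabling) {
		synchronize_net();		/* let readers of the old count finish */
		qdisc_reset_all_tx_gt(dev, txq);	/* then reset queues >= txq */
#ifdef CONFIG_XPS
		netif_reset_xps_queues_gt(dev, txq);
#endif
	}

The skb_update_prio() hunk fetches the full socket via skb_to_full_sk() before touching cgroup data, so request/time-wait sockets are skipped, and __netdev_upper_dev_link() now returns -EEXIST when the requested master is already this device's master, keeping -EBUSY for the case where a different master is attached.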
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 0ab1af04296c..a04e1e88bf3a 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -402,8 +402,6 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
if (colon)
*colon = 0;
- dev_load(net, ifr->ifr_name);
-
/*
* See which interface the caller is talking about.
*/
@@ -423,6 +421,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
case SIOCGIFMAP:
case SIOCGIFINDEX:
case SIOCGIFTXQLEN:
+ dev_load(net, ifr->ifr_name);
rcu_read_lock();
ret = dev_ifsioc_locked(net, ifr, cmd);
rcu_read_unlock();
@@ -431,6 +430,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
return ret;
case SIOCETHTOOL:
+ dev_load(net, ifr->ifr_name);
rtnl_lock();
ret = dev_ethtool(net, ifr);
rtnl_unlock();
@@ -447,6 +447,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSIFNAME:
+ dev_load(net, ifr->ifr_name);
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
rtnl_lock();
@@ -494,6 +495,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
/* fall through */
case SIOCBONDSLAVEINFOQUERY:
case SIOCBONDINFOQUERY:
+ dev_load(net, ifr->ifr_name);
rtnl_lock();
ret = dev_ifsioc(net, ifr, cmd);
rtnl_unlock();
@@ -518,6 +520,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
cmd == SIOCGHWTSTAMP ||
(cmd >= SIOCDEVPRIVATE &&
cmd <= SIOCDEVPRIVATE + 15)) {
+ dev_load(net, ifr->ifr_name);
rtnl_lock();
ret = dev_ifsioc(net, ifr, cmd);
rtnl_unlock();
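dev_ioctl() no longer calls dev_load() once up front; the call is repeated only in the command cases that actually act on a named device, so commands that never resolve ifr_name stop autoloading modules as a side effect. The resulting dispatch shape, condensed to one read-only and one privileged case from the hunks above:

	switch (cmd) {
	case SIOCGIFMAP:			/* read-only query */
		dev_load(net, ifr->ifr_name);
		rcu_read_lock();
		ret = dev_ifsioc_locked(net, ifr, cmd);
		rcu_read_unlock();
		break;

	case SIOCSIFNAME:			/* privileged modifier */
		dev_load(net, ifr->ifr_name);
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		rtnl_lock();
		ret = dev_ifsioc(net, ifr, cmd);
		rtnl_unlock();
		break;
	}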
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 18d385ed8237..effd4848c2b4 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -1695,10 +1695,11 @@ static int devlink_dpipe_table_put(struct sk_buff *skb,
goto nla_put_failure;
if (table->resource_valid) {
- nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
- table->resource_id, DEVLINK_ATTR_PAD);
- nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
- table->resource_units, DEVLINK_ATTR_PAD);
+ if (nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID,
+ table->resource_id, DEVLINK_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS,
+ table->resource_units, DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
}
if (devlink_dpipe_matches_put(table, skb))
goto nla_put_failure;
@@ -1797,7 +1798,7 @@ send_done:
if (!nlh) {
err = devlink_dpipe_send_and_alloc_skb(&skb, info);
if (err)
- goto err_skb_send_alloc;
+ return err;
goto send_done;
}
@@ -1806,7 +1807,6 @@ send_done:
nla_put_failure:
err = -EMSGSIZE;
err_table_put:
-err_skb_send_alloc:
genlmsg_cancel(skb, hdr);
nlmsg_free(skb);
return err;
@@ -2072,7 +2072,7 @@ static int devlink_dpipe_entries_fill(struct genl_info *info,
table->counters_enabled,
&dump_ctx);
if (err)
- goto err_entries_dump;
+ return err;
send_done:
nlh = nlmsg_put(dump_ctx.skb, info->snd_portid, info->snd_seq,
@@ -2080,16 +2080,10 @@ send_done:
if (!nlh) {
err = devlink_dpipe_send_and_alloc_skb(&dump_ctx.skb, info);
if (err)
- goto err_skb_send_alloc;
+ return err;
goto send_done;
}
return genlmsg_reply(dump_ctx.skb, info);
-
-err_entries_dump:
-err_skb_send_alloc:
- genlmsg_cancel(dump_ctx.skb, dump_ctx.hdr);
- nlmsg_free(dump_ctx.skb);
- return err;
}
static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb,
@@ -2228,7 +2222,7 @@ send_done:
if (!nlh) {
err = devlink_dpipe_send_and_alloc_skb(&skb, info);
if (err)
- goto err_skb_send_alloc;
+ return err;
goto send_done;
}
return genlmsg_reply(skb, info);
@@ -2236,7 +2230,6 @@ send_done:
nla_put_failure:
err = -EMSGSIZE;
err_table_put:
-err_skb_send_alloc:
genlmsg_cancel(skb, hdr);
nlmsg_free(skb);
return err;
@@ -2332,7 +2325,7 @@ devlink_resource_validate_children(struct devlink_resource *resource)
list_for_each_entry(child_resource, &resource->resource_list, list)
parts_size += child_resource->size_new;
- if (parts_size > resource->size)
+ if (parts_size > resource->size_new)
size_valid = false;
out:
resource->size_valid = size_valid;
@@ -2372,20 +2365,22 @@ static int devlink_nl_cmd_resource_set(struct sk_buff *skb,
return 0;
}
-static void
+static int
devlink_resource_size_params_put(struct devlink_resource *resource,
struct sk_buff *skb)
{
struct devlink_resource_size_params *size_params;
- size_params = resource->size_params;
- nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN,
- size_params->size_granularity, DEVLINK_ATTR_PAD);
- nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX,
- size_params->size_max, DEVLINK_ATTR_PAD);
- nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN,
- size_params->size_min, DEVLINK_ATTR_PAD);
- nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit);
+ size_params = &resource->size_params;
+ if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_GRAN,
+ size_params->size_granularity, DEVLINK_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MAX,
+ size_params->size_max, DEVLINK_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_MIN,
+ size_params->size_min, DEVLINK_ATTR_PAD) ||
+ nla_put_u8(skb, DEVLINK_ATTR_RESOURCE_UNIT, size_params->unit))
+ return -EMSGSIZE;
+ return 0;
}
static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
@@ -2409,10 +2404,12 @@ static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW,
resource->size_new, DEVLINK_ATTR_PAD);
if (resource->resource_ops && resource->resource_ops->occ_get)
- nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
- resource->resource_ops->occ_get(devlink),
- DEVLINK_ATTR_PAD);
- devlink_resource_size_params_put(resource, skb);
+ if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
+ resource->resource_ops->occ_get(devlink),
+ DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+ if (devlink_resource_size_params_put(resource, skb))
+ goto nla_put_failure;
if (list_empty(&resource->resource_list))
goto out;
@@ -3151,7 +3148,7 @@ int devlink_resource_register(struct devlink *devlink,
u64 resource_size,
u64 resource_id,
u64 parent_resource_id,
- struct devlink_resource_size_params *size_params,
+ const struct devlink_resource_size_params *size_params,
const struct devlink_resource_ops *resource_ops)
{
struct devlink_resource *resource;
@@ -3194,7 +3191,8 @@ int devlink_resource_register(struct devlink *devlink,
resource->id = resource_id;
resource->resource_ops = resource_ops;
resource->size_valid = true;
- resource->size_params = size_params;
+ memcpy(&resource->size_params, size_params,
+ sizeof(resource->size_params));
INIT_LIST_HEAD(&resource->resource_list);
list_add_tail(&resource->list, resource_list);
out:
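Two themes run through the devlink.c hunks: every nla_put_*() result is now checked (a message that no longer fits becomes -EMSGSIZE via nla_put_failure instead of being sent truncated), and struct devlink_resource embeds its size_params by value instead of holding a pointer into the caller. The copy means a stack-local parameter block is safe at registration time, roughly (placeholder values, remaining arguments elided):

	struct devlink_resource_size_params size_params = {
		.size_min		= 0,
		.size_max		= 1 << 20,
		.size_granularity	= 128,
		.unit			= DEVLINK_RESOURCE_UNIT_ENTRY,
	};

	err = devlink_resource_register(devlink, ..., &size_params,
					&example_resource_ops);
	/* size_params may go out of scope now; devlink copied it */

The devlink_resource_validate_children() fix also compares the children's requested sizes against the parent's size_new rather than its current size.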
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 494e6a5d7306..3f89c76d5c24 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -2520,11 +2520,14 @@ static int set_phy_tunable(struct net_device *dev, void __user *useraddr)
static int ethtool_get_fecparam(struct net_device *dev, void __user *useraddr)
{
struct ethtool_fecparam fecparam = { ETHTOOL_GFECPARAM };
+ int rc;
if (!dev->ethtool_ops->get_fecparam)
return -EOPNOTSUPP;
- dev->ethtool_ops->get_fecparam(dev, &fecparam);
+ rc = dev->ethtool_ops->get_fecparam(dev, &fecparam);
+ if (rc)
+ return rc;
if (copy_to_user(useraddr, &fecparam, sizeof(fecparam)))
return -EFAULT;
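ethtool_get_fecparam() now propagates the return value of the driver's get_fecparam op rather than discarding it, so a driver can legitimately fail the request. A hypothetical driver-side op (names assumed, not part of this patch):

	static int example_get_fecparam(struct net_device *dev,
					struct ethtool_fecparam *fec)
	{
		if (!example_fec_supported(dev))	/* assumed helper */
			return -EOPNOTSUPP;		/* now reaches user space */

		fec->fec	= ETHTOOL_FEC_RS | ETHTOOL_FEC_BASER;
		fec->active_fec	= ETHTOOL_FEC_RS;
		return 0;
	}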
diff --git a/net/core/filter.c b/net/core/filter.c
index 08ab4c65a998..48aa7c7320db 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2087,6 +2087,10 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
u32 off = skb_mac_header_len(skb);
int ret;
+ /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
+ if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+ return -ENOTSUPP;
+
ret = skb_cow(skb, len_diff);
if (unlikely(ret < 0))
return ret;
@@ -2096,19 +2100,21 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
return ret;
if (skb_is_gso(skb)) {
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
/* SKB_GSO_TCPV4 needs to be changed into
* SKB_GSO_TCPV6.
*/
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
- skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV4;
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
+ if (shinfo->gso_type & SKB_GSO_TCPV4) {
+ shinfo->gso_type &= ~SKB_GSO_TCPV4;
+ shinfo->gso_type |= SKB_GSO_TCPV6;
}
/* Due to IPv6 header, MSS needs to be downgraded. */
- skb_shinfo(skb)->gso_size -= len_diff;
+ skb_decrease_gso_size(shinfo, len_diff);
/* Header must be checked, and gso_segs recomputed. */
- skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
- skb_shinfo(skb)->gso_segs = 0;
+ shinfo->gso_type |= SKB_GSO_DODGY;
+ shinfo->gso_segs = 0;
}
skb->protocol = htons(ETH_P_IPV6);
@@ -2123,6 +2129,10 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
u32 off = skb_mac_header_len(skb);
int ret;
+ /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
+ if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+ return -ENOTSUPP;
+
ret = skb_unclone(skb, GFP_ATOMIC);
if (unlikely(ret < 0))
return ret;
@@ -2132,19 +2142,21 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
return ret;
if (skb_is_gso(skb)) {
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
/* SKB_GSO_TCPV6 needs to be changed into
* SKB_GSO_TCPV4.
*/
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
- skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV6;
- skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
+ if (shinfo->gso_type & SKB_GSO_TCPV6) {
+ shinfo->gso_type &= ~SKB_GSO_TCPV6;
+ shinfo->gso_type |= SKB_GSO_TCPV4;
}
/* Due to IPv4 header, MSS can be upgraded. */
- skb_shinfo(skb)->gso_size += len_diff;
+ skb_increase_gso_size(shinfo, len_diff);
/* Header must be checked, and gso_segs recomputed. */
- skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
- skb_shinfo(skb)->gso_segs = 0;
+ shinfo->gso_type |= SKB_GSO_DODGY;
+ shinfo->gso_segs = 0;
}
skb->protocol = htons(ETH_P_IP);
@@ -2243,6 +2255,10 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
int ret;
+ /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
+ if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+ return -ENOTSUPP;
+
ret = skb_cow(skb, len_diff);
if (unlikely(ret < 0))
return ret;
@@ -2252,11 +2268,13 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
return ret;
if (skb_is_gso(skb)) {
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
/* Due to header grow, MSS needs to be downgraded. */
- skb_shinfo(skb)->gso_size -= len_diff;
+ skb_decrease_gso_size(shinfo, len_diff);
/* Header must be checked, and gso_segs recomputed. */
- skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
- skb_shinfo(skb)->gso_segs = 0;
+ shinfo->gso_type |= SKB_GSO_DODGY;
+ shinfo->gso_segs = 0;
}
return 0;
@@ -2267,6 +2285,10 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
int ret;
+ /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
+ if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
+ return -ENOTSUPP;
+
ret = skb_unclone(skb, GFP_ATOMIC);
if (unlikely(ret < 0))
return ret;
@@ -2276,11 +2298,13 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
return ret;
if (skb_is_gso(skb)) {
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
/* Due to header shrink, MSS can be upgraded. */
- skb_shinfo(skb)->gso_size += len_diff;
+ skb_increase_gso_size(shinfo, len_diff);
/* Header must be checked, and gso_segs recomputed. */
- skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
- skb_shinfo(skb)->gso_segs = 0;
+ shinfo->gso_type |= SKB_GSO_DODGY;
+ shinfo->gso_segs = 0;
}
return 0;
@@ -3381,17 +3405,13 @@ BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
struct sock *sk = bpf_sock->sk;
int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
- if (!sk_fullsock(sk))
+ if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
return -EINVAL;
-#ifdef CONFIG_INET
if (val)
tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
-#else
- return -EINVAL;
-#endif
}
static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
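All four header-resizing helpers (bpf_skb_proto_4_to_6/6_to_4 and bpf_skb_net_grow/shrink) gain the same two guards: GSO-SCTP skbs are rejected up front with -ENOTSUPP, since their gso_size is the GSO_BY_FRAGS marker rather than a real MSS, and the MSS adjustment goes through skb_decrease_gso_size()/skb_increase_gso_size() instead of writing shinfo->gso_size directly. The shared pattern, condensed:

	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
	if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
		return -ENOTSUPP;
	...
	if (skb_is_gso(skb)) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);

		skb_decrease_gso_size(shinfo, len_diff);  /* or increase on shrink */
		shinfo->gso_type |= SKB_GSO_DODGY;	  /* recheck at segmentation */
		shinfo->gso_segs = 0;			  /* force recompute */
	}

bpf_sock_ops_cb_flags_set() likewise folds its CONFIG_INET #ifdef into a single IS_ENABLED() test while keeping the -EINVAL result when INET is compiled out.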
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 0a3f88f08727..98fd12721221 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -66,6 +66,7 @@ struct net_rate_estimator {
static void est_fetch_counters(struct net_rate_estimator *e,
struct gnet_stats_basic_packed *b)
{
+ memset(b, 0, sizeof(*b));
if (e->stats_lock)
spin_lock(e->stats_lock);
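The gen_estimator.c one-liner zeroes the on-stack scratch counters before they are filled: in the per-CPU stats case the copy helpers accumulate into *b, so without the memset the estimator could add onto uninitialized stack memory. Caller shape this protects (sketch):

	struct gnet_stats_basic_packed b;	/* previously could start as garbage */

	est_fetch_counters(e, &b);		/* now zeroed inside, then accumulated */
	/* b.bytes / b.packets form a consistent snapshot to diff against
	 * the estimator's stored totals */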
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 09bd89c90a71..1e7acdc30732 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4179,7 +4179,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
skb_queue_tail(&sk->sk_error_queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk);
+ sk->sk_error_report(sk);
return 0;
}
EXPORT_SYMBOL(sock_queue_err_skb);
@@ -4891,7 +4891,7 @@ EXPORT_SYMBOL_GPL(skb_scrub_packet);
*
* The MAC/L2 or network (IP, IPv6) headers are not accounted for.
*/
-unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
+static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
{
const struct skb_shared_info *shinfo = skb_shinfo(skb);
unsigned int thlen = 0;
@@ -4904,7 +4904,7 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
thlen += inner_tcp_hdrlen(skb);
} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
thlen = tcp_hdrlen(skb);
- } else if (unlikely(shinfo->gso_type & SKB_GSO_SCTP)) {
+ } else if (unlikely(skb_is_gso_sctp(skb))) {
thlen = sizeof(struct sctphdr);
}
/* UFO sets gso_size to the size of the fragmentation
@@ -4913,7 +4913,40 @@ unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
*/
return thlen + shinfo->gso_size;
}
-EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
+
+/**
+ * skb_gso_network_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_network_seglen is used to determine the real size of the
+ * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
+ *
+ * The MAC/L2 header is not accounted for.
+ */
+static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
+{
+ unsigned int hdr_len = skb_transport_header(skb) -
+ skb_network_header(skb);
+
+ return hdr_len + skb_gso_transport_seglen(skb);
+}
+
+/**
+ * skb_gso_mac_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_mac_seglen is used to determine the real size of the
+ * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
+ * headers (TCP/UDP).
+ */
+static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
+{
+ unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
+
+ return hdr_len + skb_gso_transport_seglen(skb);
+}
/**
* skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
@@ -4955,19 +4988,20 @@ static inline bool skb_gso_size_check(const struct sk_buff *skb,
}
/**
- * skb_gso_validate_mtu - Return in case such skb fits a given MTU
+ * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
*
* @skb: GSO skb
* @mtu: MTU to validate against
*
- * skb_gso_validate_mtu validates if a given skb will fit a wanted MTU
- * once split.
+ * skb_gso_validate_network_len validates if a given skb will fit a
+ * wanted MTU once split. It considers L3 headers, L4 headers, and the
+ * payload.
*/
-bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu)
+bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
{
return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
}
-EXPORT_SYMBOL_GPL(skb_gso_validate_mtu);
+EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
/**
* skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
@@ -4986,13 +5020,16 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
{
+ int mac_len;
+
if (skb_cow(skb, skb_headroom(skb)) < 0) {
kfree_skb(skb);
return NULL;
}
- memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN,
- 2 * ETH_ALEN);
+ mac_len = skb->data - skb_mac_header(skb);
+ memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
+ mac_len - VLAN_HLEN - ETH_TLEN);
skb->mac_header += VLAN_HLEN;
return skb;
}
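In skbuff.c, sock_queue_err_skb() now signals the error via sk_error_report() instead of faking a data-ready wakeup; skb_gso_transport_seglen() becomes static with the network/mac variants kept as local helpers; skb_gso_validate_mtu() is renamed skb_gso_validate_network_len() to state what it measures; and skb_reorder_vlan_header() moves the real mac-header length instead of assuming a bare Ethernet header. Callers of the renamed check update mechanically, e.g. (caller shape assumed):

	/* will each segment's L3 + L4 headers plus payload fit into mtu? */
	if (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))
		goto fragment_or_drop;	/* hypothetical label */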
diff --git a/net/core/sock.c b/net/core/sock.c
index c501499a04fe..85b0b64e7f9d 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3261,6 +3261,27 @@ void proto_unregister(struct proto *prot)
}
EXPORT_SYMBOL(proto_unregister);
+int sock_load_diag_module(int family, int protocol)
+{
+ if (!protocol) {
+ if (!sock_is_registered(family))
+ return -ENOENT;
+
+ return request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
+ NETLINK_SOCK_DIAG, family);
+ }
+
+#ifdef CONFIG_INET
+ if (family == AF_INET &&
+ !rcu_access_pointer(inet_protos[protocol]))
+ return -ENOENT;
+#endif
+
+ return request_module("net-pf-%d-proto-%d-type-%d-%d", PF_NETLINK,
+ NETLINK_SOCK_DIAG, family, protocol);
+}
+EXPORT_SYMBOL(sock_load_diag_module);
+
#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(proto_list_mutex)
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 146b50e30659..c37b5be7c5e4 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -220,8 +220,7 @@ static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh)
return -EINVAL;
if (sock_diag_handlers[req->sdiag_family] == NULL)
- request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
- NETLINK_SOCK_DIAG, req->sdiag_family);
+ sock_load_diag_module(req->sdiag_family, 0);
mutex_lock(&sock_diag_table_mutex);
hndl = sock_diag_handlers[req->sdiag_family];
@@ -247,8 +246,7 @@ static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
case TCPDIAG_GETSOCK:
case DCCPDIAG_GETSOCK:
if (inet_rcv_compat == NULL)
- request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
- NETLINK_SOCK_DIAG, AF_INET);
+ sock_load_diag_module(AF_INET, 0);
mutex_lock(&sock_diag_table_mutex);
if (inet_rcv_compat != NULL)
@@ -281,14 +279,12 @@ static int sock_diag_bind(struct net *net, int group)
case SKNLGRP_INET_TCP_DESTROY:
case SKNLGRP_INET_UDP_DESTROY:
if (!sock_diag_handlers[AF_INET])
- request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
- NETLINK_SOCK_DIAG, AF_INET);
+ sock_load_diag_module(AF_INET, 0);
break;
case SKNLGRP_INET6_TCP_DESTROY:
case SKNLGRP_INET6_UDP_DESTROY:
if (!sock_diag_handlers[AF_INET6])
- request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
- NETLINK_SOCK_DIAG, AF_INET6);
+ sock_load_diag_module(AF_INET6, 0);
break;
}
return 0;
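sock_load_diag_module() centralizes the request_module() calls that sock_diag.c used to open-code, and returns -ENOENT without loading anything when the requested family (or, for AF_INET, the protocol) is not registered, so a diag request for something that does not exist no longer triggers a module load. The aliases it resolves follow the existing net-pf naming:

	/* PF_NETLINK = 16, NETLINK_SOCK_DIAG = 4, AF_INET = 2, IPPROTO_TCP = 6 */
	sock_load_diag_module(AF_INET, 0);
		/* request_module("net-pf-16-proto-4-type-2") */
	sock_load_diag_module(AF_INET, IPPROTO_TCP);
		/* request_module("net-pf-16-proto-4-type-2-6") */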