author     Daniel Vetter <daniel.vetter@ffwll.ch>    2021-12-14 09:43:25 +0100
committer  Daniel Vetter <daniel.vetter@ffwll.ch>    2021-12-14 10:24:28 +0100
commit     99b03ca651f1c409d296d6c6e9440d9b005c722f (patch)
tree       8773b8cbab631258b561ddb3d9b219072095bee4 /net
parent     211b4dbc070090b4183d6f9db7dd3bd4e6170447 (diff)
parent     2585cf9dfaaddf00b069673f27bb3f8530e2039c (diff)
download   linux-99b03ca651f1c409d296d6c6e9440d9b005c722f.tar.bz2
Merge v5.16-rc5 into drm-next
Thomas Zimmermann requested a fixes backmerge, specifically also for 96c5f82ef0a1 ("drm/vc4: fix error code in vc4_create_object()"). Just a bunch of adjacent-changes conflicts, even the big pile of them in vc4.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c | 3
-rw-r--r--  net/8021q/vlan_dev.c | 3
-rw-r--r--  net/core/dev.c | 5
-rw-r--r--  net/core/devlink.c | 16
-rw-r--r--  net/core/dst_cache.c | 19
-rw-r--r--  net/core/fib_rules.c | 2
-rw-r--r--  net/core/neighbour.c | 4
-rw-r--r--  net/core/skmsg.c | 5
-rw-r--r--  net/core/sock_map.c | 15
-rw-r--r--  net/ethtool/ioctl.c | 2
-rw-r--r--  net/ethtool/netlink.c | 3
-rw-r--r--  net/ipv4/fib_frontend.c | 2
-rw-r--r--  net/ipv4/fib_rules.c | 5
-rw-r--r--  net/ipv4/fib_semantics.c | 4
-rw-r--r--  net/ipv4/inet_connection_sock.c | 2
-rw-r--r--  net/ipv4/nexthop.c | 35
-rw-r--r--  net/ipv4/tcp_cubic.c | 5
-rw-r--r--  net/ipv4/tcp_minisocks.c | 4
-rw-r--r--  net/ipv4/udp.c | 2
-rw-r--r--  net/ipv6/af_inet6.c | 1
-rw-r--r--  net/ipv6/fib6_rules.c | 4
-rw-r--r--  net/ipv6/ip6_offload.c | 6
-rw-r--r--  net/ipv6/ip6_output.c | 2
-rw-r--r--  net/ipv6/route.c | 19
-rw-r--r--  net/ipv6/seg6_iptunnel.c | 8
-rw-r--r--  net/mctp/route.c | 9
-rw-r--r--  net/mctp/test/utils.c | 2
-rw-r--r--  net/mpls/af_mpls.c | 97
-rw-r--r--  net/mpls/internal.h | 2
-rw-r--r--  net/mptcp/options.c | 32
-rw-r--r--  net/mptcp/protocol.c | 51
-rw-r--r--  net/mptcp/protocol.h | 17
-rw-r--r--  net/ncsi/ncsi-cmd.c | 24
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 8
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 6
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 8
-rw-r--r--  net/netfilter/nf_flow_table_core.c | 4
-rw-r--r--  net/netfilter/nf_flow_table_offload.c | 4
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 2
-rw-r--r--  net/netfilter/nft_exthdr.c | 11
-rw-r--r--  net/netfilter/nft_payload.c | 1
-rw-r--r--  net/netfilter/nft_set_pipapo_avx2.c | 2
-rw-r--r--  net/netfilter/xt_IDLETIMER.c | 4
-rw-r--r--  net/netlink/af_netlink.c | 5
-rw-r--r--  net/nfc/netlink.c | 12
-rw-r--r--  net/rds/tcp.c | 2
-rw-r--r--  net/rxrpc/conn_client.c | 14
-rw-r--r--  net/rxrpc/peer_object.c | 14
-rw-r--r--  net/sched/sch_ets.c | 8
-rw-r--r--  net/sched/sch_fq_pie.c | 1
-rw-r--r--  net/smc/af_smc.c | 14
-rw-r--r--  net/smc/smc_close.c | 14
-rw-r--r--  net/smc/smc_core.c | 42
-rw-r--r--  net/sunrpc/xprtsock.c | 10
-rw-r--r--  net/tls/tls_main.c | 47
-rw-r--r--  net/tls/tls_sw.c | 44
-rw-r--r--  net/unix/af_unix.c | 3
57 files changed, 458 insertions, 232 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index a3a0a5e994f5..abaa5d96ded2 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -184,9 +184,6 @@ int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
if (err)
goto out_unregister_netdev;
- /* Account for reference in struct vlan_dev_priv */
- dev_hold(real_dev);
-
vlan_stacked_transfer_operstate(real_dev, dev, vlan);
linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index ab6dee28536d..a54535cbcf4c 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -615,6 +615,9 @@ static int vlan_dev_init(struct net_device *dev)
if (!vlan->vlan_pcpu_stats)
return -ENOMEM;
+ /* Get vlan's reference to real_dev */
+ dev_hold(real_dev);
+
return 0;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 15ac064b5562..2a352e668d10 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4210,7 +4210,10 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
if (dev->flags & IFF_UP) {
int cpu = smp_processor_id(); /* ok because BHs are off */
- if (txq->xmit_lock_owner != cpu) {
+ /* Other cpus might concurrently change txq->xmit_lock_owner
+ * to -1 or to their cpu id, but not to our id.
+ */
+ if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
if (dev_xmit_recursion())
goto recursion_alert;
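
The READ_ONCE() added above documents a deliberately lockless check: xmit_lock_owner is only written under the queue lock, so a CPU that does not hold the lock may observe -1 or another CPU's id, but never its own. A minimal sketch of the pattern, with illustrative types and helpers rather than the kernel's own:

    struct txq {
            spinlock_t lock;
            int owner;                      /* -1 when unlocked */
    };

    static void txq_acquire(struct txq *q, int cpu)
    {
            spin_lock(&q->lock);
            WRITE_ONCE(q->owner, cpu);      /* pairs with READ_ONCE() below */
    }

    static void txq_release(struct txq *q)
    {
            WRITE_ONCE(q->owner, -1);
            spin_unlock(&q->lock);
    }

    static bool txq_owned_by_us(const struct txq *q, int cpu)
    {
            /* lockless: concurrent writers can store -1 or their own
             * cpu id, never ours, so a false positive is impossible
             */
            return READ_ONCE(q->owner) == cpu;
    }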
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 5ad72dbfcd07..c06c9ba6e8c5 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -4110,14 +4110,6 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
return err;
}
- if (info->attrs[DEVLINK_ATTR_NETNS_PID] ||
- info->attrs[DEVLINK_ATTR_NETNS_FD] ||
- info->attrs[DEVLINK_ATTR_NETNS_ID]) {
- dest_net = devlink_netns_get(skb, info);
- if (IS_ERR(dest_net))
- return PTR_ERR(dest_net);
- }
-
if (info->attrs[DEVLINK_ATTR_RELOAD_ACTION])
action = nla_get_u8(info->attrs[DEVLINK_ATTR_RELOAD_ACTION]);
else
@@ -4160,6 +4152,14 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
}
+ if (info->attrs[DEVLINK_ATTR_NETNS_PID] ||
+ info->attrs[DEVLINK_ATTR_NETNS_FD] ||
+ info->attrs[DEVLINK_ATTR_NETNS_ID]) {
+ dest_net = devlink_netns_get(skb, info);
+ if (IS_ERR(dest_net))
+ return PTR_ERR(dest_net);
+ }
+
err = devlink_reload(devlink, dest_net, action, limit, &actions_performed, info->extack);
if (dest_net)
diff --git a/net/core/dst_cache.c b/net/core/dst_cache.c
index be74ab4551c2..0ccfd5fa5cb9 100644
--- a/net/core/dst_cache.c
+++ b/net/core/dst_cache.c
@@ -162,3 +162,22 @@ void dst_cache_destroy(struct dst_cache *dst_cache)
free_percpu(dst_cache->cache);
}
EXPORT_SYMBOL_GPL(dst_cache_destroy);
+
+void dst_cache_reset_now(struct dst_cache *dst_cache)
+{
+ int i;
+
+ if (!dst_cache->cache)
+ return;
+
+ dst_cache->reset_ts = jiffies;
+ for_each_possible_cpu(i) {
+ struct dst_cache_pcpu *idst = per_cpu_ptr(dst_cache->cache, i);
+ struct dst_entry *dst = idst->dst;
+
+ idst->cookie = 0;
+ idst->dst = NULL;
+ dst_release(dst);
+ }
+}
+EXPORT_SYMBOL_GPL(dst_cache_reset_now);
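
dst_cache_reset_now() complements the existing lazy dst_cache_reset(), which only bumps reset_ts and lets each CPU discard its stale entry on the next lookup; the new helper walks every possible CPU and releases the cached dsts immediately. A hedged usage sketch, where struct my_tunnel is illustrative:

    #include <net/dst_cache.h>

    struct my_tunnel {
            __be32 remote;
            struct dst_cache cache;
    };

    static void my_tunnel_set_remote(struct my_tunnel *t, __be32 daddr)
    {
            t->remote = daddr;
            /* old routes must not linger: drop all per-cpu dsts now */
            dst_cache_reset_now(&t->cache);
    }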
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 79df7cd9dbc1..1bb567a3b329 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -323,7 +323,7 @@ jumped:
if (!err && ops->suppress && INDIRECT_CALL_MT(ops->suppress,
fib6_rule_suppress,
fib4_rule_suppress,
- rule, arg))
+ rule, flags, arg))
continue;
if (err != -EAGAIN) {
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 47931c8be04b..dda12fbd177b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -763,11 +763,10 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
ASSERT_RTNL();
- n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
+ n = kzalloc(sizeof(*n) + key_len, GFP_KERNEL);
if (!n)
goto out;
- n->protocol = 0;
write_pnet(&n->net, net);
memcpy(n->key, pkey, key_len);
n->dev = dev;
@@ -1779,6 +1778,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl)
{
neigh_tables[index] = NULL;
/* It is not clean... Fix it to unload IPv6 module safely */
+ cancel_delayed_work_sync(&tbl->managed_work);
cancel_delayed_work_sync(&tbl->gc_work);
del_timer_sync(&tbl->proxy_timer);
pneigh_queue_purge(&tbl->proxy_queue);
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 1ae52ac943f6..8eb671c827f9 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -1124,6 +1124,8 @@ void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
+ psock_set_prog(&psock->progs.stream_parser, NULL);
+
if (!psock->saved_data_ready)
return;
@@ -1212,6 +1214,9 @@ void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
+ psock_set_prog(&psock->progs.stream_verdict, NULL);
+ psock_set_prog(&psock->progs.skb_verdict, NULL);
+
if (!psock->saved_data_ready)
return;
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index f39ef79ced67..4ca4b11f4e5f 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -167,8 +167,11 @@ static void sock_map_del_link(struct sock *sk,
write_lock_bh(&sk->sk_callback_lock);
if (strp_stop)
sk_psock_stop_strp(sk, psock);
- else
+ if (verdict_stop)
sk_psock_stop_verdict(sk, psock);
+
+ if (psock->psock_update_sk_prot)
+ psock->psock_update_sk_prot(sk, psock, false);
write_unlock_bh(&sk->sk_callback_lock);
}
}
@@ -282,6 +285,12 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk)
if (msg_parser)
psock_set_prog(&psock->progs.msg_parser, msg_parser);
+ if (stream_parser)
+ psock_set_prog(&psock->progs.stream_parser, stream_parser);
+ if (stream_verdict)
+ psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
+ if (skb_verdict)
+ psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
ret = sock_map_init_proto(sk, psock);
if (ret < 0)
@@ -292,14 +301,10 @@ static int sock_map_link(struct bpf_map *map, struct sock *sk)
ret = sk_psock_init_strp(sk, psock);
if (ret)
goto out_unlock_drop;
- psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
- psock_set_prog(&psock->progs.stream_parser, stream_parser);
sk_psock_start_strp(sk, psock);
} else if (!stream_parser && stream_verdict && !psock->saved_data_ready) {
- psock_set_prog(&psock->progs.stream_verdict, stream_verdict);
sk_psock_start_verdict(sk,psock);
} else if (!stream_verdict && skb_verdict && !psock->saved_data_ready) {
- psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
sk_psock_start_verdict(sk, psock);
}
write_unlock_bh(&sk->sk_callback_lock);
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 65e9bc1058b5..20bcf86970ff 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -1719,7 +1719,7 @@ static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev,
struct ethtool_coalesce coalesce;
int ret;
- if (!dev->ethtool_ops->set_coalesce && !dev->ethtool_ops->get_coalesce)
+ if (!dev->ethtool_ops->set_coalesce || !dev->ethtool_ops->get_coalesce)
return -EOPNOTSUPP;
ret = dev->ethtool_ops->get_coalesce(dev, &coalesce, &kernel_coalesce,
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index 38b44c0291b1..96f4180aabd2 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -40,7 +40,8 @@ int ethnl_ops_begin(struct net_device *dev)
if (dev->dev.parent)
pm_runtime_get_sync(dev->dev.parent);
- if (!netif_device_present(dev)) {
+ if (!netif_device_present(dev) ||
+ dev->reg_state == NETREG_UNREGISTERING) {
ret = -ENODEV;
goto err;
}
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 9fe13e4f5d08..4d61ddd8a0ec 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1582,7 +1582,7 @@ static int __net_init fib_net_init(struct net *net)
int error;
#ifdef CONFIG_IP_ROUTE_CLASSID
- net->ipv4.fib_num_tclassid_users = 0;
+ atomic_set(&net->ipv4.fib_num_tclassid_users, 0);
#endif
error = ip_fib_net_init(net);
if (error < 0)
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index ce54a30c2ef1..d279cb8ac158 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -141,6 +141,7 @@ INDIRECT_CALLABLE_SCOPE int fib4_rule_action(struct fib_rule *rule,
}
INDIRECT_CALLABLE_SCOPE bool fib4_rule_suppress(struct fib_rule *rule,
+ int flags,
struct fib_lookup_arg *arg)
{
struct fib_result *result = (struct fib_result *) arg->result;
@@ -263,7 +264,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
if (tb[FRA_FLOW]) {
rule4->tclassid = nla_get_u32(tb[FRA_FLOW]);
if (rule4->tclassid)
- net->ipv4.fib_num_tclassid_users++;
+ atomic_inc(&net->ipv4.fib_num_tclassid_users);
}
#endif
@@ -295,7 +296,7 @@ static int fib4_rule_delete(struct fib_rule *rule)
#ifdef CONFIG_IP_ROUTE_CLASSID
if (((struct fib4_rule *)rule)->tclassid)
- net->ipv4.fib_num_tclassid_users--;
+ atomic_dec(&net->ipv4.fib_num_tclassid_users);
#endif
net->ipv4.fib_has_custom_rules = true;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 3364cb9c67e0..fde7797b5806 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -220,7 +220,7 @@ void fib_nh_release(struct net *net, struct fib_nh *fib_nh)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
if (fib_nh->nh_tclassid)
- net->ipv4.fib_num_tclassid_users--;
+ atomic_dec(&net->ipv4.fib_num_tclassid_users);
#endif
fib_nh_common_release(&fib_nh->nh_common);
}
@@ -632,7 +632,7 @@ int fib_nh_init(struct net *net, struct fib_nh *nh,
#ifdef CONFIG_IP_ROUTE_CLASSID
nh->nh_tclassid = cfg->fc_flow;
if (nh->nh_tclassid)
- net->ipv4.fib_num_tclassid_users++;
+ atomic_inc(&net->ipv4.fib_num_tclassid_users);
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
nh->fib_nh_weight = nh_weight;
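
The conversions above fix a lost-update race: "counter++" on a plain int is a non-atomic read-modify-write, so two CPUs adding or deleting rules concurrently can each read the same old value and one change is lost. A sketch of the bug class, not the kernel code itself:

    static atomic_t users = ATOMIC_INIT(0);

    /* atomic_inc()/atomic_dec() make the read-modify-write
     * indivisible; a plain int++ would not be
     */
    static void rule_add(void)  { atomic_inc(&users); }
    static void rule_del(void)  { atomic_dec(&users); }
    static bool in_use(void)    { return atomic_read(&users) != 0; }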
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index f7fea3a7c5e6..62a67fdc344c 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -721,7 +721,7 @@ static struct request_sock *inet_reqsk_clone(struct request_sock *req,
sk_node_init(&nreq_sk->sk_node);
nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
-#ifdef CONFIG_XPS
+#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
#endif
nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu;
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index 9e8100728d46..5dbd4b5505eb 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -1899,15 +1899,36 @@ static void remove_nexthop(struct net *net, struct nexthop *nh,
/* if any FIB entries reference this nexthop, any dst entries
* need to be regenerated
*/
-static void nh_rt_cache_flush(struct net *net, struct nexthop *nh)
+static void nh_rt_cache_flush(struct net *net, struct nexthop *nh,
+ struct nexthop *replaced_nh)
{
struct fib6_info *f6i;
+ struct nh_group *nhg;
+ int i;
if (!list_empty(&nh->fi_list))
rt_cache_flush(net);
list_for_each_entry(f6i, &nh->f6i_list, nh_list)
ipv6_stub->fib6_update_sernum(net, f6i);
+
+ /* if an IPv6 group was replaced, we have to release all old
+ * dsts to make sure all refcounts are released
+ */
+ if (!replaced_nh->is_group)
+ return;
+
+ /* new dsts must use only the new nexthop group */
+ synchronize_net();
+
+ nhg = rtnl_dereference(replaced_nh->nh_grp);
+ for (i = 0; i < nhg->num_nh; i++) {
+ struct nh_grp_entry *nhge = &nhg->nh_entries[i];
+ struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info);
+
+ if (nhi->family == AF_INET6)
+ ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh);
+ }
}
static int replace_nexthop_grp(struct net *net, struct nexthop *old,
@@ -2247,7 +2268,7 @@ static int replace_nexthop(struct net *net, struct nexthop *old,
err = replace_nexthop_single(net, old, new, extack);
if (!err) {
- nh_rt_cache_flush(net, old);
+ nh_rt_cache_flush(net, old, new);
__remove_nexthop(net, new, NULL);
nexthop_put(new);
@@ -2544,11 +2565,15 @@ static int nh_create_ipv6(struct net *net, struct nexthop *nh,
/* sets nh_dev if successful */
err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
extack);
- if (err)
+ if (err) {
+ /* IPv6 is not enabled, don't call fib6_nh_release */
+ if (err == -EAFNOSUPPORT)
+ goto out;
ipv6_stub->fib6_nh_release(fib6_nh);
- else
+ } else {
nh->nh_flags = fib6_nh->fib_nh_flags;
-
+ }
+out:
return err;
}
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 5e9d9c51164c..e07837e23b3f 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -330,8 +330,6 @@ static void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
return;
if (tcp_in_slow_start(tp)) {
- if (hystart && after(ack, ca->end_seq))
- bictcp_hystart_reset(sk);
acked = tcp_slow_start(tp, acked);
if (!acked)
return;
@@ -391,6 +389,9 @@ static void hystart_update(struct sock *sk, u32 delay)
struct bictcp *ca = inet_csk_ca(sk);
u32 threshold;
+ if (after(tp->snd_una, ca->end_seq))
+ bictcp_hystart_reset(sk);
+
if (hystart_detect & HYSTART_ACK_TRAIN) {
u32 now = bictcp_clock_us(sk);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index cf913a66df17..7c2d3ac2363a 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -829,8 +829,8 @@ int tcp_child_process(struct sock *parent, struct sock *child,
int ret = 0;
int state = child->sk_state;
- /* record NAPI ID of child */
- sk_mark_napi_id(child, skb);
+ /* record sk_napi_id and sk_rx_queue_mapping of child. */
+ sk_mark_napi_id_set(child, skb);
tcp_segs_in(tcp_sk(child), skb);
if (!sock_owned_by_user(child)) {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8bcecdd6aeda..23b05e28490b 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -916,7 +916,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
kfree_skb(skb);
return -EINVAL;
}
- if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) {
+ if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) {
kfree_skb(skb);
return -EINVAL;
}
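
The fix applies the cap to the same quantity the segment count is computed from: datalen, the UDP payload excluding the header, rather than skb->len. The bound is equivalent to checking the resulting segment count, as this sketch shows:

    static bool gso_segs_ok(unsigned int datalen, unsigned int gso_size)
    {
            /* number of segments GSO will carve out of the payload */
            unsigned int segs = DIV_ROUND_UP(datalen, gso_size);

            return segs <= UDP_MAX_SEGMENTS;
    }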
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 0c4da163535a..dab4a047590b 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -1026,6 +1026,7 @@ static const struct ipv6_stub ipv6_stub_impl = {
.ip6_mtu_from_fib6 = ip6_mtu_from_fib6,
.fib6_nh_init = fib6_nh_init,
.fib6_nh_release = fib6_nh_release,
+ .fib6_nh_release_dsts = fib6_nh_release_dsts,
.fib6_update_sernum = fib6_update_sernum_stub,
.fib6_rt_update = fib6_rt_update,
.ip6_del_rt = ip6_del_rt,
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 40f3e4f9f33a..dcedfe29d9d9 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -267,6 +267,7 @@ INDIRECT_CALLABLE_SCOPE int fib6_rule_action(struct fib_rule *rule,
}
INDIRECT_CALLABLE_SCOPE bool fib6_rule_suppress(struct fib_rule *rule,
+ int flags,
struct fib_lookup_arg *arg)
{
struct fib6_result *res = arg->result;
@@ -294,8 +295,7 @@ INDIRECT_CALLABLE_SCOPE bool fib6_rule_suppress(struct fib_rule *rule,
return false;
suppress_route:
- if (!(arg->flags & FIB_LOOKUP_NOREF))
- ip6_rt_put(rt);
+ ip6_rt_put_flags(rt, flags);
return true;
}
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 1b9827ff8ccf..1cbd49d5788d 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -248,9 +248,9 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
* memcmp() alone below is sufficient, right?
*/
if ((first_word & htonl(0xF00FFFFF)) ||
- !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
- !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
- *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
+ !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
+ !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
+ *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
not_same_flow:
NAPI_GRO_CB(p)->same_flow = 0;
continue;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 2f044a49afa8..ff4e83e2a506 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -174,7 +174,7 @@ static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
/* Policy lookup after SNAT yielded a new policy */
if (skb_dst(skb)->xfrm) {
- IPCB(skb)->flags |= IPSKB_REROUTED;
+ IP6CB(skb)->flags |= IP6SKB_REROUTED;
return dst_output(net, sk, skb);
}
#endif
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 3ae25b8ffbd6..42d60c76d30a 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3680,6 +3680,25 @@ void fib6_nh_release(struct fib6_nh *fib6_nh)
fib_nh_common_release(&fib6_nh->nh_common);
}
+void fib6_nh_release_dsts(struct fib6_nh *fib6_nh)
+{
+ int cpu;
+
+ if (!fib6_nh->rt6i_pcpu)
+ return;
+
+ for_each_possible_cpu(cpu) {
+ struct rt6_info *pcpu_rt, **ppcpu_rt;
+
+ ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
+ pcpu_rt = xchg(ppcpu_rt, NULL);
+ if (pcpu_rt) {
+ dst_dev_put(&pcpu_rt->dst);
+ dst_release(&pcpu_rt->dst);
+ }
+ }
+}
+
static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
gfp_t gfp_flags,
struct netlink_ext_ack *extack)
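
fib6_nh_release_dsts() uses the usual xchg()-based per-cpu teardown: each slot is atomically swapped to NULL, so whichever context wins the exchange owns the release and nobody frees an entry twice. A generic sketch of the pattern, with struct thing and thing_put() as illustrative stand-ins:

    struct thing;                           /* refcounted object, illustrative */
    void thing_put(struct thing *t);

    static void release_all_cpus(struct thing * __percpu *slots)
    {
            int cpu;

            for_each_possible_cpu(cpu) {
                    struct thing **slot = per_cpu_ptr(slots, cpu);
                    /* xchg() atomically claims the entry; a concurrent
                     * caller gets NULL and does nothing
                     */
                    struct thing *t = xchg(slot, NULL);

                    if (t)
                            thing_put(t);
            }
    }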
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 3adc5d9211ad..d64855010948 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -161,6 +161,14 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+
+ /* the control block has been erased, so we have to set the
+ * iif once again.
+ * We read the receiving interface index directly from the
+ * skb->skb_iif as it is done in the IPv4 receiving path (i.e.:
+ * ip_rcv_core(...)).
+ */
+ IP6CB(skb)->iif = skb->skb_iif;
}
hdr->nexthdr = NEXTHDR_ROUTING;
diff --git a/net/mctp/route.c b/net/mctp/route.c
index 46c44823edb7..cdf09c2a7007 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -952,7 +952,7 @@ static int mctp_route_add(struct mctp_dev *mdev, mctp_eid_t daddr_start,
}
static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start,
- unsigned int daddr_extent)
+ unsigned int daddr_extent, unsigned char type)
{
struct net *net = dev_net(mdev->dev);
struct mctp_route *rt, *tmp;
@@ -969,7 +969,8 @@ static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start,
list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) {
if (rt->dev == mdev &&
- rt->min == daddr_start && rt->max == daddr_end) {
+ rt->min == daddr_start && rt->max == daddr_end &&
+ rt->type == type) {
list_del_rcu(&rt->list);
/* TODO: immediate RTM_DELROUTE */
mctp_route_release(rt);
@@ -987,7 +988,7 @@ int mctp_route_add_local(struct mctp_dev *mdev, mctp_eid_t addr)
int mctp_route_remove_local(struct mctp_dev *mdev, mctp_eid_t addr)
{
- return mctp_route_remove(mdev, addr, 0);
+ return mctp_route_remove(mdev, addr, 0, RTN_LOCAL);
}
/* removes all entries for a given device */
@@ -1195,7 +1196,7 @@ static int mctp_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
if (rtm->rtm_type != RTN_UNICAST)
return -EINVAL;
- rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len);
+ rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len, RTN_UNICAST);
return rc;
}
diff --git a/net/mctp/test/utils.c b/net/mctp/test/utils.c
index cc6b8803aa9d..7b7918702592 100644
--- a/net/mctp/test/utils.c
+++ b/net/mctp/test/utils.c
@@ -12,7 +12,7 @@
static netdev_tx_t mctp_test_dev_tx(struct sk_buff *skb,
struct net_device *ndev)
{
- kfree(skb);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
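
The one-line test fix above corrects a classic mistake: an sk_buff carries refcounted data, a fragment list and a destructor, so it must be freed with kfree_skb() (or consume_skb() on non-error paths), never raw kfree(). Illustrative only:

    static void drop_packet(struct sk_buff *skb)
    {
            /* kfree(skb) would leak skb->data, frags and socket
             * accounting; kfree_skb() runs the full teardown
             */
            kfree_skb(skb);
    }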
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index ffeb2df8be7a..0c7bde1c14a6 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -409,7 +409,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
goto err;
/* Find the output device */
- out_dev = rcu_dereference(nh->nh_dev);
+ out_dev = nh->nh_dev;
if (!mpls_output_possible(out_dev))
goto tx_err;
@@ -698,7 +698,7 @@ static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
(dev->addr_len != nh->nh_via_alen))
goto errout;
- RCU_INIT_POINTER(nh->nh_dev, dev);
+ nh->nh_dev = dev;
if (!(dev->flags & IFF_UP)) {
nh->nh_flags |= RTNH_F_DEAD;
@@ -1491,26 +1491,53 @@ static void mpls_dev_destroy_rcu(struct rcu_head *head)
kfree(mdev);
}
-static void mpls_ifdown(struct net_device *dev, int event)
+static int mpls_ifdown(struct net_device *dev, int event)
{
struct mpls_route __rcu **platform_label;
struct net *net = dev_net(dev);
- u8 alive, deleted;
unsigned index;
platform_label = rtnl_dereference(net->mpls.platform_label);
for (index = 0; index < net->mpls.platform_labels; index++) {
struct mpls_route *rt = rtnl_dereference(platform_label[index]);
+ bool nh_del = false;
+ u8 alive = 0;
if (!rt)
continue;
- alive = 0;
- deleted = 0;
+ if (event == NETDEV_UNREGISTER) {
+ u8 deleted = 0;
+
+ for_nexthops(rt) {
+ if (!nh->nh_dev || nh->nh_dev == dev)
+ deleted++;
+ if (nh->nh_dev == dev)
+ nh_del = true;
+ } endfor_nexthops(rt);
+
+ /* if there are no more nexthops, delete the route */
+ if (deleted == rt->rt_nhn) {
+ mpls_route_update(net, index, NULL, NULL);
+ continue;
+ }
+
+ if (nh_del) {
+ size_t size = sizeof(*rt) + rt->rt_nhn *
+ rt->rt_nh_size;
+ struct mpls_route *orig = rt;
+
+ rt = kmalloc(size, GFP_KERNEL);
+ if (!rt)
+ return -ENOMEM;
+ memcpy(rt, orig, size);
+ }
+ }
+
change_nexthops(rt) {
unsigned int nh_flags = nh->nh_flags;
- if (rtnl_dereference(nh->nh_dev) != dev)
+ if (nh->nh_dev != dev)
goto next;
switch (event) {
@@ -1523,23 +1550,22 @@ static void mpls_ifdown(struct net_device *dev, int event)
break;
}
if (event == NETDEV_UNREGISTER)
- RCU_INIT_POINTER(nh->nh_dev, NULL);
+ nh->nh_dev = NULL;
if (nh->nh_flags != nh_flags)
WRITE_ONCE(nh->nh_flags, nh_flags);
next:
if (!(nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)))
alive++;
- if (!rtnl_dereference(nh->nh_dev))
- deleted++;
} endfor_nexthops(rt);
WRITE_ONCE(rt->rt_nhn_alive, alive);
- /* if there are no more nexthops, delete the route */
- if (event == NETDEV_UNREGISTER && deleted == rt->rt_nhn)
- mpls_route_update(net, index, NULL, NULL);
+ if (nh_del)
+ mpls_route_update(net, index, rt, NULL);
}
+
+ return 0;
}
static void mpls_ifup(struct net_device *dev, unsigned int flags)
@@ -1559,14 +1585,12 @@ static void mpls_ifup(struct net_device *dev, unsigned int flags)
alive = 0;
change_nexthops(rt) {
unsigned int nh_flags = nh->nh_flags;
- struct net_device *nh_dev =
- rtnl_dereference(nh->nh_dev);
if (!(nh_flags & flags)) {
alive++;
continue;
}
- if (nh_dev != dev)
+ if (nh->nh_dev != dev)
continue;
alive++;
nh_flags &= ~flags;
@@ -1597,8 +1621,12 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
return NOTIFY_OK;
switch (event) {
+ int err;
+
case NETDEV_DOWN:
- mpls_ifdown(dev, event);
+ err = mpls_ifdown(dev, event);
+ if (err)
+ return notifier_from_errno(err);
break;
case NETDEV_UP:
flags = dev_get_flags(dev);
@@ -1609,13 +1637,18 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
break;
case NETDEV_CHANGE:
flags = dev_get_flags(dev);
- if (flags & (IFF_RUNNING | IFF_LOWER_UP))
+ if (flags & (IFF_RUNNING | IFF_LOWER_UP)) {
mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
- else
- mpls_ifdown(dev, event);
+ } else {
+ err = mpls_ifdown(dev, event);
+ if (err)
+ return notifier_from_errno(err);
+ }
break;
case NETDEV_UNREGISTER:
- mpls_ifdown(dev, event);
+ err = mpls_ifdown(dev, event);
+ if (err)
+ return notifier_from_errno(err);
mdev = mpls_dev_get(dev);
if (mdev) {
mpls_dev_sysctl_unregister(dev, mdev);
@@ -1626,8 +1659,6 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
case NETDEV_CHANGENAME:
mdev = mpls_dev_get(dev);
if (mdev) {
- int err;
-
mpls_dev_sysctl_unregister(dev, mdev);
err = mpls_dev_sysctl_register(dev, mdev);
if (err)
@@ -1994,7 +2025,7 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
nh->nh_via_alen))
goto nla_put_failure;
- dev = rtnl_dereference(nh->nh_dev);
+ dev = nh->nh_dev;
if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
goto nla_put_failure;
if (nh->nh_flags & RTNH_F_LINKDOWN)
@@ -2012,7 +2043,7 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
goto nla_put_failure;
for_nexthops(rt) {
- dev = rtnl_dereference(nh->nh_dev);
+ dev = nh->nh_dev;
if (!dev)
continue;
@@ -2123,18 +2154,14 @@ static int mpls_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
static bool mpls_rt_uses_dev(struct mpls_route *rt,
const struct net_device *dev)
{
- struct net_device *nh_dev;
-
if (rt->rt_nhn == 1) {
struct mpls_nh *nh = rt->rt_nh;
- nh_dev = rtnl_dereference(nh->nh_dev);
- if (dev == nh_dev)
+ if (nh->nh_dev == dev)
return true;
} else {
for_nexthops(rt) {
- nh_dev = rtnl_dereference(nh->nh_dev);
- if (nh_dev == dev)
+ if (nh->nh_dev == dev)
return true;
} endfor_nexthops(rt);
}
@@ -2222,7 +2249,7 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
size_t nhsize = 0;
for_nexthops(rt) {
- if (!rtnl_dereference(nh->nh_dev))
+ if (!nh->nh_dev)
continue;
nhsize += nla_total_size(sizeof(struct rtnexthop));
/* RTA_VIA */
@@ -2468,7 +2495,7 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
nh->nh_via_alen))
goto nla_put_failure;
- dev = rtnl_dereference(nh->nh_dev);
+ dev = nh->nh_dev;
if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
goto nla_put_failure;
@@ -2507,7 +2534,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
rt0 = mpls_rt_alloc(1, lo->addr_len, 0);
if (IS_ERR(rt0))
goto nort0;
- RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo);
+ rt0->rt_nh->nh_dev = lo;
rt0->rt_protocol = RTPROT_KERNEL;
rt0->rt_payload_type = MPT_IPV4;
rt0->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
@@ -2521,7 +2548,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
rt2 = mpls_rt_alloc(1, lo->addr_len, 0);
if (IS_ERR(rt2))
goto nort2;
- RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo);
+ rt2->rt_nh->nh_dev = lo;
rt2->rt_protocol = RTPROT_KERNEL;
rt2->rt_payload_type = MPT_IPV6;
rt2->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index 838cdfc10e47..893df00b77b6 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -87,7 +87,7 @@ enum mpls_payload_type {
};
struct mpls_nh { /* next hop label forwarding entry */
- struct net_device __rcu *nh_dev;
+ struct net_device *nh_dev;
/* nh_flags is accessed under RCU in the packet path; it is
* modified handling netdev events with rtnl lock held
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 7c3420afb1a0..fe98e4f475ba 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -422,28 +422,6 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
return false;
}
-/* MP_JOIN client subflow must wait for 4th ack before sending any data:
- * TCP can't schedule delack timer before the subflow is fully established.
- * MPTCP uses the delack timer to do 3rd ack retransmissions
- */
-static void schedule_3rdack_retransmission(struct sock *sk)
-{
- struct inet_connection_sock *icsk = inet_csk(sk);
- struct tcp_sock *tp = tcp_sk(sk);
- unsigned long timeout;
-
- /* reschedule with a timeout above RTT, as we must look only for drop */
- if (tp->srtt_us)
- timeout = tp->srtt_us << 1;
- else
- timeout = TCP_TIMEOUT_INIT;
-
- WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
- icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
- icsk->icsk_ack.timeout = timeout;
- sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
-}
-
static void clear_3rdack_retransmission(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -526,7 +504,15 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
*size = TCPOLEN_MPTCP_MPJ_ACK;
pr_debug("subflow=%p", subflow);
- schedule_3rdack_retransmission(sk);
+ /* we can use the full delegate action helper only from BH context
+ * If we are in process context - sk is flushing the backlog at
+ * socket lock release time - just set the appropriate flag, will
+ * be handled by the release callback
+ */
+ if (sock_owned_by_user(sk))
+ set_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status);
+ else
+ mptcp_subflow_delegate(subflow, MPTCP_DELEGATE_ACK);
return true;
}
return false;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index b7e32e316738..c82a76d2d0bf 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1596,7 +1596,8 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
if (!xmit_ssk)
goto out;
if (xmit_ssk != ssk) {
- mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk));
+ mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk),
+ MPTCP_DELEGATE_SEND);
goto out;
}
@@ -2943,7 +2944,7 @@ void __mptcp_check_push(struct sock *sk, struct sock *ssk)
if (xmit_ssk == ssk)
__mptcp_subflow_push_pending(sk, ssk);
else if (xmit_ssk)
- mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk));
+ mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk), MPTCP_DELEGATE_SEND);
} else {
set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
}
@@ -2993,18 +2994,50 @@ static void mptcp_release_cb(struct sock *sk)
__mptcp_update_rmem(sk);
}
+/* MP_JOIN client subflow must wait for 4th ack before sending any data:
+ * TCP can't schedule delack timer before the subflow is fully established.
+ * MPTCP uses the delack timer to do 3rd ack retransmissions
+ */
+static void schedule_3rdack_retransmission(struct sock *ssk)
+{
+ struct inet_connection_sock *icsk = inet_csk(ssk);
+ struct tcp_sock *tp = tcp_sk(ssk);
+ unsigned long timeout;
+
+ if (mptcp_subflow_ctx(ssk)->fully_established)
+ return;
+
+ /* reschedule with a timeout above RTT, as we must look only for drop */
+ if (tp->srtt_us)
+ timeout = usecs_to_jiffies(tp->srtt_us >> (3 - 1));
+ else
+ timeout = TCP_TIMEOUT_INIT;
+ timeout += jiffies;
+
+ WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
+ icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
+ icsk->icsk_ack.timeout = timeout;
+ sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout);
+}
+
void mptcp_subflow_process_delegated(struct sock *ssk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct sock *sk = subflow->conn;
- mptcp_data_lock(sk);
- if (!sock_owned_by_user(sk))
- __mptcp_subflow_push_pending(sk, ssk);
- else
- set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
- mptcp_data_unlock(sk);
- mptcp_subflow_delegated_done(subflow);
+ if (test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) {
+ mptcp_data_lock(sk);
+ if (!sock_owned_by_user(sk))
+ __mptcp_subflow_push_pending(sk, ssk);
+ else
+ set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
+ mptcp_data_unlock(sk);
+ mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_SEND);
+ }
+ if (test_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status)) {
+ schedule_3rdack_retransmission(ssk);
+ mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_ACK);
+ }
}
static int mptcp_hash(struct sock *sk)
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 67a61ac48b20..d87cc040352e 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -387,6 +387,7 @@ struct mptcp_delegated_action {
DECLARE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
#define MPTCP_DELEGATE_SEND 0
+#define MPTCP_DELEGATE_ACK 1
/* MPTCP subflow context */
struct mptcp_subflow_context {
@@ -492,23 +493,23 @@ static inline void mptcp_add_pending_subflow(struct mptcp_sock *msk,
void mptcp_subflow_process_delegated(struct sock *ssk);
-static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow)
+static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow, int action)
{
struct mptcp_delegated_action *delegated;
bool schedule;
+ /* the caller held the subflow bh socket lock */
+ lockdep_assert_in_softirq();
+
/* The implied barrier pairs with mptcp_subflow_delegated_done(), and
* ensures the below list check sees list updates done prior to status
* bit changes
*/
- if (!test_and_set_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) {
+ if (!test_and_set_bit(action, &subflow->delegated_status)) {
/* still on delegated list from previous scheduling */
if (!list_empty(&subflow->delegated_node))
return;
- /* the caller held the subflow bh socket lock */
- lockdep_assert_in_softirq();
-
delegated = this_cpu_ptr(&mptcp_delegated_actions);
schedule = list_empty(&delegated->head);
list_add_tail(&subflow->delegated_node, &delegated->head);
@@ -533,16 +534,16 @@ mptcp_subflow_delegated_next(struct mptcp_delegated_action *delegated)
static inline bool mptcp_subflow_has_delegated_action(const struct mptcp_subflow_context *subflow)
{
- return test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status);
+ return !!READ_ONCE(subflow->delegated_status);
}
-static inline void mptcp_subflow_delegated_done(struct mptcp_subflow_context *subflow)
+static inline void mptcp_subflow_delegated_done(struct mptcp_subflow_context *subflow, int action)
{
/* pairs with mptcp_subflow_delegate, ensures delegate_node is updated before
* touching the status bit
*/
smp_wmb();
- clear_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status);
+ clear_bit(action, &subflow->delegated_status);
}
int mptcp_is_enabled(const struct net *net);
diff --git a/net/ncsi/ncsi-cmd.c b/net/ncsi/ncsi-cmd.c
index ba9ae482141b..dda8b76b7798 100644
--- a/net/ncsi/ncsi-cmd.c
+++ b/net/ncsi/ncsi-cmd.c
@@ -18,6 +18,8 @@
#include "internal.h"
#include "ncsi-pkt.h"
+static const int padding_bytes = 26;
+
u32 ncsi_calculate_checksum(unsigned char *data, int len)
{
u32 checksum = 0;
@@ -213,12 +215,17 @@ static int ncsi_cmd_handler_oem(struct sk_buff *skb,
{
struct ncsi_cmd_oem_pkt *cmd;
unsigned int len;
+ int payload;
+ /* NC-SI spec DSP_0222_1.2.0, section 8.2.2.2
+ * requires payload to be padded with 0 to
+ * 32-bit boundary before the checksum field.
+ * Ensure the padding bytes are accounted for in
+ * skb allocation
+ */
+ payload = ALIGN(nca->payload, 4);
len = sizeof(struct ncsi_cmd_pkt_hdr) + 4;
- if (nca->payload < 26)
- len += 26;
- else
- len += nca->payload;
+ len += max(payload, padding_bytes);
cmd = skb_put_zero(skb, len);
memcpy(&cmd->mfr_id, nca->data, nca->payload);
@@ -272,6 +279,7 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
struct net_device *dev = nd->dev;
int hlen = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
+ int payload;
int len = hlen + tlen;
struct sk_buff *skb;
struct ncsi_request *nr;
@@ -281,14 +289,14 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
return NULL;
/* NCSI command packet has 16-bytes header, payload, 4 bytes checksum.
+ * Payload needs padding so that the checksum field following payload is
+ * aligned to 32-bit boundary.
* The packet needs padding if its payload is less than 26 bytes to
* meet 64 bytes minimal ethernet frame length.
*/
len += sizeof(struct ncsi_cmd_pkt_hdr) + 4;
- if (nca->payload < 26)
- len += 26;
- else
- len += nca->payload;
+ payload = ALIGN(nca->payload, 4);
+ len += max(payload, padding_bytes);
/* Allocate skb */
skb = alloc_skb(len, GFP_ATOMIC);
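
Both call sites now size the command the same way: round the payload up to the 32-bit boundary the trailing checksum requires, then enforce the 26-byte minimum that keeps the Ethernet frame at 64 bytes. As a sketch of the arithmetic (26 is the patch's padding_bytes):

    static int ncsi_cmd_total_len(int payload_len)
    {
            int payload = ALIGN(payload_len, 4);    /* 32-bit boundary */

            /* 16-byte header + 4-byte checksum + padded payload,
             * but never less than the 26-byte minimum padding
             */
            return sizeof(struct ncsi_cmd_pkt_hdr) + 4 + max(payload, 26);
    }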
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index e93c937a8bf0..51ad557a525b 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1919,7 +1919,6 @@ ip_vs_in_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state
struct ip_vs_proto_data *pd;
struct ip_vs_conn *cp;
int ret, pkts;
- int conn_reuse_mode;
struct sock *sk;
int af = state->pf;
@@ -1997,15 +1996,16 @@ ip_vs_in_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state
cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
ipvs, af, skb, &iph);
- conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
- if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
+ if (!iph.fragoffs && is_new_conn(skb, &iph) && cp) {
+ int conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
bool old_ct = false, resched = false;
if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
unlikely(!atomic_read(&cp->dest->weight))) {
resched = true;
old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
- } else if (is_new_conn_expected(cp, conn_reuse_mode)) {
+ } else if (conn_reuse_mode &&
+ is_new_conn_expected(cp, conn_reuse_mode)) {
old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
if (!atomic_read(&cp->n_control)) {
resched = true;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 770a63103c7a..4712a90a1820 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -684,7 +684,7 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
tstamp = nf_conn_tstamp_find(ct);
if (tstamp) {
- s32 timeout = ct->timeout - nfct_time_stamp;
+ s32 timeout = READ_ONCE(ct->timeout) - nfct_time_stamp;
tstamp->stop = ktime_get_real_ns();
if (timeout < 0)
@@ -1036,7 +1036,7 @@ static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
}
/* We want the clashing entry to go away real soon: 1 second timeout. */
- loser_ct->timeout = nfct_time_stamp + HZ;
+ WRITE_ONCE(loser_ct->timeout, nfct_time_stamp + HZ);
/* IPS_NAT_CLASH removes the entry automatically on the first
* reply. Also prevents UDP tracker from moving the entry to
@@ -1560,7 +1560,7 @@ __nf_conntrack_alloc(struct net *net,
/* save hash for reusing when confirming */
*(unsigned long *)(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev) = hash;
ct->status = 0;
- ct->timeout = 0;
+ WRITE_ONCE(ct->timeout, 0);
write_pnet(&ct->ct_net, net);
memset(&ct->__nfct_init_offset, 0,
offsetof(struct nf_conn, proto) -
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index f1e5443fe7c7..81d03acf68d4 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1011,11 +1011,9 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
CTA_TUPLE_REPLY,
filter->family,
&filter->zone,
- filter->orig_flags);
- if (err < 0) {
- err = -EINVAL;
+ filter->reply_flags);
+ if (err < 0)
goto err_filter;
- }
}
return filter;
@@ -2000,7 +1998,7 @@ static int ctnetlink_change_timeout(struct nf_conn *ct,
if (timeout > INT_MAX)
timeout = INT_MAX;
- ct->timeout = nfct_time_stamp + (u32)timeout;
+ WRITE_ONCE(ct->timeout, nfct_time_stamp + (u32)timeout);
if (test_bit(IPS_DYING_BIT, &ct->status))
return -ETIME;
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 87a7388b6c89..ed37bb9b4e58 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -201,8 +201,8 @@ static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
if (timeout < 0)
timeout = 0;
- if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
- ct->timeout = nfct_time_stamp + timeout;
+ if (nf_flow_timeout_delta(READ_ONCE(ct->timeout)) > (__s32)timeout)
+ WRITE_ONCE(ct->timeout, nfct_time_stamp + timeout);
}
static void flow_offload_fixup_ct_state(struct nf_conn *ct)
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index d6bf1b2cd541..b561e0a44a45 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -65,11 +65,11 @@ static void nf_flow_rule_lwt_match(struct nf_flow_match *match,
sizeof(struct in6_addr));
if (memcmp(&key->enc_ipv6.src, &in6addr_any,
sizeof(struct in6_addr)))
- memset(&key->enc_ipv6.src, 0xff,
+ memset(&mask->enc_ipv6.src, 0xff,
sizeof(struct in6_addr));
if (memcmp(&key->enc_ipv6.dst, &in6addr_any,
sizeof(struct in6_addr)))
- memset(&key->enc_ipv6.dst, 0xff,
+ memset(&mask->enc_ipv6.dst, 0xff,
sizeof(struct in6_addr));
enc_keys |= BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS);
key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 4acc4b8e9fe5..5837e8efc9c2 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -387,7 +387,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
struct net_device *indev;
struct net_device *outdev;
struct nf_conn *ct = NULL;
- enum ip_conntrack_info ctinfo;
+ enum ip_conntrack_info ctinfo = 0;
struct nfnl_ct_hook *nfnl_ct;
bool csum_verify;
char *secdata = NULL;
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index af4ee874a067..dbe1f2e7dd9e 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -236,7 +236,7 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
if (!tcph)
- return;
+ goto err;
opt = (u8 *)tcph;
for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
@@ -251,16 +251,16 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
continue;
if (i + optl > tcphdr_len || priv->len + priv->offset > optl)
- return;
+ goto err;
if (skb_ensure_writable(pkt->skb,
nft_thoff(pkt) + i + priv->len))
- return;
+ goto err;
tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff,
&tcphdr_len);
if (!tcph)
- return;
+ goto err;
offset = i + priv->offset;
@@ -303,6 +303,9 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
return;
}
+ return;
+err:
+ regs->verdict.code = NFT_BREAK;
}
static void nft_exthdr_sctp_eval(const struct nft_expr *expr,
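
The new err label encodes a general nf_tables rule: an eval hook that cannot complete must not just return and leave register state (and possibly a half-rewritten packet) undefined; it signals failure by setting the verdict to NFT_BREAK, which ends evaluation of the rule. A sketch of the shape, with a hypothetical helper:

    static void my_expr_eval(const struct nft_expr *expr,
                             struct nft_regs *regs,
                             const struct nft_pktinfo *pkt)
    {
            if (!my_expr_data_present(pkt))         /* hypothetical check */
                    regs->verdict.code = NFT_BREAK; /* abort this rule */
    }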
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index cbfe4e4a4ad7..bd689938a2e0 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -22,7 +22,6 @@
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
-#include <linux/ip.h>
#include <net/sctp/checksum.h>
static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c
index e517663e0cd1..6f4116e72958 100644
--- a/net/netfilter/nft_set_pipapo_avx2.c
+++ b/net/netfilter/nft_set_pipapo_avx2.c
@@ -886,7 +886,7 @@ static int nft_pipapo_avx2_lookup_8b_6(unsigned long *map, unsigned long *fill,
NFT_PIPAPO_AVX2_BUCKET_LOAD8(4, lt, 4, pkt[4], bsize);
NFT_PIPAPO_AVX2_AND(5, 0, 1);
- NFT_PIPAPO_AVX2_BUCKET_LOAD8(6, lt, 6, pkt[5], bsize);
+ NFT_PIPAPO_AVX2_BUCKET_LOAD8(6, lt, 5, pkt[5], bsize);
NFT_PIPAPO_AVX2_AND(7, 2, 3);
/* Stall */
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 2f7cf5ecebf4..0f8bb0bf558f 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -85,9 +85,9 @@ static ssize_t idletimer_tg_show(struct device *dev,
mutex_unlock(&list_mutex);
if (time_after(expires, jiffies) || ktimespec.tv_sec > 0)
- return snprintf(buf, PAGE_SIZE, "%ld\n", time_diff);
+ return sysfs_emit(buf, "%ld\n", time_diff);
- return snprintf(buf, PAGE_SIZE, "0\n");
+ return sysfs_emit(buf, "0\n");
}
static void idletimer_tg_work(struct work_struct *work)
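
sysfs_emit() is the preferred replacement for snprintf(buf, PAGE_SIZE, ...) in sysfs show() callbacks: it verifies that buf really is the page-sized, page-aligned buffer sysfs passes in, and returns the number of bytes written. Minimal sketch of a show() method:

    static ssize_t value_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
    {
            return sysfs_emit(buf, "%d\n", 42);     /* illustrative value */
    }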
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 4c575324a985..9eba2e648385 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1852,6 +1852,11 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
+ if (len == 0) {
+ pr_warn_once("Zero length message leads to an empty skb\n");
+ return -ENODATA;
+ }
+
err = scm_send(sock, msg, &scm, true);
if (err < 0)
return err;
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 334f63c9529e..f184b0db79d4 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -636,8 +636,10 @@ static int nfc_genl_dump_devices_done(struct netlink_callback *cb)
{
struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
- nfc_device_iter_exit(iter);
- kfree(iter);
+ if (iter) {
+ nfc_device_iter_exit(iter);
+ kfree(iter);
+ }
return 0;
}
@@ -1392,8 +1394,10 @@ static int nfc_genl_dump_ses_done(struct netlink_callback *cb)
{
struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
- nfc_device_iter_exit(iter);
- kfree(iter);
+ if (iter) {
+ nfc_device_iter_exit(iter);
+ kfree(iter);
+ }
return 0;
}
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index abf19c0e3ba0..5327d130c4b5 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -500,7 +500,7 @@ void rds_tcp_tune(struct socket *sock)
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
}
if (rtn->rcvbuf_size > 0) {
- sk->sk_sndbuf = rtn->rcvbuf_size;
+ sk->sk_rcvbuf = rtn->rcvbuf_size;
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
}
release_sock(sk);
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index dbea0bfee48e..8120138dac01 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -135,16 +135,20 @@ struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle)
return bundle;
}
+static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
+{
+ rxrpc_put_peer(bundle->params.peer);
+ kfree(bundle);
+}
+
void rxrpc_put_bundle(struct rxrpc_bundle *bundle)
{
unsigned int d = bundle->debug_id;
unsigned int u = atomic_dec_return(&bundle->usage);
_debug("PUT B=%x %u", d, u);
- if (u == 0) {
- rxrpc_put_peer(bundle->params.peer);
- kfree(bundle);
- }
+ if (u == 0)
+ rxrpc_free_bundle(bundle);
}
/*
@@ -328,7 +332,7 @@ static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *c
return candidate;
found_bundle_free:
- kfree(candidate);
+ rxrpc_free_bundle(candidate);
found_bundle:
rxrpc_get_bundle(bundle);
spin_unlock(&local->client_bundles_lock);
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 68396d052052..0298fe2ad6d3 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -299,6 +299,12 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
return peer;
}
+static void rxrpc_free_peer(struct rxrpc_peer *peer)
+{
+ rxrpc_put_local(peer->local);
+ kfree_rcu(peer, rcu);
+}
+
/*
* Set up a new incoming peer. There shouldn't be any other matching peers
* since we've already done a search in the list from the non-reentrant context
@@ -365,7 +371,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
spin_unlock_bh(&rxnet->peer_hash_lock);
if (peer)
- kfree(candidate);
+ rxrpc_free_peer(candidate);
else
peer = candidate;
}
@@ -420,8 +426,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
list_del_init(&peer->keepalive_link);
spin_unlock_bh(&rxnet->peer_hash_lock);
- rxrpc_put_local(peer->local);
- kfree_rcu(peer, rcu);
+ rxrpc_free_peer(peer);
}
/*
@@ -457,8 +462,7 @@ void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
if (n == 0) {
hash_del_rcu(&peer->hash_link);
list_del_init(&peer->keepalive_link);
- rxrpc_put_local(peer->local);
- kfree_rcu(peer, rcu);
+ rxrpc_free_peer(peer);
}
}
diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
index 0eae9ff5edf6..e007fc75ef2f 100644
--- a/net/sched/sch_ets.c
+++ b/net/sched/sch_ets.c
@@ -665,12 +665,14 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
q->classes[i].deficit = quanta[i];
}
}
+ for (i = q->nbands; i < oldbands; i++) {
+ qdisc_tree_flush_backlog(q->classes[i].qdisc);
+ if (i >= q->nstrict)
+ list_del(&q->classes[i].alist);
+ }
q->nstrict = nstrict;
memcpy(q->prio2band, priomap, sizeof(priomap));
- for (i = q->nbands; i < oldbands; i++)
- qdisc_tree_flush_backlog(q->classes[i].qdisc);
-
for (i = 0; i < q->nbands; i++)
q->classes[i].quantum = quanta[i];
diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
index 830f3559f727..d6aba6edd16e 100644
--- a/net/sched/sch_fq_pie.c
+++ b/net/sched/sch_fq_pie.c
@@ -531,6 +531,7 @@ static void fq_pie_destroy(struct Qdisc *sch)
struct fq_pie_sched_data *q = qdisc_priv(sch);
tcf_block_put(q->block);
+ q->p_params.tupdate = 0;
del_timer_sync(&q->adapt_timer);
kvfree(q->flows);
}
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index b61c802e3bf3..230072f9ec48 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -585,7 +585,7 @@ static void smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
* to clcsocket->wq during the fallback.
*/
spin_lock_irqsave(&smc_wait->lock, flags);
- spin_lock(&clc_wait->lock);
+ spin_lock_nested(&clc_wait->lock, SINGLE_DEPTH_NESTING);
list_splice_init(&smc_wait->head, &clc_wait->head);
spin_unlock(&clc_wait->lock);
spin_unlock_irqrestore(&smc_wait->lock, flags);
@@ -2134,8 +2134,10 @@ static int smc_listen(struct socket *sock, int backlog)
smc->clcsock->sk->sk_user_data =
(void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
rc = kernel_listen(smc->clcsock, backlog);
- if (rc)
+ if (rc) {
+ smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
goto out;
+ }
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
sk->sk_state = SMC_LISTEN;
@@ -2368,8 +2370,10 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
static int smc_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
+ bool do_shutdown = true;
struct smc_sock *smc;
int rc = -EINVAL;
+ int old_state;
int rc1 = 0;
smc = smc_sk(sk);
@@ -2396,7 +2400,11 @@ static int smc_shutdown(struct socket *sock, int how)
}
switch (how) {
case SHUT_RDWR: /* shutdown in both directions */
+ old_state = sk->sk_state;
rc = smc_close_active(smc);
+ if (old_state == SMC_ACTIVE &&
+ sk->sk_state == SMC_PEERCLOSEWAIT1)
+ do_shutdown = false;
break;
case SHUT_WR:
rc = smc_close_shutdown_write(smc);
@@ -2406,7 +2414,7 @@ static int smc_shutdown(struct socket *sock, int how)
/* nothing more to do because peer is not involved */
break;
}
- if (smc->clcsock)
+ if (do_shutdown && smc->clcsock)
rc1 = kernel_sock_shutdown(smc->clcsock, how);
/* map sock_shutdown_cmd constants to sk_shutdown value range */
sk->sk_shutdown |= how + 1;
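
The spin_lock_nested() change earlier in this file is a lockdep annotation, not a behavioural one: both wait-queue locks belong to the same lock class, so taking the second beneath the first looks like recursive locking unless the nesting is declared via a subclass. A sketch of the splice pattern:

    static void splice_waiters(wait_queue_head_t *from, wait_queue_head_t *to)
    {
            unsigned long flags;

            spin_lock_irqsave(&from->lock, flags);
            /* same lock class as from->lock: tell lockdep this
             * nesting is intentional
             */
            spin_lock_nested(&to->lock, SINGLE_DEPTH_NESTING);
            list_splice_init(&from->head, &to->head);
            spin_unlock(&to->lock);
            spin_unlock_irqrestore(&from->lock, flags);
    }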
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 0f9ffba07d26..292e4d904ab6 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -195,6 +195,7 @@ int smc_close_active(struct smc_sock *smc)
int old_state;
long timeout;
int rc = 0;
+ int rc1 = 0;
timeout = current->flags & PF_EXITING ?
0 : sock_flag(sk, SOCK_LINGER) ?
@@ -228,6 +229,15 @@ again:
/* send close request */
rc = smc_close_final(conn);
sk->sk_state = SMC_PEERCLOSEWAIT1;
+
+ /* actively shutdown clcsock before peer close it,
+ * prevent peer from entering TIME_WAIT state.
+ */
+ if (smc->clcsock && smc->clcsock->sk) {
+ rc1 = kernel_sock_shutdown(smc->clcsock,
+ SHUT_RDWR);
+ rc = rc ? rc : rc1;
+ }
} else {
/* peer event has changed the state */
goto again;
@@ -354,9 +364,9 @@ static void smc_close_passive_work(struct work_struct *work)
if (rxflags->peer_conn_abort) {
/* peer has not received all data */
smc_close_passive_abort_received(smc);
- release_sock(&smc->sk);
+ release_sock(sk);
cancel_delayed_work_sync(&conn->tx_work);
- lock_sock(&smc->sk);
+ lock_sock(sk);
goto wakeup;
}
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 25ebd30feecd..387d28b2f8dd 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -625,18 +625,17 @@ int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb)
void smc_lgr_cleanup_early(struct smc_connection *conn)
{
struct smc_link_group *lgr = conn->lgr;
- struct list_head *lgr_list;
spinlock_t *lgr_lock;
if (!lgr)
return;
smc_conn_free(conn);
- lgr_list = smc_lgr_list_head(lgr, &lgr_lock);
+ smc_lgr_list_head(lgr, &lgr_lock);
spin_lock_bh(lgr_lock);
/* do not use this link group for new connections */
- if (!list_empty(lgr_list))
- list_del_init(lgr_list);
+ if (!list_empty(&lgr->list))
+ list_del_init(&lgr->list);
spin_unlock_bh(lgr_lock);
__smc_lgr_terminate(lgr, true);
}
@@ -1672,14 +1671,26 @@ static void smc_link_down_work(struct work_struct *work)
mutex_unlock(&lgr->llc_conf_mutex);
}
-/* Determine vlan of internal TCP socket.
- * @vlan_id: address to store the determined vlan id into
- */
+static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
+ struct netdev_nested_priv *priv)
+{
+ unsigned short *vlan_id = (unsigned short *)priv->data;
+
+ if (is_vlan_dev(lower_dev)) {
+ *vlan_id = vlan_dev_vlan_id(lower_dev);
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Determine vlan of internal TCP socket. */
int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
{
struct dst_entry *dst = sk_dst_get(clcsock->sk);
+ struct netdev_nested_priv priv;
struct net_device *ndev;
- int i, nest_lvl, rc = 0;
+ int rc = 0;
ini->vlan_id = 0;
if (!dst) {
@@ -1697,20 +1708,9 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
goto out_rel;
}
+ priv.data = (void *)&ini->vlan_id;
rtnl_lock();
- nest_lvl = ndev->lower_level;
- for (i = 0; i < nest_lvl; i++) {
- struct list_head *lower = &ndev->adj_list.lower;
-
- if (list_empty(lower))
- break;
- lower = lower->next;
- ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower);
- if (is_vlan_dev(ndev)) {
- ini->vlan_id = vlan_dev_vlan_id(ndev);
- break;
- }
- }
+ netdev_walk_all_lower_dev(ndev, smc_vlan_by_tcpsk_walk, &priv);
rtnl_unlock();
out_rel:
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index ae48c9c84ee1..d8ee06a9650a 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1720,15 +1720,15 @@ static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static struct lock_class_key xs_key[2];
-static struct lock_class_key xs_slock_key[2];
+static struct lock_class_key xs_key[3];
+static struct lock_class_key xs_slock_key[3];
static inline void xs_reclassify_socketu(struct socket *sock)
{
struct sock *sk = sock->sk;
sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
- &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
+ &xs_slock_key[0], "sk_lock-AF_LOCAL-RPC", &xs_key[0]);
}
static inline void xs_reclassify_socket4(struct socket *sock)
@@ -1736,7 +1736,7 @@ static inline void xs_reclassify_socket4(struct socket *sock)
struct sock *sk = sock->sk;
sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
- &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
+ &xs_slock_key[1], "sk_lock-AF_INET-RPC", &xs_key[1]);
}
static inline void xs_reclassify_socket6(struct socket *sock)
@@ -1744,7 +1744,7 @@ static inline void xs_reclassify_socket6(struct socket *sock)
struct sock *sk = sock->sk;
sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
- &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
+ &xs_slock_key[2], "sk_lock-AF_INET6-RPC", &xs_key[2]);
}
static inline void xs_reclassify_socket(int family, struct socket *sock)
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index acfba9f1ba72..6bc2879ba637 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -61,7 +61,7 @@ static DEFINE_MUTEX(tcpv6_prot_mutex);
static const struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
-static struct proto_ops tls_sw_proto_ops;
+static struct proto_ops tls_proto_ops[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
const struct proto *base);
@@ -71,6 +71,8 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx)
WRITE_ONCE(sk->sk_prot,
&tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]);
+ WRITE_ONCE(sk->sk_socket->ops,
+ &tls_proto_ops[ip_ver][ctx->tx_conf][ctx->rx_conf]);
}
int wait_on_pending_writer(struct sock *sk, long *timeo)
@@ -669,8 +671,6 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
if (tx) {
ctx->sk_write_space = sk->sk_write_space;
sk->sk_write_space = tls_write_space;
- } else {
- sk->sk_socket->ops = &tls_sw_proto_ops;
}
goto out;
@@ -728,6 +728,39 @@ struct tls_context *tls_ctx_create(struct sock *sk)
return ctx;
}
+static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
+ const struct proto_ops *base)
+{
+ ops[TLS_BASE][TLS_BASE] = *base;
+
+ ops[TLS_SW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
+ ops[TLS_SW ][TLS_BASE].sendpage_locked = tls_sw_sendpage_locked;
+
+ ops[TLS_BASE][TLS_SW ] = ops[TLS_BASE][TLS_BASE];
+ ops[TLS_BASE][TLS_SW ].splice_read = tls_sw_splice_read;
+
+ ops[TLS_SW ][TLS_SW ] = ops[TLS_SW ][TLS_BASE];
+ ops[TLS_SW ][TLS_SW ].splice_read = tls_sw_splice_read;
+
+#ifdef CONFIG_TLS_DEVICE
+ ops[TLS_HW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
+ ops[TLS_HW ][TLS_BASE].sendpage_locked = NULL;
+
+ ops[TLS_HW ][TLS_SW ] = ops[TLS_BASE][TLS_SW ];
+ ops[TLS_HW ][TLS_SW ].sendpage_locked = NULL;
+
+ ops[TLS_BASE][TLS_HW ] = ops[TLS_BASE][TLS_SW ];
+
+ ops[TLS_SW ][TLS_HW ] = ops[TLS_SW ][TLS_SW ];
+
+ ops[TLS_HW ][TLS_HW ] = ops[TLS_HW ][TLS_SW ];
+ ops[TLS_HW ][TLS_HW ].sendpage_locked = NULL;
+#endif
+#ifdef CONFIG_TLS_TOE
+ ops[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
+#endif
+}
+
static void tls_build_proto(struct sock *sk)
{
int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
@@ -739,6 +772,8 @@ static void tls_build_proto(struct sock *sk)
mutex_lock(&tcpv6_prot_mutex);
if (likely(prot != saved_tcpv6_prot)) {
build_protos(tls_prots[TLSV6], prot);
+ build_proto_ops(tls_proto_ops[TLSV6],
+ sk->sk_socket->ops);
smp_store_release(&saved_tcpv6_prot, prot);
}
mutex_unlock(&tcpv6_prot_mutex);
@@ -749,6 +784,8 @@ static void tls_build_proto(struct sock *sk)
mutex_lock(&tcpv4_prot_mutex);
if (likely(prot != saved_tcpv4_prot)) {
build_protos(tls_prots[TLSV4], prot);
+ build_proto_ops(tls_proto_ops[TLSV4],
+ sk->sk_socket->ops);
smp_store_release(&saved_tcpv4_prot, prot);
}
mutex_unlock(&tcpv4_prot_mutex);
@@ -959,10 +996,6 @@ static int __init tls_register(void)
if (err)
return err;
- tls_sw_proto_ops = inet_stream_ops;
- tls_sw_proto_ops.splice_read = tls_sw_splice_read;
- tls_sw_proto_ops.sendpage_locked = tls_sw_sendpage_locked;
-
tls_device_init();
tcp_register_ulp(&tcp_tls_ulp_ops);
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index d81564078557..dfe623a4e72f 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -521,7 +521,7 @@ static int tls_do_encryption(struct sock *sk,
memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
prot->iv_size + prot->salt_size);
- xor_iv_with_seq(prot, rec->iv_data, tls_ctx->tx.rec_seq);
+ xor_iv_with_seq(prot, rec->iv_data + iv_offset, tls_ctx->tx.rec_seq);
sge->offset += prot->prepend_size;
sge->length -= prot->prepend_size;
@@ -1499,7 +1499,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
else
memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
- xor_iv_with_seq(prot, iv, tls_ctx->rx.rec_seq);
+ xor_iv_with_seq(prot, iv + iv_offset, tls_ctx->rx.rec_seq);
/* Prepare AAD */
tls_make_aad(aad, rxm->full_len - prot->overhead_size +
@@ -2005,6 +2005,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
struct sock *sk = sock->sk;
struct sk_buff *skb;
ssize_t copied = 0;
+ bool from_queue;
int err = 0;
long timeo;
int chunk;
@@ -2014,25 +2015,28 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
- skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, &err);
- if (!skb)
- goto splice_read_end;
-
- if (!ctx->decrypted) {
- err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
-
- /* splice does not support reading control messages */
- if (ctx->control != TLS_RECORD_TYPE_DATA) {
- err = -EINVAL;
+ from_queue = !skb_queue_empty(&ctx->rx_list);
+ if (from_queue) {
+ skb = __skb_dequeue(&ctx->rx_list);
+ } else {
+ skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo,
+ &err);
+ if (!skb)
goto splice_read_end;
- }
+ err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
if (err < 0) {
tls_err_abort(sk, -EBADMSG);
goto splice_read_end;
}
- ctx->decrypted = 1;
}
+
+ /* splice does not support reading control messages */
+ if (ctx->control != TLS_RECORD_TYPE_DATA) {
+ err = -EINVAL;
+ goto splice_read_end;
+ }
+
rxm = strp_msg(skb);
chunk = min_t(unsigned int, rxm->full_len, len);
@@ -2040,7 +2044,17 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
if (copied < 0)
goto splice_read_end;
- tls_sw_advance_skb(sk, skb, copied);
+ if (!from_queue) {
+ ctx->recv_pkt = NULL;
+ __strp_unpause(&ctx->strp);
+ }
+ if (chunk < rxm->full_len) {
+ __skb_queue_head(&ctx->rx_list, skb);
+ rxm->offset += len;
+ rxm->full_len -= len;
+ } else {
+ consume_skb(skb);
+ }
splice_read_end:
release_sock(sk);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 78e08e82c08c..b0bfc78e421c 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2882,9 +2882,6 @@ static int unix_shutdown(struct socket *sock, int mode)
unix_state_lock(sk);
sk->sk_shutdown |= mode;
- if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
- mode == SHUTDOWN_MASK)
- sk->sk_state = TCP_CLOSE;
other = unix_peer(sk);
if (other)
sock_hold(other);