Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_dev.c | 9
-rw-r--r--  net/9p/mod.c | 1
-rw-r--r--  net/ax25/af_ax25.c | 10
-rw-r--r--  net/bpf/test_run.c | 19
-rw-r--r--  net/bpfilter/bpfilter_kern.c | 2
-rw-r--r--  net/bridge/br_mrp.c | 12
-rw-r--r--  net/bridge/br_multicast.c | 2
-rw-r--r--  net/bridge/br_private.h | 4
-rw-r--r--  net/bridge/br_private_mrp.h | 2
-rw-r--r--  net/bridge/netfilter/nft_meta_bridge.c | 1
-rw-r--r--  net/bridge/netfilter/nft_reject_bridge.c | 1
-rw-r--r--  net/ceph/ceph_common.c | 14
-rw-r--r--  net/ceph/osd_client.c | 9
-rw-r--r--  net/core/dev.c | 11
-rw-r--r--  net/core/dev_addr_lists.c | 10
-rw-r--r--  net/core/drop_monitor.c | 1
-rw-r--r--  net/core/filter.c | 10
-rw-r--r--  net/core/flow_dissector.c | 32
-rw-r--r--  net/core/flow_offload.c | 48
-rw-r--r--  net/core/net-sysfs.c | 2
-rw-r--r--  net/core/rtnetlink.c | 3
-rw-r--r--  net/core/skmsg.c | 23
-rw-r--r--  net/core/sock.c | 6
-rw-r--r--  net/core/sock_map.c | 53
-rw-r--r--  net/core/sock_reuseport.c | 1
-rw-r--r--  net/core/sysctl_net_core.c | 2
-rw-r--r--  net/core/xdp.c | 1
-rw-r--r--  net/dsa/tag_edsa.c | 37
-rw-r--r--  net/ethtool/cabletest.c | 17
-rw-r--r--  net/ethtool/common.c | 2
-rw-r--r--  net/ethtool/ioctl.c | 2
-rw-r--r--  net/ethtool/linkstate.c | 11
-rw-r--r--  net/ethtool/netlink.c | 27
-rw-r--r--  net/hsr/hsr_device.c | 32
-rw-r--r--  net/hsr/hsr_device.h | 2
-rw-r--r--  net/hsr/hsr_forward.c | 18
-rw-r--r--  net/hsr/hsr_framereg.c | 3
-rw-r--r--  net/hsr/hsr_main.c | 9
-rw-r--r--  net/hsr/hsr_netlink.c | 17
-rw-r--r--  net/ipv4/Kconfig | 34
-rw-r--r--  net/ipv4/esp4_offload.c | 1
-rw-r--r--  net/ipv4/fib_semantics.c | 2
-rw-r--r--  net/ipv4/fou.c | 1
-rw-r--r--  net/ipv4/icmp.c | 4
-rw-r--r--  net/ipv4/ip_output.c | 2
-rw-r--r--  net/ipv4/ip_tunnel.c | 14
-rw-r--r--  net/ipv4/ip_tunnel_core.c | 18
-rw-r--r--  net/ipv4/ip_vti.c | 1
-rw-r--r--  net/ipv4/ipip.c | 1
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 15
-rw-r--r--  net/ipv4/netfilter/ipt_SYNPROXY.c | 1
-rw-r--r--  net/ipv4/netfilter/iptable_filter.c | 10
-rw-r--r--  net/ipv4/netfilter/iptable_mangle.c | 10
-rw-r--r--  net/ipv4/netfilter/iptable_nat.c | 10
-rw-r--r--  net/ipv4/netfilter/iptable_raw.c | 10
-rw-r--r--  net/ipv4/netfilter/iptable_security.c | 11
-rw-r--r--  net/ipv4/netfilter/nf_flow_table_ipv4.c | 1
-rw-r--r--  net/ipv4/netfilter/nft_dup_ipv4.c | 1
-rw-r--r--  net/ipv4/netfilter/nft_fib_ipv4.c | 1
-rw-r--r--  net/ipv4/netfilter/nft_reject_ipv4.c | 1
-rw-r--r--  net/ipv4/ping.c | 3
-rw-r--r--  net/ipv4/route.c | 2
-rw-r--r--  net/ipv4/tcp.c | 17
-rw-r--r--  net/ipv4/tcp_cong.c | 2
-rw-r--r--  net/ipv4/tcp_cubic.c | 5
-rw-r--r--  net/ipv4/tcp_input.c | 39
-rw-r--r--  net/ipv4/tcp_ipv4.c | 20
-rw-r--r--  net/ipv4/tcp_output.c | 21
-rw-r--r--  net/ipv4/udp.c | 17
-rw-r--r--  net/ipv6/Kconfig | 34
-rw-r--r--  net/ipv6/esp6_offload.c | 1
-rw-r--r--  net/ipv6/fou6.c | 1
-rw-r--r--  net/ipv6/icmp.c | 4
-rw-r--r--  net/ipv6/ila/ila_main.c | 1
-rw-r--r--  net/ipv6/ip6_gre.c | 20
-rw-r--r--  net/ipv6/ip6_tunnel.c | 1
-rw-r--r--  net/ipv6/ip6_vti.c | 1
-rw-r--r--  net/ipv6/mcast.c | 1
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 15
-rw-r--r--  net/ipv6/netfilter/ip6t_SYNPROXY.c | 1
-rw-r--r--  net/ipv6/netfilter/ip6table_filter.c | 10
-rw-r--r--  net/ipv6/netfilter/ip6table_mangle.c | 10
-rw-r--r--  net/ipv6/netfilter/ip6table_nat.c | 10
-rw-r--r--  net/ipv6/netfilter/ip6table_raw.c | 10
-rw-r--r--  net/ipv6/netfilter/ip6table_security.c | 10
-rw-r--r--  net/ipv6/netfilter/nf_flow_table_ipv6.c | 1
-rw-r--r--  net/ipv6/netfilter/nft_dup_ipv6.c | 1
-rw-r--r--  net/ipv6/netfilter/nft_fib_ipv6.c | 1
-rw-r--r--  net/ipv6/netfilter/nft_reject_ipv6.c | 1
-rw-r--r--  net/ipv6/route.c | 7
-rw-r--r--  net/ipv6/sit.c | 1
-rw-r--r--  net/ipv6/udp.c | 17
-rw-r--r--  net/l2tp/l2tp_core.c | 5
-rw-r--r--  net/llc/af_llc.c | 10
-rw-r--r--  net/mac80211/mesh_hwmp.c | 7
-rw-r--r--  net/mac80211/rx.c | 26
-rw-r--r--  net/mac80211/status.c | 22
-rw-r--r--  net/mac80211/tx.c | 8
-rw-r--r--  net/mptcp/options.c | 8
-rw-r--r--  net/mptcp/protocol.h | 8
-rw-r--r--  net/mptcp/subflow.c | 61
-rw-r--r--  net/netfilter/ipset/ip_set_bitmap_ip.c | 2
-rw-r--r--  net/netfilter/ipset/ip_set_bitmap_ipmac.c | 2
-rw-r--r--  net/netfilter/ipset/ip_set_bitmap_port.c | 2
-rw-r--r--  net/netfilter/ipset/ip_set_core.c | 2
-rw-r--r--  net/netfilter/ipset/ip_set_hash_gen.h | 4
-rw-r--r--  net/netfilter/ipvs/ip_vs_sync.c | 12
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 32
-rw-r--r--  net/netfilter/nf_dup_netdev.c | 1
-rw-r--r--  net/netfilter/nf_flow_table_core.c | 46
-rw-r--r--  net/netfilter/nf_flow_table_inet.c | 1
-rw-r--r--  net/netfilter/nf_flow_table_offload.c | 1
-rw-r--r--  net/netfilter/nf_synproxy_core.c | 1
-rw-r--r--  net/netfilter/nf_tables_api.c | 72
-rw-r--r--  net/netfilter/nf_tables_offload.c | 1
-rw-r--r--  net/netfilter/nfnetlink.c | 1
-rw-r--r--  net/netfilter/nft_compat.c | 1
-rw-r--r--  net/netfilter/nft_connlimit.c | 1
-rw-r--r--  net/netfilter/nft_counter.c | 1
-rw-r--r--  net/netfilter/nft_ct.c | 1
-rw-r--r--  net/netfilter/nft_dup_netdev.c | 1
-rw-r--r--  net/netfilter/nft_fib_inet.c | 1
-rw-r--r--  net/netfilter/nft_fib_netdev.c | 1
-rw-r--r--  net/netfilter/nft_flow_offload.c | 1
-rw-r--r--  net/netfilter/nft_hash.c | 1
-rw-r--r--  net/netfilter/nft_limit.c | 1
-rw-r--r--  net/netfilter/nft_log.c | 1
-rw-r--r--  net/netfilter/nft_masq.c | 1
-rw-r--r--  net/netfilter/nft_nat.c | 1
-rw-r--r--  net/netfilter/nft_numgen.c | 1
-rw-r--r--  net/netfilter/nft_objref.c | 1
-rw-r--r--  net/netfilter/nft_osf.c | 1
-rw-r--r--  net/netfilter/nft_queue.c | 1
-rw-r--r--  net/netfilter/nft_quota.c | 1
-rw-r--r--  net/netfilter/nft_redir.c | 1
-rw-r--r--  net/netfilter/nft_reject.c | 1
-rw-r--r--  net/netfilter/nft_reject_inet.c | 1
-rw-r--r--  net/netfilter/nft_set_pipapo.c | 6
-rw-r--r--  net/netfilter/nft_set_rbtree.c | 21
-rw-r--r--  net/netfilter/nft_synproxy.c | 1
-rw-r--r--  net/netfilter/nft_tunnel.c | 1
-rw-r--r--  net/netfilter/xt_nat.c | 1
-rw-r--r--  net/netlink/genetlink.c | 97
-rw-r--r--  net/nfc/nci/core.c | 5
-rw-r--r--  net/openvswitch/actions.c | 9
-rw-r--r--  net/qrtr/qrtr.c | 11
-rw-r--r--  net/rds/connection.c | 11
-rw-r--r--  net/rds/ib.h | 8
-rw-r--r--  net/rds/rds.h | 7
-rw-r--r--  net/rds/send.c | 3
-rw-r--r--  net/rds/transport.c | 26
-rw-r--r--  net/rxrpc/call_accept.c | 7
-rw-r--r--  net/rxrpc/call_event.c | 2
-rw-r--r--  net/rxrpc/input.c | 7
-rw-r--r--  net/rxrpc/recvmsg.c | 2
-rw-r--r--  net/rxrpc/sendmsg.c | 2
-rw-r--r--  net/sched/act_connmark.c | 9
-rw-r--r--  net/sched/act_csum.c | 2
-rw-r--r--  net/sched/act_ct.c | 38
-rw-r--r--  net/sched/act_ctinfo.c | 9
-rw-r--r--  net/sched/act_gate.c | 126
-rw-r--r--  net/sched/act_mpls.c | 2
-rw-r--r--  net/sched/act_skbedit.c | 2
-rw-r--r--  net/sched/cls_api.c | 28
-rw-r--r--  net/sched/cls_flow.c | 8
-rw-r--r--  net/sched/cls_flower.c | 2
-rw-r--r--  net/sched/em_ipset.c | 2
-rw-r--r--  net/sched/em_ipt.c | 2
-rw-r--r--  net/sched/em_meta.c | 2
-rw-r--r--  net/sched/sch_atm.c | 8
-rw-r--r--  net/sched/sch_cake.c | 62
-rw-r--r--  net/sched/sch_dsmark.c | 6
-rw-r--r--  net/sched/sch_fq.c | 1
-rw-r--r--  net/sched/sch_fq_codel.c | 1
-rw-r--r--  net/sched/sch_hhf.c | 1
-rw-r--r--  net/sched/sch_teql.c | 2
-rw-r--r--  net/sctp/associola.c | 5
-rw-r--r--  net/sctp/bind_addr.c | 1
-rw-r--r--  net/sctp/protocol.c | 3
-rw-r--r--  net/sctp/stream.c | 27
-rw-r--r--  net/smc/af_smc.c | 12
-rw-r--r--  net/smc/smc_cdc.c | 6
-rw-r--r--  net/smc/smc_clc.c | 45
-rw-r--r--  net/smc/smc_clc.h | 2
-rw-r--r--  net/smc/smc_core.c | 136
-rw-r--r--  net/smc/smc_core.h | 9
-rw-r--r--  net/smc/smc_ib.c | 27
-rw-r--r--  net/smc/smc_ib.h | 4
-rw-r--r--  net/smc/smc_ism.c | 11
-rw-r--r--  net/smc/smc_ism.h | 3
-rw-r--r--  net/smc/smc_llc.c | 212
-rw-r--r--  net/smc/smc_llc.h | 2
-rw-r--r--  net/smc/smc_pnet.c | 37
-rw-r--r--  net/smc/smc_wr.c | 10
-rw-r--r--  net/sunrpc/rpc_pipe.c | 1
-rw-r--r--  net/sunrpc/svcsock.c | 1
-rw-r--r--  net/sunrpc/xdr.c | 4
-rw-r--r--  net/sunrpc/xprtrdma/frwr_ops.c | 8
-rw-r--r--  net/sunrpc/xprtrdma/rpc_rdma.c | 13
-rw-r--r--  net/sunrpc/xprtrdma/transport.c | 7
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c | 106
-rw-r--r--  net/sunrpc/xprtrdma/xprt_rdma.h | 3
-rw-r--r--  net/tipc/link.c | 28
-rw-r--r--  net/vmw_vsock/virtio_transport.c | 2
-rw-r--r--  net/wireless/nl80211.c | 5
-rw-r--r--  net/xdp/xsk_buff_pool.c | 54
-rw-r--r--  net/xfrm/Kconfig | 24
-rw-r--r--  net/xfrm/xfrm_device.c | 4
-rw-r--r--  net/xfrm/xfrm_interface.c | 2
-rw-r--r--  net/xfrm/xfrm_output.c | 4
211 files changed, 1567 insertions, 1049 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index c8d6a07e23c5..3dd7c972677b 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -503,11 +503,10 @@ static void vlan_dev_set_lockdep_one(struct net_device *dev,
lockdep_set_class(&txq->_xmit_lock, &vlan_netdev_xmit_lock_key);
}
-static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
+static void vlan_dev_set_lockdep_class(struct net_device *dev)
{
- lockdep_set_class_and_subclass(&dev->addr_list_lock,
- &vlan_netdev_addr_lock_key,
- subclass);
+ lockdep_set_class(&dev->addr_list_lock,
+ &vlan_netdev_addr_lock_key);
netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, NULL);
}
@@ -601,7 +600,7 @@ static int vlan_dev_init(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &vlan_type);
- vlan_dev_set_lockdep_class(dev, dev->lower_level);
+ vlan_dev_set_lockdep_class(dev);
vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
if (!vlan->vlan_pcpu_stats)
diff --git a/net/9p/mod.c b/net/9p/mod.c
index c1b62428da7b..5126566850bd 100644
--- a/net/9p/mod.c
+++ b/net/9p/mod.c
@@ -189,3 +189,4 @@ MODULE_AUTHOR("Latchesar Ionkov <lucho@ionkov.net>");
MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>");
MODULE_AUTHOR("Ron Minnich <rminnich@lanl.gov>");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Plan 9 Resource Sharing Support (9P2000)");
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index fd91cd34f25e..dec3f35467c9 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1187,7 +1187,10 @@ static int __must_check ax25_connect(struct socket *sock,
if (addr_len > sizeof(struct sockaddr_ax25) &&
fsa->fsa_ax25.sax25_ndigis != 0) {
/* Valid number of digipeaters ? */
- if (fsa->fsa_ax25.sax25_ndigis < 1 || fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS) {
+ if (fsa->fsa_ax25.sax25_ndigis < 1 ||
+ fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS ||
+ addr_len < sizeof(struct sockaddr_ax25) +
+ sizeof(ax25_address) * fsa->fsa_ax25.sax25_ndigis) {
err = -EINVAL;
goto out_release;
}
@@ -1507,7 +1510,10 @@ static int ax25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)usax;
/* Valid number of digipeaters ? */
- if (usax->sax25_ndigis < 1 || usax->sax25_ndigis > AX25_MAX_DIGIS) {
+ if (usax->sax25_ndigis < 1 ||
+ usax->sax25_ndigis > AX25_MAX_DIGIS ||
+ addr_len < sizeof(struct sockaddr_ax25) +
+ sizeof(ax25_address) * usax->sax25_ndigis) {
err = -EINVAL;
goto out;
}
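
Both hunks above add the same guard: before the digipeater list is read out of a user-supplied sockaddr, addr_len must be large enough to actually contain sax25_ndigis entries. The arithmetic reduces to the following standalone userspace sketch; the struct layouts below are simplified stand-ins, not the kernel's definitions.

#include <stddef.h>
#include <stdio.h>

#define AX25_MAX_DIGIS 8

typedef struct { char call[7]; } ax25_address;
struct sockaddr_ax25 {
	unsigned short sax25_family;
	ax25_address   sax25_call;
	int            sax25_ndigis;
};

/* Mirrors the added check: reject counts out of range, or sockaddrs too
 * short to hold the digipeater array they claim to carry. */
static int digis_valid(size_t addr_len, int ndigis)
{
	if (ndigis < 1 || ndigis > AX25_MAX_DIGIS)
		return 0;
	return addr_len >= sizeof(struct sockaddr_ax25) +
			   sizeof(ax25_address) * (size_t)ndigis;
}

int main(void)
{
	/* Claiming 8 digis while passing only the base struct must fail. */
	printf("%d\n", digis_valid(sizeof(struct sockaddr_ax25), 8));
	printf("%d\n", digis_valid(sizeof(struct sockaddr_ax25) +
				   2 * sizeof(ax25_address), 2));
	return 0;
}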
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index bfd4ccd80847..b03c469cd01f 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -147,6 +147,20 @@ int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
return a + (long)b + c + d + (long)e + f;
}
+struct bpf_fentry_test_t {
+ struct bpf_fentry_test_t *a;
+};
+
+int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
+{
+ return (long)arg;
+}
+
+int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
+{
+ return (long)arg->a;
+}
+
int noinline bpf_modify_return_test(int a, int *b)
{
*b += 1;
@@ -185,6 +199,7 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr)
{
+ struct bpf_fentry_test_t arg = {};
u16 side_effect = 0, ret = 0;
int b = 2, err = -EFAULT;
u32 retval = 0;
@@ -197,7 +212,9 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog,
bpf_fentry_test3(4, 5, 6) != 15 ||
bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
- bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111)
+ bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
+ bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
+ bpf_fentry_test8(&arg) != 0)
goto out;
break;
case BPF_MODIFY_RETURN:
diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c
index c0f0990f30b6..1905e01c3aa9 100644
--- a/net/bpfilter/bpfilter_kern.c
+++ b/net/bpfilter/bpfilter_kern.c
@@ -50,7 +50,7 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
req.len = optlen;
if (!bpfilter_ops.info.pid)
goto out;
- n = __kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req),
+ n = kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req),
&pos);
if (n != sizeof(req)) {
pr_err("write fail %zd\n", n);
diff --git a/net/bridge/br_mrp.c b/net/bridge/br_mrp.c
index 24986ec7d38c..90592af9db61 100644
--- a/net/bridge/br_mrp.c
+++ b/net/bridge/br_mrp.c
@@ -86,7 +86,7 @@ static struct sk_buff *br_mrp_skb_alloc(struct net_bridge_port *p,
{
struct ethhdr *eth_hdr;
struct sk_buff *skb;
- u16 *version;
+ __be16 *version;
skb = dev_alloc_skb(MRP_MAX_FRAME_LENGTH);
if (!skb)
@@ -411,10 +411,16 @@ int br_mrp_set_port_role(struct net_bridge_port *p,
if (!mrp)
return -EINVAL;
- if (role == BR_MRP_PORT_ROLE_PRIMARY)
+ switch (role) {
+ case BR_MRP_PORT_ROLE_PRIMARY:
rcu_assign_pointer(mrp->p_port, p);
- else
+ break;
+ case BR_MRP_PORT_ROLE_SECONDARY:
rcu_assign_pointer(mrp->s_port, p);
+ break;
+ default:
+ return -EINVAL;
+ }
br_mrp_port_switchdev_set_role(p, role);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 83490bf73a13..4c4a93abde68 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1007,7 +1007,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);
if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
- nsrcs_offset + sizeof(_nsrcs))
+ nsrcs_offset + sizeof(__nsrcs))
return -EINVAL;
_nsrcs = skb_header_pointer(skb, nsrcs_offset,
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 7501be4eeba0..e0ea6dbbc97e 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -217,8 +217,8 @@ struct net_bridge_port_group {
struct rcu_head rcu;
struct timer_list timer;
struct br_ip addr;
+ unsigned char eth_addr[ETH_ALEN] __aligned(2);
unsigned char flags;
- unsigned char eth_addr[ETH_ALEN];
};
struct net_bridge_mdb_entry {
@@ -430,7 +430,7 @@ struct net_bridge {
struct hlist_head fdb_list;
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
- struct list_head __rcu mrp_list;
+ struct list_head mrp_list;
#endif
};
diff --git a/net/bridge/br_private_mrp.h b/net/bridge/br_private_mrp.h
index 33b255e38ffe..315eb37d89f0 100644
--- a/net/bridge/br_private_mrp.h
+++ b/net/bridge/br_private_mrp.h
@@ -8,7 +8,7 @@
struct br_mrp {
/* list of mrp instances */
- struct list_head __rcu list;
+ struct list_head list;
struct net_bridge_port __rcu *p_port;
struct net_bridge_port __rcu *s_port;
diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
index 7c9e92b2f806..8e8ffac037cd 100644
--- a/net/bridge/netfilter/nft_meta_bridge.c
+++ b/net/bridge/netfilter/nft_meta_bridge.c
@@ -155,3 +155,4 @@ module_exit(nft_meta_bridge_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("wenxu <wenxu@ucloud.cn>");
MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "meta");
+MODULE_DESCRIPTION("Support for bridge dedicated meta key");
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index f48cf4cfb80f..deae2c9a0f69 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -455,3 +455,4 @@ module_exit(nft_reject_bridge_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "reject");
+MODULE_DESCRIPTION("Reject packets from bridge via nftables");
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index afe0e8184c23..4e7edd707a14 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -332,6 +332,7 @@ struct ceph_options *ceph_alloc_options(void)
opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT;
opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT;
opt->osd_request_timeout = CEPH_OSD_REQUEST_TIMEOUT_DEFAULT;
+ opt->read_from_replica = CEPH_READ_FROM_REPLICA_DEFAULT;
return opt;
}
EXPORT_SYMBOL(ceph_alloc_options);
@@ -490,16 +491,13 @@ int ceph_parse_param(struct fs_parameter *param, struct ceph_options *opt,
case Opt_read_from_replica:
switch (result.uint_32) {
case Opt_read_from_replica_no:
- opt->osd_req_flags &= ~(CEPH_OSD_FLAG_BALANCE_READS |
- CEPH_OSD_FLAG_LOCALIZE_READS);
+ opt->read_from_replica = 0;
break;
case Opt_read_from_replica_balance:
- opt->osd_req_flags |= CEPH_OSD_FLAG_BALANCE_READS;
- opt->osd_req_flags &= ~CEPH_OSD_FLAG_LOCALIZE_READS;
+ opt->read_from_replica = CEPH_OSD_FLAG_BALANCE_READS;
break;
case Opt_read_from_replica_localize:
- opt->osd_req_flags |= CEPH_OSD_FLAG_LOCALIZE_READS;
- opt->osd_req_flags &= ~CEPH_OSD_FLAG_BALANCE_READS;
+ opt->read_from_replica = CEPH_OSD_FLAG_LOCALIZE_READS;
break;
default:
BUG();
@@ -613,9 +611,9 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client,
}
seq_putc(m, ',');
}
- if (opt->osd_req_flags & CEPH_OSD_FLAG_BALANCE_READS) {
+ if (opt->read_from_replica == CEPH_OSD_FLAG_BALANCE_READS) {
seq_puts(m, "read_from_replica=balance,");
- } else if (opt->osd_req_flags & CEPH_OSD_FLAG_LOCALIZE_READS) {
+ } else if (opt->read_from_replica == CEPH_OSD_FLAG_LOCALIZE_READS) {
seq_puts(m, "read_from_replica=localize,");
}
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 4fea3c33af2a..2db8b44e70c2 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -445,8 +445,10 @@ static void target_copy(struct ceph_osd_request_target *dest,
dest->size = src->size;
dest->min_size = src->min_size;
dest->sort_bitwise = src->sort_bitwise;
+ dest->recovery_deletes = src->recovery_deletes;
dest->flags = src->flags;
+ dest->used_replica = src->used_replica;
dest->paused = src->paused;
dest->epoch = src->epoch;
@@ -1117,10 +1119,10 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
truncate_size, truncate_seq);
}
- req->r_flags = flags;
req->r_base_oloc.pool = layout->pool_id;
req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);
+ req->r_flags = flags | osdc->client->options->read_from_replica;
req->r_snapid = vino.snap;
if (flags & CEPH_OSD_FLAG_WRITE)
@@ -2431,14 +2433,11 @@ promote:
static void account_request(struct ceph_osd_request *req)
{
- struct ceph_osd_client *osdc = req->r_osdc;
-
WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));
req->r_flags |= CEPH_OSD_FLAG_ONDISK;
- req->r_flags |= osdc->client->options->osd_req_flags;
- atomic_inc(&osdc->num_requests);
+ atomic_inc(&req->r_osdc->num_requests);
req->r_start_stamp = jiffies;
req->r_start_latency = ktime_get();
diff --git a/net/core/dev.c b/net/core/dev.c
index 6bc2388141f6..7a774ebf64e2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4192,10 +4192,12 @@ int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
local_bh_disable();
+ dev_xmit_recursion_inc();
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (!netif_xmit_frozen_or_drv_stopped(txq))
ret = netdev_start_xmit(skb, dev, txq, false);
HARD_TX_UNLOCK(dev, txq);
+ dev_xmit_recursion_dec();
local_bh_enable();
@@ -5599,7 +5601,7 @@ static void flush_backlog(struct work_struct *work)
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->input_pkt_queue);
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
input_queue_head_incr(sd);
}
}
@@ -9547,6 +9549,13 @@ int register_netdevice(struct net_device *dev)
rcu_barrier();
dev->reg_state = NETREG_UNREGISTERED;
+ /* We should put the kobject that is held in
+ * netdev_unregister_kobject(); otherwise the
+ * net device cannot be freed when the driver
+ * calls free_netdev(), because the kobject
+ * is still being held.
+ */
+ kobject_put(&dev->dev.kobj);
}
/*
* Prevent userspace races by waiting until the network
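
The comment added in the register_netdevice() error path describes a reference-count leak: a kobject reference taken during registration was never dropped on failure, so the final free_netdev() could never bring the count to zero. A toy userspace model of that leak and the fix, with a plain counter standing in for the kobject refcount:

#include <assert.h>
#include <stdio.h>

struct obj { int refs; int freed; };

static void get_ref(struct obj *o) { o->refs++; }
static void put_ref(struct obj *o) { if (--o->refs == 0) o->freed = 1; }

int main(void)
{
	struct obj dev = { .refs = 1, .freed = 0 };

	get_ref(&dev);	/* reference taken while registering the kobject */
	/* ...registration fails... */
	put_ref(&dev);	/* the kobject_put() added in the error path */
	put_ref(&dev);	/* free_netdev() can now drop the last reference */
	assert(dev.freed);
	printf("freed=%d\n", dev.freed);
	return 0;
}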
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 6393ba930097..54cd568e7c2f 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -690,6 +690,15 @@ void dev_uc_unsync(struct net_device *to, struct net_device *from)
if (to->addr_len != from->addr_len)
return;
+ /* netif_addr_lock_bh() uses lockdep subclass 0, this is okay for two
+ * reasons:
+ * 1) This is always called without any addr_list_lock, so as the
+ * outermost one here, it must be 0.
+ * 2) This is called by some callers after unlinking the upper device,
+ * so the dev->lower_level becomes 1 again.
+ * Therefore, the subclass for 'from' is 0, for 'to' is either 1 or
+ * larger.
+ */
netif_addr_lock_bh(from);
netif_addr_lock_nested(to);
__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
@@ -911,6 +920,7 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
if (to->addr_len != from->addr_len)
return;
+ /* See the above comments inside dev_uc_unsync(). */
netif_addr_lock_bh(from);
netif_addr_lock_nested(to);
__hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 2ee7bc4c9e03..b09bebeadf0b 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -1721,3 +1721,4 @@ module_exit(exit_net_drop_monitor);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
MODULE_ALIAS_GENL_FAMILY("NET_DM");
+MODULE_DESCRIPTION("Monitoring code for network dropped packet alerts");
diff --git a/net/core/filter.c b/net/core/filter.c
index 73395384afe2..82e1b5b06167 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -5853,12 +5853,16 @@ BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
{
unsigned int iphdr_len;
- if (skb->protocol == cpu_to_be16(ETH_P_IP))
+ switch (skb_protocol(skb, true)) {
+ case cpu_to_be16(ETH_P_IP):
iphdr_len = sizeof(struct iphdr);
- else if (skb->protocol == cpu_to_be16(ETH_P_IPV6))
+ break;
+ case cpu_to_be16(ETH_P_IPV6):
iphdr_len = sizeof(struct ipv6hdr);
- else
+ break;
+ default:
return 0;
+ }
if (skb_headlen(skb) < iphdr_len)
return 0;
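
Besides restructuring the dispatch as a switch, the hunk moves from skb->protocol to skb_protocol(skb, true), which resolves the EtherType even when VLAN tags are present. The dispatch itself is just choosing a header length by protocol; a minimal userspace analogue, with header sizes hard-coded rather than taken from struct iphdr/ipv6hdr:

#include <stdint.h>
#include <stdio.h>

#define ETH_P_IP   0x0800
#define ETH_P_IPV6 0x86DD

static unsigned int ip_header_len(uint16_t proto)
{
	switch (proto) {
	case ETH_P_IP:
		return 20;	/* sizeof(struct iphdr) */
	case ETH_P_IPV6:
		return 40;	/* sizeof(struct ipv6hdr) */
	default:
		return 0;	/* not IP: the helper bails out */
	}
}

int main(void)
{
	printf("%u %u %u\n", ip_header_len(ETH_P_IP),
	       ip_header_len(ETH_P_IPV6),
	       ip_header_len(0x8100));	/* raw VLAN EtherType: rejected */
	return 0;
}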
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index d02df0b6d0d9..142a8824f0a8 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -70,10 +70,10 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
EXPORT_SYMBOL(skb_flow_dissector_init);
#ifdef CONFIG_BPF_SYSCALL
-int flow_dissector_bpf_prog_attach(struct net *net, struct bpf_prog *prog)
+int flow_dissector_bpf_prog_attach_check(struct net *net,
+ struct bpf_prog *prog)
{
enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
- struct bpf_prog *attached;
if (net == &init_net) {
/* BPF flow dissector in the root namespace overrides
@@ -86,26 +86,17 @@ int flow_dissector_bpf_prog_attach(struct net *net, struct bpf_prog *prog)
for_each_net(ns) {
if (ns == &init_net)
continue;
- if (rcu_access_pointer(ns->bpf.progs[type]))
+ if (rcu_access_pointer(ns->bpf.run_array[type]))
return -EEXIST;
}
} else {
/* Make sure root flow dissector is not attached
* when attaching to the non-root namespace.
*/
- if (rcu_access_pointer(init_net.bpf.progs[type]))
+ if (rcu_access_pointer(init_net.bpf.run_array[type]))
return -EEXIST;
}
- attached = rcu_dereference_protected(net->bpf.progs[type],
- lockdep_is_held(&netns_bpf_mutex));
- if (attached == prog)
- /* The same program cannot be attached twice */
- return -EINVAL;
-
- rcu_assign_pointer(net->bpf.progs[type], prog);
- if (attached)
- bpf_prog_put(attached);
return 0;
}
#endif /* CONFIG_BPF_SYSCALL */
@@ -903,7 +894,6 @@ bool __skb_flow_dissect(const struct net *net,
struct flow_dissector_key_addrs *key_addrs;
struct flow_dissector_key_tags *key_tags;
struct flow_dissector_key_vlan *key_vlan;
- struct bpf_prog *attached = NULL;
enum flow_dissect_ret fdret;
enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
bool mpls_el = false;
@@ -960,14 +950,14 @@ bool __skb_flow_dissect(const struct net *net,
WARN_ON_ONCE(!net);
if (net) {
enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
+ struct bpf_prog_array *run_array;
rcu_read_lock();
- attached = rcu_dereference(init_net.bpf.progs[type]);
-
- if (!attached)
- attached = rcu_dereference(net->bpf.progs[type]);
+ run_array = rcu_dereference(init_net.bpf.run_array[type]);
+ if (!run_array)
+ run_array = rcu_dereference(net->bpf.run_array[type]);
- if (attached) {
+ if (run_array) {
struct bpf_flow_keys flow_keys;
struct bpf_flow_dissector ctx = {
.flow_keys = &flow_keys,
@@ -975,6 +965,7 @@ bool __skb_flow_dissect(const struct net *net,
.data_end = data + hlen,
};
__be16 n_proto = proto;
+ struct bpf_prog *prog;
if (skb) {
ctx.skb = skb;
@@ -985,7 +976,8 @@ bool __skb_flow_dissect(const struct net *net,
n_proto = skb->protocol;
}
- ret = bpf_flow_dissect(attached, &ctx, n_proto, nhoff,
+ prog = READ_ONCE(run_array->items[0].prog);
+ ret = bpf_flow_dissect(prog, &ctx, n_proto, nhoff,
hlen, flags);
__skb_flow_bpf_to_target(&flow_keys, flow_dissector,
target_container);
diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
index 0cfc35e6be28..2076219b8ba5 100644
--- a/net/core/flow_offload.c
+++ b/net/core/flow_offload.c
@@ -4,6 +4,7 @@
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
+#include <linux/rhashtable.h>
struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
@@ -372,14 +373,15 @@ int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
}
EXPORT_SYMBOL(flow_indr_dev_register);
-static void __flow_block_indr_cleanup(flow_setup_cb_t *setup_cb, void *cb_priv,
+static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
+ void *cb_priv,
struct list_head *cleanup_list)
{
struct flow_block_cb *this, *next;
list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
- if (this->cb == setup_cb &&
- this->cb_priv == cb_priv) {
+ if (this->release == release &&
+ this->indr.cb_priv == cb_priv) {
list_move(&this->indr.list, cleanup_list);
return;
}
@@ -397,7 +399,7 @@ static void flow_block_indr_notify(struct list_head *cleanup_list)
}
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
- flow_setup_cb_t *setup_cb)
+ void (*release)(void *cb_priv))
{
struct flow_indr_dev *this, *next, *indr_dev = NULL;
LIST_HEAD(cleanup_list);
@@ -418,7 +420,7 @@ void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
return;
}
- __flow_block_indr_cleanup(setup_cb, cb_priv, &cleanup_list);
+ __flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
mutex_unlock(&flow_indr_block_lock);
flow_block_indr_notify(&cleanup_list);
@@ -429,32 +431,37 @@ EXPORT_SYMBOL(flow_indr_dev_unregister);
static void flow_block_indr_init(struct flow_block_cb *flow_block,
struct flow_block_offload *bo,
struct net_device *dev, void *data,
+ void *cb_priv,
void (*cleanup)(struct flow_block_cb *block_cb))
{
flow_block->indr.binder_type = bo->binder_type;
flow_block->indr.data = data;
+ flow_block->indr.cb_priv = cb_priv;
flow_block->indr.dev = dev;
flow_block->indr.cleanup = cleanup;
}
-static void __flow_block_indr_binding(struct flow_block_offload *bo,
- struct net_device *dev, void *data,
- void (*cleanup)(struct flow_block_cb *block_cb))
+struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
+ void *cb_ident, void *cb_priv,
+ void (*release)(void *cb_priv),
+ struct flow_block_offload *bo,
+ struct net_device *dev, void *data,
+ void *indr_cb_priv,
+ void (*cleanup)(struct flow_block_cb *block_cb))
{
struct flow_block_cb *block_cb;
- list_for_each_entry(block_cb, &bo->cb_list, list) {
- switch (bo->command) {
- case FLOW_BLOCK_BIND:
- flow_block_indr_init(block_cb, bo, dev, data, cleanup);
- list_add(&block_cb->indr.list, &flow_block_indr_list);
- break;
- case FLOW_BLOCK_UNBIND:
- list_del(&block_cb->indr.list);
- break;
- }
- }
+ block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
+ if (IS_ERR(block_cb))
+ goto out;
+
+ flow_block_indr_init(block_cb, bo, dev, data, indr_cb_priv, cleanup);
+ list_add(&block_cb->indr.list, &flow_block_indr_list);
+
+out:
+ return block_cb;
}
+EXPORT_SYMBOL(flow_indr_block_cb_alloc);
int flow_indr_dev_setup_offload(struct net_device *dev,
enum tc_setup_type type, void *data,
@@ -465,9 +472,8 @@ int flow_indr_dev_setup_offload(struct net_device *dev,
mutex_lock(&flow_indr_block_lock);
list_for_each_entry(this, &flow_block_indr_dev_list, list)
- this->cb(dev, this->cb_priv, type, bo);
+ this->cb(dev, this->cb_priv, type, bo, data, cleanup);
- __flow_block_indr_binding(bo, dev, data, cleanup);
mutex_unlock(&flow_indr_block_lock);
return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index e353b822bb15..7bd6440c63bf 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1108,7 +1108,7 @@ static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
trans_timeout = queue->trans_timeout;
spin_unlock_irq(&queue->_xmit_lock);
- return sprintf(buf, "%lu", trans_timeout);
+ return sprintf(buf, fmt_ulong, trans_timeout);
}
static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9aedc15736ad..85a4b0101f76 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3343,7 +3343,8 @@ replay:
*/
if (err < 0) {
/* If device is not registered at all, free it now */
- if (dev->reg_state == NETREG_UNINITIALIZED)
+ if (dev->reg_state == NETREG_UNINITIALIZED ||
+ dev->reg_state == NETREG_UNREGISTERED)
free_netdev(dev);
goto out;
}
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 351afbf6bfba..6a32a1fd34f8 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -683,7 +683,7 @@ static struct sk_psock *sk_psock_from_strp(struct strparser *strp)
return container_of(parser, struct sk_psock, parser);
}
-static void sk_psock_skb_redirect(struct sk_psock *psock, struct sk_buff *skb)
+static void sk_psock_skb_redirect(struct sk_buff *skb)
{
struct sk_psock *psock_other;
struct sock *sk_other;
@@ -715,12 +715,11 @@ static void sk_psock_skb_redirect(struct sk_psock *psock, struct sk_buff *skb)
}
}
-static void sk_psock_tls_verdict_apply(struct sk_psock *psock,
- struct sk_buff *skb, int verdict)
+static void sk_psock_tls_verdict_apply(struct sk_buff *skb, int verdict)
{
switch (verdict) {
case __SK_REDIRECT:
- sk_psock_skb_redirect(psock, skb);
+ sk_psock_skb_redirect(skb);
break;
case __SK_PASS:
case __SK_DROP:
@@ -741,8 +740,8 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
ret = sk_psock_bpf_run(psock, prog, skb);
ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
}
+ sk_psock_tls_verdict_apply(skb, ret);
rcu_read_unlock();
- sk_psock_tls_verdict_apply(psock, skb, ret);
return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
@@ -770,7 +769,7 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
}
goto out_free;
case __SK_REDIRECT:
- sk_psock_skb_redirect(psock, skb);
+ sk_psock_skb_redirect(skb);
break;
case __SK_DROP:
/* fall-through */
@@ -782,11 +781,18 @@ out_free:
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
- struct sk_psock *psock = sk_psock_from_strp(strp);
+ struct sk_psock *psock;
struct bpf_prog *prog;
int ret = __SK_DROP;
+ struct sock *sk;
rcu_read_lock();
+ sk = strp->sk;
+ psock = sk_psock(sk);
+ if (unlikely(!psock)) {
+ kfree_skb(skb);
+ goto out;
+ }
prog = READ_ONCE(psock->progs.skb_verdict);
if (likely(prog)) {
skb_orphan(skb);
@@ -794,8 +800,9 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
ret = sk_psock_bpf_run(psock, prog, skb);
ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
}
- rcu_read_unlock();
sk_psock_verdict_apply(psock, skb, ret);
+out:
+ rcu_read_unlock();
}
static int sk_psock_strp_read_done(struct strparser *strp, int err)
diff --git a/net/core/sock.c b/net/core/sock.c
index 6c4acf1f0220..2e5b7870e5d3 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -718,7 +718,7 @@ bool sk_mc_loop(struct sock *sk)
return inet6_sk(sk)->mc_loop;
#endif
}
- WARN_ON(1);
+ WARN_ON_ONCE(1);
return true;
}
EXPORT_SYMBOL(sk_mc_loop);
@@ -1767,6 +1767,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
cgroup_sk_alloc(&sk->sk_cgrp_data);
sock_update_classid(&sk->sk_cgrp_data);
sock_update_netprioidx(&sk->sk_cgrp_data);
+ sk_tx_queue_clear(sk);
}
return sk;
@@ -1925,7 +1926,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
/* sk->sk_memcg will be populated at accept() time */
newsk->sk_memcg = NULL;
- cgroup_sk_alloc(&newsk->sk_cgrp_data);
+ cgroup_sk_clone(&newsk->sk_cgrp_data);
rcu_read_lock();
filter = rcu_dereference(sk->sk_filter);
@@ -1990,6 +1991,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
*/
sk_refcnt_debug_inc(newsk);
sk_set_socket(newsk, NULL);
+ sk_tx_queue_clear(newsk);
RCU_INIT_POINTER(newsk->sk_wq, NULL);
if (newsk->sk_prot->sockets_allocated)
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 4059f94e9bb5..0971f17e8e54 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -70,11 +70,49 @@ int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
struct fd f;
int ret;
+ if (attr->attach_flags || attr->replace_bpf_fd)
+ return -EINVAL;
+
f = fdget(ufd);
map = __bpf_map_get(f);
if (IS_ERR(map))
return PTR_ERR(map);
- ret = sock_map_prog_update(map, prog, attr->attach_type);
+ ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
+ fdput(f);
+ return ret;
+}
+
+int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
+{
+ u32 ufd = attr->target_fd;
+ struct bpf_prog *prog;
+ struct bpf_map *map;
+ struct fd f;
+ int ret;
+
+ if (attr->attach_flags || attr->replace_bpf_fd)
+ return -EINVAL;
+
+ f = fdget(ufd);
+ map = __bpf_map_get(f);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ prog = bpf_prog_get(attr->attach_bpf_fd);
+ if (IS_ERR(prog)) {
+ ret = PTR_ERR(prog);
+ goto put_map;
+ }
+
+ if (prog->type != ptype) {
+ ret = -EINVAL;
+ goto put_prog;
+ }
+
+ ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
+put_prog:
+ bpf_prog_put(prog);
+put_map:
fdput(f);
return ret;
}
@@ -1203,27 +1241,32 @@ static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
}
int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
- u32 which)
+ struct bpf_prog *old, u32 which)
{
struct sk_psock_progs *progs = sock_map_progs(map);
+ struct bpf_prog **pprog;
if (!progs)
return -EOPNOTSUPP;
switch (which) {
case BPF_SK_MSG_VERDICT:
- psock_set_prog(&progs->msg_parser, prog);
+ pprog = &progs->msg_parser;
break;
case BPF_SK_SKB_STREAM_PARSER:
- psock_set_prog(&progs->skb_parser, prog);
+ pprog = &progs->skb_parser;
break;
case BPF_SK_SKB_STREAM_VERDICT:
- psock_set_prog(&progs->skb_verdict, prog);
+ pprog = &progs->skb_verdict;
break;
default:
return -EOPNOTSUPP;
}
+ if (old)
+ return psock_replace_prog(pprog, prog, old);
+
+ psock_set_prog(pprog, prog);
return 0;
}
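
With the detach path in place, sock_map_prog_update() first resolves which program slot the attach type maps to, then either installs prog unconditionally or, when old is given, replaces it only if the slot still holds old. psock_replace_prog() is not shown in this diff; assuming the usual compare-and-swap contract (replace succeeds only when the expected old value is still installed), the logic looks like this userspace sketch:

#include <stdio.h>

/* Replace the slot only if it still holds 'old'; -2 stands in for the
 * -ENOENT-style error the kernel helper would return on a mismatch. */
static int replace_prog(void **slot, void *new_prog, void *old)
{
	void *expected = old;

	if (__atomic_compare_exchange_n(slot, &expected, new_prog, 0,
					__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
		return 0;
	return -2;
}

int main(void)
{
	int a, b;
	void *slot = &a;

	printf("%d\n", replace_prog(&slot, NULL, &b)); /* wrong old: -2 */
	printf("%d\n", replace_prog(&slot, NULL, &a)); /* matches:   0 */
	return 0;
}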
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index adcb3aea576d..bbdd3c7b6cb5 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -101,6 +101,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
more_reuse->prog = reuse->prog;
more_reuse->reuseport_id = reuse->reuseport_id;
more_reuse->bind_inany = reuse->bind_inany;
+ more_reuse->has_conns = reuse->has_conns;
memcpy(more_reuse->socks, reuse->socks,
reuse->num_socks * sizeof(struct sock *));
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index f93f8ace6c56..6ada114bbcca 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -274,7 +274,7 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
if (write && !ret) {
if (jit_enable < 2 ||
- (jit_enable == 2 && bpf_dump_raw_ok())) {
+ (jit_enable == 2 && bpf_dump_raw_ok(current_cred()))) {
*(int *)table->data = jit_enable;
if (jit_enable == 2)
pr_warn("bpf_jit_enable = 2 was set! NEVER use this in production, only for JIT debugging!\n");
diff --git a/net/core/xdp.c b/net/core/xdp.c
index 90f44f382115..3c45f99e26d5 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -462,6 +462,7 @@ struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
xdpf->len = totsize - metasize;
xdpf->headroom = 0;
xdpf->metasize = metasize;
+ xdpf->frame_sz = PAGE_SIZE;
xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
xsk_buff_free(xdp);
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index e8eaa804ccb9..d6200ff98200 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -13,6 +13,16 @@
#define DSA_HLEN 4
#define EDSA_HLEN 8
+#define FRAME_TYPE_TO_CPU 0x00
+#define FRAME_TYPE_FORWARD 0x03
+
+#define TO_CPU_CODE_MGMT_TRAP 0x00
+#define TO_CPU_CODE_FRAME2REG 0x01
+#define TO_CPU_CODE_IGMP_MLD_TRAP 0x02
+#define TO_CPU_CODE_POLICY_TRAP 0x03
+#define TO_CPU_CODE_ARP_MIRROR 0x04
+#define TO_CPU_CODE_POLICY_MIRROR 0x05
+
static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_port *dp = dsa_slave_to_port(dev);
@@ -77,6 +87,8 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt)
{
u8 *edsa_header;
+ int frame_type;
+ int code;
int source_device;
int source_port;
@@ -91,8 +103,29 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
/*
* Check that frame type is either TO_CPU or FORWARD.
*/
- if ((edsa_header[0] & 0xc0) != 0x00 && (edsa_header[0] & 0xc0) != 0xc0)
+ frame_type = edsa_header[0] >> 6;
+
+ switch (frame_type) {
+ case FRAME_TYPE_TO_CPU:
+ code = (edsa_header[1] & 0x6) | ((edsa_header[2] >> 4) & 1);
+
+ /*
+ * Mark the frame to never egress on any port of the same switch
+ * unless it's a trapped IGMP/MLD packet, in which case the
+ * bridge might want to forward it.
+ */
+ if (code != TO_CPU_CODE_IGMP_MLD_TRAP)
+ skb->offload_fwd_mark = 1;
+
+ break;
+
+ case FRAME_TYPE_FORWARD:
+ skb->offload_fwd_mark = 1;
+ break;
+
+ default:
return NULL;
+ }
/*
* Determine source device and port.
@@ -156,8 +189,6 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
2 * ETH_ALEN);
}
- skb->offload_fwd_mark = 1;
-
return skb;
}
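
The rewrite above replaces opaque mask tests with named frame types: the top two bits of EDSA byte 0 select TO_CPU (0x0) or FORWARD (0x3), and for TO_CPU frames a trap/mirror code is assembled from bits of bytes 1 and 2. A runnable decode of both cases; the header bytes are made-up samples, not captured frames:

#include <stdio.h>

#define FRAME_TYPE_TO_CPU		0x00
#define FRAME_TYPE_FORWARD		0x03
#define TO_CPU_CODE_IGMP_MLD_TRAP	0x02

static void decode(const unsigned char *hdr)
{
	int frame_type = hdr[0] >> 6;	/* top two bits of byte 0 */
	int code = (hdr[1] & 0x6) | ((hdr[2] >> 4) & 1);

	printf("type=%d code=%d fwd_mark=%d\n", frame_type, code,
	       frame_type == FRAME_TYPE_FORWARD ||
	       code != TO_CPU_CODE_IGMP_MLD_TRAP);
}

int main(void)
{
	unsigned char to_cpu_igmp[4] = { 0x00, 0x02, 0x00, 0x00 };
	unsigned char forward[4]     = { 0xc0, 0x00, 0x00, 0x00 };

	decode(to_cpu_igmp);	/* trapped IGMP/MLD: no offload mark */
	decode(forward);	/* forwarded frame: offload mark set */
	return 0;
}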
diff --git a/net/ethtool/cabletest.c b/net/ethtool/cabletest.c
index 7b7a0456c15c..7194956aa09e 100644
--- a/net/ethtool/cabletest.c
+++ b/net/ethtool/cabletest.c
@@ -234,6 +234,14 @@ static int ethnl_act_cable_test_tdr_cfg(const struct nlattr *nest,
struct nlattr *tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX + 1];
int ret;
+ cfg->first = 100;
+ cfg->step = 100;
+ cfg->last = MAX_CABLE_LENGTH_CM;
+ cfg->pair = PHY_PAIR_ALL;
+
+ if (!nest)
+ return 0;
+
ret = nla_parse_nested(tb, ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX, nest,
cable_test_tdr_act_cfg_policy, info->extack);
if (ret < 0)
@@ -242,17 +250,12 @@ static int ethnl_act_cable_test_tdr_cfg(const struct nlattr *nest,
if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST])
cfg->first = nla_get_u32(
tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST]);
- else
- cfg->first = 100;
+
if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST])
cfg->last = nla_get_u32(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST]);
- else
- cfg->last = MAX_CABLE_LENGTH_CM;
if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP])
cfg->step = nla_get_u32(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP]);
- else
- cfg->step = 100;
if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR]) {
cfg->pair = nla_get_u8(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR]);
@@ -263,8 +266,6 @@ static int ethnl_act_cable_test_tdr_cfg(const struct nlattr *nest,
"invalid pair parameter");
return -EINVAL;
}
- } else {
- cfg->pair = PHY_PAIR_ALL;
}
if (cfg->first > MAX_CABLE_LENGTH_CM) {
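
The reordering above is the usual defaults-then-override shape for optional netlink nests: initialize every field up front, return early when no nest was supplied, then let each present attribute overwrite its default. Reduced to a standalone sketch, with a plain struct standing in for the parsed attribute table tb[] and only one attribute modeled:

#include <stdio.h>

#define MAX_CABLE_LENGTH_CM (150 * 100)

struct tdr_cfg { unsigned int first, last, step; };
struct tdr_attrs { int has_first; unsigned int first; };

static void parse_cfg(const struct tdr_attrs *attrs, struct tdr_cfg *cfg)
{
	/* Defaults first, unconditionally... */
	cfg->first = 100;
	cfg->step = 100;
	cfg->last = MAX_CABLE_LENGTH_CM;

	if (!attrs)		/* no nest supplied: defaults stand */
		return;

	/* ...then overrides, one per present attribute. */
	if (attrs->has_first)
		cfg->first = attrs->first;
}

int main(void)
{
	struct tdr_cfg cfg;
	struct tdr_attrs attrs = { .has_first = 1, .first = 500 };

	parse_cfg(NULL, &cfg);
	printf("defaults: first=%u step=%u last=%u\n",
	       cfg.first, cfg.step, cfg.last);
	parse_cfg(&attrs, &cfg);
	printf("override: first=%u\n", cfg.first);
	return 0;
}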
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index 423e640e3876..aaecfc916a4d 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -40,9 +40,11 @@ const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = {
[NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation",
[NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation",
[NETIF_F_GSO_PARTIAL_BIT] = "tx-gso-partial",
+ [NETIF_F_GSO_TUNNEL_REMCSUM_BIT] = "tx-tunnel-remcsum-segmentation",
[NETIF_F_GSO_SCTP_BIT] = "tx-sctp-segmentation",
[NETIF_F_GSO_ESP_BIT] = "tx-esp-segmentation",
[NETIF_F_GSO_UDP_L4_BIT] = "tx-udp-segmentation",
+ [NETIF_F_GSO_FRAGLIST_BIT] = "tx-gso-list",
[NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc",
[NETIF_F_SCTP_CRC_BIT] = "tx-checksum-sctp",
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index b5df90c981c2..21d5fc0f6bb3 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -2978,7 +2978,7 @@ ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input)
sizeof(match->mask.ipv6.dst));
}
if (memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr)) ||
- memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr))) {
+ memcmp(v6_m_spec->ip6dst, &zero_addr, sizeof(zero_addr))) {
match->dissector.used_keys |=
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
match->dissector.offset[FLOW_DISSECTOR_KEY_IPV6_ADDRS] =
diff --git a/net/ethtool/linkstate.c b/net/ethtool/linkstate.c
index 7f47ba89054e..afe5ac8a0f00 100644
--- a/net/ethtool/linkstate.c
+++ b/net/ethtool/linkstate.c
@@ -78,19 +78,18 @@ static int linkstate_prepare_data(const struct ethnl_req_info *req_base,
ret = linkstate_get_sqi(dev);
if (ret < 0 && ret != -EOPNOTSUPP)
- return ret;
-
+ goto out;
data->sqi = ret;
ret = linkstate_get_sqi_max(dev);
if (ret < 0 && ret != -EOPNOTSUPP)
- return ret;
-
+ goto out;
data->sqi_max = ret;
+ ret = 0;
+out:
ethnl_ops_complete(dev);
-
- return 0;
+ return ret;
}
static int linkstate_reply_size(const struct ethnl_req_info *req_base,
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index 88fd07f47040..dd8a1c1dc07d 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -376,10 +376,17 @@ err_dev:
}
static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev,
- const struct ethnl_dump_ctx *ctx)
+ const struct ethnl_dump_ctx *ctx,
+ struct netlink_callback *cb)
{
+ void *ehdr;
int ret;
+ ehdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+ &ethtool_genl_family, 0, ctx->ops->reply_cmd);
+ if (!ehdr)
+ return -EMSGSIZE;
+
ethnl_init_reply_data(ctx->reply_data, ctx->ops, dev);
rtnl_lock();
ret = ctx->ops->prepare_data(ctx->req_info, ctx->reply_data, NULL);
@@ -395,6 +402,10 @@ out:
if (ctx->ops->cleanup_data)
ctx->ops->cleanup_data(ctx->reply_data);
ctx->reply_data->dev = NULL;
+ if (ret < 0)
+ genlmsg_cancel(skb, ehdr);
+ else
+ genlmsg_end(skb, ehdr);
return ret;
}
@@ -411,7 +422,6 @@ static int ethnl_default_dumpit(struct sk_buff *skb,
int s_idx = ctx->pos_idx;
int h, idx = 0;
int ret = 0;
- void *ehdr;
rtnl_lock();
for (h = ctx->pos_hash; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
@@ -431,26 +441,15 @@ restart_chain:
dev_hold(dev);
rtnl_unlock();
- ehdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- &ethtool_genl_family, 0,
- ctx->ops->reply_cmd);
- if (!ehdr) {
- dev_put(dev);
- ret = -EMSGSIZE;
- goto out;
- }
- ret = ethnl_default_dump_one(skb, dev, ctx);
+ ret = ethnl_default_dump_one(skb, dev, ctx, cb);
dev_put(dev);
if (ret < 0) {
- genlmsg_cancel(skb, ehdr);
if (ret == -EOPNOTSUPP)
goto lock_and_cont;
if (likely(skb->len))
ret = skb->len;
goto out;
}
- genlmsg_end(skb, ehdr);
lock_and_cont:
rtnl_lock();
if (net->dev_base_seq != seq) {
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index cd99f548e440..a6f4e9f65b14 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -339,7 +339,7 @@ static void hsr_announce(struct timer_list *t)
rcu_read_unlock();
}
-static void hsr_del_ports(struct hsr_priv *hsr)
+void hsr_del_ports(struct hsr_priv *hsr)
{
struct hsr_port *port;
@@ -356,31 +356,12 @@ static void hsr_del_ports(struct hsr_priv *hsr)
hsr_del_port(port);
}
-/* This has to be called after all the readers are gone.
- * Otherwise we would have to check the return value of
- * hsr_port_get_hsr().
- */
-static void hsr_dev_destroy(struct net_device *hsr_dev)
-{
- struct hsr_priv *hsr = netdev_priv(hsr_dev);
-
- hsr_debugfs_term(hsr);
- hsr_del_ports(hsr);
-
- del_timer_sync(&hsr->prune_timer);
- del_timer_sync(&hsr->announce_timer);
-
- hsr_del_self_node(hsr);
- hsr_del_nodes(&hsr->node_db);
-}
-
static const struct net_device_ops hsr_device_ops = {
.ndo_change_mtu = hsr_dev_change_mtu,
.ndo_open = hsr_dev_open,
.ndo_stop = hsr_dev_close,
.ndo_start_xmit = hsr_dev_xmit,
.ndo_fix_features = hsr_fix_features,
- .ndo_uninit = hsr_dev_destroy,
};
static struct device_type hsr_type = {
@@ -434,6 +415,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
unsigned char multicast_spec, u8 protocol_version,
struct netlink_ext_ack *extack)
{
+ bool unregister = false;
struct hsr_priv *hsr;
int res;
@@ -485,25 +467,27 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
if (res)
goto err_unregister;
+ unregister = true;
+
res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A, extack);
if (res)
- goto err_add_slaves;
+ goto err_unregister;
res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B, extack);
if (res)
- goto err_add_slaves;
+ goto err_unregister;
hsr_debugfs_init(hsr, hsr_dev);
mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));
return 0;
-err_add_slaves:
- unregister_netdevice(hsr_dev);
err_unregister:
hsr_del_ports(hsr);
err_add_master:
hsr_del_self_node(hsr);
+ if (unregister)
+ unregister_netdevice(hsr_dev);
return res;
}
diff --git a/net/hsr/hsr_device.h b/net/hsr/hsr_device.h
index a099d7de7e79..b8f9262ed101 100644
--- a/net/hsr/hsr_device.h
+++ b/net/hsr/hsr_device.h
@@ -11,6 +11,7 @@
#include <linux/netdevice.h>
#include "hsr_main.h"
+void hsr_del_ports(struct hsr_priv *hsr);
void hsr_dev_setup(struct net_device *dev);
int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
unsigned char multicast_spec, u8 protocol_version,
@@ -18,5 +19,4 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
void hsr_check_carrier_and_operstate(struct hsr_priv *hsr);
bool is_hsr_master(struct net_device *dev);
int hsr_get_max_mtu(struct hsr_priv *hsr);
-
#endif /* __HSR_DEVICE_H */
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index ed13760463de..1ea17752fffc 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -120,13 +120,18 @@ static struct sk_buff *frame_get_stripped_skb(struct hsr_frame_info *frame,
return skb_clone(frame->skb_std, GFP_ATOMIC);
}
-static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame,
- struct hsr_port *port, u8 proto_version)
+static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
+ struct hsr_frame_info *frame,
+ struct hsr_port *port, u8 proto_version)
{
struct hsr_ethhdr *hsr_ethhdr;
int lane_id;
int lsdu_size;
+ /* pad to minimum packet size which is 60 + 6 (HSR tag) */
+ if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
+ return NULL;
+
if (port->type == HSR_PT_SLAVE_A)
lane_id = 0;
else
@@ -144,6 +149,8 @@ static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame,
hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
hsr_ethhdr->ethhdr.h_proto = htons(proto_version ?
ETH_P_HSR : ETH_P_PRP);
+
+ return skb;
}
static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o,
@@ -172,9 +179,10 @@ static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o,
memmove(dst, src, movelen);
skb_reset_mac_header(skb);
- hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
-
- return skb;
+ /* skb_put_padto() frees the skb on error, and hsr_fill_tag()
+ * returns NULL in that case
+ */
+ return hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
}
/* If the original frame was an HSR tagged frame, just clone it to be sent
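
hsr_fill_tag() now carries an ownership rule inherited from skb_put_padto(): on allocation failure the skb has already been freed, so a NULL return means "gone", not "retry". A userspace model of that pad-or-free contract, with malloc/realloc standing in for skb management:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { unsigned char *data; size_t len; };

/* Grow to min_len, zero-filling the tail. On failure the buffer is
 * freed and NULL is returned - the caller must not touch it again. */
static struct buf *pad_to(struct buf *b, size_t min_len)
{
	unsigned char *p;

	if (b->len >= min_len)
		return b;
	p = realloc(b->data, min_len);
	if (!p) {
		free(b->data);
		free(b);
		return NULL;
	}
	memset(p + b->len, 0, min_len - b->len);
	b->data = p;
	b->len = min_len;
	return b;
}

int main(void)
{
	struct buf *b = calloc(1, sizeof(*b));

	if (!b || !(b->data = malloc(10)))
		return 1;
	b->len = 10;
	b = pad_to(b, 66);	/* 60 (ETH_ZLEN) + 6 (HSR tag) */
	if (!b)
		return 1;	/* already freed, like the padded skb */
	printf("len=%zu\n", b->len);
	free(b->data);
	free(b);
	return 0;
}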
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 03b891904314..530de24b1fb5 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -325,7 +325,8 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
if (port->type != node_dst->addr_B_port)
return;
- ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B);
+ if (is_valid_ether_addr(node_dst->macaddress_B))
+ ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B);
}
void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
index e2564de67603..144da15f0a81 100644
--- a/net/hsr/hsr_main.c
+++ b/net/hsr/hsr_main.c
@@ -6,6 +6,7 @@
*/
#include <linux/netdevice.h>
+#include <net/rtnetlink.h>
#include <linux/rculist.h>
#include <linux/timer.h>
#include <linux/etherdevice.h>
@@ -100,8 +101,10 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
master = hsr_port_get_hsr(port->hsr, HSR_PT_MASTER);
hsr_del_port(port);
if (hsr_slave_empty(master->hsr)) {
- unregister_netdevice_queue(master->dev,
- &list_kill);
+ const struct rtnl_link_ops *ops;
+
+ ops = master->dev->rtnl_link_ops;
+ ops->dellink(master->dev, &list_kill);
unregister_netdevice_many(&list_kill);
}
}
@@ -144,9 +147,9 @@ static int __init hsr_init(void)
static void __exit hsr_exit(void)
{
- unregister_netdevice_notifier(&hsr_nb);
hsr_netlink_exit();
hsr_debugfs_remove_root();
+ unregister_netdevice_notifier(&hsr_nb);
}
module_init(hsr_init);
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
index 1decb25f6764..6e14b7d22639 100644
--- a/net/hsr/hsr_netlink.c
+++ b/net/hsr/hsr_netlink.c
@@ -83,6 +83,22 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
return hsr_dev_finalize(dev, link, multicast_spec, hsr_version, extack);
}
+static void hsr_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct hsr_priv *hsr = netdev_priv(dev);
+
+ del_timer_sync(&hsr->prune_timer);
+ del_timer_sync(&hsr->announce_timer);
+
+ hsr_debugfs_term(hsr);
+ hsr_del_ports(hsr);
+
+ hsr_del_self_node(hsr);
+ hsr_del_nodes(&hsr->node_db);
+
+ unregister_netdevice_queue(dev, head);
+}
+
static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct hsr_priv *hsr = netdev_priv(dev);
@@ -118,6 +134,7 @@ static struct rtnl_link_ops hsr_link_ops __read_mostly = {
.priv_size = sizeof(struct hsr_priv),
.setup = hsr_dev_setup,
.newlink = hsr_newlink,
+ .dellink = hsr_dellink,
.fill_info = hsr_fill_info,
};
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 6ecbb0ced177..e64e59b536d3 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -340,29 +340,31 @@ config NET_FOU_IP_TUNNELS
config INET_AH
tristate "IP: AH transformation"
- select XFRM_ALGO
- select CRYPTO
- select CRYPTO_HMAC
- select CRYPTO_MD5
- select CRYPTO_SHA1
+ select XFRM_AH
help
- Support for IPsec AH.
+ Support for IPsec AH (Authentication Header).
+
+ AH can be used with various authentication algorithms. Besides
+ enabling AH support itself, this option enables the generic
+ implementations of the algorithms that RFC 8221 lists as MUST be
+ implemented. If you need any other algorithms, you'll need to enable
+ them in the crypto API. You should also enable accelerated
+ implementations of any needed algorithms when available.
If unsure, say Y.
config INET_ESP
tristate "IP: ESP transformation"
- select XFRM_ALGO
- select CRYPTO
- select CRYPTO_AUTHENC
- select CRYPTO_HMAC
- select CRYPTO_MD5
- select CRYPTO_CBC
- select CRYPTO_SHA1
- select CRYPTO_DES
- select CRYPTO_ECHAINIV
+ select XFRM_ESP
help
- Support for IPsec ESP.
+ Support for IPsec ESP (Encapsulating Security Payload).
+
+ ESP can be used with various encryption and authentication algorithms.
+ Besides enabling ESP support itself, this option enables the generic
+ implementations of the algorithms that RFC 8221 lists as MUST be
+ implemented. If you need any other algorithms, you'll need to enable
+ them in the crypto API. You should also enable accelerated
+ implementations of any needed algorithms when available.
If unsure, say Y.
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index d14133eac476..5bda5aeda579 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -361,3 +361,4 @@ module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
+MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index e53871e4a097..1f75dc686b6b 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1109,7 +1109,7 @@ static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table,
if (fl4.flowi4_scope < RT_SCOPE_LINK)
fl4.flowi4_scope = RT_SCOPE_LINK;
- if (table)
+ if (table && table != RT_TABLE_MAIN)
tbl = fib_get_table(net, table);
if (tbl)
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index dcc79ff54b41..abd083415f89 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -1304,3 +1304,4 @@ module_init(fou_init);
module_exit(fou_fini);
MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Foo over UDP");
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 956a806649f7..e30515f89802 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -427,7 +427,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
ipcm_init(&ipc);
inet->tos = ip_hdr(skb)->tos;
- sk->sk_mark = mark;
+ ipc.sockc.mark = mark;
daddr = ipc.addr = ip_hdr(skb)->saddr;
saddr = fib_compute_spec_dst(skb);
@@ -710,10 +710,10 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
icmp_param.skb = skb_in;
icmp_param.offset = skb_network_offset(skb_in);
inet_sk(sk)->tos = tos;
- sk->sk_mark = mark;
ipcm_init(&ipc);
ipc.addr = iph->saddr;
ipc.opt = &icmp_param.replyopts.opt;
+ ipc.sockc.mark = mark;
rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
type, code, &icmp_param);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 090d3097ee15..17206677d503 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1702,7 +1702,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
sk->sk_protocol = ip_hdr(skb)->protocol;
sk->sk_bound_dev_if = arg->bound_dev_if;
sk->sk_sndbuf = sysctl_wmem_default;
- sk->sk_mark = fl4.flowi4_mark;
+ ipc.sockc.mark = fl4.flowi4_mark;
err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
len, 0, &ipc, &rt, MSG_DONTWAIT);
if (unlikely(err)) {
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index f4f1d11eab50..0c1f36404471 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -85,9 +85,10 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
__be32 remote, __be32 local,
__be32 key)
{
- unsigned int hash;
struct ip_tunnel *t, *cand = NULL;
struct hlist_head *head;
+ struct net_device *ndev;
+ unsigned int hash;
hash = ip_tunnel_hash(key, remote);
head = &itn->tunnels[hash];
@@ -162,8 +163,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
if (t && t->dev->flags & IFF_UP)
return t;
- if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
- return netdev_priv(itn->fb_tunnel_dev);
+ ndev = READ_ONCE(itn->fb_tunnel_dev);
+ if (ndev && ndev->flags & IFF_UP)
+ return netdev_priv(ndev);
return NULL;
}
@@ -1259,9 +1261,9 @@ void ip_tunnel_uninit(struct net_device *dev)
struct ip_tunnel_net *itn;
itn = net_generic(net, tunnel->ip_tnl_net_id);
- /* fb_tunnel_dev will be unregisted in net-exit call. */
- if (itn->fb_tunnel_dev != dev)
- ip_tunnel_del(itn, netdev_priv(dev));
+ ip_tunnel_del(itn, netdev_priv(dev));
+ if (itn->fb_tunnel_dev == dev)
+ WRITE_ONCE(itn->fb_tunnel_dev, NULL);
dst_cache_reset(&tunnel->dst_cache);
}
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 181b7a2a0247..f8b419e2475c 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -844,3 +844,21 @@ void ip_tunnel_unneed_metadata(void)
static_branch_dec(&ip_tunnel_metadata_cnt);
}
EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata);
+
+/* Returns either the correct skb->protocol value, or 0 if invalid. */
+__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb)
+{
+ if (skb_network_header(skb) >= skb->head &&
+ (skb_network_header(skb) + sizeof(struct iphdr)) <= skb_tail_pointer(skb) &&
+ ip_hdr(skb)->version == 4)
+ return htons(ETH_P_IP);
+ if (skb_network_header(skb) >= skb->head &&
+ (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= skb_tail_pointer(skb) &&
+ ipv6_hdr(skb)->version == 6)
+ return htons(ETH_P_IPV6);
+ return 0;
+}
+EXPORT_SYMBOL(ip_tunnel_parse_protocol);
+
+const struct header_ops ip_tunnel_header_ops = { .parse_protocol = ip_tunnel_parse_protocol };
+EXPORT_SYMBOL(ip_tunnel_header_ops);
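
ip_tunnel_parse_protocol() lets header-less tunnel devices answer "what protocol is this frame?" by sniffing the IP version nibble after a bounds check, and ip_tunnel_header_ops exposes that as a .parse_protocol hook (used, for instance, by AF_PACKET senders). The sniffing logic itself, as a standalone sketch with plain buffers instead of skb accessors:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Check the buffer holds at least one full header, then decide by the
 * version nibble; returns the EtherType, or 0 for "invalid". */
static uint16_t guess_proto(const uint8_t *hdr, size_t len)
{
	if (len >= 20 && (hdr[0] >> 4) == 4)	/* struct iphdr */
		return 0x0800;			/* ETH_P_IP */
	if (len >= 40 && (hdr[0] >> 4) == 6)	/* struct ipv6hdr */
		return 0x86DD;			/* ETH_P_IPV6 */
	return 0;
}

int main(void)
{
	uint8_t v4[20] = { 0x45 };	/* version 4, IHL 5 */
	uint8_t v6[40] = { 0x60 };	/* version 6 */

	printf("0x%04x 0x%04x\n", guess_proto(v4, sizeof(v4)),
	       guess_proto(v6, sizeof(v6)));
	return 0;
}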
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 1d9c8cff5ac3..460ca1099e8a 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -441,6 +441,7 @@ static const struct net_device_ops vti_netdev_ops = {
static void vti_tunnel_setup(struct net_device *dev)
{
dev->netdev_ops = &vti_netdev_ops;
+ dev->header_ops = &ip_tunnel_header_ops;
dev->type = ARPHRD_TUNNEL;
ip_tunnel_setup(dev, vti_net_id);
}
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 40fea52c8277..75d35e76bec2 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -361,6 +361,7 @@ static const struct net_device_ops ipip_netdev_ops = {
static void ipip_tunnel_setup(struct net_device *dev)
{
dev->netdev_ops = &ipip_netdev_ops;
+ dev->header_ops = &ip_tunnel_header_ops;
dev->type = ARPHRD_TUNNEL;
dev->flags = IFF_NOARP;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index c2670eaa74e6..5bf9fa06aee0 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1797,11 +1797,22 @@ out_free:
return ret;
}
+void ipt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+ const struct nf_hook_ops *ops)
+{
+ nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+}
+
+void ipt_unregister_table_exit(struct net *net, struct xt_table *table)
+{
+ __ipt_unregister_table(net, table);
+}
+
void ipt_unregister_table(struct net *net, struct xt_table *table,
const struct nf_hook_ops *ops)
{
if (ops)
- nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+ ipt_unregister_table_pre_exit(net, table, ops);
__ipt_unregister_table(net, table);
}
@@ -1958,6 +1969,8 @@ static void __exit ip_tables_fini(void)
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
+EXPORT_SYMBOL(ipt_unregister_table_pre_exit);
+EXPORT_SYMBOL(ipt_unregister_table_exit);
EXPORT_SYMBOL(ipt_do_table);
module_init(ip_tables_init);
module_exit(ip_tables_fini);
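
[Note] The split matters because net namespace teardown runs every registered .pre_exit handler, then an RCU grace period, before any .exit handler. Unregistering the hooks in .pre_exit therefore guarantees no packet is still traversing a table when .exit frees it. A hedged skeleton of the resulting shape, with a hypothetical table name:

	static void __net_exit foo_net_pre_exit(struct net *net)
	{
		/* phase 1: detach hooks; traffic stops hitting the table */
		if (net->ipv4.foo_table)
			ipt_unregister_table_pre_exit(net, net->ipv4.foo_table,
						      foo_ops);
	}

	static void __net_exit foo_net_exit(struct net *net)
	{
		/* phase 2: after the grace period, freeing is safe */
		if (!net->ipv4.foo_table)
			return;
		ipt_unregister_table_exit(net, net->ipv4.foo_table);
		net->ipv4.foo_table = NULL;
	}

	static struct pernet_operations foo_net_ops = {
		.pre_exit = foo_net_pre_exit,
		.exit     = foo_net_exit,
	};

The iptable_filter/mangle/nat/raw/security conversions below, and the ip6_tables counterparts further down, all follow this template.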
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index 748dc3ce58d3..f2984c7eef40 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -118,3 +118,4 @@ module_exit(synproxy_tg4_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("Intercept TCP connections and establish them using syncookies");
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index 9d54b4017e50..8f7bc1ee7453 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -72,16 +72,24 @@ static int __net_init iptable_filter_net_init(struct net *net)
return 0;
}
+static void __net_exit iptable_filter_net_pre_exit(struct net *net)
+{
+ if (net->ipv4.iptable_filter)
+ ipt_unregister_table_pre_exit(net, net->ipv4.iptable_filter,
+ filter_ops);
+}
+
static void __net_exit iptable_filter_net_exit(struct net *net)
{
if (!net->ipv4.iptable_filter)
return;
- ipt_unregister_table(net, net->ipv4.iptable_filter, filter_ops);
+ ipt_unregister_table_exit(net, net->ipv4.iptable_filter);
net->ipv4.iptable_filter = NULL;
}
static struct pernet_operations iptable_filter_net_ops = {
.init = iptable_filter_net_init,
+ .pre_exit = iptable_filter_net_pre_exit,
.exit = iptable_filter_net_exit,
};
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index bb9266ea3785..f703a717ab1d 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -100,15 +100,23 @@ static int __net_init iptable_mangle_table_init(struct net *net)
return ret;
}
+static void __net_exit iptable_mangle_net_pre_exit(struct net *net)
+{
+ if (net->ipv4.iptable_mangle)
+ ipt_unregister_table_pre_exit(net, net->ipv4.iptable_mangle,
+ mangle_ops);
+}
+
static void __net_exit iptable_mangle_net_exit(struct net *net)
{
if (!net->ipv4.iptable_mangle)
return;
- ipt_unregister_table(net, net->ipv4.iptable_mangle, mangle_ops);
+ ipt_unregister_table_exit(net, net->ipv4.iptable_mangle);
net->ipv4.iptable_mangle = NULL;
}
static struct pernet_operations iptable_mangle_net_ops = {
+ .pre_exit = iptable_mangle_net_pre_exit,
.exit = iptable_mangle_net_exit,
};
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index ad33687b7444..b0143b109f25 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -113,16 +113,22 @@ static int __net_init iptable_nat_table_init(struct net *net)
return ret;
}
+static void __net_exit iptable_nat_net_pre_exit(struct net *net)
+{
+ if (net->ipv4.nat_table)
+ ipt_nat_unregister_lookups(net);
+}
+
static void __net_exit iptable_nat_net_exit(struct net *net)
{
if (!net->ipv4.nat_table)
return;
- ipt_nat_unregister_lookups(net);
- ipt_unregister_table(net, net->ipv4.nat_table, NULL);
+ ipt_unregister_table_exit(net, net->ipv4.nat_table);
net->ipv4.nat_table = NULL;
}
static struct pernet_operations iptable_nat_net_ops = {
+ .pre_exit = iptable_nat_net_pre_exit,
.exit = iptable_nat_net_exit,
};
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 69697eb4bfc6..9abfe6bf2cb9 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -67,15 +67,23 @@ static int __net_init iptable_raw_table_init(struct net *net)
return ret;
}
+static void __net_exit iptable_raw_net_pre_exit(struct net *net)
+{
+ if (net->ipv4.iptable_raw)
+ ipt_unregister_table_pre_exit(net, net->ipv4.iptable_raw,
+ rawtable_ops);
+}
+
static void __net_exit iptable_raw_net_exit(struct net *net)
{
if (!net->ipv4.iptable_raw)
return;
- ipt_unregister_table(net, net->ipv4.iptable_raw, rawtable_ops);
+ ipt_unregister_table_exit(net, net->ipv4.iptable_raw);
net->ipv4.iptable_raw = NULL;
}
static struct pernet_operations iptable_raw_net_ops = {
+ .pre_exit = iptable_raw_net_pre_exit,
.exit = iptable_raw_net_exit,
};
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index ac633c1db97e..415c1975d770 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -62,16 +62,23 @@ static int __net_init iptable_security_table_init(struct net *net)
return ret;
}
+static void __net_exit iptable_security_net_pre_exit(struct net *net)
+{
+ if (net->ipv4.iptable_security)
+ ipt_unregister_table_pre_exit(net, net->ipv4.iptable_security,
+ sectbl_ops);
+}
+
static void __net_exit iptable_security_net_exit(struct net *net)
{
if (!net->ipv4.iptable_security)
return;
-
- ipt_unregister_table(net, net->ipv4.iptable_security, sectbl_ops);
+ ipt_unregister_table_exit(net, net->ipv4.iptable_security);
net->ipv4.iptable_security = NULL;
}
static struct pernet_operations iptable_security_net_ops = {
+ .pre_exit = iptable_security_net_pre_exit,
.exit = iptable_security_net_exit,
};
diff --git a/net/ipv4/netfilter/nf_flow_table_ipv4.c b/net/ipv4/netfilter/nf_flow_table_ipv4.c
index e32e41b99f0f..aba65fe90345 100644
--- a/net/ipv4/netfilter/nf_flow_table_ipv4.c
+++ b/net/ipv4/netfilter/nf_flow_table_ipv4.c
@@ -34,3 +34,4 @@ module_exit(nf_flow_ipv4_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NF_FLOWTABLE(AF_INET);
+MODULE_DESCRIPTION("Netfilter flow table support");
diff --git a/net/ipv4/netfilter/nft_dup_ipv4.c b/net/ipv4/netfilter/nft_dup_ipv4.c
index abf89b972094..bcdb37f86a94 100644
--- a/net/ipv4/netfilter/nft_dup_ipv4.c
+++ b/net/ipv4/netfilter/nft_dup_ipv4.c
@@ -107,3 +107,4 @@ module_exit(nft_dup_ipv4_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "dup");
+MODULE_DESCRIPTION("IPv4 nftables packet duplication support");
diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c
index ce294113dbcd..03df986217b7 100644
--- a/net/ipv4/netfilter/nft_fib_ipv4.c
+++ b/net/ipv4/netfilter/nft_fib_ipv4.c
@@ -210,3 +210,4 @@ module_exit(nft_fib4_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
MODULE_ALIAS_NFT_AF_EXPR(2, "fib");
+MODULE_DESCRIPTION("nftables fib / ip route lookup support");
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
index 7e6fd5cde50f..e408f813f5d8 100644
--- a/net/ipv4/netfilter/nft_reject_ipv4.c
+++ b/net/ipv4/netfilter/nft_reject_ipv4.c
@@ -71,3 +71,4 @@ module_exit(nft_reject_ipv4_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "reject");
+MODULE_DESCRIPTION("IPv4 packet rejection for nftables");
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 535427292194..df6fbefe44d4 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -786,6 +786,9 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
inet_sk_flowi_flags(sk), faddr, saddr, 0, 0,
sk->sk_uid);
+ fl4.fl4_icmp_type = user_icmph.type;
+ fl4.fl4_icmp_code = user_icmph.code;
+
security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
rt = ip_route_output_flow(net, &fl4, sk);
if (IS_ERR(rt)) {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 1d7076b78e63..a01efa062f6b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2027,7 +2027,7 @@ int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
const struct sk_buff *hint)
{
struct in_device *in_dev = __in_dev_get_rcu(dev);
- struct rtable *rt = (struct rtable *)hint;
+ struct rtable *rt = skb_rtable(hint);
struct net *net = dev_net(dev);
int err = -EINVAL;
u32 tag = 0;
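
[Note] The hint argument is a struct sk_buff, so casting it straight to struct rtable dereferenced unrelated memory; what was wanted is the route already attached to the hint skb. For reference, skb_rtable() is essentially (paraphrasing include/net/route.h):

	static inline struct rtable *skb_rtable(const struct sk_buff *skb)
	{
		return (struct rtable *)skb_dst(skb);
	}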
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 810cc164f795..6f0caf9a866d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2691,6 +2691,9 @@ int tcp_disconnect(struct sock *sk, int flags)
tp->window_clamp = 0;
tp->delivered = 0;
tp->delivered_ce = 0;
+ if (icsk->icsk_ca_ops->release)
+ icsk->icsk_ca_ops->release(sk);
+ memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
tcp_set_ca_state(sk, TCP_CA_Open);
tp->is_sack_reneg = 0;
tcp_clear_retrans(tp);
@@ -3246,10 +3249,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
#ifdef CONFIG_TCP_MD5SIG
case TCP_MD5SIG:
case TCP_MD5SIG_EXT:
- if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
- err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
- else
- err = -EINVAL;
+ err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
break;
#endif
case TCP_USER_TIMEOUT:
@@ -4033,11 +4033,14 @@ EXPORT_SYMBOL(tcp_md5_hash_skb_data);
int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
{
+ u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */
struct scatterlist sg;
- sg_init_one(&sg, key->key, key->keylen);
- ahash_request_set_crypt(hp->md5_req, &sg, NULL, key->keylen);
- return crypto_ahash_update(hp->md5_req);
+ sg_init_one(&sg, key->key, keylen);
+ ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen);
+
+ /* We use data_race() because tcp_md5_do_add() might change key->key under us */
+ return data_race(crypto_ahash_update(hp->md5_req));
}
EXPORT_SYMBOL(tcp_md5_hash_key);
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 3172e31987be..62878cf26d9c 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -197,7 +197,7 @@ static void tcp_reinit_congestion_control(struct sock *sk,
icsk->icsk_ca_setsockopt = 1;
memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
- if (sk->sk_state != TCP_CLOSE)
+ if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
tcp_init_congestion_control(sk);
}
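
[Note] The `(1 << sk->sk_state) & mask` idiom tests membership of the state in a set with a single branch; the fix adds TCP_LISTEN to the set of states for which congestion control must not be (re)initialized. A runnable illustration, using the state values from include/net/tcp_states.h:

	#include <stdio.h>

	enum { TCP_ESTABLISHED = 1, TCP_CLOSE = 7, TCP_LISTEN = 10 };
	#define TCPF_CLOSE  (1 << TCP_CLOSE)
	#define TCPF_LISTEN (1 << TCP_LISTEN)

	static int needs_ca_init(int state)
	{
		return !((1 << state) & (TCPF_CLOSE | TCPF_LISTEN));
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       needs_ca_init(TCP_ESTABLISHED),   /* 1 */
		       needs_ca_init(TCP_CLOSE),         /* 0 */
		       needs_ca_init(TCP_LISTEN));       /* 0 */
		return 0;
	}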
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 8f8eefd3a3ce..c7bf5b26bf0c 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -432,10 +432,9 @@ static void hystart_update(struct sock *sk, u32 delay)
if (hystart_detect & HYSTART_DELAY) {
/* obtain the minimum delay of more than sampling packets */
+ if (ca->curr_rtt > delay)
+ ca->curr_rtt = delay;
if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
- if (ca->curr_rtt > delay)
- ca->curr_rtt = delay;
-
ca->sample_cnt++;
} else {
if (ca->curr_rtt > ca->delay_min +
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 83330a6cb242..518f04355fbf 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -261,7 +261,8 @@ static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
* cwnd may be very low (even just 1 packet), so we should ACK
* immediately.
*/
- inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
+ if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq)
+ inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
}
}
@@ -3487,10 +3488,8 @@ static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
}
}
-/* This routine deals with acks during a TLP episode.
- * We mark the end of a TLP episode on receiving TLP dupack or when
- * ack is after tlp_high_seq.
- * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe.
+/* This routine deals with acks during a TLP episode and ends an episode by
+ * resetting tlp_high_seq. Ref: TLP algorithm in draft-ietf-tcpm-rack
*/
static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
{
@@ -3499,7 +3498,10 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
if (before(ack, tp->tlp_high_seq))
return;
- if (flag & FLAG_DSACKING_ACK) {
+ if (!tp->tlp_retrans) {
+ /* TLP of new data has been acknowledged */
+ tp->tlp_high_seq = 0;
+ } else if (flag & FLAG_DSACKING_ACK) {
/* This DSACK means original and TLP probe arrived; no loss */
tp->tlp_high_seq = 0;
} else if (after(ack, tp->tlp_high_seq)) {
@@ -3665,6 +3667,15 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
tcp_in_ack_event(sk, ack_ev_flags);
}
+ /* This is a deviation from RFC3168 since it states that:
+ * "When the TCP data sender is ready to set the CWR bit after reducing
+ * the congestion window, it SHOULD set the CWR bit only on the first
+ * new data packet that it transmits."
+ * We accept CWR on pure ACKs to be more robust
+ * with widely-deployed TCP implementations that do this.
+ */
+ tcp_ecn_accept_cwr(sk, skb);
+
/* We passed data and got it acked, remove any soft error
* log. Something worked...
*/
@@ -4572,6 +4583,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
+ sk->sk_data_ready(sk);
tcp_drop(sk, skb);
return;
}
@@ -4605,7 +4617,11 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
skb, &fragstolen)) {
coalesce_done:
- tcp_grow_window(sk, skb);
+ /* For non sack flows, do not grow window to force DUPACK
+ * and trigger fast retransmit.
+ */
+ if (tcp_is_sack(tp))
+ tcp_grow_window(sk, skb);
kfree_skb_partial(skb, fragstolen);
skb = NULL;
goto add_sack;
@@ -4689,7 +4705,11 @@ add_sack:
tcp_sack_new_ofo_skb(sk, seq, end_seq);
end:
if (skb) {
- tcp_grow_window(sk, skb);
+ /* For non sack flows, do not grow window to force DUPACK
+ * and trigger fast retransmit.
+ */
+ if (tcp_is_sack(tp))
+ tcp_grow_window(sk, skb);
skb_condense(skb);
skb_set_owner_r(skb, sk);
}
@@ -4792,8 +4812,6 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
skb_dst_drop(skb);
__skb_pull(skb, tcp_hdr(skb)->doff * 4);
- tcp_ecn_accept_cwr(sk, skb);
-
tp->rx_opt.dsack = 0;
/* Queue data for delivery to the user.
@@ -4812,6 +4830,7 @@ queue_and_out:
sk_forced_mem_schedule(sk, skb->truesize);
else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
+ sk->sk_data_ready(sk);
goto drop;
}
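
[Note] The TLP hunk leans on the wrap-safe 32-bit sequence comparisons before()/after(). Their definitions are tiny but easy to get wrong, so here is a runnable refresher matching include/net/tcp.h:

	#include <stdint.h>
	#include <stdio.h>

	static int before(uint32_t seq1, uint32_t seq2)
	{
		return (int32_t)(seq1 - seq2) < 0;   /* modulo-2^32 compare */
	}
	#define after(seq2, seq1) before(seq1, seq2)

	int main(void)
	{
		/* 5 is "after" 0xfffffffb even though it is numerically
		 * smaller, because the sequence space wrapped.
		 */
		printf("%d\n", after(5u, 0xfffffffbu));   /* prints 1 */
		return 0;
	}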
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ad6435ba6d72..04bfcbbfee83 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1111,9 +1111,21 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
if (key) {
- /* Pre-existing entry - just update that one. */
- memcpy(key->key, newkey, newkeylen);
- key->keylen = newkeylen;
+ /* Pre-existing entry - just update that one.
+ * Note that the key might be used concurrently.
+ * data_race() is telling kcsan that we do not care about
+ * key mismatches, since changing the MD5 key on live flows
+ * can lead to packet drops.
+ */
+ data_race(memcpy(key->key, newkey, newkeylen));
+
+ /* Pairs with READ_ONCE() in tcp_md5_hash_key().
+ * Also note that a reader could catch the new key->keylen value
+ * but the old key->key[]; this is why we use __GFP_ZERO
+ * at sock_kmalloc() time below.
+ */
+ WRITE_ONCE(key->keylen, newkeylen);
+
return 0;
}
@@ -1129,7 +1141,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
rcu_assign_pointer(tp->md5sig_info, md5sig);
}
- key = sock_kmalloc(sk, sizeof(*key), gfp);
+ key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO);
if (!key)
return -ENOMEM;
if (!tcp_alloc_md5sig_pool()) {
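
[Note] This is the writer side of the pairing used by tcp_md5_hash_key() earlier in the series: the key bytes are copied under a data_race() annotation, then the new length is published with WRITE_ONCE(). Allocating the key with __GFP_ZERO bounds what a racing reader can observe; sketched as:

	/* writer                             reader
	 * ------                             ------
	 * data_race(memcpy(key->key,         len = READ_ONCE(key->keylen);
	 *                  newkey, n));      hash(key->key, len);
	 * WRITE_ONCE(key->keylen, n);
	 *
	 * If the reader sees the new keylen before the new bytes, the
	 * zero-initialized tail yields a wrong MD5 (a dropped packet),
	 * never an out-of-bounds or uninitialized read.
	 */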
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a50e1990a845..0bc05d68cd74 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -700,7 +700,8 @@ static unsigned int tcp_synack_options(const struct sock *sk,
unsigned int mss, struct sk_buff *skb,
struct tcp_out_options *opts,
const struct tcp_md5sig_key *md5,
- struct tcp_fastopen_cookie *foc)
+ struct tcp_fastopen_cookie *foc,
+ enum tcp_synack_type synack_type)
{
struct inet_request_sock *ireq = inet_rsk(req);
unsigned int remaining = MAX_TCP_OPTION_SPACE;
@@ -715,7 +716,8 @@ static unsigned int tcp_synack_options(const struct sock *sk,
* rather than TS in order to fit in better with old,
* buggy kernels, but that was deemed to be unnecessary.
*/
- ireq->tstamp_ok &= !ireq->sack_ok;
+ if (synack_type != TCP_SYNACK_COOKIE)
+ ireq->tstamp_ok &= !ireq->sack_ok;
}
#endif
@@ -2622,6 +2624,11 @@ void tcp_send_loss_probe(struct sock *sk)
int pcount;
int mss = tcp_current_mss(sk);
+ /* At most one outstanding TLP */
+ if (tp->tlp_high_seq)
+ goto rearm_timer;
+
+ tp->tlp_retrans = 0;
skb = tcp_send_head(sk);
if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
pcount = tp->packets_out;
@@ -2639,10 +2646,6 @@ void tcp_send_loss_probe(struct sock *sk)
return;
}
- /* At most one outstanding TLP retransmission. */
- if (tp->tlp_high_seq)
- goto rearm_timer;
-
if (skb_still_in_host_queue(sk, skb))
goto rearm_timer;
@@ -2664,10 +2667,12 @@ void tcp_send_loss_probe(struct sock *sk)
if (__tcp_retransmit_skb(sk, skb, 1))
goto rearm_timer;
+ tp->tlp_retrans = 1;
+
+probe_sent:
/* Record snd_nxt for loss detection. */
tp->tlp_high_seq = tp->snd_nxt;
-probe_sent:
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
/* Reset s.t. tcp_rearm_rto will restart timer from now */
inet_csk(sk)->icsk_pending = 0;
@@ -3394,7 +3399,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
#endif
skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
- foc) + sizeof(*th);
+ foc, synack_type) + sizeof(*th);
skb_push(skb, tcp_header_size);
skb_reset_transport_header(skb);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 1b7ebbcae497..4077d589b72e 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -416,7 +416,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
struct udp_hslot *hslot2,
struct sk_buff *skb)
{
- struct sock *sk, *result;
+ struct sock *sk, *result, *reuseport_result;
int score, badness;
u32 hash = 0;
@@ -426,17 +426,20 @@ static struct sock *udp4_lib_lookup2(struct net *net,
score = compute_score(sk, net, saddr, sport,
daddr, hnum, dif, sdif);
if (score > badness) {
+ reuseport_result = NULL;
+
if (sk->sk_reuseport &&
sk->sk_state != TCP_ESTABLISHED) {
hash = udp_ehashfn(net, daddr, hnum,
saddr, sport);
- result = reuseport_select_sock(sk, hash, skb,
- sizeof(struct udphdr));
- if (result && !reuseport_has_conns(sk, false))
- return result;
+ reuseport_result = reuseport_select_sock(sk, hash, skb,
+ sizeof(struct udphdr));
+ if (reuseport_result && !reuseport_has_conns(sk, false))
+ return reuseport_result;
}
+
+ result = reuseport_result ? : sk;
badness = score;
- result = sk;
}
}
return result;
@@ -2051,7 +2054,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
/*
* UDP-Lite specific tests, ignored on UDP sockets
*/
- if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
+ if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
/*
* MIB statistics other than incrementing the error count are
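
[Note] Two fixes in this file. In the lookup: when the reuseport group contains connected sockets (reuseport_has_conns()), the hash-based group selection cannot be returned immediately, since a connected socket later in the chain may score higher; but if this candidate does win, delivery should go to the socket the reuseport hash picked, whereas the old code fell through to sk itself. The new `reuseport_result ?: sk` keeps the group's choice. The `?:` form is GNU C's conditional with omitted middle operand; a runnable reminder:

	#include <stdio.h>

	/* a ?: b evaluates to a if a is nonzero, else b, with a
	 * evaluated exactly once (GNU C extension).
	 */
	static int pick(int preferred, int fallback)
	{
		return preferred ? preferred : fallback;
	}

	int main(void)
	{
		printf("%d %d\n", pick(7, 3), pick(0, 3));   /* 7 3 */
		return 0;
	}

In the UDP-Lite hunk, is_udplite is a 0/1 boolean, so ANDing it with the UDPLITE_RECV_CC flag bit was always false and the coverage checks were silently skipped; testing up->pcflag restores them. Both fixes are mirrored in net/ipv6/udp.c below.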
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 992cf45fb4f6..f4f19e89af5e 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -49,29 +49,31 @@ config IPV6_OPTIMISTIC_DAD
config INET6_AH
tristate "IPv6: AH transformation"
- select XFRM_ALGO
- select CRYPTO
- select CRYPTO_HMAC
- select CRYPTO_MD5
- select CRYPTO_SHA1
+ select XFRM_AH
help
- Support for IPsec AH.
+ Support for IPsec AH (Authentication Header).
+
+ AH can be used with various authentication algorithms. Besides
+ enabling AH support itself, this option enables the generic
+ implementations of the algorithms that RFC 8221 lists as MUST be
+ implemented. If you need any other algorithms, you'll need to enable
+ them in the crypto API. You should also enable accelerated
+ implementations of any needed algorithms when available.
If unsure, say Y.
config INET6_ESP
tristate "IPv6: ESP transformation"
- select XFRM_ALGO
- select CRYPTO
- select CRYPTO_AUTHENC
- select CRYPTO_HMAC
- select CRYPTO_MD5
- select CRYPTO_CBC
- select CRYPTO_SHA1
- select CRYPTO_DES
- select CRYPTO_ECHAINIV
+ select XFRM_ESP
help
- Support for IPsec ESP.
+ Support for IPsec ESP (Encapsulating Security Payload).
+
+ ESP can be used with various encryption and authentication algorithms.
+ Besides enabling ESP support itself, this option enables the generic
+ implementations of the algorithms that RFC 8221 lists as MUST be
+ implemented. If you need any other algorithms, you'll need to enable
+ them in the crypto API. You should also enable accelerated
+ implementations of any needed algorithms when available.
If unsure, say Y.
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 55addea1948f..1ca516fb30e1 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -395,3 +395,4 @@ module_exit(esp6_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET6, XFRM_PROTO_ESP);
+MODULE_DESCRIPTION("IPV6 GSO/GRO offload support");
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c
index 091f94184dc1..430518ae26fa 100644
--- a/net/ipv6/fou6.c
+++ b/net/ipv6/fou6.c
@@ -224,3 +224,4 @@ module_init(fou6_init);
module_exit(fou6_fini);
MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Foo over UDP (IPv6)");
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index fc5000370030..9df8737ae0d3 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -566,7 +566,6 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, NULL);
security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
- sk->sk_mark = mark;
np = inet6_sk(sk);
if (!icmpv6_xrlim_allow(sk, type, &fl6))
@@ -583,6 +582,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
fl6.flowi6_oif = np->ucast_oif;
ipcm6_init_sk(&ipc6, np);
+ ipc6.sockc.mark = mark;
fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
dst = icmpv6_route_lookup(net, skb, sk, &fl6);
@@ -751,7 +751,6 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
sk = icmpv6_xmit_lock(net);
if (!sk)
goto out_bh_enable;
- sk->sk_mark = mark;
np = inet6_sk(sk);
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
@@ -779,6 +778,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
ipcm6_init_sk(&ipc6, np);
ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
ipc6.tclass = ipv6_get_dsfield(ipv6_hdr(skb));
+ ipc6.sockc.mark = mark;
if (ip6_append_data(sk, icmpv6_getfrag, &msg,
skb->len + sizeof(struct icmp6hdr),
diff --git a/net/ipv6/ila/ila_main.c b/net/ipv6/ila/ila_main.c
index 257d2b681246..36c58aa257e8 100644
--- a/net/ipv6/ila/ila_main.c
+++ b/net/ipv6/ila/ila_main.c
@@ -120,3 +120,4 @@ module_init(ila_init);
module_exit(ila_fini);
MODULE_AUTHOR("Tom Herbert <tom@herbertland.com>");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IPv6: Identifier Locator Addressing (ILA)");
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 781ca8c07a0d..3a57fb9ce049 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -127,6 +127,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
gre_proto == htons(ETH_P_ERSPAN2)) ?
ARPHRD_ETHER : ARPHRD_IP6GRE;
int score, cand_score = 4;
+ struct net_device *ndev;
for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
if (!ipv6_addr_equal(local, &t->parms.laddr) ||
@@ -238,9 +239,9 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
if (t && t->dev->flags & IFF_UP)
return t;
- dev = ign->fb_tunnel_dev;
- if (dev && dev->flags & IFF_UP)
- return netdev_priv(dev);
+ ndev = READ_ONCE(ign->fb_tunnel_dev);
+ if (ndev && ndev->flags & IFF_UP)
+ return netdev_priv(ndev);
return NULL;
}
@@ -413,6 +414,8 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
ip6gre_tunnel_unlink_md(ign, t);
ip6gre_tunnel_unlink(ign, t);
+ if (ign->fb_tunnel_dev == dev)
+ WRITE_ONCE(ign->fb_tunnel_dev, NULL);
dst_cache_reset(&t->dst_cache);
dev_put(dev);
}
@@ -1559,17 +1562,18 @@ static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
static int __net_init ip6gre_init_net(struct net *net)
{
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+ struct net_device *ndev;
int err;
if (!net_has_fallback_tunnels(net))
return 0;
- ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
- NET_NAME_UNKNOWN,
- ip6gre_tunnel_setup);
- if (!ign->fb_tunnel_dev) {
+ ndev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
+ NET_NAME_UNKNOWN, ip6gre_tunnel_setup);
+ if (!ndev) {
err = -ENOMEM;
goto err_alloc_dev;
}
+ ign->fb_tunnel_dev = ndev;
dev_net_set(ign->fb_tunnel_dev, net);
/* FB netdevice is special: we have one, and only one per netns.
* Allowing to move it to another netns is clearly unsafe.
@@ -1589,7 +1593,7 @@ static int __net_init ip6gre_init_net(struct net *net)
return 0;
err_reg_dev:
- free_netdev(ign->fb_tunnel_dev);
+ free_netdev(ndev);
err_alloc_dev:
return err;
}
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 821d96c720b9..a18c378ca5f4 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1846,6 +1846,7 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
static void ip6_tnl_dev_setup(struct net_device *dev)
{
dev->netdev_ops = &ip6_tnl_netdev_ops;
+ dev->header_ops = &ip_tunnel_header_ops;
dev->needs_free_netdev = true;
dev->priv_destructor = ip6_dev_free;
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 1147f647b9a0..0d964160a9dd 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -905,6 +905,7 @@ static const struct net_device_ops vti6_netdev_ops = {
static void vti6_dev_setup(struct net_device *dev)
{
dev->netdev_ops = &vti6_netdev_ops;
+ dev->header_ops = &ip_tunnel_header_ops;
dev->needs_free_netdev = true;
dev->priv_destructor = vti6_dev_free;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 7e12d2114158..8cd2782a31e4 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2615,6 +2615,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
idev->mc_list = i->next;
write_unlock_bh(&idev->lock);
+ ip6_mc_clear_src(i);
ma_put(i);
write_lock_bh(&idev->lock);
}
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index e27393498ecb..e96a431549bc 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1807,11 +1807,22 @@ out_free:
return ret;
}
+void ip6t_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+ const struct nf_hook_ops *ops)
+{
+ nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+}
+
+void ip6t_unregister_table_exit(struct net *net, struct xt_table *table)
+{
+ __ip6t_unregister_table(net, table);
+}
+
void ip6t_unregister_table(struct net *net, struct xt_table *table,
const struct nf_hook_ops *ops)
{
if (ops)
- nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+ ip6t_unregister_table_pre_exit(net, table, ops);
__ip6t_unregister_table(net, table);
}
@@ -1969,6 +1980,8 @@ static void __exit ip6_tables_fini(void)
EXPORT_SYMBOL(ip6t_register_table);
EXPORT_SYMBOL(ip6t_unregister_table);
+EXPORT_SYMBOL(ip6t_unregister_table_pre_exit);
+EXPORT_SYMBOL(ip6t_unregister_table_exit);
EXPORT_SYMBOL(ip6t_do_table);
module_init(ip6_tables_init);
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index fd1f52a21bf1..d51d0c3e5fe9 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -121,3 +121,4 @@ module_exit(synproxy_tg6_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("Intercept IPv6 TCP connections and establish them using syncookies");
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index 32667f5d5a33..88337b51ffbf 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -73,16 +73,24 @@ static int __net_init ip6table_filter_net_init(struct net *net)
return 0;
}
+static void __net_exit ip6table_filter_net_pre_exit(struct net *net)
+{
+ if (net->ipv6.ip6table_filter)
+ ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_filter,
+ filter_ops);
+}
+
static void __net_exit ip6table_filter_net_exit(struct net *net)
{
if (!net->ipv6.ip6table_filter)
return;
- ip6t_unregister_table(net, net->ipv6.ip6table_filter, filter_ops);
+ ip6t_unregister_table_exit(net, net->ipv6.ip6table_filter);
net->ipv6.ip6table_filter = NULL;
}
static struct pernet_operations ip6table_filter_net_ops = {
.init = ip6table_filter_net_init,
+ .pre_exit = ip6table_filter_net_pre_exit,
.exit = ip6table_filter_net_exit,
};
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 070afb97fa2b..1a2748611e00 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -93,16 +93,24 @@ static int __net_init ip6table_mangle_table_init(struct net *net)
return ret;
}
+static void __net_exit ip6table_mangle_net_pre_exit(struct net *net)
+{
+ if (net->ipv6.ip6table_mangle)
+ ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_mangle,
+ mangle_ops);
+}
+
static void __net_exit ip6table_mangle_net_exit(struct net *net)
{
if (!net->ipv6.ip6table_mangle)
return;
- ip6t_unregister_table(net, net->ipv6.ip6table_mangle, mangle_ops);
+ ip6t_unregister_table_exit(net, net->ipv6.ip6table_mangle);
net->ipv6.ip6table_mangle = NULL;
}
static struct pernet_operations ip6table_mangle_net_ops = {
+ .pre_exit = ip6table_mangle_net_pre_exit,
.exit = ip6table_mangle_net_exit,
};
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index 0f4875952efc..0a23265e3caa 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -114,16 +114,22 @@ static int __net_init ip6table_nat_table_init(struct net *net)
return ret;
}
+static void __net_exit ip6table_nat_net_pre_exit(struct net *net)
+{
+ if (net->ipv6.ip6table_nat)
+ ip6t_nat_unregister_lookups(net);
+}
+
static void __net_exit ip6table_nat_net_exit(struct net *net)
{
if (!net->ipv6.ip6table_nat)
return;
- ip6t_nat_unregister_lookups(net);
- ip6t_unregister_table(net, net->ipv6.ip6table_nat, NULL);
+ ip6t_unregister_table_exit(net, net->ipv6.ip6table_nat);
net->ipv6.ip6table_nat = NULL;
}
static struct pernet_operations ip6table_nat_net_ops = {
+ .pre_exit = ip6table_nat_net_pre_exit,
.exit = ip6table_nat_net_exit,
};
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index a22100b1cf2c..8f9e742226f7 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -66,15 +66,23 @@ static int __net_init ip6table_raw_table_init(struct net *net)
return ret;
}
+static void __net_exit ip6table_raw_net_pre_exit(struct net *net)
+{
+ if (net->ipv6.ip6table_raw)
+ ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_raw,
+ rawtable_ops);
+}
+
static void __net_exit ip6table_raw_net_exit(struct net *net)
{
if (!net->ipv6.ip6table_raw)
return;
- ip6t_unregister_table(net, net->ipv6.ip6table_raw, rawtable_ops);
+ ip6t_unregister_table_exit(net, net->ipv6.ip6table_raw);
net->ipv6.ip6table_raw = NULL;
}
static struct pernet_operations ip6table_raw_net_ops = {
+ .pre_exit = ip6table_raw_net_pre_exit,
.exit = ip6table_raw_net_exit,
};
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c
index a74335fe2bd9..5e8c48fed032 100644
--- a/net/ipv6/netfilter/ip6table_security.c
+++ b/net/ipv6/netfilter/ip6table_security.c
@@ -61,15 +61,23 @@ static int __net_init ip6table_security_table_init(struct net *net)
return ret;
}
+static void __net_exit ip6table_security_net_pre_exit(struct net *net)
+{
+ if (net->ipv6.ip6table_security)
+ ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_security,
+ sectbl_ops);
+}
+
static void __net_exit ip6table_security_net_exit(struct net *net)
{
if (!net->ipv6.ip6table_security)
return;
- ip6t_unregister_table(net, net->ipv6.ip6table_security, sectbl_ops);
+ ip6t_unregister_table_exit(net, net->ipv6.ip6table_security);
net->ipv6.ip6table_security = NULL;
}
static struct pernet_operations ip6table_security_net_ops = {
+ .pre_exit = ip6table_security_net_pre_exit,
.exit = ip6table_security_net_exit,
};
diff --git a/net/ipv6/netfilter/nf_flow_table_ipv6.c b/net/ipv6/netfilter/nf_flow_table_ipv6.c
index a8566ee12e83..667b8af2546a 100644
--- a/net/ipv6/netfilter/nf_flow_table_ipv6.c
+++ b/net/ipv6/netfilter/nf_flow_table_ipv6.c
@@ -35,3 +35,4 @@ module_exit(nf_flow_ipv6_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NF_FLOWTABLE(AF_INET6);
+MODULE_DESCRIPTION("Netfilter flow table IPv6 module");
diff --git a/net/ipv6/netfilter/nft_dup_ipv6.c b/net/ipv6/netfilter/nft_dup_ipv6.c
index 2af32200507d..8b5193efb1f1 100644
--- a/net/ipv6/netfilter/nft_dup_ipv6.c
+++ b/net/ipv6/netfilter/nft_dup_ipv6.c
@@ -105,3 +105,4 @@ module_exit(nft_dup_ipv6_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "dup");
+MODULE_DESCRIPTION("IPv6 nftables packet duplication support");
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
index 7ece86afd079..e204163c7036 100644
--- a/net/ipv6/netfilter/nft_fib_ipv6.c
+++ b/net/ipv6/netfilter/nft_fib_ipv6.c
@@ -255,3 +255,4 @@ module_exit(nft_fib6_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
MODULE_ALIAS_NFT_AF_EXPR(10, "fib");
+MODULE_DESCRIPTION("nftables fib / ipv6 route lookup support");
diff --git a/net/ipv6/netfilter/nft_reject_ipv6.c b/net/ipv6/netfilter/nft_reject_ipv6.c
index 680a28ce29fd..c1098a1968e1 100644
--- a/net/ipv6/netfilter/nft_reject_ipv6.c
+++ b/net/ipv6/netfilter/nft_reject_ipv6.c
@@ -72,3 +72,4 @@ module_exit(nft_reject_ipv6_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "reject");
+MODULE_DESCRIPTION("IPv6 packet rejection for nftables");
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 82cbb46a2a4f..f3279810d765 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -431,9 +431,12 @@ void fib6_select_path(const struct net *net, struct fib6_result *res,
struct fib6_info *sibling, *next_sibling;
struct fib6_info *match = res->f6i;
- if ((!match->fib6_nsiblings && !match->nh) || have_oif_match)
+ if (!match->nh && (!match->fib6_nsiblings || have_oif_match))
goto out;
+ if (match->nh && have_oif_match && res->nh)
+ return;
+
/* We might have already computed the hash for ICMPv6 errors. In such
* case it will always be non-zero. Otherwise now is the time to do it.
*/
@@ -3402,7 +3405,7 @@ static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
if ((flags & RTF_REJECT) ||
(dev && (dev->flags & IFF_LOOPBACK) &&
!(addr_type & IPV6_ADDR_LOOPBACK) &&
- !(flags & RTF_LOCAL)))
+ !(flags & (RTF_ANYCAST | RTF_LOCAL))))
return true;
return false;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 1fbb4dfbb191..5e2c34c0ac97 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1421,6 +1421,7 @@ static void ipip6_tunnel_setup(struct net_device *dev)
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
dev->netdev_ops = &ipip6_netdev_ops;
+ dev->header_ops = &ip_tunnel_header_ops;
dev->needs_free_netdev = true;
dev->priv_destructor = ipip6_dev_free;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 7d4151747340..a8d74f44056a 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -148,7 +148,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
int dif, int sdif, struct udp_hslot *hslot2,
struct sk_buff *skb)
{
- struct sock *sk, *result;
+ struct sock *sk, *result, *reuseport_result;
int score, badness;
u32 hash = 0;
@@ -158,17 +158,20 @@ static struct sock *udp6_lib_lookup2(struct net *net,
score = compute_score(sk, net, saddr, sport,
daddr, hnum, dif, sdif);
if (score > badness) {
+ reuseport_result = NULL;
+
if (sk->sk_reuseport &&
sk->sk_state != TCP_ESTABLISHED) {
hash = udp6_ehashfn(net, daddr, hnum,
saddr, sport);
- result = reuseport_select_sock(sk, hash, skb,
- sizeof(struct udphdr));
- if (result && !reuseport_has_conns(sk, false))
- return result;
+ reuseport_result = reuseport_select_sock(sk, hash, skb,
+ sizeof(struct udphdr));
+ if (reuseport_result && !reuseport_has_conns(sk, false))
+ return reuseport_result;
}
- result = sk;
+
+ result = reuseport_result ? : sk;
badness = score;
}
}
@@ -643,7 +646,7 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
/*
* UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
*/
- if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
+ if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
if (up->pcrlen == 0) { /* full coverage was set */
net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 6d7ef78c88af..6434d17e6e8e 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1028,6 +1028,7 @@ static void l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
/* Queue the packet to IP for output */
skb->ignore_df = 1;
+ skb_dst_drop(skb);
#if IS_ENABLED(CONFIG_IPV6)
if (l2tp_sk_is_v6(tunnel->sock))
error = inet6_csk_xmit(tunnel->sock, skb, NULL);
@@ -1099,10 +1100,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
goto out_unlock;
}
- /* Get routing info from the tunnel socket */
- skb_dst_drop(skb);
- skb_dst_set(skb, sk_dst_check(sk, 0));
-
inet = inet_sk(sk);
fl = &inet->cork.fl;
switch (tunnel->encap) {
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 54fb8d452a7b..6e53e43c1907 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -273,6 +273,10 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
if (!sock_flag(sk, SOCK_ZAPPED))
goto out;
+ if (!addr->sllc_arphrd)
+ addr->sllc_arphrd = ARPHRD_ETHER;
+ if (addr->sllc_arphrd != ARPHRD_ETHER)
+ goto out;
rc = -ENODEV;
if (sk->sk_bound_dev_if) {
llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
@@ -328,7 +332,9 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
goto out;
rc = -EAFNOSUPPORT;
- if (unlikely(addr->sllc_family != AF_LLC))
+ if (!addr->sllc_arphrd)
+ addr->sllc_arphrd = ARPHRD_ETHER;
+ if (unlikely(addr->sllc_family != AF_LLC || addr->sllc_arphrd != ARPHRD_ETHER))
goto out;
dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
rc = -ENODEV;
@@ -336,8 +342,6 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
if (sk->sk_bound_dev_if) {
llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if);
if (llc->dev) {
- if (!addr->sllc_arphrd)
- addr->sllc_arphrd = llc->dev->type;
if (is_zero_ether_addr(addr->sllc_mac))
memcpy(addr->sllc_mac, llc->dev->dev_addr,
IFHWADDRLEN);
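
[Note] Both bind paths now canonicalize a zero sllc_arphrd to ARPHRD_ETHER and reject anything else, instead of silently adopting the device type after the fact. From userspace, an accepted bind looks like this (illustrative; error handling elided):

	#include <string.h>
	#include <sys/socket.h>
	#include <linux/llc.h>
	#include <linux/if_arp.h>

	static int bind_llc_sap(int fd, unsigned char sap)
	{
		struct sockaddr_llc a;

		memset(&a, 0, sizeof(a));
		a.sllc_family = AF_LLC;
		a.sllc_arphrd = ARPHRD_ETHER;  /* 0 also accepted; others fail */
		a.sllc_sap = sap;
		return bind(fd, (struct sockaddr *)&a, sizeof(a));
	}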
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index aa5150929996..02cde0fd08fe 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -1105,11 +1105,8 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
ttl, lifetime, 0, ifmsh->preq_id++, sdata);
spin_lock_bh(&mpath->state_lock);
- if (mpath->flags & MESH_PATH_DELETED) {
- spin_unlock_bh(&mpath->state_lock);
- goto enddiscovery;
- }
- mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
+ if (!(mpath->flags & MESH_PATH_DELETED))
+ mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
spin_unlock_bh(&mpath->state_lock);
enddiscovery:
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index a88ab6fb16f2..5c5af4b5fc08 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2396,6 +2396,7 @@ static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
{
+ struct ieee80211_hdr *hdr = (void *)rx->skb->data;
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
@@ -2406,6 +2407,31 @@ static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
if (status->flag & RX_FLAG_DECRYPTED)
return 0;
+ /* check mesh EAPOL frames first */
+ if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) &&
+ ieee80211_is_data(fc))) {
+ struct ieee80211s_hdr *mesh_hdr;
+ u16 hdr_len = ieee80211_hdrlen(fc);
+ u16 ethertype_offset;
+ __be16 ethertype;
+
+ if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr))
+ goto drop_check;
+
+ /* make sure fixed part of mesh header is there, also checks skb len */
+ if (!pskb_may_pull(rx->skb, hdr_len + 6))
+ goto drop_check;
+
+ mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len);
+ ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) +
+ sizeof(rfc1042_header);
+
+ if (skb_copy_bits(rx->skb, ethertype_offset, &ethertype, 2) == 0 &&
+ ethertype == rx->sdata->control_port_protocol)
+ return 0;
+ }
+
+drop_check:
/* Drop unencrypted frames if key is set. */
if (unlikely(!ieee80211_has_protected(fc) &&
!ieee80211_is_any_nullfunc(fc) &&
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 7b1bacac39c6..cbc40b358ba2 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -639,11 +639,23 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
struct ieee80211_sub_if_data *sdata;
struct ieee80211_hdr *hdr = (void *)skb->data;
+ __be16 ethertype = 0;
+
+ if (skb->len >= ETH_HLEN && skb->protocol == cpu_to_be16(ETH_P_802_3))
+ skb_copy_bits(skb, 2 * ETH_ALEN, &ethertype, ETH_TLEN);
rcu_read_lock();
sdata = ieee80211_sdata_from_skb(local, skb);
if (sdata) {
- if (ieee80211_is_any_nullfunc(hdr->frame_control))
+ if (ethertype == sdata->control_port_protocol ||
+ ethertype == cpu_to_be16(ETH_P_PREAUTH))
+ cfg80211_control_port_tx_status(&sdata->wdev,
+ cookie,
+ skb->data,
+ skb->len,
+ acked,
+ GFP_ATOMIC);
+ else if (ieee80211_is_any_nullfunc(hdr->frame_control))
cfg80211_probe_status(sdata->dev, hdr->addr1,
cookie, acked,
info->status.ack_signal,
@@ -654,12 +666,8 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
skb->data, skb->len,
acked, GFP_ATOMIC);
else
- cfg80211_control_port_tx_status(&sdata->wdev,
- cookie,
- skb->data,
- skb->len,
- acked,
- GFP_ATOMIC);
+ pr_warn("Unknown status report in ack skb\n");
+
}
rcu_read_unlock();
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index e9ce658141f5..1a2941e5244f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -3996,6 +3996,9 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
skb_list_walk_safe(skb, skb, next) {
skb_mark_not_on_list(skb);
+ if (skb->protocol == sdata->control_port_protocol)
+ ctrl_flags |= IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;
+
skb = ieee80211_build_hdr(sdata, skb, info_flags,
sta, ctrl_flags, cookie);
if (IS_ERR(skb)) {
@@ -4206,7 +4209,7 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
(!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER)))
ra = sdata->u.mgd.bssid;
- if (!is_valid_ether_addr(ra))
+ if (is_zero_ether_addr(ra))
goto out_free;
multicast = is_multicast_ether_addr(ra);
@@ -5371,7 +5374,8 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
return -EINVAL;
if (proto == sdata->control_port_protocol)
- ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
+ ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO |
+ IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;
if (unencrypted)
flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 490b92534afc..8f940be42f98 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -336,9 +336,7 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
*/
subflow->snd_isn = TCP_SKB_CB(skb)->end_seq;
if (subflow->request_mptcp) {
- pr_debug("local_key=%llu", subflow->local_key);
opts->suboptions = OPTION_MPTCP_MPC_SYN;
- opts->sndr_key = subflow->local_key;
*size = TCPOLEN_MPTCP_MPC_SYN;
return true;
} else if (subflow->request_join) {
@@ -451,9 +449,9 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
}
static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow,
- struct mptcp_ext *ext)
+ struct sk_buff *skb, struct mptcp_ext *ext)
{
- if (!ext->use_map) {
+ if (!ext->use_map || !skb->len) {
/* RFC6824 requires a DSS mapping with specific values
* if DATA_FIN is set but no data payload is mapped
*/
@@ -505,7 +503,7 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
opts->ext_copy = *mpext;
if (skb && tcp_fin && subflow->data_fin_tx_enable)
- mptcp_write_data_fin(subflow, &opts->ext_copy);
+ mptcp_write_data_fin(subflow, skb, &opts->ext_copy);
ret = true;
}
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 809687d3f410..c6eeaf3e8dcb 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -135,8 +135,6 @@ static inline __be32 mptcp_option(u8 subopt, u8 len, u8 nib, u8 field)
((nib & 0xF) << 8) | field);
}
-#define MPTCP_PM_MAX_ADDR 4
-
struct mptcp_addr_info {
sa_family_t family;
__be16 port;
@@ -234,10 +232,7 @@ static inline struct mptcp_data_frag *mptcp_rtx_head(const struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- if (list_empty(&msk->rtx_queue))
- return NULL;
-
- return list_first_entry(&msk->rtx_queue, struct mptcp_data_frag, list);
+ return list_first_entry_or_null(&msk->rtx_queue, struct mptcp_data_frag, list);
}
struct mptcp_subflow_request_sock {
@@ -254,6 +249,7 @@ struct mptcp_subflow_request_sock {
u64 thmac;
u32 local_nonce;
u32 remote_nonce;
+ struct mptcp_sock *msk;
};
static inline struct mptcp_subflow_request_sock *
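
[Note] list_first_entry_or_null() folds the emptiness check into the fetch, removing the two-step pattern deleted above. Paraphrasing include/linux/list.h:

	#define list_first_entry_or_null(ptr, type, member) ({             \
		struct list_head *head__ = (ptr);                          \
		struct list_head *pos__ = READ_ONCE(head__->next);         \
		pos__ != head__ ? list_entry(pos__, type, member) : NULL;  \
	})

The new msk field in mptcp_subflow_request_sock caches the owning socket looked up at request time; the subflow.c diff below manages its reference.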
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index bf132575040d..3838a0b3a21f 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -69,6 +69,9 @@ static void subflow_req_destructor(struct request_sock *req)
pr_debug("subflow_req=%p", subflow_req);
+ if (subflow_req->msk)
+ sock_put((struct sock *)subflow_req->msk);
+
if (subflow_req->mp_capable)
mptcp_token_destroy_request(subflow_req->token);
tcp_request_sock_ops.destructor(req);
@@ -86,8 +89,8 @@ static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
}
/* validate received token and create truncated hmac and nonce for SYN-ACK */
-static bool subflow_token_join_request(struct request_sock *req,
- const struct sk_buff *skb)
+static struct mptcp_sock *subflow_token_join_request(struct request_sock *req,
+ const struct sk_buff *skb)
{
struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
u8 hmac[SHA256_DIGEST_SIZE];
@@ -97,13 +100,13 @@ static bool subflow_token_join_request(struct request_sock *req,
msk = mptcp_token_get_sock(subflow_req->token);
if (!msk) {
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
- return false;
+ return NULL;
}
local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
if (local_id < 0) {
sock_put((struct sock *)msk);
- return false;
+ return NULL;
}
subflow_req->local_id = local_id;
@@ -114,9 +117,7 @@ static bool subflow_token_join_request(struct request_sock *req,
subflow_req->remote_nonce, hmac);
subflow_req->thmac = get_unaligned_be64(hmac);
-
- sock_put((struct sock *)msk);
- return true;
+ return msk;
}
static void subflow_init_req(struct request_sock *req,
@@ -133,6 +134,7 @@ static void subflow_init_req(struct request_sock *req,
subflow_req->mp_capable = 0;
subflow_req->mp_join = 0;
+ subflow_req->msk = NULL;
#ifdef CONFIG_TCP_MD5SIG
/* no MPTCP if MD5SIG is enabled on this socket or we may run out of
@@ -166,12 +168,9 @@ static void subflow_init_req(struct request_sock *req,
subflow_req->remote_id = mp_opt.join_id;
subflow_req->token = mp_opt.token;
subflow_req->remote_nonce = mp_opt.nonce;
- pr_debug("token=%u, remote_nonce=%u", subflow_req->token,
- subflow_req->remote_nonce);
- if (!subflow_token_join_request(req, skb)) {
- subflow_req->mp_join = 0;
- // @@ need to trigger RST
- }
+ subflow_req->msk = subflow_token_join_request(req, skb);
+ pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
+ subflow_req->remote_nonce, subflow_req->msk);
}
}
@@ -354,10 +353,9 @@ static bool subflow_hmac_valid(const struct request_sock *req,
const struct mptcp_subflow_request_sock *subflow_req;
u8 hmac[SHA256_DIGEST_SIZE];
struct mptcp_sock *msk;
- bool ret;
subflow_req = mptcp_subflow_rsk(req);
- msk = mptcp_token_get_sock(subflow_req->token);
+ msk = subflow_req->msk;
if (!msk)
return false;
@@ -365,12 +363,7 @@ static bool subflow_hmac_valid(const struct request_sock *req,
subflow_req->remote_nonce,
subflow_req->local_nonce, hmac);
- ret = true;
- if (crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN))
- ret = false;
-
- sock_put((struct sock *)msk);
- return ret;
+ return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
}
static void mptcp_sock_destruct(struct sock *sk)
@@ -438,22 +431,25 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
struct mptcp_subflow_request_sock *subflow_req;
struct mptcp_options_received mp_opt;
- bool fallback_is_fatal = false;
+ bool fallback, fallback_is_fatal;
struct sock *new_msk = NULL;
- bool fallback = false;
struct sock *child;
pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
- /* we need later a valid 'mp_capable' value even when options are not
- * parsed
+ /* After child creation we must look for 'mp_capable' even when options
+ * are not parsed
*/
mp_opt.mp_capable = 0;
- if (tcp_rsk(req)->is_mptcp == 0)
+
+ /* hopefully temporary handling for MP_JOIN+syncookie */
+ subflow_req = mptcp_subflow_rsk(req);
+ fallback_is_fatal = subflow_req->mp_join;
+ fallback = !tcp_rsk(req)->is_mptcp;
+ if (fallback)
goto create_child;
/* if the sk is MP_CAPABLE, we try to fetch the client key */
- subflow_req = mptcp_subflow_rsk(req);
if (subflow_req->mp_capable) {
if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
/* here we can receive and accept an in-window,
@@ -474,12 +470,11 @@ create_msk:
if (!new_msk)
fallback = true;
} else if (subflow_req->mp_join) {
- fallback_is_fatal = true;
mptcp_get_options(skb, &mp_opt);
if (!mp_opt.mp_join ||
!subflow_hmac_valid(req, &mp_opt)) {
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
- return NULL;
+ fallback = true;
}
}
@@ -522,10 +517,12 @@ create_child:
} else if (ctx->mp_join) {
struct mptcp_sock *owner;
- owner = mptcp_token_get_sock(ctx->token);
+ owner = subflow_req->msk;
if (!owner)
goto dispose_child;
+ /* move the msk reference ownership to the subflow */
+ subflow_req->msk = NULL;
ctx->conn = (struct sock *)owner;
if (!mptcp_finish_join(child))
goto dispose_child;
@@ -1053,8 +1050,10 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
err = tcp_set_ulp(sf->sk, "mptcp");
release_sock(sf->sk);
- if (err)
+ if (err) {
+ sock_release(sf);
return err;
+ }
/* the newly created socket really belongs to the owning MPTCP master
* socket, even if for additional subflows the allocation is performed
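
[Note] The net effect is a single mptcp_token_get_sock() lookup per request: the reference is taken in subflow_init_req(), reused by subflow_hmac_valid(), and either dropped by the request destructor or handed over to the child. The hand-over uses the standard clear-on-transfer idiom so the destructor cannot put it twice:

	/* illustrative: ownership moves from the request to the context */
	ctx->conn = (struct sock *)subflow_req->msk;
	subflow_req->msk = NULL;   /* destructor now sees nothing to put */

Separately, the tcp_set_ulp() error path now releases the freshly created socket instead of leaking it.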
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index 486959f70cf3..a8ce04a4bb72 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -326,7 +326,7 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
set->variant = &bitmap_ip;
if (!init_map_ip(set, map, first_ip, last_ip,
elements, hosts, netmask)) {
- kfree(map);
+ ip_set_free(map);
return -ENOMEM;
}
if (tb[IPSET_ATTR_TIMEOUT]) {
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 2310a316e0af..2c625e0f49ec 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -363,7 +363,7 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
set->variant = &bitmap_ipmac;
if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
- kfree(map);
+ ip_set_free(map);
return -ENOMEM;
}
if (tb[IPSET_ATTR_TIMEOUT]) {
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index e56ced66f202..7138e080def4 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -274,7 +274,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
set->variant = &bitmap_port;
if (!init_map_port(set, map, first_port, last_port)) {
- kfree(map);
+ ip_set_free(map);
return -ENOMEM;
}
if (tb[IPSET_ATTR_TIMEOUT]) {
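
[Note] These three one-liners are the same bug: the map comes from ip_set_alloc(), which may fall back to vmalloc() for large bitmaps, so it must be released with ip_set_free() (a kvfree()-style helper); kfree() is only valid for kmalloc() memory. The general pairing rule, for reference (hypothetical buffer, the point is the alloc/free match):

	void *buf = kvmalloc(size, GFP_KERNEL);  /* kmalloc, else vmalloc */
	if (!buf)
		return -ENOMEM;
	/* ... use buf ... */
	kvfree(buf);   /* correct for either origin; kfree() is not */

The two ip_set_hash_gen.h hunks further down fix the identical kfree()-of-ip_set_alloc() pattern for t->hregion.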
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 340cb955af25..56621d6bfd29 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -460,6 +460,8 @@ ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len,
for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
if (!add_extension(id, cadt_flags, tb))
continue;
+ if (align < ip_set_extensions[id].align)
+ align = ip_set_extensions[id].align;
len = ALIGN(len, ip_set_extensions[id].align);
set->offset[id] = len;
set->extensions |= ip_set_extensions[id].type;
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 1ee43752d6d3..521e970be402 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -682,7 +682,7 @@ retry:
}
t->hregion = ip_set_alloc(ahash_sizeof_regions(htable_bits));
if (!t->hregion) {
- kfree(t);
+ ip_set_free(t);
ret = -ENOMEM;
goto out;
}
@@ -1533,7 +1533,7 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
}
t->hregion = ip_set_alloc(ahash_sizeof_regions(hbits));
if (!t->hregion) {
- kfree(t);
+ ip_set_free(t);
kfree(h);
return -ENOMEM;
}
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 605e0f68f8bd..2b8abbfe018c 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1717,6 +1717,8 @@ static int sync_thread_backup(void *data)
{
struct ip_vs_sync_thread_data *tinfo = data;
struct netns_ipvs *ipvs = tinfo->ipvs;
+ struct sock *sk = tinfo->sock->sk;
+ struct udp_sock *up = udp_sk(sk);
int len;
pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, "
@@ -1724,12 +1726,14 @@ static int sync_thread_backup(void *data)
ipvs->bcfg.mcast_ifn, ipvs->bcfg.syncid, tinfo->id);
while (!kthread_should_stop()) {
- wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
- !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue)
- || kthread_should_stop());
+ wait_event_interruptible(*sk_sleep(sk),
+ !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
+ !skb_queue_empty_lockless(&up->reader_queue) ||
+ kthread_should_stop());
/* do we have data now? */
- while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) {
+ while (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
+ !skb_queue_empty_lockless(&up->reader_queue)) {
len = ip_vs_receive(tinfo->sock, tinfo->buf,
ipvs->bcfg.sync_maxlen);
if (len <= 0) {
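
[Note] UDP keeps received skbs in two queues (sk_receive_queue plus the udp_sock reader_queue used for two-phase dequeue), so a backup thread that only tests sk_receive_queue can sleep while packets sit in reader_queue. Both queues must be polled, using the lockless emptiness test; paraphrasing include/linux/skbuff.h:

	static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
	{
		return READ_ONCE(list->next) == (const struct sk_buff *)list;
	}

READ_ONCE() makes the peek safe without taking the queue lock, at the cost of a possibly stale answer, which the surrounding wait loop tolerates.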
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 79cd9dde457b..f33d72c5b06e 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -2158,6 +2158,8 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
err = __nf_conntrack_update(net, skb, ct, ctinfo);
if (err < 0)
return err;
+
+ ct = nf_ct_get(skb, &ctinfo);
}
return nf_confirm_cthelper(skb, ct, ctinfo);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index d7bd8b1f27d5..832eabecfbdd 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -939,7 +939,8 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
filter->mark.mask = 0xffffffff;
}
} else if (cda[CTA_MARK_MASK]) {
- return ERR_PTR(-EINVAL);
+ err = -EINVAL;
+ goto err_filter;
}
#endif
if (!cda[CTA_FILTER])
@@ -947,15 +948,17 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
err = ctnetlink_parse_zone(cda[CTA_ZONE], &filter->zone);
if (err < 0)
- return ERR_PTR(err);
+ goto err_filter;
err = ctnetlink_parse_filter(cda[CTA_FILTER], filter);
if (err < 0)
- return ERR_PTR(err);
+ goto err_filter;
if (filter->orig_flags) {
- if (!cda[CTA_TUPLE_ORIG])
- return ERR_PTR(-EINVAL);
+ if (!cda[CTA_TUPLE_ORIG]) {
+ err = -EINVAL;
+ goto err_filter;
+ }
err = ctnetlink_parse_tuple_filter(cda, &filter->orig,
CTA_TUPLE_ORIG,
@@ -963,23 +966,32 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
&filter->zone,
filter->orig_flags);
if (err < 0)
- return ERR_PTR(err);
+ goto err_filter;
}
if (filter->reply_flags) {
- if (!cda[CTA_TUPLE_REPLY])
- return ERR_PTR(-EINVAL);
+ if (!cda[CTA_TUPLE_REPLY]) {
+ err = -EINVAL;
+ goto err_filter;
+ }
err = ctnetlink_parse_tuple_filter(cda, &filter->reply,
CTA_TUPLE_REPLY,
filter->family,
&filter->zone,
filter->orig_flags);
- if (err < 0)
- return ERR_PTR(err);
+ if (err < 0) {
+ err = -EINVAL;
+ goto err_filter;
+ }
}
return filter;
+
+err_filter:
+ kfree(filter);
+
+ return ERR_PTR(err);
}
static bool ctnetlink_needs_filter(u8 family, const struct nlattr * const *cda)
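
The ctnetlink_alloc_filter() rewrite above turns every early return after the allocation into a jump to a single err_filter label, the usual kernel idiom for guaranteeing that one kfree() covers all failure paths. A self-contained sketch of the idiom (all names illustrative, not from ctnetlink):

    struct foo *foo_alloc(const struct cfg *cfg)
    {
            struct foo *f;
            int err;

            f = kzalloc(sizeof(*f), GFP_KERNEL);
            if (!f)
                    return ERR_PTR(-ENOMEM);

            err = foo_parse_zone(f, cfg);
            if (err < 0)
                    goto err_free;             /* one label, one kfree() */

            err = foo_parse_tuple(f, cfg);
            if (err < 0)
                    goto err_free;

            return f;

    err_free:
            kfree(f);
            return ERR_PTR(err);
    }
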
diff --git a/net/netfilter/nf_dup_netdev.c b/net/netfilter/nf_dup_netdev.c
index f108a76925dd..2b01a151eaa8 100644
--- a/net/netfilter/nf_dup_netdev.c
+++ b/net/netfilter/nf_dup_netdev.c
@@ -73,3 +73,4 @@ EXPORT_SYMBOL_GPL(nft_fwd_dup_netdev_offload);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("Netfilter packet duplication support");
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 6a3034f84ab6..b1eb5272b379 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -387,51 +387,6 @@ static void nf_flow_offload_work_gc(struct work_struct *work)
queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
}
-int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
- flow_setup_cb_t *cb, void *cb_priv)
-{
- struct flow_block *block = &flow_table->flow_block;
- struct flow_block_cb *block_cb;
- int err = 0;
-
- down_write(&flow_table->flow_block_lock);
- block_cb = flow_block_cb_lookup(block, cb, cb_priv);
- if (block_cb) {
- err = -EEXIST;
- goto unlock;
- }
-
- block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL);
- if (IS_ERR(block_cb)) {
- err = PTR_ERR(block_cb);
- goto unlock;
- }
-
- list_add_tail(&block_cb->list, &block->cb_list);
-
-unlock:
- up_write(&flow_table->flow_block_lock);
- return err;
-}
-EXPORT_SYMBOL_GPL(nf_flow_table_offload_add_cb);
-
-void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
- flow_setup_cb_t *cb, void *cb_priv)
-{
- struct flow_block *block = &flow_table->flow_block;
- struct flow_block_cb *block_cb;
-
- down_write(&flow_table->flow_block_lock);
- block_cb = flow_block_cb_lookup(block, cb, cb_priv);
- if (block_cb) {
- list_del(&block_cb->list);
- flow_block_cb_free(block_cb);
- } else {
- WARN_ON(true);
- }
- up_write(&flow_table->flow_block_lock);
-}
-EXPORT_SYMBOL_GPL(nf_flow_table_offload_del_cb);
static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
__be16 port, __be16 new_port)
@@ -639,3 +594,4 @@ module_exit(nf_flow_table_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("Netfilter flow table module");
diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c
index 88bedf1ff1ae..bc4126d8ef65 100644
--- a/net/netfilter/nf_flow_table_inet.c
+++ b/net/netfilter/nf_flow_table_inet.c
@@ -72,3 +72,4 @@ module_exit(nf_flow_inet_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NF_FLOWTABLE(1); /* NFPROTO_INET */
+MODULE_DESCRIPTION("Netfilter flow table mixed IPv4/IPv6 module");
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index 62651e6683f6..5fff1e040168 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -950,6 +950,7 @@ static void nf_flow_table_indr_cleanup(struct flow_block_cb *block_cb)
nf_flow_table_gc_cleanup(flowtable, dev);
down_write(&flowtable->flow_block_lock);
list_del(&block_cb->list);
+ list_del(&block_cb->driver_list);
flow_block_cb_free(block_cb);
up_write(&flowtable->flow_block_lock);
}
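
An indirect flow block callback sits on two lists at once: the flow block's cb_list and the registering driver's driver_list. The cleanup above, and the matching one-line hunks in nf_tables_offload.c and cls_api.c later in this section, must unlink both before flow_block_cb_free(), otherwise the driver-side list keeps a dangling pointer. The teardown order, reduced to its essentials:

    down_write(&flowtable->flow_block_lock);
    list_del(&block_cb->list);            /* off the flow block */
    list_del(&block_cb->driver_list);     /* off the driver's bookkeeping list */
    flow_block_cb_free(block_cb);
    up_write(&flowtable->flow_block_lock);
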
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index b9cbe1e2453e..ebcdc8e54476 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -1237,3 +1237,4 @@ EXPORT_SYMBOL_GPL(nf_synproxy_ipv6_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("nftables SYNPROXY expression support");
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 073aa1051d43..88325b264737 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -188,24 +188,6 @@ static void nft_netdev_unregister_hooks(struct net *net,
nf_unregister_net_hook(net, &hook->ops);
}
-static int nft_register_basechain_hooks(struct net *net, int family,
- struct nft_base_chain *basechain)
-{
- if (family == NFPROTO_NETDEV)
- return nft_netdev_register_hooks(net, &basechain->hook_list);
-
- return nf_register_net_hook(net, &basechain->ops);
-}
-
-static void nft_unregister_basechain_hooks(struct net *net, int family,
- struct nft_base_chain *basechain)
-{
- if (family == NFPROTO_NETDEV)
- nft_netdev_unregister_hooks(net, &basechain->hook_list);
- else
- nf_unregister_net_hook(net, &basechain->ops);
-}
-
static int nf_tables_register_hook(struct net *net,
const struct nft_table *table,
struct nft_chain *chain)
@@ -223,7 +205,10 @@ static int nf_tables_register_hook(struct net *net,
if (basechain->type->ops_register)
return basechain->type->ops_register(net, ops);
- return nft_register_basechain_hooks(net, table->family, basechain);
+ if (table->family == NFPROTO_NETDEV)
+ return nft_netdev_register_hooks(net, &basechain->hook_list);
+
+ return nf_register_net_hook(net, &basechain->ops);
}
static void nf_tables_unregister_hook(struct net *net,
@@ -242,7 +227,10 @@ static void nf_tables_unregister_hook(struct net *net,
if (basechain->type->ops_unregister)
return basechain->type->ops_unregister(net, ops);
- nft_unregister_basechain_hooks(net, table->family, basechain);
+ if (table->family == NFPROTO_NETDEV)
+ nft_netdev_unregister_hooks(net, &basechain->hook_list);
+ else
+ nf_unregister_net_hook(net, &basechain->ops);
}
static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type)
@@ -832,8 +820,7 @@ static void nft_table_disable(struct net *net, struct nft_table *table, u32 cnt)
if (cnt && i++ == cnt)
break;
- nft_unregister_basechain_hooks(net, table->family,
- nft_base_chain(chain));
+ nf_tables_unregister_hook(net, table, chain);
}
}
@@ -848,8 +835,7 @@ static int nf_tables_table_enable(struct net *net, struct nft_table *table)
if (!nft_is_base_chain(chain))
continue;
- err = nft_register_basechain_hooks(net, table->family,
- nft_base_chain(chain));
+ err = nf_tables_register_hook(net, table, chain);
if (err < 0)
goto err_register_hooks;
@@ -894,11 +880,12 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
nft_trans_table_enable(trans) = false;
} else if (!(flags & NFT_TABLE_F_DORMANT) &&
ctx->table->flags & NFT_TABLE_F_DORMANT) {
+ ctx->table->flags &= ~NFT_TABLE_F_DORMANT;
ret = nf_tables_table_enable(ctx->net, ctx->table);
- if (ret >= 0) {
- ctx->table->flags &= ~NFT_TABLE_F_DORMANT;
+ if (ret >= 0)
nft_trans_table_enable(trans) = true;
- }
+ else
+ ctx->table->flags |= NFT_TABLE_F_DORMANT;
}
if (ret < 0)
goto err;
@@ -6550,12 +6537,22 @@ err1:
return err;
}
+static void nft_flowtable_hook_release(struct nft_flowtable_hook *flowtable_hook)
+{
+ struct nft_hook *this, *next;
+
+ list_for_each_entry_safe(this, next, &flowtable_hook->list, list) {
+ list_del(&this->list);
+ kfree(this);
+ }
+}
+
static int nft_delflowtable_hook(struct nft_ctx *ctx,
struct nft_flowtable *flowtable)
{
const struct nlattr * const *nla = ctx->nla;
struct nft_flowtable_hook flowtable_hook;
- struct nft_hook *this, *next, *hook;
+ struct nft_hook *this, *hook;
struct nft_trans *trans;
int err;
@@ -6564,33 +6561,40 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx,
if (err < 0)
return err;
- list_for_each_entry_safe(this, next, &flowtable_hook.list, list) {
+ list_for_each_entry(this, &flowtable_hook.list, list) {
hook = nft_hook_list_find(&flowtable->hook_list, this);
if (!hook) {
err = -ENOENT;
goto err_flowtable_del_hook;
}
hook->inactive = true;
- list_del(&this->list);
- kfree(this);
}
trans = nft_trans_alloc(ctx, NFT_MSG_DELFLOWTABLE,
sizeof(struct nft_trans_flowtable));
- if (!trans)
- return -ENOMEM;
+ if (!trans) {
+ err = -ENOMEM;
+ goto err_flowtable_del_hook;
+ }
nft_trans_flowtable(trans) = flowtable;
nft_trans_flowtable_update(trans) = true;
INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans));
+ nft_flowtable_hook_release(&flowtable_hook);
list_add_tail(&trans->list, &ctx->net->nft.commit_list);
return 0;
err_flowtable_del_hook:
- list_for_each_entry(hook, &flowtable_hook.list, list)
+ list_for_each_entry(this, &flowtable_hook.list, list) {
+ hook = nft_hook_list_find(&flowtable->hook_list, this);
+ if (!hook)
+ break;
+
hook->inactive = false;
+ }
+ nft_flowtable_hook_release(&flowtable_hook);
return err;
}
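
With the transaction allocated only after the hooks have been flagged inactive, a failed nft_trans_alloc() must undo that staging: the error path walks the request list again, clears the inactive flag on every hook it had marked, and frees the temporary list through the new nft_flowtable_hook_release() helper so success and failure release it exactly once. The two-phase shape, with hypothetical names:

    list_for_each_entry(req, &requests, list)
            mark_inactive(req);                  /* phase 1: stage */

    trans = alloc_transaction();
    if (!trans) {
            list_for_each_entry(req, &requests, list)
                    clear_inactive(req);         /* roll the staging back */
            release_requests(&requests);
            return -ENOMEM;
    }
    release_requests(&requests);                 /* commit path frees it too */
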
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index 185fc82c99aa..c7cf1cde46de 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -296,6 +296,7 @@ static void nft_indr_block_cleanup(struct flow_block_cb *block_cb)
nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND,
basechain, &extack);
mutex_lock(&net->nft.commit_mutex);
+ list_del(&block_cb->driver_list);
list_move(&block_cb->list, &bo.cb_list);
nft_flow_offload_unbind(&bo, basechain);
mutex_unlock(&net->nft.commit_mutex);
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 99127e2d95a8..5f24edf95830 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -33,6 +33,7 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);
+MODULE_DESCRIPTION("Netfilter messages via netlink socket");
#define nfnl_dereference_protected(id) \
rcu_dereference_protected(table[(id)].subsys, \
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index f9adca62ccb3..aa1a066cb74b 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -902,3 +902,4 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("match");
MODULE_ALIAS_NFT_EXPR("target");
+MODULE_DESCRIPTION("x_tables over nftables support");
diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c
index 69d6173f91e2..7d0761fad37e 100644
--- a/net/netfilter/nft_connlimit.c
+++ b/net/netfilter/nft_connlimit.c
@@ -280,3 +280,4 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso");
MODULE_ALIAS_NFT_EXPR("connlimit");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CONNLIMIT);
+MODULE_DESCRIPTION("nftables connlimit rule support");
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
index f6d4d0fa23a6..85ed461ec24e 100644
--- a/net/netfilter/nft_counter.c
+++ b/net/netfilter/nft_counter.c
@@ -303,3 +303,4 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS_NFT_EXPR("counter");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_COUNTER);
+MODULE_DESCRIPTION("nftables counter rule support");
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index faea72c2df32..77258af1fce0 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -1345,3 +1345,4 @@ MODULE_ALIAS_NFT_EXPR("notrack");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_HELPER);
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_TIMEOUT);
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_EXPECT);
+MODULE_DESCRIPTION("Netfilter nf_tables conntrack module");
diff --git a/net/netfilter/nft_dup_netdev.c b/net/netfilter/nft_dup_netdev.c
index c2e78c160fd7..40788b3f1071 100644
--- a/net/netfilter/nft_dup_netdev.c
+++ b/net/netfilter/nft_dup_netdev.c
@@ -102,3 +102,4 @@ module_exit(nft_dup_netdev_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_AF_EXPR(5, "dup");
+MODULE_DESCRIPTION("nftables netdev packet duplication support");
diff --git a/net/netfilter/nft_fib_inet.c b/net/netfilter/nft_fib_inet.c
index 465432e0531b..a88d44e163d1 100644
--- a/net/netfilter/nft_fib_inet.c
+++ b/net/netfilter/nft_fib_inet.c
@@ -76,3 +76,4 @@ module_exit(nft_fib_inet_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
MODULE_ALIAS_NFT_AF_EXPR(1, "fib");
+MODULE_DESCRIPTION("nftables fib inet support");
diff --git a/net/netfilter/nft_fib_netdev.c b/net/netfilter/nft_fib_netdev.c
index a2e726ae7f07..3f3478abd845 100644
--- a/net/netfilter/nft_fib_netdev.c
+++ b/net/netfilter/nft_fib_netdev.c
@@ -85,3 +85,4 @@ module_exit(nft_fib_netdev_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo M. Bermudo Garay <pablombg@gmail.com>");
MODULE_ALIAS_NFT_AF_EXPR(5, "fib");
+MODULE_DESCRIPTION("nftables netdev fib lookups support");
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index b70b48996801..3b9b97aa4b32 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -286,3 +286,4 @@ module_exit(nft_flow_offload_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("flow_offload");
+MODULE_DESCRIPTION("nftables hardware flow offload module");
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index b836d550b919..96371d878e7e 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -248,3 +248,4 @@ module_exit(nft_hash_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Laura Garcia <nevola@gmail.com>");
MODULE_ALIAS_NFT_EXPR("hash");
+MODULE_DESCRIPTION("Netfilter nftables hash module");
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
index 35b67d7e3694..0e2c315c3b5e 100644
--- a/net/netfilter/nft_limit.c
+++ b/net/netfilter/nft_limit.c
@@ -372,3 +372,4 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS_NFT_EXPR("limit");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_LIMIT);
+MODULE_DESCRIPTION("nftables limit expression support");
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index fe4831f2258f..57899454a530 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -298,3 +298,4 @@ module_exit(nft_log_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS_NFT_EXPR("log");
+MODULE_DESCRIPTION("Netfilter nf_tables log module");
diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c
index bc9fd98c5d6d..71390b727040 100644
--- a/net/netfilter/nft_masq.c
+++ b/net/netfilter/nft_masq.c
@@ -305,3 +305,4 @@ module_exit(nft_masq_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
MODULE_ALIAS_NFT_EXPR("masq");
+MODULE_DESCRIPTION("Netfilter nftables masquerade expression support");
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index 23a7bfd10521..4bcf33b049c4 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -402,3 +402,4 @@ module_exit(nft_nat_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>");
MODULE_ALIAS_NFT_EXPR("nat");
+MODULE_DESCRIPTION("Network Address Translation support");
diff --git a/net/netfilter/nft_numgen.c b/net/netfilter/nft_numgen.c
index 48edb9d5f012..f1fc824f9737 100644
--- a/net/netfilter/nft_numgen.c
+++ b/net/netfilter/nft_numgen.c
@@ -217,3 +217,4 @@ module_exit(nft_ng_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Laura Garcia <nevola@gmail.com>");
MODULE_ALIAS_NFT_EXPR("numgen");
+MODULE_DESCRIPTION("nftables number generator module");
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index bfd18d2b65a2..5f9207a9f485 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -252,3 +252,4 @@ module_exit(nft_objref_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("objref");
+MODULE_DESCRIPTION("nftables stateful object reference module");
diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
index b42247aa48a9..c261d57a666a 100644
--- a/net/netfilter/nft_osf.c
+++ b/net/netfilter/nft_osf.c
@@ -149,3 +149,4 @@ module_exit(nft_osf_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fernando Fernandez <ffmancera@riseup.net>");
MODULE_ALIAS_NFT_EXPR("osf");
+MODULE_DESCRIPTION("nftables passive OS fingerprint support");
diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
index 5ece0a6aa8c3..23265d757acb 100644
--- a/net/netfilter/nft_queue.c
+++ b/net/netfilter/nft_queue.c
@@ -216,3 +216,4 @@ module_exit(nft_queue_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eric Leblond <eric@regit.org>");
MODULE_ALIAS_NFT_EXPR("queue");
+MODULE_DESCRIPTION("Netfilter nftables queue module");
diff --git a/net/netfilter/nft_quota.c b/net/netfilter/nft_quota.c
index 4413690591f2..0363f533a42b 100644
--- a/net/netfilter/nft_quota.c
+++ b/net/netfilter/nft_quota.c
@@ -254,3 +254,4 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("quota");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_QUOTA);
+MODULE_DESCRIPTION("Netfilter nftables quota module");
diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
index 5b779171565c..2056051c0af0 100644
--- a/net/netfilter/nft_redir.c
+++ b/net/netfilter/nft_redir.c
@@ -292,3 +292,4 @@ module_exit(nft_redir_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
MODULE_ALIAS_NFT_EXPR("redir");
+MODULE_DESCRIPTION("Netfilter nftables redirect support");
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c
index 00f865fb80ca..86eafbb0fdd0 100644
--- a/net/netfilter/nft_reject.c
+++ b/net/netfilter/nft_reject.c
@@ -119,3 +119,4 @@ EXPORT_SYMBOL_GPL(nft_reject_icmpv6_code);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("Netfilter x_tables over nftables module");
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
index f41f414b72d1..cf8f2646e93c 100644
--- a/net/netfilter/nft_reject_inet.c
+++ b/net/netfilter/nft_reject_inet.c
@@ -149,3 +149,4 @@ module_exit(nft_reject_inet_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_ALIAS_NFT_AF_EXPR(1, "reject");
+MODULE_DESCRIPTION("Netfilter nftables reject inet support");
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index 8b5acc6910fd..8c04388296b0 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -1242,7 +1242,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
}
- if (!*this_cpu_ptr(m->scratch) || bsize_max > m->bsize_max) {
+ if (!*get_cpu_ptr(m->scratch) || bsize_max > m->bsize_max) {
+ put_cpu_ptr(m->scratch);
+
err = pipapo_realloc_scratch(m, bsize_max);
if (err)
return err;
@@ -1250,6 +1252,8 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
this_cpu_write(nft_pipapo_scratch_index, false);
m->bsize_max = bsize_max;
+ } else {
+ put_cpu_ptr(m->scratch);
}
*ext2 = &e->ext;
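
Dereferencing *this_cpu_ptr() from a preemptible context can sample one CPU's scratch area and then continue on another CPU. get_cpu_ptr() pins the task (preemption off) for the check, and put_cpu_ptr() is called on every branch before pipapo_realloc_scratch() performs sleeping allocations. The access pattern, reduced:

    scratch = get_cpu_ptr(m->scratch);     /* preemption disabled */
    need = !*scratch || bsize_max > m->bsize_max;
    put_cpu_ptr(m->scratch);               /* re-enable before anything sleeps */
    if (need) {
            err = pipapo_realloc_scratch(m, bsize_max);   /* may sleep */
            if (err)
                    return err;
    }
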
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 62f416bc0579..b6aad3fc46c3 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -271,12 +271,14 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
if (nft_rbtree_interval_start(new)) {
if (nft_rbtree_interval_end(rbe) &&
- nft_set_elem_active(&rbe->ext, genmask))
+ nft_set_elem_active(&rbe->ext, genmask) &&
+ !nft_set_elem_expired(&rbe->ext))
overlap = false;
} else {
overlap = nft_rbtree_interval_end(rbe) &&
nft_set_elem_active(&rbe->ext,
- genmask);
+ genmask) &&
+ !nft_set_elem_expired(&rbe->ext);
}
} else if (d > 0) {
p = &parent->rb_right;
@@ -284,9 +286,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
if (nft_rbtree_interval_end(new)) {
overlap = nft_rbtree_interval_end(rbe) &&
nft_set_elem_active(&rbe->ext,
- genmask);
+ genmask) &&
+ !nft_set_elem_expired(&rbe->ext);
} else if (nft_rbtree_interval_end(rbe) &&
- nft_set_elem_active(&rbe->ext, genmask)) {
+ nft_set_elem_active(&rbe->ext, genmask) &&
+ !nft_set_elem_expired(&rbe->ext)) {
overlap = true;
}
} else {
@@ -294,15 +298,18 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
nft_rbtree_interval_start(new)) {
p = &parent->rb_left;
- if (nft_set_elem_active(&rbe->ext, genmask))
+ if (nft_set_elem_active(&rbe->ext, genmask) &&
+ !nft_set_elem_expired(&rbe->ext))
overlap = false;
} else if (nft_rbtree_interval_start(rbe) &&
nft_rbtree_interval_end(new)) {
p = &parent->rb_right;
- if (nft_set_elem_active(&rbe->ext, genmask))
+ if (nft_set_elem_active(&rbe->ext, genmask) &&
+ !nft_set_elem_expired(&rbe->ext))
overlap = false;
- } else if (nft_set_elem_active(&rbe->ext, genmask)) {
+ } else if (nft_set_elem_active(&rbe->ext, genmask) &&
+ !nft_set_elem_expired(&rbe->ext)) {
*ext = &rbe->ext;
return -EEXIST;
} else {
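
Every overlap and duplicate test in __nft_rbtree_insert() now requires the colliding element to be active in the current generation and not expired: a timed-out interval should behave as if it were already removed, so re-adding it must neither return -EEXIST nor raise a false overlap. The repeated condition in one predicate (hypothetical helper, not part of the patch):

    static bool nft_rbtree_elem_live(const struct nft_rbtree_elem *rbe,
                                     u8 genmask)
    {
            return nft_set_elem_active(&rbe->ext, genmask) &&
                   !nft_set_elem_expired(&rbe->ext);
    }
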
diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c
index e2c1fc608841..4fda8b3f1762 100644
--- a/net/netfilter/nft_synproxy.c
+++ b/net/netfilter/nft_synproxy.c
@@ -388,3 +388,4 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fernando Fernandez <ffmancera@riseup.net>");
MODULE_ALIAS_NFT_EXPR("synproxy");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_SYNPROXY);
+MODULE_DESCRIPTION("nftables SYNPROXY expression support");
diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
index 30be5787fbde..d3eb953d0333 100644
--- a/net/netfilter/nft_tunnel.c
+++ b/net/netfilter/nft_tunnel.c
@@ -719,3 +719,4 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("tunnel");
MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);
+MODULE_DESCRIPTION("nftables tunnel expression support");
diff --git a/net/netfilter/xt_nat.c b/net/netfilter/xt_nat.c
index a8e5f6c8db7a..b4f7bbc3f3ca 100644
--- a/net/netfilter/xt_nat.c
+++ b/net/netfilter/xt_nat.c
@@ -244,3 +244,4 @@ MODULE_ALIAS("ipt_SNAT");
MODULE_ALIAS("ipt_DNAT");
MODULE_ALIAS("ip6t_SNAT");
MODULE_ALIAS("ip6t_DNAT");
+MODULE_DESCRIPTION("SNAT and DNAT targets support");
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 55ee680e9db1..9395ee8a868d 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -351,22 +351,11 @@ int genl_register_family(struct genl_family *family)
start = end = GENL_ID_VFS_DQUOT;
}
- if (family->maxattr && !family->parallel_ops) {
- family->attrbuf = kmalloc_array(family->maxattr + 1,
- sizeof(struct nlattr *),
- GFP_KERNEL);
- if (family->attrbuf == NULL) {
- err = -ENOMEM;
- goto errout_locked;
- }
- } else
- family->attrbuf = NULL;
-
family->id = idr_alloc_cyclic(&genl_fam_idr, family,
start, end + 1, GFP_KERNEL);
if (family->id < 0) {
err = family->id;
- goto errout_free;
+ goto errout_locked;
}
err = genl_validate_assign_mc_groups(family);
@@ -385,8 +374,6 @@ int genl_register_family(struct genl_family *family)
errout_remove:
idr_remove(&genl_fam_idr, family->id);
-errout_free:
- kfree(family->attrbuf);
errout_locked:
genl_unlock_all();
return err;
@@ -419,8 +406,6 @@ int genl_unregister_family(const struct genl_family *family)
atomic_read(&genl_sk_destructing_cnt) == 0);
genl_unlock();
- kfree(family->attrbuf);
-
genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
return 0;
@@ -485,30 +470,23 @@ genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
if (!family->maxattr)
return NULL;
- if (family->parallel_ops) {
- attrbuf = kmalloc_array(family->maxattr + 1,
- sizeof(struct nlattr *), GFP_KERNEL);
- if (!attrbuf)
- return ERR_PTR(-ENOMEM);
- } else {
- attrbuf = family->attrbuf;
- }
+ attrbuf = kmalloc_array(family->maxattr + 1,
+ sizeof(struct nlattr *), GFP_KERNEL);
+ if (!attrbuf)
+ return ERR_PTR(-ENOMEM);
err = __nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
family->policy, validate, extack);
if (err) {
- if (family->parallel_ops)
- kfree(attrbuf);
+ kfree(attrbuf);
return ERR_PTR(err);
}
return attrbuf;
}
-static void genl_family_rcv_msg_attrs_free(const struct genl_family *family,
- struct nlattr **attrbuf)
+static void genl_family_rcv_msg_attrs_free(struct nlattr **attrbuf)
{
- if (family->parallel_ops)
- kfree(attrbuf);
+ kfree(attrbuf);
}
struct genl_start_context {
@@ -542,7 +520,7 @@ static int genl_start(struct netlink_callback *cb)
no_attrs:
info = genl_dumpit_info_alloc();
if (!info) {
- genl_family_rcv_msg_attrs_free(ctx->family, attrs);
+ genl_family_rcv_msg_attrs_free(attrs);
return -ENOMEM;
}
info->family = ctx->family;
@@ -559,7 +537,7 @@ no_attrs:
}
if (rc) {
- genl_family_rcv_msg_attrs_free(info->family, info->attrs);
+ genl_family_rcv_msg_attrs_free(info->attrs);
genl_dumpit_info_free(info);
cb->data = NULL;
}
@@ -588,7 +566,7 @@ static int genl_lock_done(struct netlink_callback *cb)
rc = ops->done(cb);
genl_unlock();
}
- genl_family_rcv_msg_attrs_free(info->family, info->attrs);
+ genl_family_rcv_msg_attrs_free(info->attrs);
genl_dumpit_info_free(info);
return rc;
}
@@ -601,7 +579,7 @@ static int genl_parallel_done(struct netlink_callback *cb)
if (ops->done)
rc = ops->done(cb);
- genl_family_rcv_msg_attrs_free(info->family, info->attrs);
+ genl_family_rcv_msg_attrs_free(info->attrs);
genl_dumpit_info_free(info);
return rc;
}
@@ -694,7 +672,7 @@ static int genl_family_rcv_msg_doit(const struct genl_family *family,
family->post_doit(ops, skb, &info);
out:
- genl_family_rcv_msg_attrs_free(family, attrbuf);
+ genl_family_rcv_msg_attrs_free(attrbuf);
return err;
}
@@ -1166,60 +1144,11 @@ static struct genl_family genl_ctrl __ro_after_init = {
.netnsok = true,
};
-static int genl_bind(struct net *net, int group)
-{
- struct genl_family *f;
- int err = -ENOENT;
- unsigned int id;
-
- down_read(&cb_lock);
-
- idr_for_each_entry(&genl_fam_idr, f, id) {
- if (group >= f->mcgrp_offset &&
- group < f->mcgrp_offset + f->n_mcgrps) {
- int fam_grp = group - f->mcgrp_offset;
-
- if (!f->netnsok && net != &init_net)
- err = -ENOENT;
- else if (f->mcast_bind)
- err = f->mcast_bind(net, fam_grp);
- else
- err = 0;
- break;
- }
- }
- up_read(&cb_lock);
-
- return err;
-}
-
-static void genl_unbind(struct net *net, int group)
-{
- struct genl_family *f;
- unsigned int id;
-
- down_read(&cb_lock);
-
- idr_for_each_entry(&genl_fam_idr, f, id) {
- if (group >= f->mcgrp_offset &&
- group < f->mcgrp_offset + f->n_mcgrps) {
- int fam_grp = group - f->mcgrp_offset;
-
- if (f->mcast_unbind)
- f->mcast_unbind(net, fam_grp);
- break;
- }
- }
- up_read(&cb_lock);
-}
-
static int __net_init genl_pernet_init(struct net *net)
{
struct netlink_kernel_cfg cfg = {
.input = genl_rcv,
.flags = NL_CFG_F_NONROOT_RECV,
- .bind = genl_bind,
- .unbind = genl_unbind,
};
/* we'll bump the group number right afterwards */
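
Dropping family->attrbuf makes attribute buffers per-message for every family, not just parallel ones: the old shared buffer was only safe under genl_lock, and keeping it forced the family pointer into each free site. After the change the parser allocates, every consumer frees, and genl_family_rcv_msg_attrs_free() shrinks to a kfree(). The removal of genl_bind()/genl_unbind() at the bottom of the diff is a separate cleanup that drops the per-family mcast_bind/mcast_unbind dispatch at socket-bind time. The buffer lifecycle, stripped down:

    attrbuf = kmalloc_array(family->maxattr + 1,
                            sizeof(struct nlattr *), GFP_KERNEL);
    if (!attrbuf)
            return ERR_PTR(-ENOMEM);

    err = __nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
                        family->policy, validate, extack);
    if (err) {
            kfree(attrbuf);        /* callers never see a half-parsed buffer */
            return ERR_PTR(err);
    }
    return attrbuf;                /* every caller kfree()s when done */
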
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 7cd524884304..78ea8c94dcba 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -1228,10 +1228,13 @@ int nci_register_device(struct nci_dev *ndev)
rc = nfc_register_device(ndev->nfc_dev);
if (rc)
- goto destroy_rx_wq_exit;
+ goto destroy_tx_wq_exit;
goto exit;
+destroy_tx_wq_exit:
+ destroy_workqueue(ndev->tx_wq);
+
destroy_rx_wq_exit:
destroy_workqueue(ndev->rx_wq);
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index fc0efd8833c8..2611657f40ca 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -1169,9 +1169,10 @@ static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
struct sw_flow_key *key,
const struct nlattr *attr, bool last)
{
+ struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
const struct nlattr *actions, *cpl_arg;
+ int len, max_len, rem = nla_len(attr);
const struct check_pkt_len_arg *arg;
- int rem = nla_len(attr);
bool clone_flow_key;
/* The first netlink attribute in 'attr' is always
@@ -1180,7 +1181,11 @@ static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
cpl_arg = nla_data(attr);
arg = nla_data(cpl_arg);
- if (skb->len <= arg->pkt_len) {
+ len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
+ max_len = arg->pkt_len;
+
+ if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
+ len <= max_len) {
/* Second netlink attribute in 'attr' is always
* 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
*/
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 2d8d6131bc5f..300a104b9a0f 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -166,6 +166,7 @@ static void __qrtr_node_release(struct kref *kref)
{
struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);
struct radix_tree_iter iter;
+ struct qrtr_tx_flow *flow;
unsigned long flags;
void __rcu **slot;
@@ -181,8 +182,9 @@ static void __qrtr_node_release(struct kref *kref)
/* Free tx flow counters */
radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
+ flow = *slot;
radix_tree_iter_delete(&node->qrtr_tx_flow, &iter, slot);
- kfree(*slot);
+ kfree(flow);
}
kfree(node);
}
@@ -427,7 +429,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
unsigned int ver;
size_t hdrlen;
- if (len & 3)
+ if (len == 0 || len & 3)
return -EINVAL;
skb = netdev_alloc_skb(NULL, len);
@@ -441,6 +443,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
switch (ver) {
case QRTR_PROTO_VER_1:
+ if (len < sizeof(*v1))
+ goto err;
v1 = data;
hdrlen = sizeof(*v1);
@@ -454,6 +458,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
size = le32_to_cpu(v1->size);
break;
case QRTR_PROTO_VER_2:
+ if (len < sizeof(*v2))
+ goto err;
v2 = data;
hdrlen = sizeof(*v2) + v2->optlen;
@@ -1174,6 +1180,7 @@ static int qrtr_release(struct socket *sock)
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
+ sock_orphan(sk);
sock->sk = NULL;
if (!sock_flag(sk, SOCK_ZAPPED))
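
Three independent qrtr hardenings: qrtr_endpoint_post() now rejects empty buffers and buffers shorter than the version header it is about to dereference; __qrtr_node_release() copies the flow pointer out of the radix-tree slot before deleting the slot instead of reading *slot afterwards; and qrtr_release() orphans the socket so late wakeups cannot reach a dead sk. The validation shape, condensed (header struct names as in qrtr.c):

    /* condensed: validate lengths before touching version headers */
    if (len == 0 || len & 3)                   /* non-empty, 32-bit aligned */
            return -EINVAL;
    if (ver == QRTR_PROTO_VER_1 && len < sizeof(struct qrtr_hdr_v1))
            goto err;                          /* header must fit the buffer */
    if (ver == QRTR_PROTO_VER_2 && len < sizeof(struct qrtr_hdr_v2))
            goto err;
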
diff --git a/net/rds/connection.c b/net/rds/connection.c
index ed7f2133acc2..f2fcab182095 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -905,6 +905,17 @@ void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
}
EXPORT_SYMBOL_GPL(rds_conn_path_connect_if_down);
+/* Check connectivity of all paths
+ */
+void rds_check_all_paths(struct rds_connection *conn)
+{
+ int i = 0;
+
+ do {
+ rds_conn_path_connect_if_down(&conn->c_path[i]);
+ } while (++i < conn->c_npaths);
+}
+
void rds_conn_connect_if_down(struct rds_connection *conn)
{
WARN_ON(conn->c_trans->t_mp_capable);
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 5ae069d39eab..8dfff43cf07f 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -264,7 +264,13 @@ struct rds_ib_device {
int *vector_load;
};
-#define ibdev_to_node(ibdev) dev_to_node((ibdev)->dev.parent)
+static inline int ibdev_to_node(struct ib_device *ibdev)
+{
+ struct device *parent;
+
+ parent = ibdev->dev.parent;
+ return parent ? dev_to_node(parent) : NUMA_NO_NODE;
+}
#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)
/* bits for i_ack_flags */
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 6019b0c004a9..106e862996b9 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -778,6 +778,7 @@ void rds_conn_drop(struct rds_connection *conn);
void rds_conn_path_drop(struct rds_conn_path *cpath, bool destroy);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
+void rds_check_all_paths(struct rds_connection *conn);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens,
@@ -823,6 +824,12 @@ rds_conn_path_up(struct rds_conn_path *cp)
}
static inline int
+rds_conn_path_down(struct rds_conn_path *cp)
+{
+ return atomic_read(&cp->cp_state) == RDS_CONN_DOWN;
+}
+
+static inline int
rds_conn_up(struct rds_connection *conn)
{
WARN_ON(conn->c_trans->t_mp_capable);
diff --git a/net/rds/send.c b/net/rds/send.c
index 68e2bdb08fd0..9a529a01cdc6 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1340,7 +1340,8 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
goto out;
}
- rds_conn_path_connect_if_down(cpath);
+ if (rds_conn_path_down(cpath))
+ rds_check_all_paths(conn);
ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
if (ret) {
diff --git a/net/rds/transport.c b/net/rds/transport.c
index 46f709a4b577..f8001ec80867 100644
--- a/net/rds/transport.c
+++ b/net/rds/transport.c
@@ -38,6 +38,12 @@
#include "rds.h"
#include "loop.h"
+static char * const rds_trans_modules[] = {
+ [RDS_TRANS_IB] = "rds_rdma",
+ [RDS_TRANS_GAP] = NULL,
+ [RDS_TRANS_TCP] = "rds_tcp",
+};
+
static struct rds_transport *transports[RDS_TRANS_COUNT];
static DECLARE_RWSEM(rds_trans_sem);
@@ -110,18 +116,20 @@ struct rds_transport *rds_trans_get(int t_type)
{
struct rds_transport *ret = NULL;
struct rds_transport *trans;
- unsigned int i;
down_read(&rds_trans_sem);
- for (i = 0; i < RDS_TRANS_COUNT; i++) {
- trans = transports[i];
-
- if (trans && trans->t_type == t_type &&
- (!trans->t_owner || try_module_get(trans->t_owner))) {
- ret = trans;
- break;
- }
+ trans = transports[t_type];
+ if (!trans) {
+ up_read(&rds_trans_sem);
+ if (rds_trans_modules[t_type])
+ request_module(rds_trans_modules[t_type]);
+ down_read(&rds_trans_sem);
+ trans = transports[t_type];
}
+ if (trans && trans->t_type == t_type &&
+ (!trans->t_owner || try_module_get(trans->t_owner)))
+ ret = trans;
+
up_read(&rds_trans_sem);
return ret;
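
rds_trans_get() used to return NULL whenever the requested transport module had not been loaded yet. It now indexes transports[] directly by type and, on a miss, drops rds_trans_sem, asks modprobe for the name in the new rds_trans_modules[] table, and retries. Dropping the semaphore first appears necessary because the loaded module's init path registers the transport, which takes the same semaphore for writing. The retry skeleton:

    down_read(&rds_trans_sem);
    trans = transports[t_type];
    if (!trans) {
            up_read(&rds_trans_sem);     /* registration needs the write side */
            if (rds_trans_modules[t_type])
                    request_module(rds_trans_modules[t_type]);
            down_read(&rds_trans_sem);
            trans = transports[t_type];  /* filled in by module init, if any */
    }
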
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index b7611cc159e5..032ed76c0166 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -22,6 +22,11 @@
#include <net/ip.h>
#include "ar-internal.h"
+static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
+ unsigned long user_call_ID)
+{
+}
+
/*
* Preallocate a single service call, connection and peer and, if possible,
* give them a user ID and attach the user's side of the ID to them.
@@ -228,6 +233,8 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
if (rx->discard_new_call) {
_debug("discard %lx", call->user_call_ID);
rx->discard_new_call(call, call->user_call_ID);
+ if (call->notify_rx)
+ call->notify_rx = rxrpc_dummy_notify;
rxrpc_put_call(call, rxrpc_call_put_kernel);
}
rxrpc_call_completed(call);
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index aa1c8eee6557..6be2672a65ea 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -253,7 +253,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
* confuse things
*/
annotation &= ~RXRPC_TX_ANNO_MASK;
- annotation |= RXRPC_TX_ANNO_RESENT;
+ annotation |= RXRPC_TX_ANNO_UNACK | RXRPC_TX_ANNO_RESENT;
call->rxtx_annotations[ix] = annotation;
skb = call->rxtx_buffer[ix];
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 299ac98e9754..767579328a06 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -722,13 +722,12 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
rwind, ntohl(ackinfo->jumbo_max));
+ if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
+ rwind = RXRPC_RXTX_BUFF_SIZE - 1;
if (call->tx_winsize != rwind) {
- if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
- rwind = RXRPC_RXTX_BUFF_SIZE - 1;
if (rwind > call->tx_winsize)
wake = true;
- trace_rxrpc_rx_rwind_change(call, sp->hdr.serial,
- ntohl(ackinfo->rwind), wake);
+ trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake);
call->tx_winsize = rwind;
}
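
Hoisting the receive-window clamp above the tx_winsize comparison means both the comparison and the trace point observe the value actually applied; previously the trace logged the peer's raw rwind even when a smaller window took effect. The clamp in one-liner form (min_t is an equivalent alternative, not what the patch uses):

    rwind = min_t(u32, rwind, RXRPC_RXTX_BUFF_SIZE - 1);
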
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 2989742a4aa1..490b1927215c 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -543,7 +543,7 @@ try_again:
list_empty(&rx->recvmsg_q) &&
rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
release_sock(&rx->sk);
- return -ENODATA;
+ return -EAGAIN;
}
if (list_empty(&rx->recvmsg_q)) {
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 1304b8608f56..03a30d014bb6 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -304,7 +304,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
/* this should be in poll */
sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
- if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
return -EPIPE;
more = msg->msg_flags & MSG_MORE;
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 43a243081e7d..f901421b0634 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -43,17 +43,20 @@ static int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a,
tcf_lastuse_update(&ca->tcf_tm);
bstats_update(&ca->tcf_bstats, skb);
- if (skb->protocol == htons(ETH_P_IP)) {
+ switch (skb_protocol(skb, true)) {
+ case htons(ETH_P_IP):
if (skb->len < sizeof(struct iphdr))
goto out;
proto = NFPROTO_IPV4;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ break;
+ case htons(ETH_P_IPV6):
if (skb->len < sizeof(struct ipv6hdr))
goto out;
proto = NFPROTO_IPV6;
- } else {
+ break;
+ default:
goto out;
}
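
From here on, tc_skb_protocol() is replaced across the tc code (act_csum, act_ct, act_ctinfo, act_mpls, act_skbedit, cls_api, cls_flow, cls_flower, em_ipset, em_ipt, em_meta, sch_cake, sch_dsmark) by skb_protocol(skb, skip_vlan). With skip_vlan true it yields the network-layer protocol even when VLAN tags are still in the payload; with skip_vlan false it is a plain skb->protocol read. A rough model of the semantics, not the exact helper body:

    static inline __be16 skb_protocol_model(const struct sk_buff *skb,
                                            bool skip_vlan)
    {
            if (!skip_vlan)
                    return skb->protocol;
            /* vlan_get_protocol() walks nested 802.1Q/802.1ad headers */
            return vlan_get_protocol(skb);
    }
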
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index cb8608f0a77a..c60674cf25c4 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -587,7 +587,7 @@ static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
goto drop;
update_flags = params->update_flags;
- protocol = tc_skb_protocol(skb);
+ protocol = skb_protocol(skb, false);
again:
switch (protocol) {
case cpu_to_be16(ETH_P_IP):
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index e29f0f45d688..5928efb6449c 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -624,7 +624,7 @@ static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
u8 family = NFPROTO_UNSPEC;
- switch (skb->protocol) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
family = NFPROTO_IPV4;
break;
@@ -673,9 +673,10 @@ static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
}
static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
- u8 family, u16 zone)
+ u8 family, u16 zone, bool *defrag)
{
enum ip_conntrack_info ctinfo;
+ struct qdisc_skb_cb cb;
struct nf_conn *ct;
int err = 0;
bool frag;
@@ -693,6 +694,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
return err;
skb_get(skb);
+ cb = *qdisc_skb_cb(skb);
if (family == NFPROTO_IPV4) {
enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
@@ -703,6 +705,9 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
local_bh_enable();
if (err && err != -EINPROGRESS)
goto out_free;
+
+ if (!err)
+ *defrag = true;
} else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
@@ -711,12 +716,16 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
err = nf_ct_frag6_gather(net, skb, user);
if (err && err != -EINPROGRESS)
goto out_free;
+
+ if (!err)
+ *defrag = true;
#else
err = -EOPNOTSUPP;
goto out_free;
#endif
}
+ *qdisc_skb_cb(skb) = cb;
skb_clear_hash(skb);
skb->ignore_df = 1;
return err;
@@ -748,6 +757,7 @@ static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
const struct nf_nat_range2 *range,
enum nf_nat_manip_type maniptype)
{
+ __be16 proto = skb_protocol(skb, true);
int hooknum, err = NF_ACCEPT;
/* See HOOK2MANIP(). */
@@ -759,14 +769,13 @@ static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
switch (ctinfo) {
case IP_CT_RELATED:
case IP_CT_RELATED_REPLY:
- if (skb->protocol == htons(ETH_P_IP) &&
+ if (proto == htons(ETH_P_IP) &&
ip_hdr(skb)->protocol == IPPROTO_ICMP) {
if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
hooknum))
err = NF_DROP;
goto out;
- } else if (IS_ENABLED(CONFIG_IPV6) &&
- skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) {
__be16 frag_off;
u8 nexthdr = ipv6_hdr(skb)->nexthdr;
int hdrlen = ipv6_skip_exthdr(skb,
@@ -914,6 +923,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
int nh_ofs, err, retval;
struct tcf_ct_params *p;
bool skip_add = false;
+ bool defrag = false;
struct nf_conn *ct;
u8 family;
@@ -925,6 +935,8 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
force = p->ct_action & TCA_CT_ACT_FORCE;
tmpl = p->tmpl;
+ tcf_lastuse_update(&c->tcf_tm);
+
if (clear) {
ct = nf_ct_get(skb, &ctinfo);
if (ct) {
@@ -944,7 +956,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
*/
nh_ofs = skb_network_offset(skb);
skb_pull_rcsum(skb, nh_ofs);
- err = tcf_ct_handle_fragments(net, skb, family, p->zone);
+ err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
if (err == -EINPROGRESS) {
retval = TC_ACT_STOLEN;
goto out;
@@ -1012,6 +1024,8 @@ out_push:
out:
tcf_action_update_bstats(&c->common, skb);
+ if (defrag)
+ qdisc_skb_cb(skb)->pkt_len = skb->len;
return retval;
drop:
@@ -1543,17 +1557,6 @@ static void __exit ct_cleanup_module(void)
destroy_workqueue(act_ct_wq);
}
-void tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie)
-{
- enum ip_conntrack_info ctinfo = cookie & NFCT_INFOMASK;
- struct nf_conn *ct;
-
- ct = (struct nf_conn *)(cookie & NFCT_PTRMASK);
- nf_conntrack_get(&ct->ct_general);
- nf_ct_set(skb, ct, ctinfo);
-}
-EXPORT_SYMBOL_GPL(tcf_ct_flow_table_restore_skb);
-
module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
@@ -1561,4 +1564,3 @@ MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");
-
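
IP defragmentation reuses skb->cb for its own state, which would clobber the qdisc_skb_cb that tc relies on. tcf_ct_handle_fragments() therefore snapshots the cb before ip_defrag()/nf_ct_frag6_gather() and restores it afterwards, and tcf_ct_act() refreshes pkt_len once reassembly has grown the skb. The bracket, condensed:

    struct qdisc_skb_cb cb = *qdisc_skb_cb(skb);   /* defrag scribbles on cb */

    err = ip_defrag(net, skb, user);               /* or nf_ct_frag6_gather() */
    if (err && err != -EINPROGRESS)
            goto out_free;

    *qdisc_skb_cb(skb) = cb;                       /* restore tc's view */
    if (!err)
            qdisc_skb_cb(skb)->pkt_len = skb->len; /* fragments were merged */
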
diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
index 19649623493b..b5042f3ea079 100644
--- a/net/sched/act_ctinfo.c
+++ b/net/sched/act_ctinfo.c
@@ -96,19 +96,22 @@ static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a,
action = READ_ONCE(ca->tcf_action);
wlen = skb_network_offset(skb);
- if (tc_skb_protocol(skb) == htons(ETH_P_IP)) {
+ switch (skb_protocol(skb, true)) {
+ case htons(ETH_P_IP):
wlen += sizeof(struct iphdr);
if (!pskb_may_pull(skb, wlen))
goto out;
proto = NFPROTO_IPV4;
- } else if (tc_skb_protocol(skb) == htons(ETH_P_IPV6)) {
+ break;
+ case htons(ETH_P_IPV6):
wlen += sizeof(struct ipv6hdr);
if (!pskb_may_pull(skb, wlen))
goto out;
proto = NFPROTO_IPV6;
- } else {
+ break;
+ default:
goto out;
}
diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c
index 9c628591f452..323ae7f6315d 100644
--- a/net/sched/act_gate.c
+++ b/net/sched/act_gate.c
@@ -32,7 +32,7 @@ static ktime_t gate_get_time(struct tcf_gate *gact)
return KTIME_MAX;
}
-static int gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
+static void gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
{
struct tcf_gate_params *param = &gact->param;
ktime_t now, base, cycle;
@@ -43,18 +43,13 @@ static int gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
if (ktime_after(base, now)) {
*start = base;
- return 0;
+ return;
}
cycle = param->tcfg_cycletime;
- /* cycle time should not be zero */
- if (!cycle)
- return -EFAULT;
-
n = div64_u64(ktime_sub_ns(now, base), cycle);
*start = ktime_add_ns(base, (n + 1) * cycle);
- return 0;
}
static void gate_start_timer(struct tcf_gate *gact, ktime_t start)
@@ -277,6 +272,27 @@ release_list:
return err;
}
+static void gate_setup_timer(struct tcf_gate *gact, u64 basetime,
+ enum tk_offsets tko, s32 clockid,
+ bool do_init)
+{
+ if (!do_init) {
+ if (basetime == gact->param.tcfg_basetime &&
+ tko == gact->tk_offset &&
+ clockid == gact->param.tcfg_clockid)
+ return;
+
+ spin_unlock_bh(&gact->tcf_lock);
+ hrtimer_cancel(&gact->hitimer);
+ spin_lock_bh(&gact->tcf_lock);
+ }
+ gact->param.tcfg_basetime = basetime;
+ gact->param.tcfg_clockid = clockid;
+ gact->tk_offset = tko;
+ hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT);
+ gact->hitimer.function = gate_timer_func;
+}
+
static int tcf_gate_init(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **a,
int ovr, int bind, bool rtnl_held,
@@ -287,12 +303,12 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
enum tk_offsets tk_offset = TK_OFFS_TAI;
struct nlattr *tb[TCA_GATE_MAX + 1];
struct tcf_chain *goto_ch = NULL;
+ u64 cycletime = 0, basetime = 0;
struct tcf_gate_params *p;
s32 clockid = CLOCK_TAI;
struct tcf_gate *gact;
struct tc_gate *parm;
int ret = 0, err;
- u64 basetime = 0;
u32 gflags = 0;
s32 prio = -1;
ktime_t start;
@@ -308,6 +324,27 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
if (!tb[TCA_GATE_PARMS])
return -EINVAL;
+ if (tb[TCA_GATE_CLOCKID]) {
+ clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);
+ switch (clockid) {
+ case CLOCK_REALTIME:
+ tk_offset = TK_OFFS_REAL;
+ break;
+ case CLOCK_MONOTONIC:
+ tk_offset = TK_OFFS_MAX;
+ break;
+ case CLOCK_BOOTTIME:
+ tk_offset = TK_OFFS_BOOT;
+ break;
+ case CLOCK_TAI:
+ tk_offset = TK_OFFS_TAI;
+ break;
+ default:
+ NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
+ return -EINVAL;
+ }
+ }
+
parm = nla_data(tb[TCA_GATE_PARMS]);
index = parm->index;
@@ -331,10 +368,6 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
tcf_idr_release(*a, bind);
return -EEXIST;
}
- if (ret == ACT_P_CREATED) {
- to_gate(*a)->param.tcfg_clockid = -1;
- INIT_LIST_HEAD(&(to_gate(*a)->param.entries));
- }
if (tb[TCA_GATE_PRIORITY])
prio = nla_get_s32(tb[TCA_GATE_PRIORITY]);
@@ -345,41 +378,19 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
if (tb[TCA_GATE_FLAGS])
gflags = nla_get_u32(tb[TCA_GATE_FLAGS]);
- if (tb[TCA_GATE_CLOCKID]) {
- clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);
- switch (clockid) {
- case CLOCK_REALTIME:
- tk_offset = TK_OFFS_REAL;
- break;
- case CLOCK_MONOTONIC:
- tk_offset = TK_OFFS_MAX;
- break;
- case CLOCK_BOOTTIME:
- tk_offset = TK_OFFS_BOOT;
- break;
- case CLOCK_TAI:
- tk_offset = TK_OFFS_TAI;
- break;
- default:
- NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
- goto release_idr;
- }
- }
+ gact = to_gate(*a);
+ if (ret == ACT_P_CREATED)
+ INIT_LIST_HEAD(&gact->param.entries);
err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
if (err < 0)
goto release_idr;
- gact = to_gate(*a);
-
spin_lock_bh(&gact->tcf_lock);
p = &gact->param;
- if (tb[TCA_GATE_CYCLE_TIME]) {
- p->tcfg_cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);
- if (!p->tcfg_cycletime_ext)
- goto chain_put;
- }
+ if (tb[TCA_GATE_CYCLE_TIME])
+ cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);
if (tb[TCA_GATE_ENTRY_LIST]) {
err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack);
@@ -387,35 +398,29 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
goto chain_put;
}
- if (!p->tcfg_cycletime) {
+ if (!cycletime) {
struct tcfg_gate_entry *entry;
ktime_t cycle = 0;
list_for_each_entry(entry, &p->entries, list)
cycle = ktime_add_ns(cycle, entry->interval);
- p->tcfg_cycletime = cycle;
+ cycletime = cycle;
+ if (!cycletime) {
+ err = -EINVAL;
+ goto chain_put;
+ }
}
+ p->tcfg_cycletime = cycletime;
if (tb[TCA_GATE_CYCLE_TIME_EXT])
p->tcfg_cycletime_ext =
nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]);
+ gate_setup_timer(gact, basetime, tk_offset, clockid,
+ ret == ACT_P_CREATED);
p->tcfg_priority = prio;
- p->tcfg_basetime = basetime;
- p->tcfg_clockid = clockid;
p->tcfg_flags = gflags;
-
- gact->tk_offset = tk_offset;
- hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT);
- gact->hitimer.function = gate_timer_func;
-
- err = gate_get_start_time(gact, &start);
- if (err < 0) {
- NL_SET_ERR_MSG(extack,
- "Internal error: failed get start time");
- release_entry_list(&p->entries);
- goto chain_put;
- }
+ gate_get_start_time(gact, &start);
gact->current_close_time = start;
gact->current_gate_status = GATE_ACT_GATE_OPEN | GATE_ACT_PENDING;
@@ -443,6 +448,13 @@ chain_put:
if (goto_ch)
tcf_chain_put_by_act(goto_ch);
release_idr:
+ /* action is not inserted in any list: it's safe to init hitimer
+ * without taking tcf_lock.
+ */
+ if (ret == ACT_P_CREATED)
+ gate_setup_timer(gact, gact->param.tcfg_basetime,
+ gact->tk_offset, gact->param.tcfg_clockid,
+ true);
tcf_idr_release(*a, bind);
return err;
}
@@ -453,9 +465,7 @@ static void tcf_gate_cleanup(struct tc_action *a)
struct tcf_gate_params *p;
p = &gact->param;
- if (p->tcfg_clockid != -1)
- hrtimer_cancel(&gact->hitimer);
-
+ hrtimer_cancel(&gact->hitimer);
release_entry_list(&p->entries);
}
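
gate_setup_timer() centralizes hrtimer state so it is consistent on every path: creation always arms the callback, reconfiguration cancels and re-inits only when base time, clockid or offset actually changed, and the error path re-runs setup for freshly created actions so tcf_gate_cleanup() can call hrtimer_cancel() unconditionally (the old tcfg_clockid == -1 sentinel goes away). Dropping tcf_lock around hrtimer_cancel() appears necessary because cancel waits for a running callback and the callback itself takes tcf_lock. The change path, with a hypothetical params_unchanged() standing in for the three comparisons:

    if (!do_init) {
            if (params_unchanged(gact, basetime, tko, clockid))
                    return;
            spin_unlock_bh(&gact->tcf_lock);   /* callback takes tcf_lock */
            hrtimer_cancel(&gact->hitimer);    /* waits for it to finish */
            spin_lock_bh(&gact->tcf_lock);
    }
    hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT);
    gact->hitimer.function = gate_timer_func;
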
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index be3f215cd027..8118e2640979 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -82,7 +82,7 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
goto drop;
break;
case TCA_MPLS_ACT_PUSH:
- new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb->protocol));
+ new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb_protocol(skb, true)));
if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len,
skb->dev && skb->dev->type == ARPHRD_ETHER))
goto drop;
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index b125b2be4467..b2b3faa57294 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -41,7 +41,7 @@ static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
if (params->flags & SKBEDIT_F_INHERITDSFIELD) {
int wlen = skb_network_offset(skb);
- switch (tc_skb_protocol(skb)) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
wlen += sizeof(struct iphdr);
if (!pskb_may_pull(skb, wlen))
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index a00a203b2ef5..4619cb3cb0a8 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -20,7 +20,6 @@
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
-#include <linux/rhashtable.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <net/net_namespace.h>
@@ -652,6 +651,7 @@ static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
&block->flow_block, tcf_block_shared(block),
&extack);
down_write(&block->cb_lock);
+ list_del(&block_cb->driver_list);
list_move(&block_cb->list, &bo.cb_list);
up_write(&block->cb_lock);
rtnl_lock();
@@ -671,25 +671,29 @@ static int tcf_block_offload_cmd(struct tcf_block *block,
struct netlink_ext_ack *extack)
{
struct flow_block_offload bo = {};
- int err;
tcf_block_offload_init(&bo, dev, command, ei->binder_type,
&block->flow_block, tcf_block_shared(block),
extack);
- if (dev->netdev_ops->ndo_setup_tc)
+ if (dev->netdev_ops->ndo_setup_tc) {
+ int err;
+
err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
- else
- err = flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, block,
- &bo, tc_block_indr_cleanup);
+ if (err < 0) {
+ if (err != -EOPNOTSUPP)
+ NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
+ return err;
+ }
- if (err < 0) {
- if (err != -EOPNOTSUPP)
- NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
- return err;
+ return tcf_block_setup(block, &bo);
}
- return tcf_block_setup(block, &bo);
+ flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, block, &bo,
+ tc_block_indr_cleanup);
+ tcf_block_setup(block, &bo);
+
+ return -EOPNOTSUPP;
}
static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
@@ -1533,7 +1537,7 @@ static inline int __tcf_classify(struct sk_buff *skb,
reclassify:
#endif
for (; tp; tp = rcu_dereference_bh(tp->next)) {
- __be16 protocol = tc_skb_protocol(skb);
+ __be16 protocol = skb_protocol(skb, false);
int err;
if (tp->protocol != protocol &&
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 80ae7b9fa90a..ab53a93b2f2b 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -80,7 +80,7 @@ static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
if (dst)
return ntohl(dst);
- return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
+ return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
}
static u32 flow_get_proto(const struct sk_buff *skb,
@@ -104,7 +104,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb,
if (flow->ports.ports)
return ntohs(flow->ports.dst);
- return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
+ return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
}
static u32 flow_get_iif(const struct sk_buff *skb)
@@ -151,7 +151,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
static u32 flow_get_nfct_src(const struct sk_buff *skb,
const struct flow_keys *flow)
{
- switch (tc_skb_protocol(skb)) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
return ntohl(CTTUPLE(skb, src.u3.ip));
case htons(ETH_P_IPV6):
@@ -164,7 +164,7 @@ fallback:
static u32 flow_get_nfct_dst(const struct sk_buff *skb,
const struct flow_keys *flow)
{
- switch (tc_skb_protocol(skb)) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
return ntohl(CTTUPLE(skb, dst.u3.ip));
case htons(ETH_P_IPV6):
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index b2da37286082..e30bd969fc48 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -313,7 +313,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
/* skb_flow_dissect() does not set n_proto in case an unknown
* protocol, so do it rather here.
*/
- skb_key.basic.n_proto = skb->protocol;
+ skb_key.basic.n_proto = skb_protocol(skb, false);
skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
fl_ct_info_to_flower_map,
diff --git a/net/sched/em_ipset.c b/net/sched/em_ipset.c
index df00566d327d..c95cf86fb431 100644
--- a/net/sched/em_ipset.c
+++ b/net/sched/em_ipset.c
@@ -59,7 +59,7 @@ static int em_ipset_match(struct sk_buff *skb, struct tcf_ematch *em,
};
int ret, network_offset;
- switch (tc_skb_protocol(skb)) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
state.pf = NFPROTO_IPV4;
if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
diff --git a/net/sched/em_ipt.c b/net/sched/em_ipt.c
index 18755d29fd15..3650117da47f 100644
--- a/net/sched/em_ipt.c
+++ b/net/sched/em_ipt.c
@@ -212,7 +212,7 @@ static int em_ipt_match(struct sk_buff *skb, struct tcf_ematch *em,
struct nf_hook_state state;
int ret;
- switch (tc_skb_protocol(skb)) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
return 0;
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index d99966a55c84..46254968d390 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -195,7 +195,7 @@ META_COLLECTOR(int_priority)
META_COLLECTOR(int_protocol)
{
/* Let userspace take care of the byte ordering */
- dst->value = tc_skb_protocol(skb);
+ dst->value = skb_protocol(skb, false);
}
META_COLLECTOR(int_pkttype)
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index ee12ca9f55b4..1c281cc81f57 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -553,16 +553,16 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
if (!p->link.q)
p->link.q = &noop_qdisc;
pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
+ p->link.vcc = NULL;
+ p->link.sock = NULL;
+ p->link.common.classid = sch->handle;
+ p->link.ref = 1;
err = tcf_block_get(&p->link.block, &p->link.filter_list, sch,
extack);
if (err)
return err;
- p->link.vcc = NULL;
- p->link.sock = NULL;
- p->link.common.classid = sch->handle;
- p->link.ref = 1;
tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
return 0;
}
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 60f8ae578819..ebaeec1e5c82 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -592,7 +592,7 @@ static bool cake_update_flowkeys(struct flow_keys *keys,
bool rev = !skb->_nfct, upd = false;
__be32 ip;
- if (tc_skb_protocol(skb) != htons(ETH_P_IP))
+ if (skb_protocol(skb, true) != htons(ETH_P_IP))
return false;
if (!nf_ct_get_tuple_skb(&tuple, skb))
@@ -1551,32 +1551,51 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
return idx + (tin << 16);
}
-static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
+static u8 cake_handle_diffserv(struct sk_buff *skb, bool wash)
{
- int wlen = skb_network_offset(skb);
+ const int offset = skb_network_offset(skb);
+ u16 *buf, buf_;
u8 dscp;
- switch (tc_skb_protocol(skb)) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
- wlen += sizeof(struct iphdr);
- if (!pskb_may_pull(skb, wlen) ||
- skb_try_make_writable(skb, wlen))
+ buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
+ if (unlikely(!buf))
return 0;
- dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
- if (wash && dscp)
+ /* ToS is in the second byte of iphdr */
+ dscp = ipv4_get_dsfield((struct iphdr *)buf) >> 2;
+
+ if (wash && dscp) {
+ const int wlen = offset + sizeof(struct iphdr);
+
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
+ return 0;
+
ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+ }
+
return dscp;
case htons(ETH_P_IPV6):
- wlen += sizeof(struct ipv6hdr);
- if (!pskb_may_pull(skb, wlen) ||
- skb_try_make_writable(skb, wlen))
+ buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
+ if (unlikely(!buf))
return 0;
- dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
- if (wash && dscp)
+ /* Traffic class is in the first and second bytes of ipv6hdr */
+ dscp = ipv6_get_dsfield((struct ipv6hdr *)buf) >> 2;
+
+ if (wash && dscp) {
+ const int wlen = offset + sizeof(struct ipv6hdr);
+
+ if (!pskb_may_pull(skb, wlen) ||
+ skb_try_make_writable(skb, wlen))
+ return 0;
+
ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+ }
+
return dscp;
case htons(ETH_P_ARP):
@@ -1593,14 +1612,17 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
{
struct cake_sched_data *q = qdisc_priv(sch);
u32 tin, mark;
+ bool wash;
u8 dscp;
/* Tin selection: Default to diffserv-based selection, allow overriding
- * using firewall marks or skb->priority.
+ * using firewall marks or skb->priority. Call DSCP parsing early if
+ * wash is enabled, otherwise defer it until we know the DSCP value
+ * is actually needed.
*/
- dscp = cake_handle_diffserv(skb,
- q->rate_flags & CAKE_FLAG_WASH);
mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft;
+ wash = !!(q->rate_flags & CAKE_FLAG_WASH);
+ if (wash)
+ dscp = cake_handle_diffserv(skb, wash);
if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
tin = 0;
@@ -1614,6 +1636,8 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
else {
+ if (!wash)
+ dscp = cake_handle_diffserv(skb, wash);
tin = q->tin_index[dscp];
if (unlikely(tin >= q->tin_cnt))
@@ -2691,7 +2715,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
qdisc_watchdog_init(&q->watchdog, sch);
if (opt) {
- int err = cake_change(sch, opt, extack);
+ err = cake_change(sch, opt, extack);
if (err)
return err;
@@ -3008,7 +3032,7 @@ static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl,
PUT_STAT_S32(BLUE_TIMER_US,
ktime_to_us(
ktime_sub(now,
- flow->cvars.blue_timer)));
+ flow->cvars.blue_timer)));
}
if (flow->cvars.dropping) {
PUT_STAT_S32(DROP_NEXT_US,
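
The cake_handle_diffserv() rewrite above is a read-mostly pattern worth noting: skb_header_pointer() copies just the first two bytes of the network header into a stack buffer for the DSCP read, and the packet is only pulled and made writable when washing actually has to rewrite the field. A minimal standalone sketch of the read side for IPv4 (the helper name is illustrative):

/* Read the IPv4 DSCP without forcing the skb to be linear or writable.
 * skb_header_pointer() returns a pointer into the skb when the bytes
 * are contiguous, or copies them into the caller's buffer otherwise.
 */
static u8 example_read_ipv4_dscp(const struct sk_buff *skb)
{
	u16 buf_;
	const u16 *buf;

	buf = skb_header_pointer(skb, skb_network_offset(skb),
				 sizeof(buf_), &buf_);
	if (unlikely(!buf))
		return 0;
	/* ToS/DSCP lives in the second byte of the IPv4 header */
	return ipv4_get_dsfield((const struct iphdr *)buf) >> 2;
}
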
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 05605b30bef3..2b88710994d7 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -210,7 +210,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (p->set_tc_index) {
int wlen = skb_network_offset(skb);
- switch (tc_skb_protocol(skb)) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
wlen += sizeof(struct iphdr);
if (!pskb_may_pull(skb, wlen) ||
@@ -303,7 +303,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
index = skb->tc_index & (p->indices - 1);
pr_debug("index %d->%d\n", skb->tc_index, index);
- switch (tc_skb_protocol(skb)) {
+ switch (skb_protocol(skb, true)) {
case htons(ETH_P_IP):
ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
p->mv[index].value);
@@ -320,7 +320,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
*/
if (p->mv[index].mask != 0xff || p->mv[index].value)
pr_warn("%s: unsupported protocol %d\n",
- __func__, ntohs(tc_skb_protocol(skb)));
+ __func__, ntohs(skb_protocol(skb, true)));
break;
}
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 8f06a808c59a..2fb76fc0cc31 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -1075,3 +1075,4 @@ module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Fair Queue Packet Scheduler");
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 436160be9c18..459a784056c0 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -721,3 +721,4 @@ module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Fair Queue CoDel discipline");
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index be35f03b657b..420ede875322 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -721,3 +721,4 @@ module_exit(hhf_module_exit)
MODULE_AUTHOR("Terry Lam");
MODULE_AUTHOR("Nandita Dukkipati");
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Heavy-Hitter Filter (HHF)");
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 689ef6f3ded8..2f1f0a378408 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -239,7 +239,7 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
char haddr[MAX_ADDR_LEN];
neigh_ha_snapshot(haddr, n, dev);
- err = dev_hard_header(skb, dev, ntohs(tc_skb_protocol(skb)),
+ err = dev_hard_header(skb, dev, ntohs(skb_protocol(skb, false)),
haddr, NULL, skb->len);
if (err < 0)
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 72315137d7e7..8d735461fa19 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1565,12 +1565,15 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
enum sctp_scope scope, gfp_t gfp)
{
+ struct sock *sk = asoc->base.sk;
int flags;
/* Use scoping rules to determine the subset of addresses from
* the endpoint.
*/
- flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
+ flags = (PF_INET6 == sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
+ if (!inet_v6_ipv6only(sk))
+ flags |= SCTP_ADDR4_ALLOWED;
if (asoc->peer.ipv4_address)
flags |= SCTP_ADDR4_PEERSUPP;
if (asoc->peer.ipv6_address)
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 53bc61537f44..701c5a4e441d 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -461,6 +461,7 @@ static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest,
* well as the remote peer.
*/
if ((((AF_INET == addr->sa.sa_family) &&
+ (flags & SCTP_ADDR4_ALLOWED) &&
(flags & SCTP_ADDR4_PEERSUPP))) ||
(((AF_INET6 == addr->sa.sa_family) &&
(flags & SCTP_ADDR6_ALLOWED) &&
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 092d1afdee0d..cde29f3c7fb3 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -148,7 +148,8 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp,
* sock as well as the remote peer.
*/
if (addr->a.sa.sa_family == AF_INET &&
- !(copy_flags & SCTP_ADDR4_PEERSUPP))
+ (!(copy_flags & SCTP_ADDR4_ALLOWED) ||
+ !(copy_flags & SCTP_ADDR4_PEERSUPP)))
continue;
if (addr->a.sa.sa_family == AF_INET6 &&
(!(copy_flags & SCTP_ADDR6_ALLOWED) ||
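
Taken together, the three SCTP hunks above make IPv4 address scoping respect IPv6-only sockets: sctp_assoc_set_bind_addr_from_ep() now sets SCTP_ADDR4_ALLOWED only when inet_v6_ipv6only() is false, and both copy paths require that flag on top of peer support. Condensed, the predicate for copying an IPv4 local address becomes (helper name hypothetical, flag names from the SCTP code):

/* An IPv4 local address is usable only when the local socket may use
 * IPv4 at all *and* the peer has advertised IPv4 support.
 */
static bool sctp_ipv4_addr_usable(int flags)
{
	return (flags & SCTP_ADDR4_ALLOWED) &&
	       (flags & SCTP_ADDR4_PEERSUPP);
}
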
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 67f7e71f9129..bda2536dd740 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -22,17 +22,11 @@
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
-/* Migrates chunks from stream queues to new stream queues if needed,
- * but not across associations. Also, removes those chunks to streams
- * higher than the new max.
- */
-static void sctp_stream_outq_migrate(struct sctp_stream *stream,
- struct sctp_stream *new, __u16 outcnt)
+static void sctp_stream_shrink_out(struct sctp_stream *stream, __u16 outcnt)
{
struct sctp_association *asoc;
struct sctp_chunk *ch, *temp;
struct sctp_outq *outq;
- int i;
asoc = container_of(stream, struct sctp_association, stream);
outq = &asoc->outqueue;
@@ -56,6 +50,19 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
sctp_chunk_free(ch);
}
+}
+
+/* Migrates chunks from stream queues to new stream queues if needed,
+ * but not across associations. Also removes chunks destined for
+ * streams higher than the new max.
+ */
+static void sctp_stream_outq_migrate(struct sctp_stream *stream,
+ struct sctp_stream *new, __u16 outcnt)
+{
+ int i;
+
+ if (stream->outcnt > outcnt)
+ sctp_stream_shrink_out(stream, outcnt);
if (new) {
/* Here we actually move the old ext stuff into the new
@@ -1037,11 +1044,13 @@ struct sctp_chunk *sctp_process_strreset_resp(
nums = ntohs(addstrm->number_of_streams);
number = stream->outcnt - nums;
- if (result == SCTP_STRRESET_PERFORMED)
+ if (result == SCTP_STRRESET_PERFORMED) {
for (i = number; i < stream->outcnt; i++)
SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
- else
+ } else {
+ sctp_stream_shrink_out(stream, number);
stream->outcnt = number;
+ }
*evp = sctp_ulpevent_make_stream_change_event(asoc, flags,
0, nums, GFP_ATOMIC);
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 903321543838..1163d51196da 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -126,8 +126,10 @@ EXPORT_SYMBOL_GPL(smc_proto6);
static void smc_restore_fallback_changes(struct smc_sock *smc)
{
- smc->clcsock->file->private_data = smc->sk.sk_socket;
- smc->clcsock->file = NULL;
+ if (smc->clcsock->file) { /* non-accepted sockets have no file yet */
+ smc->clcsock->file->private_data = smc->sk.sk_socket;
+ smc->clcsock->file = NULL;
+ }
}
static int __smc_release(struct smc_sock *smc)
@@ -352,7 +354,7 @@ static int smcr_lgr_reg_rmbs(struct smc_link *link,
*/
mutex_lock(&lgr->llc_conf_mutex);
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
- if (lgr->lnk[i].state != SMC_LNK_ACTIVE)
+ if (!smc_link_active(&lgr->lnk[i]))
continue;
rc = smcr_link_reg_rmb(&lgr->lnk[i], rmb_desc);
if (rc)
@@ -632,7 +634,9 @@ static int smc_connect_rdma(struct smc_sock *smc,
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
struct smc_link *l = &smc->conn.lgr->lnk[i];
- if (l->peer_qpn == ntoh24(aclc->qpn)) {
+ if (l->peer_qpn == ntoh24(aclc->qpn) &&
+ !memcmp(l->peer_gid, &aclc->lcl.gid, SMC_GID_SIZE) &&
+ !memcmp(l->peer_mac, &aclc->lcl.mac, sizeof(l->peer_mac))) {
link = l;
break;
}
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index a47e8855e045..ce468ff62a19 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -66,9 +66,13 @@ int smc_cdc_get_free_slot(struct smc_connection *conn,
rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf,
wr_rdma_buf,
(struct smc_wr_tx_pend_priv **)pend);
- if (conn->killed)
+ if (conn->killed) {
/* abnormal termination */
+ if (!rc)
+ smc_wr_tx_put_slot(link,
+ (struct smc_wr_tx_pend_priv *)pend);
rc = -EPIPE;
+ }
return rc;
}
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index d5627df24215..779f4142a11d 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -27,6 +27,7 @@
#define SMCR_CLC_ACCEPT_CONFIRM_LEN 68
#define SMCD_CLC_ACCEPT_CONFIRM_LEN 48
+#define SMC_CLC_RECV_BUF_LEN 100
/* eye catcher "SMCR" EBCDIC for CLC messages */
static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'};
@@ -36,7 +37,7 @@ static const char SMCD_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xc4'};
/* check if received message has a correct header length and contains valid
* leading and trailing eyecatchers
*/
-static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
+static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm, bool check_trl)
{
struct smc_clc_msg_proposal_prefix *pclc_prfx;
struct smc_clc_msg_accept_confirm *clc;
@@ -49,12 +50,9 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
return false;
switch (clcm->type) {
case SMC_CLC_PROPOSAL:
- if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D &&
- clcm->path != SMC_TYPE_B)
- return false;
pclc = (struct smc_clc_msg_proposal *)clcm;
pclc_prfx = smc_clc_proposal_get_prefix(pclc);
- if (ntohs(pclc->hdr.length) !=
+ if (ntohs(pclc->hdr.length) <
sizeof(*pclc) + ntohs(pclc->iparea_offset) +
sizeof(*pclc_prfx) +
pclc_prfx->ipv6_prefixes_cnt *
@@ -86,7 +84,8 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
default:
return false;
}
- if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
+ if (check_trl &&
+ memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
memcmp(trl->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)))
return false;
return true;
@@ -276,7 +275,8 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
struct msghdr msg = {NULL, 0};
int reason_code = 0;
struct kvec vec = {buf, buflen};
- int len, datlen;
+ int len, datlen, recvlen;
+ bool check_trl = true;
int krflags;
/* peek the first few bytes to determine length of data to receive
@@ -320,10 +320,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
}
datlen = ntohs(clcm->length);
if ((len < sizeof(struct smc_clc_msg_hdr)) ||
- (datlen > buflen) ||
- (clcm->version != SMC_CLC_V1) ||
- (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D &&
- clcm->path != SMC_TYPE_B) ||
+ (clcm->version < SMC_CLC_V1) ||
((clcm->type != SMC_CLC_DECLINE) &&
(clcm->type != expected_type))) {
smc->sk.sk_err = EPROTO;
@@ -331,16 +328,38 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
goto out;
}
+ if (clcm->type == SMC_CLC_PROPOSAL && clcm->path == SMC_TYPE_N)
+ reason_code = SMC_CLC_DECL_VERSMISMAT; /* just V2 offered */
+
/* receive the complete CLC message */
memset(&msg, 0, sizeof(struct msghdr));
- iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, datlen);
+ if (datlen > buflen) {
+ check_trl = false;
+ recvlen = buflen;
+ } else {
+ recvlen = datlen;
+ }
+ iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen);
krflags = MSG_WAITALL;
len = sock_recvmsg(smc->clcsock, &msg, krflags);
- if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) {
+ if (len < recvlen || !smc_clc_msg_hdr_valid(clcm, check_trl)) {
smc->sk.sk_err = EPROTO;
reason_code = -EPROTO;
goto out;
}
+ datlen -= len;
+ while (datlen) {
+ u8 tmp[SMC_CLC_RECV_BUF_LEN];
+
+ vec.iov_base = &tmp;
+ vec.iov_len = SMC_CLC_RECV_BUF_LEN;
+ /* receive remaining proposal message */
+ recvlen = datlen > SMC_CLC_RECV_BUF_LEN ?
+ SMC_CLC_RECV_BUF_LEN : datlen;
+ iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen);
+ len = sock_recvmsg(smc->clcsock, &msg, krflags);
+ datlen -= len;
+ }
if (clcm->type == SMC_CLC_DECLINE) {
struct smc_clc_msg_decline *dclc;
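
The receive path above now tolerates CLC messages longer than the local buffer (later SMC versions may append fields): it receives at most buflen bytes, skips the trailer check in that case, and drains the surplus through a 100-byte bounce buffer. A userspace analogue of that drain loop, as a hedged sketch using plain sockets instead of sock_recvmsg() (names illustrative):

#include <sys/socket.h>
#include <sys/types.h>

/* Drain a known number of surplus bytes from a stream socket through a
 * fixed-size bounce buffer, mirroring the SMC_CLC_RECV_BUF_LEN loop.
 */
static int drain_surplus(int fd, size_t remaining)
{
	char tmp[100];

	while (remaining) {
		size_t want = remaining > sizeof(tmp) ? sizeof(tmp)
						      : remaining;
		ssize_t n = recv(fd, tmp, want, MSG_WAITALL);

		if (n <= 0)
			return -1;	/* error or peer closed */
		remaining -= (size_t)n;
	}
	return 0;
}
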
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
index 465876701b75..76c2b150d040 100644
--- a/net/smc/smc_clc.h
+++ b/net/smc/smc_clc.h
@@ -25,6 +25,7 @@
#define SMC_CLC_V1 0x1 /* SMC version */
#define SMC_TYPE_R 0 /* SMC-R only */
#define SMC_TYPE_D 1 /* SMC-D only */
+#define SMC_TYPE_N 2 /* neither SMC-R nor SMC-D */
#define SMC_TYPE_B 3 /* SMC-R and SMC-D */
#define CLC_WAIT_TIME (6 * HZ) /* max. wait time on clcsock */
#define CLC_WAIT_TIME_SHORT HZ /* short wait time on clcsock */
@@ -46,6 +47,7 @@
#define SMC_CLC_DECL_ISMVLANERR 0x03090000 /* err to reg vlan id on ism dev */
#define SMC_CLC_DECL_NOACTLINK 0x030a0000 /* no active smc-r link in lgr */
#define SMC_CLC_DECL_NOSRVLINK 0x030b0000 /* SMC-R link from srv not found */
+#define SMC_CLC_DECL_VERSMISMAT 0x030c0000 /* SMC version mismatch */
#define SMC_CLC_DECL_SYNCERR 0x04000000 /* synchronization error */
#define SMC_CLC_DECL_PEERDECL 0x05000000 /* peer declined during handshake */
#define SMC_CLC_DECL_INTERR 0x09990000 /* internal error */
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 7964a21e5e6f..f82a2e599917 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -15,6 +15,7 @@
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/reboot.h>
+#include <linux/mutex.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <rdma/ib_verbs.h>
@@ -44,18 +45,10 @@ static struct smc_lgr_list smc_lgr_list = { /* established link groups */
static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */
static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);
-struct smc_ib_up_work {
- struct work_struct work;
- struct smc_link_group *lgr;
- struct smc_ib_device *smcibdev;
- u8 ibport;
-};
-
static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
struct smc_buf_desc *buf_desc);
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft);
-static void smc_link_up_work(struct work_struct *work);
static void smc_link_down_work(struct work_struct *work);
/* return head of link group list and its lock for a given link group */
@@ -247,7 +240,8 @@ static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
if (smc_link_usable(lnk))
lnk->state = SMC_LNK_INACTIVE;
}
- wake_up_interruptible_all(&lgr->llc_waiter);
+ wake_up_all(&lgr->llc_msg_waiter);
+ wake_up_all(&lgr->llc_flow_waiter);
}
static void smc_lgr_free(struct smc_link_group *lgr);
@@ -324,7 +318,6 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
get_device(&ini->ib_dev->ibdev->dev);
atomic_inc(&ini->ib_dev->lnk_cnt);
- lnk->state = SMC_LNK_ACTIVATING;
lnk->link_id = smcr_next_link_id(lgr);
lnk->lgr = lgr;
lnk->link_idx = link_idx;
@@ -360,6 +353,7 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
rc = smc_wr_create_link(lnk);
if (rc)
goto destroy_qp;
+ lnk->state = SMC_LNK_ACTIVATING;
return 0;
destroy_qp:
@@ -450,7 +444,7 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
}
smc->conn.lgr = lgr;
spin_lock_bh(lgr_lock);
- list_add(&lgr->list, lgr_list);
+ list_add_tail(&lgr->list, lgr_list);
spin_unlock_bh(lgr_lock);
return 0;
@@ -548,8 +542,7 @@ struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
smc_wr_wakeup_tx_wait(from_lnk);
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
- if (lgr->lnk[i].state != SMC_LNK_ACTIVE ||
- i == from_lnk->link_idx)
+ if (!smc_link_active(&lgr->lnk[i]) || i == from_lnk->link_idx)
continue;
if (is_dev_err && from_lnk->smcibdev == lgr->lnk[i].smcibdev &&
from_lnk->ibport == lgr->lnk[i].ibport) {
@@ -1104,66 +1097,23 @@ static void smc_conn_abort_work(struct work_struct *work)
sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */
}
-/* link is up - establish alternate link if applicable */
-static void smcr_link_up(struct smc_link_group *lgr,
- struct smc_ib_device *smcibdev, u8 ibport)
-{
- struct smc_link *link = NULL;
-
- if (list_empty(&lgr->list) ||
- lgr->type == SMC_LGR_SYMMETRIC ||
- lgr->type == SMC_LGR_ASYMMETRIC_PEER)
- return;
-
- if (lgr->role == SMC_SERV) {
- /* trigger local add link processing */
- link = smc_llc_usable_link(lgr);
- if (!link)
- return;
- smc_llc_srv_add_link_local(link);
- } else {
- /* invite server to start add link processing */
- u8 gid[SMC_GID_SIZE];
-
- if (smc_ib_determine_gid(smcibdev, ibport, lgr->vlan_id, gid,
- NULL))
- return;
- if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
- /* some other llc task is ongoing */
- wait_event_interruptible_timeout(lgr->llc_waiter,
- (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
- SMC_LLC_WAIT_TIME);
- }
- if (list_empty(&lgr->list) ||
- !smc_ib_port_active(smcibdev, ibport))
- return; /* lgr or device no longer active */
- link = smc_llc_usable_link(lgr);
- if (!link)
- return;
- smc_llc_send_add_link(link, smcibdev->mac[ibport - 1], gid,
- NULL, SMC_LLC_REQ);
- }
-}
-
void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
{
- struct smc_ib_up_work *ib_work;
struct smc_link_group *lgr, *n;
list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
+ struct smc_link *link;
+
if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
SMC_MAX_PNETID_LEN) ||
lgr->type == SMC_LGR_SYMMETRIC ||
lgr->type == SMC_LGR_ASYMMETRIC_PEER)
continue;
- ib_work = kmalloc(sizeof(*ib_work), GFP_KERNEL);
- if (!ib_work)
- continue;
- INIT_WORK(&ib_work->work, smc_link_up_work);
- ib_work->lgr = lgr;
- ib_work->smcibdev = smcibdev;
- ib_work->ibport = ibport;
- schedule_work(&ib_work->work);
+
+ /* trigger local add link processing */
+ link = smc_llc_usable_link(lgr);
+ if (link)
+ smc_llc_add_link_local(link);
}
}
@@ -1195,13 +1145,19 @@ static void smcr_link_down(struct smc_link *lnk)
if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
/* another llc task is ongoing */
mutex_unlock(&lgr->llc_conf_mutex);
- wait_event_interruptible_timeout(lgr->llc_waiter,
- (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
+ wait_event_timeout(lgr->llc_flow_waiter,
+ (list_empty(&lgr->list) ||
+ lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
SMC_LLC_WAIT_TIME);
mutex_lock(&lgr->llc_conf_mutex);
}
- smc_llc_send_delete_link(to_lnk, del_link_id, SMC_LLC_REQ, true,
- SMC_LLC_DEL_LOST_PATH);
+ if (!list_empty(&lgr->list)) {
+ smc_llc_send_delete_link(to_lnk, del_link_id,
+ SMC_LLC_REQ, true,
+ SMC_LLC_DEL_LOST_PATH);
+ smcr_link_clear(lnk, true);
+ }
+ wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */
}
}
@@ -1240,20 +1196,6 @@ void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport)
}
}
-static void smc_link_up_work(struct work_struct *work)
-{
- struct smc_ib_up_work *ib_work = container_of(work,
- struct smc_ib_up_work,
- work);
- struct smc_link_group *lgr = ib_work->lgr;
-
- if (list_empty(&lgr->list))
- goto out;
- smcr_link_up(lgr, ib_work->smcibdev, ib_work->ibport);
-out:
- kfree(ib_work);
-}
-
static void smc_link_down_work(struct work_struct *work)
{
struct smc_link *link = container_of(work, struct smc_link,
@@ -1262,7 +1204,7 @@ static void smc_link_down_work(struct work_struct *work)
if (list_empty(&lgr->list))
return;
- wake_up_interruptible_all(&lgr->llc_waiter);
+ wake_up_all(&lgr->llc_msg_waiter);
mutex_lock(&lgr->llc_conf_mutex);
smcr_link_down(link);
mutex_unlock(&lgr->llc_conf_mutex);
@@ -1326,7 +1268,7 @@ static bool smcr_lgr_match(struct smc_link_group *lgr,
return false;
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
- if (lgr->lnk[i].state != SMC_LNK_ACTIVE)
+ if (!smc_link_active(&lgr->lnk[i]))
continue;
if ((lgr->role == SMC_SERV || lgr->lnk[i].peer_qpn == clcqpn) &&
!memcmp(lgr->lnk[i].peer_gid, &lcl->gid, SMC_GID_SIZE) &&
@@ -1369,7 +1311,7 @@ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) &&
!lgr->sync_err &&
lgr->vlan_id == ini->vlan_id &&
- (role == SMC_CLNT ||
+ (role == SMC_CLNT || ini->is_smcd ||
lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) {
/* link group found */
ini->cln_first_contact = SMC_REUSE_CONTACT;
@@ -1774,14 +1716,14 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
{
- if (!conn->lgr || conn->lgr->is_smcd || !smc_link_usable(conn->lnk))
+ if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk))
return;
smc_ib_sync_sg_for_cpu(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
}
void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
{
- if (!conn->lgr || conn->lgr->is_smcd || !smc_link_usable(conn->lnk))
+ if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk))
return;
smc_ib_sync_sg_for_device(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
}
@@ -1793,7 +1735,7 @@ void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
if (!conn->lgr || conn->lgr->is_smcd)
return;
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
- if (!smc_link_usable(&conn->lgr->lnk[i]))
+ if (!smc_link_active(&conn->lgr->lnk[i]))
continue;
smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc,
DMA_FROM_DEVICE);
@@ -1807,7 +1749,7 @@ void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
if (!conn->lgr || conn->lgr->is_smcd)
return;
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
- if (!smc_link_usable(&conn->lgr->lnk[i]))
+ if (!smc_link_active(&conn->lgr->lnk[i]))
continue;
smc_ib_sync_sg_for_device(&conn->lgr->lnk[i], conn->rmb_desc,
DMA_FROM_DEVICE);
@@ -1830,8 +1772,12 @@ int smc_buf_create(struct smc_sock *smc, bool is_smcd)
return rc;
/* create rmb */
rc = __smc_buf_create(smc, is_smcd, true);
- if (rc)
+ if (rc) {
+ mutex_lock(&smc->conn.lgr->sndbufs_lock);
+ list_del(&smc->conn.sndbuf_desc->list);
+ mutex_unlock(&smc->conn.lgr->sndbufs_lock);
smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
+ }
return rc;
}
@@ -1955,20 +1901,20 @@ static void smc_core_going_away(void)
struct smc_ib_device *smcibdev;
struct smcd_dev *smcd;
- spin_lock(&smc_ib_devices.lock);
+ mutex_lock(&smc_ib_devices.mutex);
list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
int i;
for (i = 0; i < SMC_MAX_PORTS; i++)
set_bit(i, smcibdev->ports_going_away);
}
- spin_unlock(&smc_ib_devices.lock);
+ mutex_unlock(&smc_ib_devices.mutex);
- spin_lock(&smcd_dev_list.lock);
+ mutex_lock(&smcd_dev_list.mutex);
list_for_each_entry(smcd, &smcd_dev_list.list, list) {
smcd->going_away = 1;
}
- spin_unlock(&smcd_dev_list.lock);
+ mutex_unlock(&smcd_dev_list.mutex);
}
/* Clean up all SMC link groups */
@@ -1980,10 +1926,10 @@ static void smc_lgrs_shutdown(void)
smc_smcr_terminate_all(NULL);
- spin_lock(&smcd_dev_list.lock);
+ mutex_lock(&smcd_dev_list.mutex);
list_for_each_entry(smcd, &smcd_dev_list.list, list)
smc_smcd_terminate_all(smcd);
- spin_unlock(&smcd_dev_list.lock);
+ mutex_unlock(&smcd_dev_list.mutex);
}
static int smc_core_reboot_event(struct notifier_block *this,
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 86d160f0d187..1c4d5439d0ff 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -262,8 +262,10 @@ struct smc_link_group {
struct work_struct llc_del_link_work;
struct work_struct llc_event_work;
/* llc event worker */
- wait_queue_head_t llc_waiter;
+ wait_queue_head_t llc_flow_waiter;
/* w4 next llc event */
+ wait_queue_head_t llc_msg_waiter;
+ /* w4 next llc msg */
struct smc_llc_flow llc_flow_lcl;
/* llc local control field */
struct smc_llc_flow llc_flow_rmt;
@@ -347,6 +349,11 @@ static inline bool smc_link_usable(struct smc_link *lnk)
return true;
}
+static inline bool smc_link_active(struct smc_link *lnk)
+{
+ return lnk->state == SMC_LNK_ACTIVE;
+}
+
struct smc_sock;
struct smc_clc_msg_accept_confirm;
struct smc_clc_msg_local;
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 562a52d01ad1..1c314dbdc7fa 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -16,6 +16,7 @@
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/wait.h>
+#include <linux/mutex.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
@@ -33,7 +34,7 @@
#define SMC_QP_RNR_RETRY 7 /* 7: infinite */
struct smc_ib_devices smc_ib_devices = { /* smc-registered ib devices */
- .lock = __SPIN_LOCK_UNLOCKED(smc_ib_devices.lock),
+ .mutex = __MUTEX_INITIALIZER(smc_ib_devices.mutex),
.list = LIST_HEAD_INIT(smc_ib_devices.list),
};
@@ -505,6 +506,10 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
int cqe_size_order, smc_order;
long rc;
+ mutex_lock(&smcibdev->mutex);
+ rc = 0;
+ if (smcibdev->initialized)
+ goto out;
/* the calculated number of cq entries fits to mlx5 cq allocation */
cqe_size_order = cache_line_size() == 128 ? 7 : 6;
smc_order = MAX_ORDER - cqe_size_order - 1;
@@ -516,7 +521,7 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
if (IS_ERR(smcibdev->roce_cq_send)) {
smcibdev->roce_cq_send = NULL;
- return rc;
+ goto out;
}
smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
smc_wr_rx_cq_handler, NULL,
@@ -528,21 +533,26 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
}
smc_wr_add_dev(smcibdev);
smcibdev->initialized = 1;
- return rc;
+ goto out;
err:
ib_destroy_cq(smcibdev->roce_cq_send);
+out:
+ mutex_unlock(&smcibdev->mutex);
return rc;
}
static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
{
+ mutex_lock(&smcibdev->mutex);
if (!smcibdev->initialized)
- return;
+ goto out;
smcibdev->initialized = 0;
ib_destroy_cq(smcibdev->roce_cq_recv);
ib_destroy_cq(smcibdev->roce_cq_send);
smc_wr_remove_dev(smcibdev);
+out:
+ mutex_unlock(&smcibdev->mutex);
}
static struct ib_client smc_ib_client;
@@ -565,9 +575,10 @@ static int smc_ib_add_dev(struct ib_device *ibdev)
INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);
atomic_set(&smcibdev->lnk_cnt, 0);
init_waitqueue_head(&smcibdev->lnks_deleted);
- spin_lock(&smc_ib_devices.lock);
+ mutex_init(&smcibdev->mutex);
+ mutex_lock(&smc_ib_devices.mutex);
list_add_tail(&smcibdev->list, &smc_ib_devices.list);
- spin_unlock(&smc_ib_devices.lock);
+ mutex_unlock(&smc_ib_devices.mutex);
ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
smc_ib_global_event_handler);
@@ -602,9 +613,9 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
{
struct smc_ib_device *smcibdev = client_data;
- spin_lock(&smc_ib_devices.lock);
+ mutex_lock(&smc_ib_devices.mutex);
list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
- spin_unlock(&smc_ib_devices.lock);
+ mutex_unlock(&smc_ib_devices.mutex);
pr_warn_ratelimited("smc: removing ib device %s\n",
smcibdev->ibdev->name);
smc_smcr_terminate_all(smcibdev);
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index e6a696ae15f3..2ce481187dd0 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/if_ether.h>
+#include <linux/mutex.h>
#include <linux/wait.h>
#include <rdma/ib_verbs.h>
#include <net/smc.h>
@@ -25,7 +26,7 @@
struct smc_ib_devices { /* list of smc ib devices definition */
struct list_head list;
- spinlock_t lock; /* protects list of smc ib devices */
+ struct mutex mutex; /* protects list of smc ib devices */
};
extern struct smc_ib_devices smc_ib_devices; /* list of smc ib devices */
@@ -51,6 +52,7 @@ struct smc_ib_device { /* ib-device infos for smc */
DECLARE_BITMAP(ports_going_away, SMC_MAX_PORTS);
atomic_t lnk_cnt; /* number of links on ibdev */
wait_queue_head_t lnks_deleted; /* wait 4 removal of all links*/
+ struct mutex mutex; /* protect dev setup+cleanup */
};
struct smc_buf_desc;
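
The smc_ib_devices and smcd_dev_list lock conversions above (and the matching ones in smc_ism.c and smc_pnet.c below) replace a spinlock with a mutex because code that runs while holding the lock, such as terminating link groups or flushing event work, can now sleep. A generic sketch of the pattern, with illustrative types:

#include <linux/list.h>
#include <linux/mutex.h>

struct example_dev_list {
	struct list_head list;
	struct mutex mutex;	/* protects list; holders may sleep */
};

static struct example_dev_list example_devs = {
	.list  = LIST_HEAD_INIT(example_devs.list),
	.mutex = __MUTEX_INITIALIZER(example_devs.mutex),
};

static void example_walk(void (*fn)(struct list_head *))
{
	struct list_head *pos;

	mutex_lock(&example_devs.mutex);
	list_for_each(pos, &example_devs.list)
		fn(pos);	/* may block, e.g. flush a workqueue */
	mutex_unlock(&example_devs.mutex);
}
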
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
index 91f85fc09fb8..998c525de785 100644
--- a/net/smc/smc_ism.c
+++ b/net/smc/smc_ism.c
@@ -7,6 +7,7 @@
*/
#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/page.h>
@@ -17,7 +18,7 @@
struct smcd_dev_list smcd_dev_list = {
.list = LIST_HEAD_INIT(smcd_dev_list.list),
- .lock = __SPIN_LOCK_UNLOCKED(smcd_dev_list.lock)
+ .mutex = __MUTEX_INITIALIZER(smcd_dev_list.mutex)
};
/* Test if an ISM communication is possible. */
@@ -317,9 +318,9 @@ EXPORT_SYMBOL_GPL(smcd_alloc_dev);
int smcd_register_dev(struct smcd_dev *smcd)
{
- spin_lock(&smcd_dev_list.lock);
+ mutex_lock(&smcd_dev_list.mutex);
list_add_tail(&smcd->list, &smcd_dev_list.list);
- spin_unlock(&smcd_dev_list.lock);
+ mutex_unlock(&smcd_dev_list.mutex);
pr_warn_ratelimited("smc: adding smcd device %s with pnetid %.16s%s\n",
dev_name(&smcd->dev), smcd->pnetid,
@@ -333,9 +334,9 @@ void smcd_unregister_dev(struct smcd_dev *smcd)
{
pr_warn_ratelimited("smc: removing smcd device %s\n",
dev_name(&smcd->dev));
- spin_lock(&smcd_dev_list.lock);
+ mutex_lock(&smcd_dev_list.mutex);
list_del_init(&smcd->list);
- spin_unlock(&smcd_dev_list.lock);
+ mutex_unlock(&smcd_dev_list.mutex);
smcd->going_away = 1;
smc_smcd_terminate_all(smcd);
flush_workqueue(smcd->event_wq);
diff --git a/net/smc/smc_ism.h b/net/smc/smc_ism.h
index 4da946cbfa29..81cc4537efd3 100644
--- a/net/smc/smc_ism.h
+++ b/net/smc/smc_ism.h
@@ -10,12 +10,13 @@
#define SMCD_ISM_H
#include <linux/uio.h>
+#include <linux/mutex.h>
#include "smc.h"
struct smcd_dev_list { /* List of SMCD devices */
struct list_head list;
- spinlock_t lock; /* Protects list of devices */
+ struct mutex mutex; /* Protects list of devices */
};
extern struct smcd_dev_list smcd_dev_list; /* list of smcd devices */
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index 391237b601fe..df5b0a6ea848 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -186,6 +186,26 @@ static inline void smc_llc_flow_qentry_set(struct smc_llc_flow *flow,
flow->qentry = qentry;
}
+static void smc_llc_flow_parallel(struct smc_link_group *lgr, u8 flow_type,
+ struct smc_llc_qentry *qentry)
+{
+ u8 msg_type = qentry->msg.raw.hdr.common.type;
+
+ if ((msg_type == SMC_LLC_ADD_LINK || msg_type == SMC_LLC_DELETE_LINK) &&
+ flow_type != msg_type && !lgr->delayed_event) {
+ lgr->delayed_event = qentry;
+ return;
+ }
+ /* drop parallel or already-in-progress llc requests */
+ if (flow_type != msg_type)
+ pr_warn_once("smc: SMC-R lg %*phN dropped parallel "
+ "LLC msg: msg %d flow %d role %d\n",
+ SMC_LGR_ID_SIZE, &lgr->id,
+ qentry->msg.raw.hdr.common.type,
+ flow_type, lgr->role);
+ kfree(qentry);
+}
+
/* try to start a new llc flow, initiated by an incoming llc msg */
static bool smc_llc_flow_start(struct smc_llc_flow *flow,
struct smc_llc_qentry *qentry)
@@ -195,14 +215,7 @@ static bool smc_llc_flow_start(struct smc_llc_flow *flow,
spin_lock_bh(&lgr->llc_flow_lock);
if (flow->type) {
/* a flow is already active */
- if ((qentry->msg.raw.hdr.common.type == SMC_LLC_ADD_LINK ||
- qentry->msg.raw.hdr.common.type == SMC_LLC_DELETE_LINK) &&
- !lgr->delayed_event) {
- lgr->delayed_event = qentry;
- } else {
- /* forget this llc request */
- kfree(qentry);
- }
+ smc_llc_flow_parallel(lgr, flow->type, qentry);
spin_unlock_bh(&lgr->llc_flow_lock);
return false;
}
@@ -222,8 +235,8 @@ static bool smc_llc_flow_start(struct smc_llc_flow *flow,
}
if (qentry == lgr->delayed_event)
lgr->delayed_event = NULL;
- spin_unlock_bh(&lgr->llc_flow_lock);
smc_llc_flow_qentry_set(flow, qentry);
+ spin_unlock_bh(&lgr->llc_flow_lock);
return true;
}
@@ -251,11 +264,11 @@ again:
return 0;
}
spin_unlock_bh(&lgr->llc_flow_lock);
- rc = wait_event_interruptible_timeout(lgr->llc_waiter,
- (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
- (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
- lgr->llc_flow_rmt.type == allowed_remote)),
- SMC_LLC_WAIT_TIME);
+ rc = wait_event_timeout(lgr->llc_flow_waiter, (list_empty(&lgr->list) ||
+ (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
+ (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
+ lgr->llc_flow_rmt.type == allowed_remote))),
+ SMC_LLC_WAIT_TIME * 10);
if (!rc)
return -ETIMEDOUT;
goto again;
@@ -272,7 +285,7 @@ void smc_llc_flow_stop(struct smc_link_group *lgr, struct smc_llc_flow *flow)
flow == &lgr->llc_flow_lcl)
schedule_work(&lgr->llc_event_work);
else
- wake_up_interruptible(&lgr->llc_waiter);
+ wake_up(&lgr->llc_flow_waiter);
}
/* lnk is optional and used for early wakeup when link goes down, useful in
@@ -283,26 +296,32 @@ struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr,
int time_out, u8 exp_msg)
{
struct smc_llc_flow *flow = &lgr->llc_flow_lcl;
+ u8 rcv_msg;
- wait_event_interruptible_timeout(lgr->llc_waiter,
- (flow->qentry ||
- (lnk && !smc_link_usable(lnk)) ||
- list_empty(&lgr->list)),
- time_out);
+ wait_event_timeout(lgr->llc_msg_waiter,
+ (flow->qentry ||
+ (lnk && !smc_link_usable(lnk)) ||
+ list_empty(&lgr->list)),
+ time_out);
if (!flow->qentry ||
(lnk && !smc_link_usable(lnk)) || list_empty(&lgr->list)) {
smc_llc_flow_qentry_del(flow);
goto out;
}
- if (exp_msg && flow->qentry->msg.raw.hdr.common.type != exp_msg) {
+ rcv_msg = flow->qentry->msg.raw.hdr.common.type;
+ if (exp_msg && rcv_msg != exp_msg) {
if (exp_msg == SMC_LLC_ADD_LINK &&
- flow->qentry->msg.raw.hdr.common.type ==
- SMC_LLC_DELETE_LINK) {
+ rcv_msg == SMC_LLC_DELETE_LINK) {
/* flow_start will delay the unexpected msg */
smc_llc_flow_start(&lgr->llc_flow_lcl,
smc_llc_flow_qentry_clr(flow));
return NULL;
}
+ pr_warn_once("smc: SMC-R lg %*phN dropped unexpected LLC msg: "
+ "msg %d exp %d flow %d role %d flags %x\n",
+ SMC_LGR_ID_SIZE, &lgr->id, rcv_msg, exp_msg,
+ flow->type, lgr->role,
+ flow->qentry->msg.raw.hdr.flags);
smc_llc_flow_qentry_del(flow);
}
out:
@@ -409,7 +428,7 @@ static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
rtok_ix = 1;
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
link = &send_link->lgr->lnk[i];
- if (link->state == SMC_LNK_ACTIVE && link != send_link) {
+ if (smc_link_active(link) && link != send_link) {
rkeyllc->rtoken[rtok_ix].link_id = link->link_id;
rkeyllc->rtoken[rtok_ix].rmb_key =
htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
@@ -876,6 +895,36 @@ out:
return rc;
}
+/* as an SMC client, invite server to start the add_link processing */
+static void smc_llc_cli_add_link_invite(struct smc_link *link,
+ struct smc_llc_qentry *qentry)
+{
+ struct smc_link_group *lgr = smc_get_lgr(link);
+ struct smc_init_info ini;
+
+ if (lgr->type == SMC_LGR_SYMMETRIC ||
+ lgr->type == SMC_LGR_ASYMMETRIC_PEER)
+ goto out;
+
+ ini.vlan_id = lgr->vlan_id;
+ smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
+ if (!ini.ib_dev)
+ goto out;
+
+ smc_llc_send_add_link(link, ini.ib_dev->mac[ini.ib_port - 1],
+ ini.ib_gid, NULL, SMC_LLC_REQ);
+out:
+ kfree(qentry);
+}
+
+static bool smc_llc_is_local_add_link(union smc_llc_msg *llc)
+{
+ if (llc->raw.hdr.common.type == SMC_LLC_ADD_LINK &&
+ !llc->add_link.qp_mtu && !llc->add_link.link_num)
+ return true;
+ return false;
+}
+
static void smc_llc_process_cli_add_link(struct smc_link_group *lgr)
{
struct smc_llc_qentry *qentry;
@@ -883,7 +932,10 @@ static void smc_llc_process_cli_add_link(struct smc_link_group *lgr)
qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
mutex_lock(&lgr->llc_conf_mutex);
- smc_llc_cli_add_link(qentry->link, qentry);
+ if (smc_llc_is_local_add_link(&qentry->msg))
+ smc_llc_cli_add_link_invite(qentry->link, qentry);
+ else
+ smc_llc_cli_add_link(qentry->link, qentry);
mutex_unlock(&lgr->llc_conf_mutex);
}
@@ -892,7 +944,7 @@ static int smc_llc_active_link_count(struct smc_link_group *lgr)
int i, link_count = 0;
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
- if (!smc_link_usable(&lgr->lnk[i]))
+ if (!smc_link_active(&lgr->lnk[i]))
continue;
link_count++;
}
@@ -1032,12 +1084,14 @@ static int smc_llc_srv_conf_link(struct smc_link *link,
if (rc)
return -ENOLINK;
/* receive CONFIRM LINK response over the RoCE fabric */
- qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME,
- SMC_LLC_CONFIRM_LINK);
- if (!qentry) {
+ qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME, 0);
+ if (!qentry ||
+ qentry->msg.raw.hdr.common.type != SMC_LLC_CONFIRM_LINK) {
/* send DELETE LINK */
smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
false, SMC_LLC_DEL_LOST_PATH);
+ if (qentry)
+ smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
return -ENOLINK;
}
smc_llc_save_peer_uid(qentry);
@@ -1139,14 +1193,14 @@ static void smc_llc_process_srv_add_link(struct smc_link_group *lgr)
mutex_unlock(&lgr->llc_conf_mutex);
}
-/* enqueue a local add_link req to trigger a new add_link flow, only as SERV */
-void smc_llc_srv_add_link_local(struct smc_link *link)
+/* enqueue a local add_link req to trigger a new add_link flow */
+void smc_llc_add_link_local(struct smc_link *link)
{
struct smc_llc_msg_add_link add_llc = {0};
add_llc.hd.length = sizeof(add_llc);
add_llc.hd.common.type = SMC_LLC_ADD_LINK;
- /* no dev and port needed, we as server ignore client data anyway */
+ /* no dev and port needed */
smc_llc_enqueue(link, (union smc_llc_msg *)&add_llc);
}
@@ -1222,8 +1276,8 @@ static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr)
smc_llc_send_message(lnk, &qentry->msg); /* response */
if (smc_link_downing(&lnk_del->state)) {
- smc_switch_conns(lgr, lnk_del, false);
- smc_wr_tx_wait_no_pending_sends(lnk_del);
+ if (smc_switch_conns(lgr, lnk_del, false))
+ smc_wr_tx_wait_no_pending_sends(lnk_del);
}
smcr_link_clear(lnk_del, true);
@@ -1297,8 +1351,8 @@ static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr)
goto out; /* asymmetric link already deleted */
if (smc_link_downing(&lnk_del->state)) {
- smc_switch_conns(lgr, lnk_del, false);
- smc_wr_tx_wait_no_pending_sends(lnk_del);
+ if (smc_switch_conns(lgr, lnk_del, false))
+ smc_wr_tx_wait_no_pending_sends(lnk_del);
}
if (!list_empty(&lgr->list)) {
/* qentry is either a request from peer (send it back to
@@ -1326,7 +1380,7 @@ static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr)
if (lgr->type == SMC_LGR_SINGLE && !list_empty(&lgr->list)) {
/* trigger setup of asymm alt link */
- smc_llc_srv_add_link_local(lnk);
+ smc_llc_add_link_local(lnk);
}
out:
mutex_unlock(&lgr->llc_conf_mutex);
@@ -1455,11 +1509,22 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
if (list_empty(&lgr->list))
goto out; /* lgr is terminating */
if (lgr->role == SMC_CLNT) {
- if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK) {
+ if (smc_llc_is_local_add_link(llc)) {
+ if (lgr->llc_flow_lcl.type ==
+ SMC_LLC_FLOW_ADD_LINK)
+ break; /* add_link in progress */
+ if (smc_llc_flow_start(&lgr->llc_flow_lcl,
+ qentry)) {
+ schedule_work(&lgr->llc_add_link_work);
+ }
+ return;
+ }
+ if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
+ !lgr->llc_flow_lcl.qentry) {
/* a flow is waiting for this message */
smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
qentry);
- wake_up_interruptible(&lgr->llc_waiter);
+ wake_up(&lgr->llc_msg_waiter);
} else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
qentry)) {
schedule_work(&lgr->llc_add_link_work);
@@ -1474,33 +1539,18 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
/* a flow is waiting for this message */
smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
- wake_up_interruptible(&lgr->llc_waiter);
+ wake_up(&lgr->llc_msg_waiter);
return;
}
break;
case SMC_LLC_DELETE_LINK:
- if (lgr->role == SMC_CLNT) {
- /* server requests to delete this link, send response */
- if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
- /* DEL LINK REQ during ADD LINK SEQ */
- smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
- qentry);
- wake_up_interruptible(&lgr->llc_waiter);
- } else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
- qentry)) {
- schedule_work(&lgr->llc_del_link_work);
- }
- } else {
- if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
- !lgr->llc_flow_lcl.qentry) {
- /* DEL LINK REQ during ADD LINK SEQ */
- smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
- qentry);
- wake_up_interruptible(&lgr->llc_waiter);
- } else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
- qentry)) {
- schedule_work(&lgr->llc_del_link_work);
- }
+ if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
+ !lgr->llc_flow_lcl.qentry) {
+ /* DEL LINK REQ during ADD LINK SEQ */
+ smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
+ wake_up(&lgr->llc_msg_waiter);
+ } else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
+ schedule_work(&lgr->llc_del_link_work);
}
return;
case SMC_LLC_CONFIRM_RKEY:
@@ -1566,23 +1616,30 @@ again:
static void smc_llc_rx_response(struct smc_link *link,
struct smc_llc_qentry *qentry)
{
+ enum smc_llc_flowtype flowtype = link->lgr->llc_flow_lcl.type;
+ struct smc_llc_flow *flow = &link->lgr->llc_flow_lcl;
u8 llc_type = qentry->msg.raw.hdr.common.type;
switch (llc_type) {
case SMC_LLC_TEST_LINK:
- if (link->state == SMC_LNK_ACTIVE)
+ if (smc_link_active(link))
complete(&link->llc_testlink_resp);
break;
case SMC_LLC_ADD_LINK:
- case SMC_LLC_DELETE_LINK:
- case SMC_LLC_CONFIRM_LINK:
case SMC_LLC_ADD_LINK_CONT:
+ case SMC_LLC_CONFIRM_LINK:
+ if (flowtype != SMC_LLC_FLOW_ADD_LINK || flow->qentry)
+ break; /* drop out-of-flow response */
+ goto assign;
+ case SMC_LLC_DELETE_LINK:
+ if (flowtype != SMC_LLC_FLOW_DEL_LINK || flow->qentry)
+ break; /* drop out-of-flow response */
+ goto assign;
case SMC_LLC_CONFIRM_RKEY:
case SMC_LLC_DELETE_RKEY:
- /* assign responses to the local flow, we requested them */
- smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry);
- wake_up_interruptible(&link->lgr->llc_waiter);
- return;
+ if (flowtype != SMC_LLC_FLOW_RKEY || flow->qentry)
+ break; /* drop out-of-flow response */
+ goto assign;
case SMC_LLC_CONFIRM_RKEY_CONT:
/* not used because max links is 3 */
break;
@@ -1591,6 +1648,11 @@ static void smc_llc_rx_response(struct smc_link *link,
break;
}
kfree(qentry);
+ return;
+assign:
+ /* assign responses to the local flow, we requested them */
+ smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry);
+ wake_up(&link->lgr->llc_msg_waiter);
}
static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc)
@@ -1616,7 +1678,7 @@ static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc)
spin_lock_irqsave(&lgr->llc_event_q_lock, flags);
list_add_tail(&qentry->list, &lgr->llc_event_q);
spin_unlock_irqrestore(&lgr->llc_event_q_lock, flags);
- schedule_work(&link->lgr->llc_event_work);
+ schedule_work(&lgr->llc_event_work);
}
/* copy received msg and add it to the event queue */
@@ -1644,7 +1706,7 @@ static void smc_llc_testlink_work(struct work_struct *work)
u8 user_data[16] = { 0 };
int rc;
- if (link->state != SMC_LNK_ACTIVE)
+ if (!smc_link_active(link))
return; /* don't reschedule worker */
expire_time = link->wr_rx_tstamp + link->llc_testlink_time;
if (time_is_after_jiffies(expire_time)) {
@@ -1656,7 +1718,7 @@ static void smc_llc_testlink_work(struct work_struct *work)
/* receive TEST LINK response over RoCE fabric */
rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp,
SMC_LLC_WAIT_TIME);
- if (link->state != SMC_LNK_ACTIVE)
+ if (!smc_link_active(link))
return; /* link state changed */
if (rc <= 0) {
smcr_link_down_cond_sched(link);
@@ -1677,7 +1739,8 @@ void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
INIT_LIST_HEAD(&lgr->llc_event_q);
spin_lock_init(&lgr->llc_event_q_lock);
spin_lock_init(&lgr->llc_flow_lock);
- init_waitqueue_head(&lgr->llc_waiter);
+ init_waitqueue_head(&lgr->llc_flow_waiter);
+ init_waitqueue_head(&lgr->llc_msg_waiter);
mutex_init(&lgr->llc_conf_mutex);
lgr->llc_testlink_time = net->ipv4.sysctl_tcp_keepalive_time;
}
@@ -1686,7 +1749,8 @@ void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
void smc_llc_lgr_clear(struct smc_link_group *lgr)
{
smc_llc_event_flush(lgr);
- wake_up_interruptible_all(&lgr->llc_waiter);
+ wake_up_all(&lgr->llc_flow_waiter);
+ wake_up_all(&lgr->llc_msg_waiter);
cancel_work_sync(&lgr->llc_event_work);
cancel_work_sync(&lgr->llc_add_link_work);
cancel_work_sync(&lgr->llc_del_link_work);
diff --git a/net/smc/smc_llc.h b/net/smc/smc_llc.h
index a5d2fe3eea61..cc00a2ec4e92 100644
--- a/net/smc/smc_llc.h
+++ b/net/smc/smc_llc.h
@@ -103,7 +103,7 @@ void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord,
u32 rsn);
int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry);
int smc_llc_srv_add_link(struct smc_link *link);
-void smc_llc_srv_add_link_local(struct smc_link *link);
+void smc_llc_add_link_local(struct smc_link *link);
int smc_llc_init(void) __init;
#endif /* SMC_LLC_H */
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 014d91b9778e..30e5fac7034e 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/list.h>
#include <linux/ctype.h>
+#include <linux/mutex.h>
#include <net/netlink.h>
#include <net/genetlink.h>
@@ -129,7 +130,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
return rc;
/* remove ib devices */
- spin_lock(&smc_ib_devices.lock);
+ mutex_lock(&smc_ib_devices.mutex);
list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
for (ibport = 0; ibport < SMC_MAX_PORTS; ibport++) {
if (ibdev->pnetid_by_user[ibport] &&
@@ -149,9 +150,9 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
}
}
}
- spin_unlock(&smc_ib_devices.lock);
+ mutex_unlock(&smc_ib_devices.mutex);
/* remove smcd devices */
- spin_lock(&smcd_dev_list.lock);
+ mutex_lock(&smcd_dev_list.mutex);
list_for_each_entry(smcd_dev, &smcd_dev_list.list, list) {
if (smcd_dev->pnetid_by_user &&
(!pnet_name ||
@@ -165,7 +166,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
rc = 0;
}
}
- spin_unlock(&smcd_dev_list.lock);
+ mutex_unlock(&smcd_dev_list.mutex);
return rc;
}
@@ -240,14 +241,14 @@ static bool smc_pnet_apply_ib(struct smc_ib_device *ib_dev, u8 ib_port,
u8 pnet_null[SMC_MAX_PNETID_LEN] = {0};
bool applied = false;
- spin_lock(&smc_ib_devices.lock);
+ mutex_lock(&smc_ib_devices.mutex);
if (smc_pnet_match(ib_dev->pnetid[ib_port - 1], pnet_null)) {
memcpy(ib_dev->pnetid[ib_port - 1], pnet_name,
SMC_MAX_PNETID_LEN);
ib_dev->pnetid_by_user[ib_port - 1] = true;
applied = true;
}
- spin_unlock(&smc_ib_devices.lock);
+ mutex_unlock(&smc_ib_devices.mutex);
return applied;
}
@@ -258,13 +259,13 @@ static bool smc_pnet_apply_smcd(struct smcd_dev *smcd_dev, char *pnet_name)
u8 pnet_null[SMC_MAX_PNETID_LEN] = {0};
bool applied = false;
- spin_lock(&smcd_dev_list.lock);
+ mutex_lock(&smcd_dev_list.mutex);
if (smc_pnet_match(smcd_dev->pnetid, pnet_null)) {
memcpy(smcd_dev->pnetid, pnet_name, SMC_MAX_PNETID_LEN);
smcd_dev->pnetid_by_user = true;
applied = true;
}
- spin_unlock(&smcd_dev_list.lock);
+ mutex_unlock(&smcd_dev_list.mutex);
return applied;
}
@@ -300,7 +301,7 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name)
{
struct smc_ib_device *ibdev;
- spin_lock(&smc_ib_devices.lock);
+ mutex_lock(&smc_ib_devices.mutex);
list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
if (!strncmp(ibdev->ibdev->name, ib_name,
sizeof(ibdev->ibdev->name)) ||
@@ -311,7 +312,7 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name)
}
ibdev = NULL;
out:
- spin_unlock(&smc_ib_devices.lock);
+ mutex_unlock(&smc_ib_devices.mutex);
return ibdev;
}
@@ -320,7 +321,7 @@ static struct smcd_dev *smc_pnet_find_smcd(char *smcd_name)
{
struct smcd_dev *smcd_dev;
- spin_lock(&smcd_dev_list.lock);
+ mutex_lock(&smcd_dev_list.mutex);
list_for_each_entry(smcd_dev, &smcd_dev_list.list, list) {
if (!strncmp(dev_name(&smcd_dev->dev), smcd_name,
IB_DEVICE_NAME_MAX - 1))
@@ -328,7 +329,7 @@ static struct smcd_dev *smc_pnet_find_smcd(char *smcd_name)
}
smcd_dev = NULL;
out:
- spin_unlock(&smcd_dev_list.lock);
+ mutex_unlock(&smcd_dev_list.mutex);
return smcd_dev;
}
@@ -825,7 +826,7 @@ static void _smc_pnet_find_roce_by_pnetid(u8 *pnet_id,
int i;
ini->ib_dev = NULL;
- spin_lock(&smc_ib_devices.lock);
+ mutex_lock(&smc_ib_devices.mutex);
list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
if (ibdev == known_dev)
continue;
@@ -844,7 +845,7 @@ static void _smc_pnet_find_roce_by_pnetid(u8 *pnet_id,
}
}
out:
- spin_unlock(&smc_ib_devices.lock);
+ mutex_unlock(&smc_ib_devices.mutex);
}
/* find alternate roce device with same pnet_id and vlan_id */
@@ -863,7 +864,7 @@ static void smc_pnet_find_rdma_dev(struct net_device *netdev,
{
struct smc_ib_device *ibdev;
- spin_lock(&smc_ib_devices.lock);
+ mutex_lock(&smc_ib_devices.mutex);
list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
struct net_device *ndev;
int i;
@@ -888,7 +889,7 @@ static void smc_pnet_find_rdma_dev(struct net_device *netdev,
}
}
}
- spin_unlock(&smc_ib_devices.lock);
+ mutex_unlock(&smc_ib_devices.mutex);
}
/* Determine the corresponding IB device port based on the hardware PNETID.
@@ -924,7 +925,7 @@ static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev,
smc_pnet_find_ndev_pnetid_by_table(ndev, ndev_pnetid))
return; /* pnetid could not be determined */
- spin_lock(&smcd_dev_list.lock);
+ mutex_lock(&smcd_dev_list.mutex);
list_for_each_entry(ismdev, &smcd_dev_list.list, list) {
if (smc_pnet_match(ismdev->pnetid, ndev_pnetid) &&
!ismdev->going_away) {
@@ -932,7 +933,7 @@ static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev,
break;
}
}
- spin_unlock(&smcd_dev_list.lock);
+ mutex_unlock(&smcd_dev_list.mutex);
}
/* PNET table analysis for a given sock:
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index 7239ba9b99dc..1e23cdd41eb1 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -169,6 +169,8 @@ void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
{
*idx = link->wr_tx_cnt;
+ if (!smc_link_usable(link))
+ return -ENOLINK;
for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
if (!test_and_set_bit(*idx, link->wr_tx_mask))
return 0;
@@ -560,15 +562,15 @@ void smc_wr_free_link(struct smc_link *lnk)
{
struct ib_device *ibdev;
+ if (!lnk->smcibdev)
+ return;
+ ibdev = lnk->smcibdev->ibdev;
+
if (smc_wr_tx_wait_no_pending_sends(lnk))
memset(lnk->wr_tx_mask, 0,
BITS_TO_LONGS(SMC_WR_BUF_CNT) *
sizeof(*lnk->wr_tx_mask));
- if (!lnk->smcibdev)
- return;
- ibdev = lnk->smcibdev->ibdev;
-
if (lnk->wr_rx_dma_addr) {
ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 39e14d5edaf1..e9d0953522f0 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -1317,6 +1317,7 @@ rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data)
q.len = strlen(gssd_dummy_clnt_dir[0].name);
clnt_dentry = d_hash_and_lookup(gssd_dentry, &q);
if (!clnt_dentry) {
+ __rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1);
pipe_dentry = ERR_PTR(-ENOENT);
goto out;
}
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 5c4ec9386f81..c537272f9c7e 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -44,6 +44,7 @@
#include <net/tcp.h>
#include <net/tcp_states.h>
#include <linux/uaccess.h>
+#include <linux/highmem.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 6f7d82fb1eb0..be11d672b5b9 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1118,6 +1118,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
base = 0;
} else {
base -= buf->head[0].iov_len;
+ subbuf->head[0].iov_base = buf->head[0].iov_base;
subbuf->head[0].iov_len = 0;
}
@@ -1130,6 +1131,8 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
base = 0;
} else {
base -= buf->page_len;
+ subbuf->pages = buf->pages;
+ subbuf->page_base = 0;
subbuf->page_len = 0;
}
@@ -1141,6 +1144,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
base = 0;
} else {
base -= buf->tail[0].iov_len;
+ subbuf->tail[0].iov_base = buf->tail[0].iov_base;
subbuf->tail[0].iov_len = 0;
}
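
The xdr_buf_subsegment() fix above restores an invariant: each region of the carved-out sub-buffer (head, pages, tail) keeps a valid base pointer even when its computed length is zero, so callers can walk all three regions unconditionally. A freestanding sketch of the per-region carving step (illustrative types, not the kernel API):

struct region {
	char	*base;
	size_t	 len;
};

/* Carve the part of @src that lies beyond @*skip into @out, keeping
 * @out->base valid even when the carved length is zero. Simplified:
 * the real code also caps the result at the requested total length.
 */
static void sub_region(const struct region *src, size_t *skip,
		       struct region *out)
{
	if (*skip < src->len) {
		out->base = src->base + *skip;
		out->len  = src->len - *skip;
		*skip = 0;
	} else {
		*skip -= src->len;
		out->base = src->base;	/* the fix: no stale pointer */
		out->len  = 0;
	}
}
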
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index ef997880e17a..b647562a26dd 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -367,7 +367,7 @@ static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
trace_xprtrdma_wc_fastreg(wc, frwr);
/* The MR will get recycled when the associated req is retransmitted */
- rpcrdma_flush_disconnect(cq, wc);
+ rpcrdma_flush_disconnect(cq->cq_context, wc);
}
/**
@@ -452,7 +452,7 @@ static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
trace_xprtrdma_wc_li(wc, frwr);
__frwr_release_mr(wc, mr);
- rpcrdma_flush_disconnect(cq, wc);
+ rpcrdma_flush_disconnect(cq->cq_context, wc);
}
/**
@@ -474,7 +474,7 @@ static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
__frwr_release_mr(wc, mr);
complete(&frwr->fr_linv_done);
- rpcrdma_flush_disconnect(cq, wc);
+ rpcrdma_flush_disconnect(cq->cq_context, wc);
}
/**
@@ -582,7 +582,7 @@ static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
smp_rmb();
rpcrdma_complete_rqst(rep);
- rpcrdma_flush_disconnect(cq, wc);
+ rpcrdma_flush_disconnect(cq->cq_context, wc);
}
/**
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 2081c8fbfa48..453bacc99907 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -71,7 +71,7 @@ static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
size = RPCRDMA_HDRLEN_MIN;
/* Maximum Read list size */
- size = maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);
+ size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);
/* Minimal Read chunk size */
size += sizeof(__be32); /* segment count */
@@ -94,7 +94,7 @@ static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
size = RPCRDMA_HDRLEN_MIN;
/* Maximum Write list size */
- size = sizeof(__be32); /* segment count */
+ size += sizeof(__be32); /* segment count */
size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
size += sizeof(__be32); /* list discriminator */
@@ -1349,8 +1349,7 @@ rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
}
- r_xprt->rx_stats.bad_reply_count++;
- return -EREMOTEIO;
+ return -EIO;
}
/* Perform XID lookup, reconstruction of the RPC reply, and
@@ -1387,13 +1386,11 @@ out:
spin_unlock(&xprt->queue_lock);
return;
-/* If the incoming reply terminated a pending RPC, the next
- * RPC call will post a replacement receive buffer as it is
- * being marshaled.
- */
out_badheader:
trace_xprtrdma_reply_hdr(rep);
r_xprt->rx_stats.bad_reply_count++;
+ rqst->rq_task->tk_status = status;
+ status = 0;
goto out;
}
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 0c4af7f5e241..053c8ab1265a 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -242,13 +242,18 @@ xprt_rdma_connect_worker(struct work_struct *work)
rc = rpcrdma_xprt_connect(r_xprt);
xprt_clear_connecting(xprt);
- if (r_xprt->rx_ep && r_xprt->rx_ep->re_connect_status > 0) {
+ if (!rc) {
xprt->connect_cookie++;
xprt->stat.connect_count++;
xprt->stat.connect_time += (long)jiffies -
xprt->stat.connect_start;
xprt_set_connected(xprt);
rc = -EAGAIN;
+ } else {
+ /* Force a call to xprt_rdma_close to clean up */
+ spin_lock(&xprt->transport_lock);
+ set_bit(XPRT_CLOSE_WAIT, &xprt->state);
+ spin_unlock(&xprt->transport_lock);
}
xprt_wake_pending_tasks(xprt, rc);
}
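[Note: with rpcrdma_xprt_connect() now returning a definitive errno (see the verbs.c changes below), the worker tests rc directly instead of peeking at re_connect_status, and on failure it schedules a real close. A small sketch of the close-request pattern, with a hypothetical helper name:

	/* Sketch (hypothetical helper): ask the RPC core to run ->close()
	 * on this transport the next time its state is checked.
	 */
	static void example_schedule_close(struct rpc_xprt *xprt)
	{
		spin_lock(&xprt->transport_lock);
		set_bit(XPRT_CLOSE_WAIT, &xprt->state);
		spin_unlock(&xprt->transport_lock);
	}
]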
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 2ae348377806..75c646743df3 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -84,7 +84,8 @@ static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep);
static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt);
-static int rpcrdma_ep_destroy(struct rpcrdma_ep *ep);
+static void rpcrdma_ep_get(struct rpcrdma_ep *ep);
+static int rpcrdma_ep_put(struct rpcrdma_ep *ep);
static struct rpcrdma_regbuf *
rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
gfp_t flags);
@@ -97,7 +98,8 @@ static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);
*/
static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
{
- struct rdma_cm_id *id = r_xprt->rx_ep->re_id;
+ struct rpcrdma_ep *ep = r_xprt->rx_ep;
+ struct rdma_cm_id *id = ep->re_id;
/* Flush Receives, then wait for deferred Reply work
* to complete.
@@ -108,6 +110,8 @@ static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
* local invalidations.
*/
ib_drain_sq(id->qp);
+
+ rpcrdma_ep_put(ep);
}
/**
@@ -126,23 +130,27 @@ static void rpcrdma_qp_event_handler(struct ib_event *event, void *context)
trace_xprtrdma_qp_event(ep, event);
}
+/* Ensure xprt_force_disconnect() is invoked exactly once when a
+ * connection is closed or lost. (The important thing is that it
+ * is invoked at least once.)
+ */
+static void rpcrdma_force_disconnect(struct rpcrdma_ep *ep)
+{
+ if (atomic_add_unless(&ep->re_force_disconnect, 1, 1))
+ xprt_force_disconnect(ep->re_xprt);
+}
+
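[Note: atomic_add_unless(&v, 1, 1) adds 1 only while the counter is not already 1, and returns nonzero only for the caller whose add actually happened, so exactly one completion path per connection instance reaches xprt_force_disconnect(). A standalone sketch of the idiom, with hypothetical names:

	/* Fire-once idiom (hypothetical names). The counter must be
	 * reset to 0 when a new connection is established so the next
	 * disconnect can fire again.
	 */
	static atomic_t fired = ATOMIC_INIT(0);

	static void fire_once(void)
	{
		if (atomic_add_unless(&fired, 1, 1))
			pr_info("ran exactly once for this connection\n");
	}
]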
/**
* rpcrdma_flush_disconnect - Disconnect on flushed completion
- * @cq: completion queue
+ * @r_xprt: transport to disconnect
* @wc: work completion entry
*
* Must be called in process context.
*/
-void rpcrdma_flush_disconnect(struct ib_cq *cq, struct ib_wc *wc)
+void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc)
{
- struct rpcrdma_xprt *r_xprt = cq->cq_context;
- struct rpc_xprt *xprt = &r_xprt->rx_xprt;
-
- if (wc->status != IB_WC_SUCCESS &&
- r_xprt->rx_ep->re_connect_status == 1) {
- r_xprt->rx_ep->re_connect_status = -ECONNABORTED;
- xprt_force_disconnect(xprt);
- }
+ if (wc->status != IB_WC_SUCCESS)
+ rpcrdma_force_disconnect(r_xprt->rx_ep);
}
/**
@@ -156,11 +164,12 @@ static void rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
struct ib_cqe *cqe = wc->wr_cqe;
struct rpcrdma_sendctx *sc =
container_of(cqe, struct rpcrdma_sendctx, sc_cqe);
+ struct rpcrdma_xprt *r_xprt = cq->cq_context;
/* WARNING: Only wr_cqe and status are reliable at this point */
trace_xprtrdma_wc_send(sc, wc);
- rpcrdma_sendctx_put_locked((struct rpcrdma_xprt *)cq->cq_context, sc);
- rpcrdma_flush_disconnect(cq, wc);
+ rpcrdma_sendctx_put_locked(r_xprt, sc);
+ rpcrdma_flush_disconnect(r_xprt, wc);
}
/**
@@ -195,7 +204,7 @@ static void rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
return;
out_flushed:
- rpcrdma_flush_disconnect(cq, wc);
+ rpcrdma_flush_disconnect(r_xprt, wc);
rpcrdma_rep_destroy(rep);
}
@@ -239,7 +248,6 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
struct sockaddr *sap = (struct sockaddr *)&id->route.addr.dst_addr;
struct rpcrdma_ep *ep = id->context;
- struct rpc_xprt *xprt = ep->re_xprt;
might_sleep();
@@ -263,10 +271,9 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
/* fall through */
case RDMA_CM_EVENT_ADDR_CHANGE:
ep->re_connect_status = -ENODEV;
- xprt_force_disconnect(xprt);
goto disconnected;
case RDMA_CM_EVENT_ESTABLISHED:
- kref_get(&ep->re_kref);
+ rpcrdma_ep_get(ep);
ep->re_connect_status = 1;
rpcrdma_update_cm_private(ep, &event->param.conn);
trace_xprtrdma_inline_thresh(ep);
@@ -274,22 +281,24 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
break;
case RDMA_CM_EVENT_CONNECT_ERROR:
ep->re_connect_status = -ENOTCONN;
- goto disconnected;
+ goto wake_connect_worker;
case RDMA_CM_EVENT_UNREACHABLE:
ep->re_connect_status = -ENETUNREACH;
- goto disconnected;
+ goto wake_connect_worker;
case RDMA_CM_EVENT_REJECTED:
dprintk("rpcrdma: connection to %pISpc rejected: %s\n",
sap, rdma_reject_msg(id, event->status));
ep->re_connect_status = -ECONNREFUSED;
if (event->status == IB_CM_REJ_STALE_CONN)
- ep->re_connect_status = -EAGAIN;
- goto disconnected;
+ ep->re_connect_status = -ENOTCONN;
+wake_connect_worker:
+ wake_up_all(&ep->re_connect_wait);
+ return 0;
case RDMA_CM_EVENT_DISCONNECTED:
ep->re_connect_status = -ECONNABORTED;
disconnected:
- xprt_force_disconnect(xprt);
- return rpcrdma_ep_destroy(ep);
+ rpcrdma_force_disconnect(ep);
+ return rpcrdma_ep_put(ep);
default:
break;
}
@@ -345,7 +354,7 @@ out:
return ERR_PTR(rc);
}
-static void rpcrdma_ep_put(struct kref *kref)
+static void rpcrdma_ep_destroy(struct kref *kref)
{
struct rpcrdma_ep *ep = container_of(kref, struct rpcrdma_ep, re_kref);
@@ -369,13 +378,18 @@ static void rpcrdma_ep_put(struct kref *kref)
module_put(THIS_MODULE);
}
+static noinline void rpcrdma_ep_get(struct rpcrdma_ep *ep)
+{
+ kref_get(&ep->re_kref);
+}
+
/* Returns:
* %0 if @ep still has a positive kref count, or
* %1 if @ep was destroyed successfully.
*/
-static int rpcrdma_ep_destroy(struct rpcrdma_ep *ep)
+static noinline int rpcrdma_ep_put(struct rpcrdma_ep *ep)
{
- return kref_put(&ep->re_kref, rpcrdma_ep_put);
+ return kref_put(&ep->re_kref, rpcrdma_ep_destroy);
}
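[Note: the renames restore the conventional kref naming: _get/_put take the object, and the release callback handed to kref_put() does the destruction. A generic sketch of the pattern, using a hypothetical struct:

	struct foo {
		struct kref ref;
	};

	static void foo_destroy(struct kref *kref)
	{
		struct foo *f = container_of(kref, struct foo, ref);

		kfree(f);	/* release callback: runs on the final put */
	}

	static void foo_get(struct foo *f)
	{
		kref_get(&f->ref);
	}

	static int foo_put(struct foo *f)
	{
		/* returns 1 when this put dropped the last reference */
		return kref_put(&f->ref, foo_destroy);
	}
]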
static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
@@ -388,14 +402,14 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
ep = kzalloc(sizeof(*ep), GFP_NOFS);
if (!ep)
- return -EAGAIN;
+ return -ENOTCONN;
ep->re_xprt = &r_xprt->rx_xprt;
kref_init(&ep->re_kref);
id = rpcrdma_create_id(r_xprt, ep);
if (IS_ERR(id)) {
- rc = PTR_ERR(id);
- goto out_free;
+ kfree(ep);
+ return PTR_ERR(id);
}
__module_get(THIS_MODULE);
device = id->device;
@@ -492,11 +506,8 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
return 0;
out_destroy:
- rpcrdma_ep_destroy(ep);
+ rpcrdma_ep_put(ep);
rdma_destroy_id(id);
-out_free:
- kfree(ep);
- r_xprt->rx_ep = NULL;
return rc;
}
@@ -512,22 +523,19 @@ int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt)
struct rpcrdma_ep *ep;
int rc;
-retry:
- rpcrdma_xprt_disconnect(r_xprt);
rc = rpcrdma_ep_create(r_xprt);
if (rc)
return rc;
ep = r_xprt->rx_ep;
- ep->re_connect_status = 0;
xprt_clear_connected(xprt);
-
rpcrdma_reset_cwnd(r_xprt);
- rpcrdma_post_recvs(r_xprt, true);
- rc = rpcrdma_sendctxs_create(r_xprt);
- if (rc)
- goto out;
+ /* Bump the ep's reference count while there are
+ * outstanding Receives.
+ */
+ rpcrdma_ep_get(ep);
+ rpcrdma_post_recvs(r_xprt, true);
rc = rdma_connect(ep->re_id, &ep->re_remote_cma);
if (rc)
@@ -538,22 +546,24 @@ retry:
wait_event_interruptible(ep->re_connect_wait,
ep->re_connect_status != 0);
if (ep->re_connect_status <= 0) {
- if (ep->re_connect_status == -EAGAIN)
- goto retry;
rc = ep->re_connect_status;
goto out;
}
+ rc = rpcrdma_sendctxs_create(r_xprt);
+ if (rc) {
+ rc = -ENOTCONN;
+ goto out;
+ }
+
rc = rpcrdma_reqs_setup(r_xprt);
if (rc) {
- rpcrdma_xprt_disconnect(r_xprt);
+ rc = -ENOTCONN;
goto out;
}
rpcrdma_mrs_create(r_xprt);
out:
- if (rc)
- ep->re_connect_status = rc;
trace_xprtrdma_connect(r_xprt, rc);
return rc;
}
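[Note: the reordered connect path pins the endpoint before posting Receives and defers send-context and request setup until the connection is actually established; the matching unpin is the rpcrdma_ep_put() added to rpcrdma_xprt_drain() above, once the queues are flushed. A simplified sketch of the pairing, error paths omitted:

	rpcrdma_ep_get(ep);			/* pin: Receives now outstanding */
	rpcrdma_post_recvs(r_xprt, true);
	rc = rdma_connect(ep->re_id, &ep->re_remote_cma);

	/* ... later, in rpcrdma_xprt_drain(), after ib_drain_sq(): */
	rpcrdma_ep_put(ep);			/* unpin: Receives are flushed */
]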
@@ -587,7 +597,7 @@ void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt)
rpcrdma_mrs_destroy(r_xprt);
rpcrdma_sendctxs_destroy(r_xprt);
- if (rpcrdma_ep_destroy(ep))
+ if (rpcrdma_ep_put(ep))
rdma_destroy_id(id);
r_xprt->rx_ep = NULL;
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 0a16fdb09b2c..43974ef39a50 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -82,6 +82,7 @@ struct rpcrdma_ep {
unsigned int re_max_inline_recv;
int re_async_rc;
int re_connect_status;
+ atomic_t re_force_disconnect;
struct ib_qp_init_attr re_attr;
wait_queue_head_t re_connect_wait;
struct rpc_xprt *re_xprt;
@@ -446,7 +447,7 @@ extern unsigned int xprt_rdma_memreg_strategy;
/*
* Endpoint calls - xprtrdma/verbs.c
*/
-void rpcrdma_flush_disconnect(struct ib_cq *cq, struct ib_wc *wc);
+void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc);
int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt);
void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index ee3b8d0576b8..d40f8e5b7683 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -827,11 +827,11 @@ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
state |= l->bc_rcvlink->rcv_unacked;
state |= l->rcv_unacked;
state |= !skb_queue_empty(&l->transmq);
- state |= !skb_queue_empty(&l->deferdq);
probe = mstate->probing;
probe |= l->silent_intv_cnt;
if (probe || mstate->monitoring)
l->silent_intv_cnt++;
+ probe |= !skb_queue_empty(&l->deferdq);
if (l->snd_nxt == l->checkpoint) {
tipc_link_update_cwin(l, 0, 0);
probe = true;
@@ -921,6 +921,21 @@ static void link_prepare_wakeup(struct tipc_link *l)
}
+/**
+ * tipc_link_set_skb_retransmit_time - set the time at which retransmission of
+ * the given skb should be next attempted
+ * @skb: skb to set a future retransmission time for
+ * @l: link the skb will be transmitted on
+ */
+static void tipc_link_set_skb_retransmit_time(struct sk_buff *skb,
+ struct tipc_link *l)
+{
+ if (link_is_bc_sndlink(l))
+ TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
+ else
+ TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
+}
+
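[Note: all three enqueue/retransmit sites below now stamp the next retransmission time through this helper, which also covers the unicast case the open-coded version in tipc_link_xmit() did not handle. Usage sketch:

	/* Stamp at every point an skb enters the transmit queue or is
	 * retransmitted, e.g.:
	 */
	__skb_queue_tail(&l->transmq, skb);
	tipc_link_set_skb_retransmit_time(skb, l);
]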
void tipc_link_reset(struct tipc_link *l)
{
struct sk_buff_head list;
@@ -1036,9 +1051,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
return -ENOBUFS;
}
__skb_queue_tail(transmq, skb);
- /* next retransmit attempt */
- if (link_is_bc_sndlink(l))
- TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
+ tipc_link_set_skb_retransmit_time(skb, l);
__skb_queue_tail(xmitq, _skb);
TIPC_SKB_CB(skb)->ackers = l->ackers;
l->rcv_unacked = 0;
@@ -1139,9 +1152,7 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
if (unlikely(skb == l->backlog[imp].target_bskb))
l->backlog[imp].target_bskb = NULL;
__skb_queue_tail(&l->transmq, skb);
- /* next retransmit attempt */
- if (link_is_bc_sndlink(l))
- TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
+ tipc_link_set_skb_retransmit_time(skb, l);
__skb_queue_tail(xmitq, _skb);
TIPC_SKB_CB(skb)->ackers = l->ackers;
@@ -1584,8 +1595,7 @@ release:
/* retransmit skb if unrestricted */
if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
continue;
- TIPC_SKB_CB(skb)->nxt_retr = (is_uc) ?
- TIPC_UC_RETR_TIME : TIPC_BC_RETR_LIM;
+ tipc_link_set_skb_retransmit_time(skb, l);
_skb = pskb_copy(skb, GFP_ATOMIC);
if (!_skb)
continue;
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index dfbaf6bd8b1c..2700a63ab095 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -22,7 +22,7 @@
#include <net/af_vsock.h>
static struct workqueue_struct *virtio_vsock_workqueue;
-static struct virtio_vsock *the_virtio_vsock;
+static struct virtio_vsock __rcu *the_virtio_vsock;
static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */
struct virtio_vsock {
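[Note: annotating the_virtio_vsock with __rcu lets sparse verify that every access goes through the RCU accessors. A minimal sketch of the two sides, assuming the existing mutex protects updaters; use_vsock() is hypothetical:

	struct virtio_vsock *vsock;

	/* reader side */
	rcu_read_lock();
	vsock = rcu_dereference(the_virtio_vsock);
	if (vsock)
		use_vsock(vsock);	/* valid until rcu_read_unlock() */
	rcu_read_unlock();

	/* updater side, under the_virtio_vsock_mutex */
	rcu_assign_pointer(the_virtio_vsock, NULL);	/* unpublish */
	synchronize_rcu();		/* now safe to free the old object */
]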
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 263ae395ad44..0e07fb8585fb 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5016,7 +5016,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
err = nl80211_parse_he_obss_pd(
info->attrs[NL80211_ATTR_HE_OBSS_PD],
&params.he_obss_pd);
- goto out;
+ if (err)
+ goto out;
}
if (info->attrs[NL80211_ATTR_HE_BSS_COLOR]) {
@@ -5024,7 +5025,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
info->attrs[NL80211_ATTR_HE_BSS_COLOR],
&params.he_bss_color);
if (err)
- return err;
+ goto out;
}
nl80211_calculate_ap_params(&params);
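[Note: both hunks make error paths converge on nl80211_start_ap()'s common out: label so the cleanup performed there still runs; the first hunk additionally stops bailing out unconditionally after a successful parse. A generic sketch of the goto-cleanup idiom, with hypothetical helpers:

	static int example_start(struct cfg80211_ap_settings *params)
	{
		int err;

		err = parse_optional_attr(params);	/* hypothetical */
		if (err)
			goto out;	/* never "return err" past setup */

		err = apply_settings(params);		/* hypothetical */
	out:
		cleanup_settings(params);		/* single exit point */
		return err;
	}
]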
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index 540ed75e4482..08b80669f649 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -2,9 +2,6 @@
#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>
-#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/swiotlb.h>
#include "xsk_queue.h"
@@ -55,7 +52,6 @@ struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
pool->free_heads_cnt = chunks;
pool->headroom = headroom;
pool->chunk_size = chunk_size;
- pool->cheap_dma = true;
pool->unaligned = unaligned;
pool->frame_len = chunk_size - headroom - XDP_PACKET_HEADROOM;
INIT_LIST_HEAD(&pool->free_list);
@@ -125,48 +121,6 @@ static void xp_check_dma_contiguity(struct xsk_buff_pool *pool)
}
}
-static bool __maybe_unused xp_check_swiotlb_dma(struct xsk_buff_pool *pool)
-{
-#if defined(CONFIG_SWIOTLB)
- phys_addr_t paddr;
- u32 i;
-
- for (i = 0; i < pool->dma_pages_cnt; i++) {
- paddr = dma_to_phys(pool->dev, pool->dma_pages[i]);
- if (is_swiotlb_buffer(paddr))
- return false;
- }
-#endif
- return true;
-}
-
-static bool xp_check_cheap_dma(struct xsk_buff_pool *pool)
-{
-#if defined(CONFIG_HAS_DMA)
- const struct dma_map_ops *ops = get_dma_ops(pool->dev);
-
- if (ops) {
- return !ops->sync_single_for_cpu &&
- !ops->sync_single_for_device;
- }
-
- if (!dma_is_direct(ops))
- return false;
-
- if (!xp_check_swiotlb_dma(pool))
- return false;
-
- if (!dev_is_dma_coherent(pool->dev)) {
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
- defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
- defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
- return false;
-#endif
- }
-#endif
- return true;
-}
-
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
unsigned long attrs, struct page **pages, u32 nr_pages)
{
@@ -180,6 +134,7 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
pool->dev = dev;
pool->dma_pages_cnt = nr_pages;
+ pool->dma_need_sync = false;
for (i = 0; i < pool->dma_pages_cnt; i++) {
dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
@@ -188,14 +143,13 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
xp_dma_unmap(pool, attrs);
return -ENOMEM;
}
+ if (dma_need_sync(dev, dma))
+ pool->dma_need_sync = true;
pool->dma_pages[i] = dma;
}
if (pool->unaligned)
xp_check_dma_contiguity(pool);
-
- pool->dev = dev;
- pool->cheap_dma = xp_check_cheap_dma(pool);
return 0;
}
EXPORT_SYMBOL(xp_dma_map);
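[Note: the open-coded heuristics removed above are replaced by the DMA core's dma_need_sync() API: the pool is marked once at map time, and the per-packet paths skip the sync calls entirely when no page in the pool needs them. Consolidated sketch, names as in the hunks:

	dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
				 DMA_BIDIRECTIONAL, attrs);
	if (dma_need_sync(dev, dma))
		pool->dma_need_sync = true;	/* sticky for the pool */

	/* per-packet hot path */
	if (pool->dma_need_sync)
		dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
						 pool->frame_len,
						 DMA_BIDIRECTIONAL);
]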
@@ -280,7 +234,7 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
xskb->xdp.data_meta = xskb->xdp.data;
- if (!pool->cheap_dma) {
+ if (pool->dma_need_sync) {
dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
pool->frame_len,
DMA_BIDIRECTIONAL);
diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig
index b5d4a1ef04b9..5b9a5ab48111 100644
--- a/net/xfrm/Kconfig
+++ b/net/xfrm/Kconfig
@@ -67,6 +67,30 @@ config XFRM_STATISTICS
If unsure, say N.
+# This option selects XFRM_ALGO along with the AH authentication algorithms that
+# RFC 8221 lists as MUST be implemented.
+config XFRM_AH
+ tristate
+ select XFRM_ALGO
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA256
+
+# This option selects XFRM_ALGO along with the ESP encryption and authentication
+# algorithms that RFC 8221 lists as MUST be implemented.
+config XFRM_ESP
+ tristate
+ select XFRM_ALGO
+ select CRYPTO
+ select CRYPTO_AES
+ select CRYPTO_AUTHENC
+ select CRYPTO_CBC
+ select CRYPTO_ECHAINIV
+ select CRYPTO_GCM
+ select CRYPTO_HMAC
+ select CRYPTO_SEQIV
+ select CRYPTO_SHA256
+
config XFRM_IPCOMP
tristate
select XFRM_ALGO
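[Note: XFRM_AH and XFRM_ESP are non-visible helper symbols: a protocol entry selects one of them and transitively pulls in XFRM_ALGO plus the RFC 8221 mandatory-to-implement algorithms, instead of repeating the crypto selects in every Kconfig entry. An illustrative consumer (not a quote of net/ipv4/Kconfig):

	config INET_ESP
		tristate "IP: ESP transformation"
		select XFRM_ESP
		help
		  Support for IPsec ESP; the mandatory RFC 8221
		  algorithms are pulled in via XFRM_ESP.
]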
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index f50d1f97cf8e..626096bd0d29 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -108,7 +108,7 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
struct xfrm_offload *xo = xfrm_offload(skb);
struct sec_path *sp;
- if (!xo)
+ if (!xo || (xo->flags & XFRM_XMIT))
return skb;
if (!(features & NETIF_F_HW_ESP))
@@ -129,6 +129,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
return skb;
}
+ xo->flags |= XFRM_XMIT;
+
if (skb_is_gso(skb)) {
struct net_device *dev = skb->dev;
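[Note: the new XFRM_XMIT flag marks an skb the first time validate_xmit_xfrm() processes it, so an skb that re-enters the transmit path (for example after a driver requeue) is not transformed a second time. The guard in miniature, with xo already checked non-NULL as in the hunk above:

	if (xo->flags & XFRM_XMIT)
		return skb;		/* already transformed once */
	xo->flags |= XFRM_XMIT;		/* mark before handing to the driver */
]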
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index c407ecbc5d46..b615729812e5 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -37,6 +37,7 @@
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
+#include <net/ip_tunnels.h>
#include <net/addrconf.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
@@ -581,6 +582,7 @@ static const struct net_device_ops xfrmi_netdev_ops = {
static void xfrmi_dev_setup(struct net_device *dev)
{
dev->netdev_ops = &xfrmi_netdev_ops;
+ dev->header_ops = &ip_tunnel_header_ops;
dev->type = ARPHRD_NONE;
dev->mtu = ETH_DATA_LEN;
dev->min_mtu = ETH_MIN_MTU;
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index e4c23f69f69f..a7ab19353313 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -574,16 +574,12 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
switch (x->outer_mode.family) {
case AF_INET:
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-#ifdef CONFIG_NETFILTER
IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
-#endif
break;
case AF_INET6:
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
-#ifdef CONFIG_NETFILTER
IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
-#endif
break;
}
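[Note: the CONFIG_NETFILTER guards can be dropped because the IPCB/IP6CB flags fields and the IPSKB_XFRM_TRANSFORMED/IP6SKB_XFRM_TRANSFORMED bits are defined unconditionally; setting the flag always means later consumers may rely on it without their own conditional. Illustrative check:

	/* hypothetical consumer: skip a second transform */
	if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
		return 0;
]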