author     David S. Miller <davem@davemloft.net>   2020-07-25 17:49:04 -0700
committer  David S. Miller <davem@davemloft.net>   2020-07-25 17:49:04 -0700
commit     a57066b1a01977a646145f4ce8dfb4538b08368a (patch)
tree       57c2b4fa2fc48e687a1820b9bf4ef4f4363be0f9 /net
parent     dfecd3e00cd32b2a6d1cfdb30b513dd42575ada3 (diff)
parent     04300d66f0a06d572d9f2ad6768c38cabde22179 (diff)
download   linux-a57066b1a01977a646145f4ce8dfb4538b08368a.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
The UDP reuseport conflict was a little bit tricky.

The net-next code, via bpf-next, extracted the reuseport handling into a helper so that the BPF sk lookup code could invoke it.

At the same time, commit efc6b6f6c3113e8b203b9debfb72d81e0f3dcace changed the reuseport handling of unconnected sockets so that the reuseport result is carried on into the rest of the lookup loop if we do not return immediately.

This requires moving the reuseport_has_conns() logic into the callers.

While we are here, get rid of the inline directives, as they do not belong in foo.c files.

The other changes were cases of more straightforward overlapping modifications.

Signed-off-by: David S. Miller <davem@davemloft.net>
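To make the resolution concrete, the caller-side pattern the merge settles on looks roughly like this (a simplified sketch of the udp4_lib_lookup2() scoring loop from the hunk below, not the literal kernel code):

        /* lookup_reuseport() no longer bails out itself when the
         * reuseport group has connected sockets; each caller now
         * decides whether to trust the reuseport result or to keep
         * scoring candidate sockets.
         */
        result = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
        /* Fall back to scoring if group has connections */
        if (result && !reuseport_has_conns(sk, false))
                return result;
        /* otherwise remember the best-scoring socket seen so far */
        result = result ? : sk;
        badness = score;

The BPF lookup paths (udp4_lookup_run_bpf() and its IPv6 counterpart) apply the same reuseport_has_conns() check before trusting the reuseport result, as seen in their hunks below.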
Diffstat (limited to 'net')
-rw-r--r--  net/ax25/af_ax25.c               |  10
-rw-r--r--  net/core/dev.c                   |   2
-rw-r--r--  net/core/flow_offload.c          |   1
-rw-r--r--  net/core/net-sysfs.c             |   2
-rw-r--r--  net/core/rtnetlink.c             |   3
-rw-r--r--  net/core/sock_reuseport.c        |   1
-rw-r--r--  net/hsr/hsr_forward.c            |  18
-rw-r--r--  net/hsr/hsr_framereg.c           |   3
-rw-r--r--  net/ipv4/tcp_input.c             |  11
-rw-r--r--  net/ipv4/tcp_output.c            |  13
-rw-r--r--  net/ipv4/udp.c                   |  30
-rw-r--r--  net/ipv6/ip6_gre.c               |  11
-rw-r--r--  net/ipv6/udp.c                   |  21
-rw-r--r--  net/netfilter/ipvs/ip_vs_sync.c  |  12
-rw-r--r--  net/netfilter/nf_tables_api.c    |  41
-rw-r--r--  net/nfc/nci/core.c               |   5
-rw-r--r--  net/qrtr/qrtr.c                  |   1
-rw-r--r--  net/rxrpc/recvmsg.c              |   2
-rw-r--r--  net/rxrpc/sendmsg.c              |   2
-rw-r--r--  net/sched/act_ct.c               |  16
-rw-r--r--  net/sched/cls_api.c              |   1
-rw-r--r--  net/sctp/stream.c                |  27
-rw-r--r--  net/smc/af_smc.c                 |  12
-rw-r--r--  net/smc/smc_cdc.c                |   6
-rw-r--r--  net/smc/smc_core.c               | 109
-rw-r--r--  net/smc/smc_core.h               |   5
-rw-r--r--  net/smc/smc_ib.c                 |  16
-rw-r--r--  net/smc/smc_ib.h                 |   1
-rw-r--r--  net/smc/smc_llc.c                | 127
-rw-r--r--  net/smc/smc_llc.h                |   2
-rw-r--r--  net/sunrpc/xprtrdma/rpc_rdma.c   |   4
-rw-r--r--  net/sunrpc/xprtrdma/transport.c  |   5
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c      |  35
-rw-r--r--  net/tipc/link.c                  |   2
-rw-r--r--  net/vmw_vsock/virtio_transport.c |   2
35 files changed, 302 insertions(+), 257 deletions(-)
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 17bf31a89692..269ee89d2c2b 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1187,7 +1187,10 @@ static int __must_check ax25_connect(struct socket *sock,
if (addr_len > sizeof(struct sockaddr_ax25) &&
fsa->fsa_ax25.sax25_ndigis != 0) {
/* Valid number of digipeaters ? */
- if (fsa->fsa_ax25.sax25_ndigis < 1 || fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS) {
+ if (fsa->fsa_ax25.sax25_ndigis < 1 ||
+ fsa->fsa_ax25.sax25_ndigis > AX25_MAX_DIGIS ||
+ addr_len < sizeof(struct sockaddr_ax25) +
+ sizeof(ax25_address) * fsa->fsa_ax25.sax25_ndigis) {
err = -EINVAL;
goto out_release;
}
@@ -1507,7 +1510,10 @@ static int ax25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)usax;
/* Valid number of digipeaters ? */
- if (usax->sax25_ndigis < 1 || usax->sax25_ndigis > AX25_MAX_DIGIS) {
+ if (usax->sax25_ndigis < 1 ||
+ usax->sax25_ndigis > AX25_MAX_DIGIS ||
+ addr_len < sizeof(struct sockaddr_ax25) +
+ sizeof(ax25_address) * usax->sax25_ndigis) {
err = -EINVAL;
goto out;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index a986b07ea845..fe2e387eed29 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5609,7 +5609,7 @@ static void flush_backlog(struct work_struct *work)
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->input_pkt_queue);
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
input_queue_head_incr(sd);
}
}
diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
index b8cf6ff5f961..d4474c812b64 100644
--- a/net/core/flow_offload.c
+++ b/net/core/flow_offload.c
@@ -4,6 +4,7 @@
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
+#include <linux/rhashtable.h>
struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index e353b822bb15..7bd6440c63bf 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1108,7 +1108,7 @@ static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf)
trans_timeout = queue->trans_timeout;
spin_unlock_irq(&queue->_xmit_lock);
- return sprintf(buf, "%lu", trans_timeout);
+ return sprintf(buf, fmt_ulong, trans_timeout);
}
static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9aedc15736ad..85a4b0101f76 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3343,7 +3343,8 @@ replay:
*/
if (err < 0) {
/* If device is not registered at all, free it now */
- if (dev->reg_state == NETREG_UNINITIALIZED)
+ if (dev->reg_state == NETREG_UNINITIALIZED ||
+ dev->reg_state == NETREG_UNREGISTERED)
free_netdev(dev);
goto out;
}
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index adcb3aea576d..bbdd3c7b6cb5 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -101,6 +101,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
more_reuse->prog = reuse->prog;
more_reuse->reuseport_id = reuse->reuseport_id;
more_reuse->bind_inany = reuse->bind_inany;
+ more_reuse->has_conns = reuse->has_conns;
memcpy(more_reuse->socks, reuse->socks,
reuse->num_socks * sizeof(struct sock *));
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
index 92c8ad75200b..ab8dca0c0b65 100644
--- a/net/hsr/hsr_forward.c
+++ b/net/hsr/hsr_forward.c
@@ -120,13 +120,18 @@ static struct sk_buff *frame_get_stripped_skb(struct hsr_frame_info *frame,
return skb_clone(frame->skb_std, GFP_ATOMIC);
}
-static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame,
- struct hsr_port *port, u8 proto_version)
+static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
+ struct hsr_frame_info *frame,
+ struct hsr_port *port, u8 proto_version)
{
struct hsr_ethhdr *hsr_ethhdr;
int lane_id;
int lsdu_size;
+ /* pad to minimum packet size which is 60 + 6 (HSR tag) */
+ if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
+ return NULL;
+
if (port->type == HSR_PT_SLAVE_A)
lane_id = 0;
else
@@ -144,6 +149,8 @@ static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame,
hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
hsr_ethhdr->ethhdr.h_proto = htons(proto_version ?
ETH_P_HSR : ETH_P_PRP);
+
+ return skb;
}
static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o,
@@ -172,9 +179,10 @@ static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o,
memmove(dst, src, movelen);
skb_reset_mac_header(skb);
- hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
-
- return skb;
+ /* skb_put_padto() frees the skb on error, and hsr_fill_tag() returns
+ * NULL in that case
+ */
+ return hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
}
/* If the original frame was an HSR tagged frame, just clone it to be sent
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 03b891904314..530de24b1fb5 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -325,7 +325,8 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
if (port->type != node_dst->addr_B_port)
return;
- ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B);
+ if (is_valid_ether_addr(node_dst->macaddress_B))
+ ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->macaddress_B);
}
void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 82906deb7874..a018bafd7bdf 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3514,10 +3514,8 @@ static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
}
}
-/* This routine deals with acks during a TLP episode.
- * We mark the end of a TLP episode on receiving TLP dupack or when
- * ack is after tlp_high_seq.
- * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe.
+/* This routine deals with acks during a TLP episode and ends an episode by
+ * resetting tlp_high_seq. Ref: TLP algorithm in draft-ietf-tcpm-rack
*/
static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
{
@@ -3526,7 +3524,10 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
if (before(ack, tp->tlp_high_seq))
return;
- if (flag & FLAG_DSACKING_ACK) {
+ if (!tp->tlp_retrans) {
+ /* TLP of new data has been acknowledged */
+ tp->tlp_high_seq = 0;
+ } else if (flag & FLAG_DSACKING_ACK) {
/* This DSACK means original and TLP probe arrived; no loss */
tp->tlp_high_seq = 0;
} else if (after(ack, tp->tlp_high_seq)) {
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dc0117013ba5..d8f16f6a9b02 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2632,6 +2632,11 @@ void tcp_send_loss_probe(struct sock *sk)
int pcount;
int mss = tcp_current_mss(sk);
+ /* At most one outstanding TLP */
+ if (tp->tlp_high_seq)
+ goto rearm_timer;
+
+ tp->tlp_retrans = 0;
skb = tcp_send_head(sk);
if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
pcount = tp->packets_out;
@@ -2649,10 +2654,6 @@ void tcp_send_loss_probe(struct sock *sk)
return;
}
- /* At most one outstanding TLP retransmission. */
- if (tp->tlp_high_seq)
- goto rearm_timer;
-
if (skb_still_in_host_queue(sk, skb))
goto rearm_timer;
@@ -2674,10 +2675,12 @@ void tcp_send_loss_probe(struct sock *sk)
if (__tcp_retransmit_skb(sk, skb, 1))
goto rearm_timer;
+ tp->tlp_retrans = 1;
+
+probe_sent:
/* Record snd_nxt for loss detection. */
tp->tlp_high_seq = tp->snd_nxt;
-probe_sent:
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
/* Reset s.t. tcp_rearm_rto will restart timer from now */
inet_csk(sk)->icsk_pending = 0;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5a6a2f6d86b9..0fb5e4ea133f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -409,10 +409,10 @@ static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
udp_ehash_secret + net_hash_mix(net));
}
-static inline struct sock *lookup_reuseport(struct net *net, struct sock *sk,
- struct sk_buff *skb,
- __be32 saddr, __be16 sport,
- __be32 daddr, unsigned short hnum)
+static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
+ struct sk_buff *skb,
+ __be32 saddr, __be16 sport,
+ __be32 daddr, unsigned short hnum)
{
struct sock *reuse_sk = NULL;
u32 hash;
@@ -421,9 +421,6 @@ static inline struct sock *lookup_reuseport(struct net *net, struct sock *sk,
hash = udp_ehashfn(net, daddr, hnum, saddr, sport);
reuse_sk = reuseport_select_sock(sk, hash, skb,
sizeof(struct udphdr));
- /* Fall back to scoring if group has connections */
- if (reuseport_has_conns(sk, false))
- return NULL;
}
return reuse_sk;
}
@@ -447,21 +444,22 @@ static struct sock *udp4_lib_lookup2(struct net *net,
if (score > badness) {
result = lookup_reuseport(net, sk, skb,
saddr, sport, daddr, hnum);
- if (result)
+ /* Fall back to scoring if group has connections */
+ if (result && !reuseport_has_conns(sk, false))
return result;
+ result = result ? : sk;
badness = score;
- result = sk;
}
}
return result;
}
-static inline struct sock *udp4_lookup_run_bpf(struct net *net,
- struct udp_table *udptable,
- struct sk_buff *skb,
- __be32 saddr, __be16 sport,
- __be32 daddr, u16 hnum)
+static struct sock *udp4_lookup_run_bpf(struct net *net,
+ struct udp_table *udptable,
+ struct sk_buff *skb,
+ __be32 saddr, __be16 sport,
+ __be32 daddr, u16 hnum)
{
struct sock *sk, *reuse_sk;
bool no_reuseport;
@@ -475,7 +473,7 @@ static inline struct sock *udp4_lookup_run_bpf(struct net *net,
return sk;
reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
- if (reuse_sk)
+ if (reuse_sk && !reuseport_has_conns(sk, false))
sk = reuse_sk;
return sk;
}
@@ -2107,7 +2105,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
/*
* UDP-Lite specific tests, ignored on UDP sockets
*/
- if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
+ if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
/*
* MIB statistics other than incrementing the error count are
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 6532bde82b40..3a57fb9ce049 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1562,17 +1562,18 @@ static void ip6gre_destroy_tunnels(struct net *net, struct list_head *head)
static int __net_init ip6gre_init_net(struct net *net)
{
struct ip6gre_net *ign = net_generic(net, ip6gre_net_id);
+ struct net_device *ndev;
int err;
if (!net_has_fallback_tunnels(net))
return 0;
- ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
- NET_NAME_UNKNOWN,
- ip6gre_tunnel_setup);
- if (!ign->fb_tunnel_dev) {
+ ndev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
+ NET_NAME_UNKNOWN, ip6gre_tunnel_setup);
+ if (!ndev) {
err = -ENOMEM;
goto err_alloc_dev;
}
+ ign->fb_tunnel_dev = ndev;
dev_net_set(ign->fb_tunnel_dev, net);
/* FB netdevice is special: we have one, and only one per netns.
* Allowing to move it to another netns is clearly unsafe.
@@ -1592,7 +1593,7 @@ static int __net_init ip6gre_init_net(struct net *net)
return 0;
err_reg_dev:
- free_netdev(ign->fb_tunnel_dev);
+ free_netdev(ndev);
err_alloc_dev:
return err;
}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 15818e18655d..5530c9dcb61c 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -141,12 +141,12 @@ static int compute_score(struct sock *sk, struct net *net,
return score;
}
-static inline struct sock *lookup_reuseport(struct net *net, struct sock *sk,
- struct sk_buff *skb,
- const struct in6_addr *saddr,
- __be16 sport,
- const struct in6_addr *daddr,
- unsigned int hnum)
+static struct sock *lookup_reuseport(struct net *net, struct sock *sk,
+ struct sk_buff *skb,
+ const struct in6_addr *saddr,
+ __be16 sport,
+ const struct in6_addr *daddr,
+ unsigned int hnum)
{
struct sock *reuse_sk = NULL;
u32 hash;
@@ -180,10 +180,11 @@ static struct sock *udp6_lib_lookup2(struct net *net,
if (score > badness) {
result = lookup_reuseport(net, sk, skb,
saddr, sport, daddr, hnum);
- if (result)
+ /* Fall back to scoring if group has connections */
+ if (result && !reuseport_has_conns(sk, false))
return result;
- result = sk;
+ result = result ? : sk;
badness = score;
}
}
@@ -210,7 +211,7 @@ static inline struct sock *udp6_lookup_run_bpf(struct net *net,
return sk;
reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
- if (reuse_sk)
+ if (reuse_sk && !reuseport_has_conns(sk, false))
sk = reuse_sk;
return sk;
}
@@ -700,7 +701,7 @@ static int udpv6_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
/*
* UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
*/
- if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
+ if ((up->pcflag & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
if (up->pcrlen == 0) { /* full coverage was set */
net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 605e0f68f8bd..2b8abbfe018c 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1717,6 +1717,8 @@ static int sync_thread_backup(void *data)
{
struct ip_vs_sync_thread_data *tinfo = data;
struct netns_ipvs *ipvs = tinfo->ipvs;
+ struct sock *sk = tinfo->sock->sk;
+ struct udp_sock *up = udp_sk(sk);
int len;
pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, "
@@ -1724,12 +1726,14 @@ static int sync_thread_backup(void *data)
ipvs->bcfg.mcast_ifn, ipvs->bcfg.syncid, tinfo->id);
while (!kthread_should_stop()) {
- wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
- !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue)
- || kthread_should_stop());
+ wait_event_interruptible(*sk_sleep(sk),
+ !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
+ !skb_queue_empty_lockless(&up->reader_queue) ||
+ kthread_should_stop());
/* do we have data now? */
- while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) {
+ while (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
+ !skb_queue_empty_lockless(&up->reader_queue)) {
len = ip_vs_receive(tinfo->sock, tinfo->buf,
ipvs->bcfg.sync_maxlen);
if (len <= 0) {
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 6708a4f2eec8..de70a7c4e769 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -188,24 +188,6 @@ static void nft_netdev_unregister_hooks(struct net *net,
nf_unregister_net_hook(net, &hook->ops);
}
-static int nft_register_basechain_hooks(struct net *net, int family,
- struct nft_base_chain *basechain)
-{
- if (family == NFPROTO_NETDEV)
- return nft_netdev_register_hooks(net, &basechain->hook_list);
-
- return nf_register_net_hook(net, &basechain->ops);
-}
-
-static void nft_unregister_basechain_hooks(struct net *net, int family,
- struct nft_base_chain *basechain)
-{
- if (family == NFPROTO_NETDEV)
- nft_netdev_unregister_hooks(net, &basechain->hook_list);
- else
- nf_unregister_net_hook(net, &basechain->ops);
-}
-
static int nf_tables_register_hook(struct net *net,
const struct nft_table *table,
struct nft_chain *chain)
@@ -223,7 +205,10 @@ static int nf_tables_register_hook(struct net *net,
if (basechain->type->ops_register)
return basechain->type->ops_register(net, ops);
- return nft_register_basechain_hooks(net, table->family, basechain);
+ if (table->family == NFPROTO_NETDEV)
+ return nft_netdev_register_hooks(net, &basechain->hook_list);
+
+ return nf_register_net_hook(net, &basechain->ops);
}
static void nf_tables_unregister_hook(struct net *net,
@@ -242,7 +227,10 @@ static void nf_tables_unregister_hook(struct net *net,
if (basechain->type->ops_unregister)
return basechain->type->ops_unregister(net, ops);
- nft_unregister_basechain_hooks(net, table->family, basechain);
+ if (table->family == NFPROTO_NETDEV)
+ nft_netdev_unregister_hooks(net, &basechain->hook_list);
+ else
+ nf_unregister_net_hook(net, &basechain->ops);
}
static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type)
@@ -838,8 +826,7 @@ static void nft_table_disable(struct net *net, struct nft_table *table, u32 cnt)
if (cnt && i++ == cnt)
break;
- nft_unregister_basechain_hooks(net, table->family,
- nft_base_chain(chain));
+ nf_tables_unregister_hook(net, table, chain);
}
}
@@ -854,8 +841,7 @@ static int nf_tables_table_enable(struct net *net, struct nft_table *table)
if (!nft_is_base_chain(chain))
continue;
- err = nft_register_basechain_hooks(net, table->family,
- nft_base_chain(chain));
+ err = nf_tables_register_hook(net, table, chain);
if (err < 0)
goto err_register_hooks;
@@ -900,11 +886,12 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
nft_trans_table_enable(trans) = false;
} else if (!(flags & NFT_TABLE_F_DORMANT) &&
ctx->table->flags & NFT_TABLE_F_DORMANT) {
+ ctx->table->flags &= ~NFT_TABLE_F_DORMANT;
ret = nf_tables_table_enable(ctx->net, ctx->table);
- if (ret >= 0) {
- ctx->table->flags &= ~NFT_TABLE_F_DORMANT;
+ if (ret >= 0)
nft_trans_table_enable(trans) = true;
- }
+ else
+ ctx->table->flags |= NFT_TABLE_F_DORMANT;
}
if (ret < 0)
goto err;
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index f7b7dc5fe84a..741da8f81c2b 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -1228,10 +1228,13 @@ int nci_register_device(struct nci_dev *ndev)
rc = nfc_register_device(ndev->nfc_dev);
if (rc)
- goto destroy_rx_wq_exit;
+ goto destroy_tx_wq_exit;
goto exit;
+destroy_tx_wq_exit:
+ destroy_workqueue(ndev->tx_wq);
+
destroy_rx_wq_exit:
destroy_workqueue(ndev->rx_wq);
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 0cb4adfc6641..b4c0db0b7d31 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -1180,6 +1180,7 @@ static int qrtr_release(struct socket *sock)
sk->sk_state_change(sk);
sock_set_flag(sk, SOCK_DEAD);
+ sock_orphan(sk);
sock->sk = NULL;
if (!sock_flag(sk, SOCK_ZAPPED))
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 2989742a4aa1..490b1927215c 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -543,7 +543,7 @@ try_again:
list_empty(&rx->recvmsg_q) &&
rx->sk.sk_state != RXRPC_SERVER_LISTENING) {
release_sock(&rx->sk);
- return -ENODATA;
+ return -EAGAIN;
}
if (list_empty(&rx->recvmsg_q)) {
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 1304b8608f56..03a30d014bb6 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -304,7 +304,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
/* this should be in poll */
sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
- if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
return -EPIPE;
more = msg->msg_flags & MSG_MORE;
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 3893e03454db..f2f663e88610 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -673,9 +673,10 @@ static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
}
static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
- u8 family, u16 zone)
+ u8 family, u16 zone, bool *defrag)
{
enum ip_conntrack_info ctinfo;
+ struct qdisc_skb_cb cb;
struct nf_conn *ct;
int err = 0;
bool frag;
@@ -693,6 +694,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
return err;
skb_get(skb);
+ cb = *qdisc_skb_cb(skb);
if (family == NFPROTO_IPV4) {
enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
@@ -703,6 +705,9 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
local_bh_enable();
if (err && err != -EINPROGRESS)
goto out_free;
+
+ if (!err)
+ *defrag = true;
} else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
@@ -711,12 +716,16 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
err = nf_ct_frag6_gather(net, skb, user);
if (err && err != -EINPROGRESS)
goto out_free;
+
+ if (!err)
+ *defrag = true;
#else
err = -EOPNOTSUPP;
goto out_free;
#endif
}
+ *qdisc_skb_cb(skb) = cb;
skb_clear_hash(skb);
skb->ignore_df = 1;
return err;
@@ -914,6 +923,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
int nh_ofs, err, retval;
struct tcf_ct_params *p;
bool skip_add = false;
+ bool defrag = false;
struct nf_conn *ct;
u8 family;
@@ -946,7 +956,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
*/
nh_ofs = skb_network_offset(skb);
skb_pull_rcsum(skb, nh_ofs);
- err = tcf_ct_handle_fragments(net, skb, family, p->zone);
+ err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
if (err == -EINPROGRESS) {
retval = TC_ACT_STOLEN;
goto out;
@@ -1014,6 +1024,8 @@ out_push:
out:
tcf_action_update_bstats(&c->common, skb);
+ if (defrag)
+ qdisc_skb_cb(skb)->pkt_len = skb->len;
return retval;
drop:
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index b2b7440c2ae7..0b8623b3b24f 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -20,7 +20,6 @@
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
-#include <linux/rhashtable.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <net/net_namespace.h>
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 67f7e71f9129..bda2536dd740 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -22,17 +22,11 @@
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>
-/* Migrates chunks from stream queues to new stream queues if needed,
- * but not across associations. Also, removes those chunks to streams
- * higher than the new max.
- */
-static void sctp_stream_outq_migrate(struct sctp_stream *stream,
- struct sctp_stream *new, __u16 outcnt)
+static void sctp_stream_shrink_out(struct sctp_stream *stream, __u16 outcnt)
{
struct sctp_association *asoc;
struct sctp_chunk *ch, *temp;
struct sctp_outq *outq;
- int i;
asoc = container_of(stream, struct sctp_association, stream);
outq = &asoc->outqueue;
@@ -56,6 +50,19 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
sctp_chunk_free(ch);
}
+}
+
+/* Migrates chunks from stream queues to new stream queues if needed,
+ * but not across associations. Also, removes those chunks to streams
+ * higher than the new max.
+ */
+static void sctp_stream_outq_migrate(struct sctp_stream *stream,
+ struct sctp_stream *new, __u16 outcnt)
+{
+ int i;
+
+ if (stream->outcnt > outcnt)
+ sctp_stream_shrink_out(stream, outcnt);
if (new) {
/* Here we actually move the old ext stuff into the new
@@ -1037,11 +1044,13 @@ struct sctp_chunk *sctp_process_strreset_resp(
nums = ntohs(addstrm->number_of_streams);
number = stream->outcnt - nums;
- if (result == SCTP_STRRESET_PERFORMED)
+ if (result == SCTP_STRRESET_PERFORMED) {
for (i = number; i < stream->outcnt; i++)
SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
- else
+ } else {
+ sctp_stream_shrink_out(stream, number);
stream->outcnt = number;
+ }
*evp = sctp_ulpevent_make_stream_change_event(asoc, flags,
0, nums, GFP_ATOMIC);
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 4ac1d4de6676..832e36269b10 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -126,8 +126,10 @@ EXPORT_SYMBOL_GPL(smc_proto6);
static void smc_restore_fallback_changes(struct smc_sock *smc)
{
- smc->clcsock->file->private_data = smc->sk.sk_socket;
- smc->clcsock->file = NULL;
+ if (smc->clcsock->file) { /* non-accepted sockets have no file yet */
+ smc->clcsock->file->private_data = smc->sk.sk_socket;
+ smc->clcsock->file = NULL;
+ }
}
static int __smc_release(struct smc_sock *smc)
@@ -352,7 +354,7 @@ static int smcr_lgr_reg_rmbs(struct smc_link *link,
*/
mutex_lock(&lgr->llc_conf_mutex);
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
- if (lgr->lnk[i].state != SMC_LNK_ACTIVE)
+ if (!smc_link_active(&lgr->lnk[i]))
continue;
rc = smcr_link_reg_rmb(&lgr->lnk[i], rmb_desc);
if (rc)
@@ -632,7 +634,9 @@ static int smc_connect_rdma(struct smc_sock *smc,
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
struct smc_link *l = &smc->conn.lgr->lnk[i];
- if (l->peer_qpn == ntoh24(aclc->qpn)) {
+ if (l->peer_qpn == ntoh24(aclc->qpn) &&
+ !memcmp(l->peer_gid, &aclc->lcl.gid, SMC_GID_SIZE) &&
+ !memcmp(l->peer_mac, &aclc->lcl.mac, sizeof(l->peer_mac))) {
link = l;
break;
}
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index a47e8855e045..ce468ff62a19 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -66,9 +66,13 @@ int smc_cdc_get_free_slot(struct smc_connection *conn,
rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf,
wr_rdma_buf,
(struct smc_wr_tx_pend_priv **)pend);
- if (conn->killed)
+ if (conn->killed) {
/* abnormal termination */
+ if (!rc)
+ smc_wr_tx_put_slot(link,
+ (struct smc_wr_tx_pend_priv *)pend);
rc = -EPIPE;
+ }
return rc;
}
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index f69d205b3e11..f82a2e599917 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -45,18 +45,10 @@ static struct smc_lgr_list smc_lgr_list = { /* established link groups */
static atomic_t lgr_cnt = ATOMIC_INIT(0); /* number of existing link groups */
static DECLARE_WAIT_QUEUE_HEAD(lgrs_deleted);
-struct smc_ib_up_work {
- struct work_struct work;
- struct smc_link_group *lgr;
- struct smc_ib_device *smcibdev;
- u8 ibport;
-};
-
static void smc_buf_free(struct smc_link_group *lgr, bool is_rmb,
struct smc_buf_desc *buf_desc);
static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft);
-static void smc_link_up_work(struct work_struct *work);
static void smc_link_down_work(struct work_struct *work);
/* return head of link group list and its lock for a given link group */
@@ -326,7 +318,6 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
get_device(&ini->ib_dev->ibdev->dev);
atomic_inc(&ini->ib_dev->lnk_cnt);
- lnk->state = SMC_LNK_ACTIVATING;
lnk->link_id = smcr_next_link_id(lgr);
lnk->lgr = lgr;
lnk->link_idx = link_idx;
@@ -362,6 +353,7 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
rc = smc_wr_create_link(lnk);
if (rc)
goto destroy_qp;
+ lnk->state = SMC_LNK_ACTIVATING;
return 0;
destroy_qp:
@@ -452,7 +444,7 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
}
smc->conn.lgr = lgr;
spin_lock_bh(lgr_lock);
- list_add(&lgr->list, lgr_list);
+ list_add_tail(&lgr->list, lgr_list);
spin_unlock_bh(lgr_lock);
return 0;
@@ -550,8 +542,7 @@ struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
smc_wr_wakeup_tx_wait(from_lnk);
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
- if (lgr->lnk[i].state != SMC_LNK_ACTIVE ||
- i == from_lnk->link_idx)
+ if (!smc_link_active(&lgr->lnk[i]) || i == from_lnk->link_idx)
continue;
if (is_dev_err && from_lnk->smcibdev == lgr->lnk[i].smcibdev &&
from_lnk->ibport == lgr->lnk[i].ibport) {
@@ -1106,67 +1097,23 @@ static void smc_conn_abort_work(struct work_struct *work)
sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */
}
-/* link is up - establish alternate link if applicable */
-static void smcr_link_up(struct smc_link_group *lgr,
- struct smc_ib_device *smcibdev, u8 ibport)
-{
- struct smc_link *link = NULL;
-
- if (list_empty(&lgr->list) ||
- lgr->type == SMC_LGR_SYMMETRIC ||
- lgr->type == SMC_LGR_ASYMMETRIC_PEER)
- return;
-
- if (lgr->role == SMC_SERV) {
- /* trigger local add link processing */
- link = smc_llc_usable_link(lgr);
- if (!link)
- return;
- smc_llc_srv_add_link_local(link);
- } else {
- /* invite server to start add link processing */
- u8 gid[SMC_GID_SIZE];
-
- if (smc_ib_determine_gid(smcibdev, ibport, lgr->vlan_id, gid,
- NULL))
- return;
- if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
- /* some other llc task is ongoing */
- wait_event_timeout(lgr->llc_flow_waiter,
- (list_empty(&lgr->list) ||
- lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
- SMC_LLC_WAIT_TIME);
- }
- /* lgr or device no longer active? */
- if (!list_empty(&lgr->list) &&
- smc_ib_port_active(smcibdev, ibport))
- link = smc_llc_usable_link(lgr);
- if (link)
- smc_llc_send_add_link(link, smcibdev->mac[ibport - 1],
- gid, NULL, SMC_LLC_REQ);
- wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */
- }
-}
-
void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport)
{
- struct smc_ib_up_work *ib_work;
struct smc_link_group *lgr, *n;
list_for_each_entry_safe(lgr, n, &smc_lgr_list.list, list) {
+ struct smc_link *link;
+
if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
SMC_MAX_PNETID_LEN) ||
lgr->type == SMC_LGR_SYMMETRIC ||
lgr->type == SMC_LGR_ASYMMETRIC_PEER)
continue;
- ib_work = kmalloc(sizeof(*ib_work), GFP_KERNEL);
- if (!ib_work)
- continue;
- INIT_WORK(&ib_work->work, smc_link_up_work);
- ib_work->lgr = lgr;
- ib_work->smcibdev = smcibdev;
- ib_work->ibport = ibport;
- schedule_work(&ib_work->work);
+
+ /* trigger local add link processing */
+ link = smc_llc_usable_link(lgr);
+ if (link)
+ smc_llc_add_link_local(link);
}
}
@@ -1204,10 +1151,12 @@ static void smcr_link_down(struct smc_link *lnk)
SMC_LLC_WAIT_TIME);
mutex_lock(&lgr->llc_conf_mutex);
}
- if (!list_empty(&lgr->list))
+ if (!list_empty(&lgr->list)) {
smc_llc_send_delete_link(to_lnk, del_link_id,
SMC_LLC_REQ, true,
SMC_LLC_DEL_LOST_PATH);
+ smcr_link_clear(lnk, true);
+ }
wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */
}
}
@@ -1247,20 +1196,6 @@ void smcr_port_err(struct smc_ib_device *smcibdev, u8 ibport)
}
}
-static void smc_link_up_work(struct work_struct *work)
-{
- struct smc_ib_up_work *ib_work = container_of(work,
- struct smc_ib_up_work,
- work);
- struct smc_link_group *lgr = ib_work->lgr;
-
- if (list_empty(&lgr->list))
- goto out;
- smcr_link_up(lgr, ib_work->smcibdev, ib_work->ibport);
-out:
- kfree(ib_work);
-}
-
static void smc_link_down_work(struct work_struct *work)
{
struct smc_link *link = container_of(work, struct smc_link,
@@ -1333,7 +1268,7 @@ static bool smcr_lgr_match(struct smc_link_group *lgr,
return false;
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
- if (lgr->lnk[i].state != SMC_LNK_ACTIVE)
+ if (!smc_link_active(&lgr->lnk[i]))
continue;
if ((lgr->role == SMC_SERV || lgr->lnk[i].peer_qpn == clcqpn) &&
!memcmp(lgr->lnk[i].peer_gid, &lcl->gid, SMC_GID_SIZE) &&
@@ -1376,7 +1311,7 @@ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) &&
!lgr->sync_err &&
lgr->vlan_id == ini->vlan_id &&
- (role == SMC_CLNT ||
+ (role == SMC_CLNT || ini->is_smcd ||
lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) {
/* link group found */
ini->cln_first_contact = SMC_REUSE_CONTACT;
@@ -1781,14 +1716,14 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
{
- if (!conn->lgr || conn->lgr->is_smcd || !smc_link_usable(conn->lnk))
+ if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk))
return;
smc_ib_sync_sg_for_cpu(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
}
void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
{
- if (!conn->lgr || conn->lgr->is_smcd || !smc_link_usable(conn->lnk))
+ if (!conn->lgr || conn->lgr->is_smcd || !smc_link_active(conn->lnk))
return;
smc_ib_sync_sg_for_device(conn->lnk, conn->sndbuf_desc, DMA_TO_DEVICE);
}
@@ -1800,7 +1735,7 @@ void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
if (!conn->lgr || conn->lgr->is_smcd)
return;
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
- if (!smc_link_usable(&conn->lgr->lnk[i]))
+ if (!smc_link_active(&conn->lgr->lnk[i]))
continue;
smc_ib_sync_sg_for_cpu(&conn->lgr->lnk[i], conn->rmb_desc,
DMA_FROM_DEVICE);
@@ -1814,7 +1749,7 @@ void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
if (!conn->lgr || conn->lgr->is_smcd)
return;
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
- if (!smc_link_usable(&conn->lgr->lnk[i]))
+ if (!smc_link_active(&conn->lgr->lnk[i]))
continue;
smc_ib_sync_sg_for_device(&conn->lgr->lnk[i], conn->rmb_desc,
DMA_FROM_DEVICE);
@@ -1837,8 +1772,12 @@ int smc_buf_create(struct smc_sock *smc, bool is_smcd)
return rc;
/* create rmb */
rc = __smc_buf_create(smc, is_smcd, true);
- if (rc)
+ if (rc) {
+ mutex_lock(&smc->conn.lgr->sndbufs_lock);
+ list_del(&smc->conn.sndbuf_desc->list);
+ mutex_unlock(&smc->conn.lgr->sndbufs_lock);
smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
+ }
return rc;
}
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index c3ff512fd891..1c4d5439d0ff 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -349,6 +349,11 @@ static inline bool smc_link_usable(struct smc_link *lnk)
return true;
}
+static inline bool smc_link_active(struct smc_link *lnk)
+{
+ return lnk->state == SMC_LNK_ACTIVE;
+}
+
struct smc_sock;
struct smc_clc_msg_accept_confirm;
struct smc_clc_msg_local;
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 7637fdebbb78..1c314dbdc7fa 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -506,6 +506,10 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
int cqe_size_order, smc_order;
long rc;
+ mutex_lock(&smcibdev->mutex);
+ rc = 0;
+ if (smcibdev->initialized)
+ goto out;
/* the calculated number of cq entries fits to mlx5 cq allocation */
cqe_size_order = cache_line_size() == 128 ? 7 : 6;
smc_order = MAX_ORDER - cqe_size_order - 1;
@@ -517,7 +521,7 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
if (IS_ERR(smcibdev->roce_cq_send)) {
smcibdev->roce_cq_send = NULL;
- return rc;
+ goto out;
}
smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
smc_wr_rx_cq_handler, NULL,
@@ -529,21 +533,26 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
}
smc_wr_add_dev(smcibdev);
smcibdev->initialized = 1;
- return rc;
+ goto out;
err:
ib_destroy_cq(smcibdev->roce_cq_send);
+out:
+ mutex_unlock(&smcibdev->mutex);
return rc;
}
static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
{
+ mutex_lock(&smcibdev->mutex);
if (!smcibdev->initialized)
- return;
+ goto out;
smcibdev->initialized = 0;
ib_destroy_cq(smcibdev->roce_cq_recv);
ib_destroy_cq(smcibdev->roce_cq_send);
smc_wr_remove_dev(smcibdev);
+out:
+ mutex_unlock(&smcibdev->mutex);
}
static struct ib_client smc_ib_client;
@@ -566,6 +575,7 @@ static int smc_ib_add_dev(struct ib_device *ibdev)
INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);
atomic_set(&smcibdev->lnk_cnt, 0);
init_waitqueue_head(&smcibdev->lnks_deleted);
+ mutex_init(&smcibdev->mutex);
mutex_lock(&smc_ib_devices.mutex);
list_add_tail(&smcibdev->list, &smc_ib_devices.list);
mutex_unlock(&smc_ib_devices.mutex);
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index ae6776e1e726..2ce481187dd0 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -52,6 +52,7 @@ struct smc_ib_device { /* ib-device infos for smc */
DECLARE_BITMAP(ports_going_away, SMC_MAX_PORTS);
atomic_t lnk_cnt; /* number of links on ibdev */
wait_queue_head_t lnks_deleted; /* wait 4 removal of all links*/
+ struct mutex mutex; /* protect dev setup+cleanup */
};
struct smc_buf_desc;
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index c1a038689c63..df5b0a6ea848 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -428,7 +428,7 @@ static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
rtok_ix = 1;
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
link = &send_link->lgr->lnk[i];
- if (link->state == SMC_LNK_ACTIVE && link != send_link) {
+ if (smc_link_active(link) && link != send_link) {
rkeyllc->rtoken[rtok_ix].link_id = link->link_id;
rkeyllc->rtoken[rtok_ix].rmb_key =
htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
@@ -895,6 +895,36 @@ out:
return rc;
}
+/* as an SMC client, invite server to start the add_link processing */
+static void smc_llc_cli_add_link_invite(struct smc_link *link,
+ struct smc_llc_qentry *qentry)
+{
+ struct smc_link_group *lgr = smc_get_lgr(link);
+ struct smc_init_info ini;
+
+ if (lgr->type == SMC_LGR_SYMMETRIC ||
+ lgr->type == SMC_LGR_ASYMMETRIC_PEER)
+ goto out;
+
+ ini.vlan_id = lgr->vlan_id;
+ smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
+ if (!ini.ib_dev)
+ goto out;
+
+ smc_llc_send_add_link(link, ini.ib_dev->mac[ini.ib_port - 1],
+ ini.ib_gid, NULL, SMC_LLC_REQ);
+out:
+ kfree(qentry);
+}
+
+static bool smc_llc_is_local_add_link(union smc_llc_msg *llc)
+{
+ if (llc->raw.hdr.common.type == SMC_LLC_ADD_LINK &&
+ !llc->add_link.qp_mtu && !llc->add_link.link_num)
+ return true;
+ return false;
+}
+
static void smc_llc_process_cli_add_link(struct smc_link_group *lgr)
{
struct smc_llc_qentry *qentry;
@@ -902,7 +932,10 @@ static void smc_llc_process_cli_add_link(struct smc_link_group *lgr)
qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
mutex_lock(&lgr->llc_conf_mutex);
- smc_llc_cli_add_link(qentry->link, qentry);
+ if (smc_llc_is_local_add_link(&qentry->msg))
+ smc_llc_cli_add_link_invite(qentry->link, qentry);
+ else
+ smc_llc_cli_add_link(qentry->link, qentry);
mutex_unlock(&lgr->llc_conf_mutex);
}
@@ -911,7 +944,7 @@ static int smc_llc_active_link_count(struct smc_link_group *lgr)
int i, link_count = 0;
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
- if (!smc_link_usable(&lgr->lnk[i]))
+ if (!smc_link_active(&lgr->lnk[i]))
continue;
link_count++;
}
@@ -1051,12 +1084,14 @@ static int smc_llc_srv_conf_link(struct smc_link *link,
if (rc)
return -ENOLINK;
/* receive CONFIRM LINK response over the RoCE fabric */
- qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME,
- SMC_LLC_CONFIRM_LINK);
- if (!qentry) {
+ qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME, 0);
+ if (!qentry ||
+ qentry->msg.raw.hdr.common.type != SMC_LLC_CONFIRM_LINK) {
/* send DELETE LINK */
smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
false, SMC_LLC_DEL_LOST_PATH);
+ if (qentry)
+ smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
return -ENOLINK;
}
smc_llc_save_peer_uid(qentry);
@@ -1158,14 +1193,14 @@ static void smc_llc_process_srv_add_link(struct smc_link_group *lgr)
mutex_unlock(&lgr->llc_conf_mutex);
}
-/* enqueue a local add_link req to trigger a new add_link flow, only as SERV */
-void smc_llc_srv_add_link_local(struct smc_link *link)
+/* enqueue a local add_link req to trigger a new add_link flow */
+void smc_llc_add_link_local(struct smc_link *link)
{
struct smc_llc_msg_add_link add_llc = {0};
add_llc.hd.length = sizeof(add_llc);
add_llc.hd.common.type = SMC_LLC_ADD_LINK;
- /* no dev and port needed, we as server ignore client data anyway */
+ /* no dev and port needed */
smc_llc_enqueue(link, (union smc_llc_msg *)&add_llc);
}
@@ -1345,7 +1380,7 @@ static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr)
if (lgr->type == SMC_LGR_SINGLE && !list_empty(&lgr->list)) {
/* trigger setup of asymm alt link */
- smc_llc_srv_add_link_local(lnk);
+ smc_llc_add_link_local(lnk);
}
out:
mutex_unlock(&lgr->llc_conf_mutex);
@@ -1474,7 +1509,18 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
if (list_empty(&lgr->list))
goto out; /* lgr is terminating */
if (lgr->role == SMC_CLNT) {
- if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK) {
+ if (smc_llc_is_local_add_link(llc)) {
+ if (lgr->llc_flow_lcl.type ==
+ SMC_LLC_FLOW_ADD_LINK)
+ break; /* add_link in progress */
+ if (smc_llc_flow_start(&lgr->llc_flow_lcl,
+ qentry)) {
+ schedule_work(&lgr->llc_add_link_work);
+ }
+ return;
+ }
+ if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
+ !lgr->llc_flow_lcl.qentry) {
/* a flow is waiting for this message */
smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
qentry);
@@ -1498,28 +1544,13 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
}
break;
case SMC_LLC_DELETE_LINK:
- if (lgr->role == SMC_CLNT) {
- /* server requests to delete this link, send response */
- if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
- /* DEL LINK REQ during ADD LINK SEQ */
- smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
- qentry);
- wake_up(&lgr->llc_msg_waiter);
- } else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
- qentry)) {
- schedule_work(&lgr->llc_del_link_work);
- }
- } else {
- if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
- !lgr->llc_flow_lcl.qentry) {
- /* DEL LINK REQ during ADD LINK SEQ */
- smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
- qentry);
- wake_up(&lgr->llc_msg_waiter);
- } else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
- qentry)) {
- schedule_work(&lgr->llc_del_link_work);
- }
+ if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
+ !lgr->llc_flow_lcl.qentry) {
+ /* DEL LINK REQ during ADD LINK SEQ */
+ smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
+ wake_up(&lgr->llc_msg_waiter);
+ } else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
+ schedule_work(&lgr->llc_del_link_work);
}
return;
case SMC_LLC_CONFIRM_RKEY:
@@ -1585,23 +1616,30 @@ again:
static void smc_llc_rx_response(struct smc_link *link,
struct smc_llc_qentry *qentry)
{
+ enum smc_llc_flowtype flowtype = link->lgr->llc_flow_lcl.type;
+ struct smc_llc_flow *flow = &link->lgr->llc_flow_lcl;
u8 llc_type = qentry->msg.raw.hdr.common.type;
switch (llc_type) {
case SMC_LLC_TEST_LINK:
- if (link->state == SMC_LNK_ACTIVE)
+ if (smc_link_active(link))
complete(&link->llc_testlink_resp);
break;
case SMC_LLC_ADD_LINK:
- case SMC_LLC_DELETE_LINK:
- case SMC_LLC_CONFIRM_LINK:
case SMC_LLC_ADD_LINK_CONT:
+ case SMC_LLC_CONFIRM_LINK:
+ if (flowtype != SMC_LLC_FLOW_ADD_LINK || flow->qentry)
+ break; /* drop out-of-flow response */
+ goto assign;
+ case SMC_LLC_DELETE_LINK:
+ if (flowtype != SMC_LLC_FLOW_DEL_LINK || flow->qentry)
+ break; /* drop out-of-flow response */
+ goto assign;
case SMC_LLC_CONFIRM_RKEY:
case SMC_LLC_DELETE_RKEY:
- /* assign responses to the local flow, we requested them */
- smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry);
- wake_up(&link->lgr->llc_msg_waiter);
- return;
+ if (flowtype != SMC_LLC_FLOW_RKEY || flow->qentry)
+ break; /* drop out-of-flow response */
+ goto assign;
case SMC_LLC_CONFIRM_RKEY_CONT:
/* not used because max links is 3 */
break;
@@ -1610,6 +1648,11 @@ static void smc_llc_rx_response(struct smc_link *link,
break;
}
kfree(qentry);
+ return;
+assign:
+ /* assign responses to the local flow, we requested them */
+ smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry);
+ wake_up(&link->lgr->llc_msg_waiter);
}
static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc)
@@ -1663,7 +1706,7 @@ static void smc_llc_testlink_work(struct work_struct *work)
u8 user_data[16] = { 0 };
int rc;
- if (link->state != SMC_LNK_ACTIVE)
+ if (!smc_link_active(link))
return; /* don't reschedule worker */
expire_time = link->wr_rx_tstamp + link->llc_testlink_time;
if (time_is_after_jiffies(expire_time)) {
@@ -1675,7 +1718,7 @@ static void smc_llc_testlink_work(struct work_struct *work)
/* receive TEST LINK response over RoCE fabric */
rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp,
SMC_LLC_WAIT_TIME);
- if (link->state != SMC_LNK_ACTIVE)
+ if (!smc_link_active(link))
return; /* link state changed */
if (rc <= 0) {
smcr_link_down_cond_sched(link);
diff --git a/net/smc/smc_llc.h b/net/smc/smc_llc.h
index a5d2fe3eea61..cc00a2ec4e92 100644
--- a/net/smc/smc_llc.h
+++ b/net/smc/smc_llc.h
@@ -103,7 +103,7 @@ void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord,
u32 rsn);
int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry);
int smc_llc_srv_add_link(struct smc_link *link);
-void smc_llc_srv_add_link_local(struct smc_link *link);
+void smc_llc_add_link_local(struct smc_link *link);
int smc_llc_init(void) __init;
#endif /* SMC_LLC_H */
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 935bbef2f7be..453bacc99907 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -71,7 +71,7 @@ static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
size = RPCRDMA_HDRLEN_MIN;
/* Maximum Read list size */
- size = maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);
+ size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);
/* Minimal Read chunk size */
size += sizeof(__be32); /* segment count */
@@ -94,7 +94,7 @@ static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
size = RPCRDMA_HDRLEN_MIN;
/* Maximum Write list size */
- size = sizeof(__be32); /* segment count */
+ size += sizeof(__be32); /* segment count */
size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
size += sizeof(__be32); /* list discriminator */
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 14165b673b20..053c8ab1265a 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -249,6 +249,11 @@ xprt_rdma_connect_worker(struct work_struct *work)
xprt->stat.connect_start;
xprt_set_connected(xprt);
rc = -EAGAIN;
+ } else {
+ /* Force a call to xprt_rdma_close to clean up */
+ spin_lock(&xprt->transport_lock);
+ set_bit(XPRT_CLOSE_WAIT, &xprt->state);
+ spin_unlock(&xprt->transport_lock);
}
xprt_wake_pending_tasks(xprt, rc);
}
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 2198c8ec8dff..75c646743df3 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -281,17 +281,19 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
break;
case RDMA_CM_EVENT_CONNECT_ERROR:
ep->re_connect_status = -ENOTCONN;
- goto disconnected;
+ goto wake_connect_worker;
case RDMA_CM_EVENT_UNREACHABLE:
ep->re_connect_status = -ENETUNREACH;
- goto disconnected;
+ goto wake_connect_worker;
case RDMA_CM_EVENT_REJECTED:
dprintk("rpcrdma: connection to %pISpc rejected: %s\n",
sap, rdma_reject_msg(id, event->status));
ep->re_connect_status = -ECONNREFUSED;
if (event->status == IB_CM_REJ_STALE_CONN)
- ep->re_connect_status = -EAGAIN;
- goto disconnected;
+ ep->re_connect_status = -ENOTCONN;
+wake_connect_worker:
+ wake_up_all(&ep->re_connect_wait);
+ return 0;
case RDMA_CM_EVENT_DISCONNECTED:
ep->re_connect_status = -ECONNABORTED;
disconnected:
@@ -400,14 +402,14 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
ep = kzalloc(sizeof(*ep), GFP_NOFS);
if (!ep)
- return -EAGAIN;
+ return -ENOTCONN;
ep->re_xprt = &r_xprt->rx_xprt;
kref_init(&ep->re_kref);
id = rpcrdma_create_id(r_xprt, ep);
if (IS_ERR(id)) {
- rc = PTR_ERR(id);
- goto out_free;
+ kfree(ep);
+ return PTR_ERR(id);
}
__module_get(THIS_MODULE);
device = id->device;
@@ -506,9 +508,6 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
out_destroy:
rpcrdma_ep_put(ep);
rdma_destroy_id(id);
-out_free:
- kfree(ep);
- r_xprt->rx_ep = NULL;
return rc;
}
@@ -524,8 +523,6 @@ int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt)
struct rpcrdma_ep *ep;
int rc;
-retry:
- rpcrdma_xprt_disconnect(r_xprt);
rc = rpcrdma_ep_create(r_xprt);
if (rc)
return rc;
@@ -540,10 +537,6 @@ retry:
rpcrdma_ep_get(ep);
rpcrdma_post_recvs(r_xprt, true);
- rc = rpcrdma_sendctxs_create(r_xprt);
- if (rc)
- goto out;
-
rc = rdma_connect(ep->re_id, &ep->re_remote_cma);
if (rc)
goto out;
@@ -553,15 +546,19 @@ retry:
wait_event_interruptible(ep->re_connect_wait,
ep->re_connect_status != 0);
if (ep->re_connect_status <= 0) {
- if (ep->re_connect_status == -EAGAIN)
- goto retry;
rc = ep->re_connect_status;
goto out;
}
+ rc = rpcrdma_sendctxs_create(r_xprt);
+ if (rc) {
+ rc = -ENOTCONN;
+ goto out;
+ }
+
rc = rpcrdma_reqs_setup(r_xprt);
if (rc) {
- rpcrdma_xprt_disconnect(r_xprt);
+ rc = -ENOTCONN;
goto out;
}
rpcrdma_mrs_create(r_xprt);
diff --git a/net/tipc/link.c b/net/tipc/link.c
index d46d4ee5c4fd..107578122973 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -827,11 +827,11 @@ int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
state |= l->bc_rcvlink->rcv_unacked;
state |= l->rcv_unacked;
state |= !skb_queue_empty(&l->transmq);
- state |= !skb_queue_empty(&l->deferdq);
probe = mstate->probing;
probe |= l->silent_intv_cnt;
if (probe || mstate->monitoring)
l->silent_intv_cnt++;
+ probe |= !skb_queue_empty(&l->deferdq);
if (l->snd_nxt == l->checkpoint) {
tipc_link_update_cwin(l, 0, 0);
probe = true;
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index dfbaf6bd8b1c..2700a63ab095 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -22,7 +22,7 @@
#include <net/af_vsock.h>
static struct workqueue_struct *virtio_vsock_workqueue;
-static struct virtio_vsock *the_virtio_vsock;
+static struct virtio_vsock __rcu *the_virtio_vsock;
static DEFINE_MUTEX(the_virtio_vsock_mutex); /* protects the_virtio_vsock */
struct virtio_vsock {