author    David S. Miller <davem@davemloft.net>  2019-08-19 11:54:03 -0700
committer David S. Miller <davem@davemloft.net>  2019-08-19 11:54:03 -0700
commit    446bf64b613c4433dac4b15f4eaf326beaad3c8e (patch)
tree      b20b9457234f6f9a5d7464213ef6d10f95f3eed3 /net
parent    20e79a0a2cfd15b6cfb18119f2e108396be56716 (diff)
parent    06821504fd47a5e5b641aeeb638a0ae10a216ef8 (diff)
download  linux-446bf64b613c4433dac4b15f4eaf326beaad3c8e.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Merge conflict of mlx5 resolved using instructions in merge commit
9566e650bf7fdf58384bb06df634f7531ca3a97e.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--  net/batman-adv/multicast.c                |   8
-rw-r--r--  net/bluetooth/hci_core.c                  |   1
-rw-r--r--  net/bluetooth/hci_debugfs.c               |  31
-rw-r--r--  net/bluetooth/hidp/core.c                 |   9
-rw-r--r--  net/bluetooth/l2cap_core.c                |   2
-rw-r--r--  net/core/sock.c                           |  19
-rw-r--r--  net/core/sock_diag.c                      |   3
-rw-r--r--  net/dsa/switch.c                          |   3
-rw-r--r--  net/ieee802154/6lowpan/reassembly.c       |   2
-rw-r--r--  net/ipv4/inet_fragment.c                  |  39
-rw-r--r--  net/ipv4/ip_fragment.c                    |   8
-rw-r--r--  net/ipv4/tcp.c                            |   3
-rw-r--r--  net/ipv4/tcp_bpf.c                        |   6
-rw-r--r--  net/ipv4/tcp_output.c                     |   3
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c   |   2
-rw-r--r--  net/ipv6/reassembly.c                     |   2
-rw-r--r--  net/netfilter/nf_conntrack_core.c         |  16
-rw-r--r--  net/netfilter/nf_flow_table_core.c        |  43
-rw-r--r--  net/netfilter/nf_flow_table_ip.c          |  43
-rw-r--r--  net/netfilter/nf_tables_api.c             |  19
-rw-r--r--  net/netfilter/nf_tables_offload.c         |  17
-rw-r--r--  net/netfilter/nft_flow_offload.c          |   9
-rw-r--r--  net/packet/af_packet.c                    |   7
-rw-r--r--  net/rxrpc/af_rxrpc.c                      |   6
-rw-r--r--  net/rxrpc/ar-internal.h                   |   8
-rw-r--r--  net/rxrpc/call_event.c                    |  15
-rw-r--r--  net/rxrpc/input.c                         |  59
-rw-r--r--  net/rxrpc/local_object.c                  | 103
-rw-r--r--  net/rxrpc/output.c                        |   3
-rw-r--r--  net/rxrpc/recvmsg.c                       |   6
-rw-r--r--  net/sched/act_skbedit.c                   |  12
-rw-r--r--  net/sched/sch_taprio.c                    |   3
-rw-r--r--  net/sctp/sm_sideeffect.c                  |   2
-rw-r--r--  net/sctp/stream.c                         |   1
-rw-r--r--  net/tipc/addr.c                           |   1
-rw-r--r--  net/tipc/link.c                           |  92
-rw-r--r--  net/tipc/msg.h                            |   8
-rw-r--r--  net/tls/tls_device.c                      |   9
-rw-r--r--  net/tls/tls_main.c                        |   2

39 files changed, 429 insertions(+), 196 deletions(-)
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 67d7f83009ae..1d5bdf3a4b65 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -2303,7 +2303,7 @@ __batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid,
while (bucket_tmp < hash->size) {
if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
- *bucket, &idx_tmp))
+ bucket_tmp, &idx_tmp))
break;
bucket_tmp++;
@@ -2420,8 +2420,10 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
- batadv_mcast_want_rtr4_update(bat_priv, orig, BATADV_NO_FLAGS);
- batadv_mcast_want_rtr6_update(bat_priv, orig, BATADV_NO_FLAGS);
+ batadv_mcast_want_rtr4_update(bat_priv, orig,
+ BATADV_MCAST_WANT_NO_RTR4);
+ batadv_mcast_want_rtr6_update(bat_priv, orig,
+ BATADV_MCAST_WANT_NO_RTR6);
spin_unlock_bh(&orig->mcast_handler_lock);
}
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index b9585e7d9d2e..04bc79359a17 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -3202,6 +3202,7 @@ struct hci_dev *hci_alloc_dev(void)
hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
+ hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
mutex_init(&hdev->lock);
mutex_init(&hdev->req_lock);
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
index bb67f4a5479a..402e2cc54044 100644
--- a/net/bluetooth/hci_debugfs.c
+++ b/net/bluetooth/hci_debugfs.c
@@ -433,6 +433,35 @@ static int auto_accept_delay_set(void *data, u64 val)
return 0;
}
+static int min_encrypt_key_size_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val < 1 || val > 16)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->min_enc_key_size = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int min_encrypt_key_size_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->min_enc_key_size;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(min_encrypt_key_size_fops,
+ min_encrypt_key_size_get,
+ min_encrypt_key_size_set, "%llu\n");
+
static int auto_accept_delay_get(void *data, u64 *val)
{
struct hci_dev *hdev = data;
@@ -545,6 +574,8 @@ void hci_debugfs_create_bredr(struct hci_dev *hdev)
if (lmp_ssp_capable(hdev)) {
debugfs_create_file("ssp_debug_mode", 0444, hdev->debugfs,
hdev, &ssp_debug_mode_fops);
+ debugfs_create_file("min_encrypt_key_size", 0644, hdev->debugfs,
+ hdev, &min_encrypt_key_size_fops);
debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
hdev, &auto_accept_delay_fops);
}
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 5abd423b55fa..8d889969ae7e 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -101,6 +101,7 @@ static int hidp_send_message(struct hidp_session *session, struct socket *sock,
{
struct sk_buff *skb;
struct sock *sk = sock->sk;
+ int ret;
BT_DBG("session %p data %p size %d", session, data, size);
@@ -114,13 +115,17 @@ static int hidp_send_message(struct hidp_session *session, struct socket *sock,
}
skb_put_u8(skb, hdr);
- if (data && size > 0)
+ if (data && size > 0) {
skb_put_data(skb, data, size);
+ ret = size;
+ } else {
+ ret = 0;
+ }
skb_queue_tail(transmit, skb);
wake_up_interruptible(sk_sleep(sk));
- return 0;
+ return ret;
}
static int hidp_send_ctrl_message(struct hidp_session *session,
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index cc506fe99b4d..dfc1edb168b7 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1361,7 +1361,7 @@ static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
* actually encrypted before enforcing a key size.
*/
return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
- hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE);
+ hcon->enc_key_size >= hcon->hdev->min_enc_key_size);
}
static void l2cap_do_start(struct l2cap_chan *chan)
diff --git a/net/core/sock.c b/net/core/sock.c
index d57b0cc995a0..6d08553f885c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1992,6 +1992,19 @@ void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
}
EXPORT_SYMBOL(skb_set_owner_w);
+static bool can_skb_orphan_partial(const struct sk_buff *skb)
+{
+#ifdef CONFIG_TLS_DEVICE
+ /* Drivers depend on in-order delivery for crypto offload,
+ * partial orphan breaks out-of-order-OK logic.
+ */
+ if (skb->decrypted)
+ return false;
+#endif
+ return (skb->destructor == sock_wfree ||
+ (IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree));
+}
+
/* This helper is used by netem, as it can hold packets in its
* delay queue. We want to allow the owner socket to send more
* packets, as if they were already TX completed by a typical driver.
@@ -2003,11 +2016,7 @@ void skb_orphan_partial(struct sk_buff *skb)
if (skb_is_tcp_pure_ack(skb))
return;
- if (skb->destructor == sock_wfree
-#ifdef CONFIG_INET
- || skb->destructor == tcp_wfree
-#endif
- ) {
+ if (can_skb_orphan_partial(skb)) {
struct sock *sk = skb->sk;
if (refcount_inc_not_zero(&sk->sk_refcnt)) {
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 3312a5849a97..c13ffbd33d8d 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -19,6 +19,7 @@ static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
static DEFINE_MUTEX(sock_diag_table_mutex);
static struct workqueue_struct *broadcast_wq;
+static atomic64_t cookie_gen;
u64 sock_gen_cookie(struct sock *sk)
{
@@ -27,7 +28,7 @@ u64 sock_gen_cookie(struct sock *sk)
if (res)
return res;
- res = atomic64_inc_return(&sock_net(sk)->cookie_gen);
+ res = atomic64_inc_return(&cookie_gen);
atomic64_cmpxchg(&sk->sk_cookie, 0, res);
}
}
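
The sock_diag change above replaces the per-netns counter with a single
global source, so socket cookie values are unique across network namespaces
rather than only within one. The publish-once pattern (bump a shared
counter, then install the value with a compare-and-swap only if the socket
has no cookie yet) can be sketched in plain C11; none of the names below
are the kernel's:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t cookie_gen;

    /* Assign a cookie once; concurrent callers all observe one value. */
    static uint64_t gen_cookie(_Atomic uint64_t *sk_cookie)
    {
        for (;;) {
            uint64_t res = atomic_load(sk_cookie);
            if (res)
                return res;
            res = atomic_fetch_add(&cookie_gen, 1) + 1;
            uint64_t expected = 0;
            atomic_compare_exchange_strong(sk_cookie, &expected, res);
            /* Loop and reread: either our CAS won, or another
             * thread's value is already in place. */
        }
    }

    int main(void)
    {
        _Atomic uint64_t c = 0;
        printf("%llu %llu\n",
               (unsigned long long)gen_cookie(&c),
               (unsigned long long)gen_cookie(&c));   /* same value twice */
        return 0;
    }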
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index 4ec5b7f85d51..09d9286b27cc 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -153,6 +153,9 @@ static void dsa_switch_mdb_add_bitmap(struct dsa_switch *ds,
{
int port;
+ if (!ds->ops->port_mdb_add)
+ return;
+
for_each_set_bit(port, bitmap, ds->num_ports)
ds->ops->port_mdb_add(ds, port, mdb);
}
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index e4aba5d485be..bbe9b3b2d395 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -170,7 +170,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
if (!reasm_data)
goto out_oom;
- inet_frag_reasm_finish(&fq->q, skb, reasm_data);
+ inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);
skb->dev = ldev;
skb->tstamp = fq->q.stamp;
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index a999451345f9..10d31733297d 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -475,11 +475,12 @@ void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
EXPORT_SYMBOL(inet_frag_reasm_prepare);
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
- void *reasm_data)
+ void *reasm_data, bool try_coalesce)
{
struct sk_buff **nextp = (struct sk_buff **)reasm_data;
struct rb_node *rbn;
struct sk_buff *fp;
+ int sum_truesize;
skb_push(head, head->data - skb_network_header(head));
@@ -487,25 +488,41 @@ void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
fp = FRAG_CB(head)->next_frag;
rbn = rb_next(&head->rbnode);
rb_erase(&head->rbnode, &q->rb_fragments);
+
+ sum_truesize = head->truesize;
while (rbn || fp) {
/* fp points to the next sk_buff in the current run;
* rbn points to the next run.
*/
/* Go through the current run. */
while (fp) {
- *nextp = fp;
- nextp = &fp->next;
- fp->prev = NULL;
- memset(&fp->rbnode, 0, sizeof(fp->rbnode));
- fp->sk = NULL;
- head->data_len += fp->len;
- head->len += fp->len;
+ struct sk_buff *next_frag = FRAG_CB(fp)->next_frag;
+ bool stolen;
+ int delta;
+
+ sum_truesize += fp->truesize;
if (head->ip_summed != fp->ip_summed)
head->ip_summed = CHECKSUM_NONE;
else if (head->ip_summed == CHECKSUM_COMPLETE)
head->csum = csum_add(head->csum, fp->csum);
- head->truesize += fp->truesize;
- fp = FRAG_CB(fp)->next_frag;
+
+ if (try_coalesce && skb_try_coalesce(head, fp, &stolen,
+ &delta)) {
+ kfree_skb_partial(fp, stolen);
+ } else {
+ fp->prev = NULL;
+ memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+ fp->sk = NULL;
+
+ head->data_len += fp->len;
+ head->len += fp->len;
+ head->truesize += fp->truesize;
+
+ *nextp = fp;
+ nextp = &fp->next;
+ }
+
+ fp = next_frag;
}
/* Move to the next run. */
if (rbn) {
@@ -516,7 +533,7 @@ void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
rbn = rbnext;
}
}
- sub_frag_mem_limit(q->fqdir, head->truesize);
+ sub_frag_mem_limit(q->fqdir, sum_truesize);
*nextp = NULL;
skb_mark_not_on_list(head);
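
The reworked loop has a subtle accounting consequence: fragments absorbed
by skb_try_coalesce() are freed immediately via kfree_skb_partial() and
never added to head->truesize, so the final uncharge must use the
separately accumulated sum_truesize. A toy simulation of that invariant
(ignoring the truesize delta that skb_try_coalesce() folds back into the
head; all numbers are made up):

    #include <assert.h>
    #include <stdbool.h>

    int main(void)
    {
        int charged = 0, sum_truesize = 0, head_truesize = 0;
        int frag_truesize[4] = { 256, 512, 384, 640 };
        bool coalesced[4]    = { false, true, false, true };

        /* Queueing: every fragment was charged to the memory limit. */
        for (int i = 0; i < 4; i++)
            charged += frag_truesize[i];

        /* Reassembly: head is fragment 0; the rest take either path. */
        head_truesize = frag_truesize[0];
        sum_truesize  = frag_truesize[0];
        for (int i = 1; i < 4; i++) {
            sum_truesize += frag_truesize[i];
            if (!coalesced[i])
                head_truesize += frag_truesize[i]; /* chained, not merged */
            /* else: data merged into head, skb freed, nothing added */
        }

        /* Uncharging head_truesize would leak accounting. */
        assert(sum_truesize == charged);
        assert(head_truesize < charged);
        return 0;
    }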
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 4385eb9e781f..cfeb8890f94e 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -393,6 +393,11 @@ err:
return err;
}
+static bool ip_frag_coalesce_ok(const struct ipq *qp)
+{
+ return qp->q.key.v4.user == IP_DEFRAG_LOCAL_DELIVER;
+}
+
/* Build a new IP datagram from all its fragments. */
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
struct sk_buff *prev_tail, struct net_device *dev)
@@ -421,7 +426,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
if (len > 65535)
goto out_oversize;
- inet_frag_reasm_finish(&qp->q, skb, reasm_data);
+ inet_frag_reasm_finish(&qp->q, skb, reasm_data,
+ ip_frag_coalesce_ok(qp));
skb->dev = dev;
IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f8fa1686f7f3..051ef10374f6 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -984,6 +984,9 @@ new_segment:
if (!skb)
goto wait_for_memory;
+#ifdef CONFIG_TLS_DEVICE
+ skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
+#endif
skb_entail(sk, skb);
copy = size_goal;
}
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 3d1e15401384..8a56e09cfb0e 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -398,10 +398,14 @@ more_data:
static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
struct sk_msg tmp, *msg_tx = NULL;
- int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS;
int copied = 0, err = 0;
struct sk_psock *psock;
long timeo;
+ int flags;
+
+ /* Don't let internal do_tcp_sendpages() flags through */
+ flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
+ flags |= MSG_NO_SHARED_FRAGS;
psock = sk_psock_get(sk);
if (unlikely(!psock))
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e6d02e05bb1c..5c46bc4c7e8d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1320,6 +1320,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
if (!buff)
return -ENOMEM; /* We'll just try again later. */
+ skb_copy_decrypted(buff, skb);
sk->sk_wmem_queued += buff->truesize;
sk_mem_charge(sk, buff->truesize);
@@ -1874,6 +1875,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
buff = sk_stream_alloc_skb(sk, 0, gfp, true);
if (unlikely(!buff))
return -ENOMEM;
+ skb_copy_decrypted(buff, skb);
sk->sk_wmem_queued += buff->truesize;
sk_mem_charge(sk, buff->truesize);
@@ -2143,6 +2145,7 @@ static int tcp_mtu_probe(struct sock *sk)
sk_mem_charge(sk, nskb->truesize);
skb = tcp_send_head(sk);
+ skb_copy_decrypted(nskb, skb);
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 0f82c150543b..fed9666a2f7d 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -348,7 +348,7 @@ static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
skb_reset_transport_header(skb);
- inet_frag_reasm_finish(&fq->q, skb, reasm_data);
+ inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);
skb->ignore_df = 1;
skb->dev = dev;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index ca05b16f1bb9..1f5d4d196dcc 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -282,7 +282,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
skb_reset_transport_header(skb);
- inet_frag_reasm_finish(&fq->q, skb, reasm_data);
+ inet_frag_reasm_finish(&fq->q, skb, reasm_data, true);
skb->dev = dev;
ipv6_hdr(skb)->payload_len = htons(payload_len);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index a542761e90d1..81a8ef42b88d 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -453,13 +453,12 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
* table location, we assume id gets exposed to userspace.
*
* Following nf_conn items do not change throughout lifetime
- * of the nf_conn after it has been committed to main hash table:
+ * of the nf_conn:
*
* 1. nf_conn address
- * 2. nf_conn->ext address
- * 3. nf_conn->master address (normally NULL)
- * 4. tuple
- * 5. the associated net namespace
+ * 2. nf_conn->master address (normally NULL)
+ * 3. the associated net namespace
+ * 4. the original direction tuple
*/
u32 nf_ct_get_id(const struct nf_conn *ct)
{
@@ -469,9 +468,10 @@ u32 nf_ct_get_id(const struct nf_conn *ct)
net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
a = (unsigned long)ct;
- b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
- c = (unsigned long)ct->ext;
- d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
+ b = (unsigned long)ct->master;
+ c = (unsigned long)nf_ct_net(ct);
+ d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+ sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
&ct_id_seed);
#ifdef CONFIG_64BIT
return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
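
The id is now derived only from fields that stay constant for the whole
lifetime of the conntrack entry; ct->ext is dropped because the extension
area's address can change when it is reallocated. A rough userspace
rendition of the construction, with an illustrative keyed hash standing in
for the kernel's siphash (nothing below is kernel API):

    #include <stddef.h>
    #include <stdint.h>

    /* Stand-in for a keyed siphash; any keyed PRF fits the sketch. */
    static uint64_t keyed_hash(const void *data, size_t len, uint64_t key)
    {
        const unsigned char *p = data;
        uint64_t h = key ^ 0xcbf29ce484222325ULL;  /* FNV-1a-style mix */

        while (len--) {
            h ^= *p++;
            h *= 0x100000001b3ULL;
        }
        return h;
    }

    struct fake_ct { void *master; void *net; unsigned char orig_tuple[40]; };

    /* Mix only values that never change while the entry exists. */
    static uint32_t ct_get_id(const struct fake_ct *ct, uint64_t seed)
    {
        uint64_t in[4] = {
            (uintptr_t)ct,                      /* 1. object address   */
            (uintptr_t)ct->master,              /* 2. master address   */
            (uintptr_t)ct->net,                 /* 3. net namespace    */
            keyed_hash(ct->orig_tuple,          /* 4. original tuple   */
                       sizeof(ct->orig_tuple), seed),
        };

        return (uint32_t)keyed_hash(in, sizeof(in), seed);
    }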
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index e3d797252a98..80a8f9ae4c93 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -111,15 +111,16 @@ static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ)
#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ)
-static void flow_offload_fixup_ct_state(struct nf_conn *ct)
+static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
+{
+ return (__s32)(timeout - (u32)jiffies);
+}
+
+static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
{
const struct nf_conntrack_l4proto *l4proto;
+ int l4num = nf_ct_protonum(ct);
unsigned int timeout;
- int l4num;
-
- l4num = nf_ct_protonum(ct);
- if (l4num == IPPROTO_TCP)
- flow_offload_fixup_tcp(&ct->proto.tcp);
l4proto = nf_ct_l4proto_find(l4num);
if (!l4proto)
@@ -132,7 +133,20 @@ static void flow_offload_fixup_ct_state(struct nf_conn *ct)
else
return;
- ct->timeout = nfct_time_stamp + timeout;
+ if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
+ ct->timeout = nfct_time_stamp + timeout;
+}
+
+static void flow_offload_fixup_ct_state(struct nf_conn *ct)
+{
+ if (nf_ct_protonum(ct) == IPPROTO_TCP)
+ flow_offload_fixup_tcp(&ct->proto.tcp);
+}
+
+static void flow_offload_fixup_ct(struct nf_conn *ct)
+{
+ flow_offload_fixup_ct_state(ct);
+ flow_offload_fixup_ct_timeout(ct);
}
void flow_offload_free(struct flow_offload *flow)
@@ -208,6 +222,11 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
}
EXPORT_SYMBOL_GPL(flow_offload_add);
+static inline bool nf_flow_has_expired(const struct flow_offload *flow)
+{
+ return nf_flow_timeout_delta(flow->timeout) <= 0;
+}
+
static void flow_offload_del(struct nf_flowtable *flow_table,
struct flow_offload *flow)
{
@@ -223,6 +242,11 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
e = container_of(flow, struct flow_offload_entry, flow);
clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);
+ if (nf_flow_has_expired(flow))
+ flow_offload_fixup_ct(e->ct);
+ else if (flow->flags & FLOW_OFFLOAD_TEARDOWN)
+ flow_offload_fixup_ct_timeout(e->ct);
+
flow_offload_free(flow);
}
@@ -298,11 +322,6 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
return err;
}
-static inline bool nf_flow_has_expired(const struct flow_offload *flow)
-{
- return (__s32)(flow->timeout - (u32)jiffies) <= 0;
-}
-
static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
struct nf_flowtable *flow_table = data;
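
nf_flow_timeout_delta() above is the usual signed-wraparound trick for
32-bit jiffies: subtract in unsigned arithmetic and reinterpret the result
as signed, which orders the two timestamps correctly as long as they are
less than 2^31 ticks apart. A self-contained demonstration in plain C,
with no kernel types:

    #include <assert.h>
    #include <stdint.h>

    static int32_t timeout_delta(uint32_t timeout, uint32_t now)
    {
        return (int32_t)(timeout - now); /* >0: future, <=0: expired */
    }

    int main(void)
    {
        /* Plain case: deadline 100 ticks ahead. */
        assert(timeout_delta(1100, 1000) == 100);
        /* Wraparound: now near UINT32_MAX, deadline wrapped past zero. */
        assert(timeout_delta(50, 4294967246u) == 100);
        /* Expired deadline, also across the wrap. */
        assert(timeout_delta(4294967246u, 50) == -100);
        return 0;
    }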
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index cdfc33517e85..d68c801dd614 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -214,6 +214,25 @@ static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
return true;
}
+static int nf_flow_offload_dst_check(struct dst_entry *dst)
+{
+ if (unlikely(dst_xfrm(dst)))
+ return dst_check(dst, 0) ? 0 : -1;
+
+ return 0;
+}
+
+static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
+ const struct nf_hook_state *state,
+ struct dst_entry *dst)
+{
+ skb_orphan(skb);
+ skb_dst_set_noref(skb, dst);
+ skb->tstamp = 0;
+ dst_output(state->net, state->sk, skb);
+ return NF_STOLEN;
+}
+
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
@@ -254,6 +273,11 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
return NF_ACCEPT;
+ if (nf_flow_offload_dst_check(&rt->dst)) {
+ flow_offload_teardown(flow);
+ return NF_ACCEPT;
+ }
+
if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
return NF_DROP;
@@ -261,6 +285,13 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
iph = ip_hdr(skb);
ip_decrease_ttl(iph);
+ if (unlikely(dst_xfrm(&rt->dst))) {
+ memset(skb->cb, 0, sizeof(struct inet_skb_parm));
+ IPCB(skb)->iif = skb->dev->ifindex;
+ IPCB(skb)->flags = IPSKB_FORWARDED;
+ return nf_flow_xmit_xfrm(skb, state, &rt->dst);
+ }
+
skb->dev = outdev;
nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
skb_dst_set_noref(skb, &rt->dst);
@@ -467,6 +498,11 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
sizeof(*ip6h)))
return NF_ACCEPT;
+ if (nf_flow_offload_dst_check(&rt->dst)) {
+ flow_offload_teardown(flow);
+ return NF_ACCEPT;
+ }
+
if (skb_try_make_writable(skb, sizeof(*ip6h)))
return NF_DROP;
@@ -477,6 +513,13 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
ip6h = ipv6_hdr(skb);
ip6h->hop_limit--;
+ if (unlikely(dst_xfrm(&rt->dst))) {
+ memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
+ IP6CB(skb)->iif = skb->dev->ifindex;
+ IP6CB(skb)->flags = IP6SKB_FORWARDED;
+ return nf_flow_xmit_xfrm(skb, state, &rt->dst);
+ }
+
skb->dev = outdev;
nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
skb_dst_set_noref(skb, &rt->dst);
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index fe3b7b0c6c66..6d00bef023c4 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -138,9 +138,14 @@ static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
return;
list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
- if (trans->msg_type == NFT_MSG_NEWSET &&
- nft_trans_set(trans) == set) {
- set->bound = true;
+ switch (trans->msg_type) {
+ case NFT_MSG_NEWSET:
+ if (nft_trans_set(trans) == set)
+ nft_trans_set_bound(trans) = true;
+ break;
+ case NFT_MSG_NEWSETELEM:
+ if (nft_trans_elem_set(trans) == set)
+ nft_trans_elem_set_bound(trans) = true;
break;
}
}
@@ -1662,6 +1667,10 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
chain->flags |= NFT_BASE_CHAIN | flags;
basechain->policy = NF_ACCEPT;
+ if (chain->flags & NFT_CHAIN_HW_OFFLOAD &&
+ nft_chain_offload_priority(basechain) < 0)
+ return -EOPNOTSUPP;
+
flow_block_init(&basechain->flow_block);
} else {
chain = kzalloc(sizeof(*chain), GFP_KERNEL);
@@ -6906,7 +6915,7 @@ static int __nf_tables_abort(struct net *net)
break;
case NFT_MSG_NEWSET:
trans->ctx.table->use--;
- if (nft_trans_set(trans)->bound) {
+ if (nft_trans_set_bound(trans)) {
nft_trans_destroy(trans);
break;
}
@@ -6918,7 +6927,7 @@ static int __nf_tables_abort(struct net *net)
nft_trans_destroy(trans);
break;
case NFT_MSG_NEWSETELEM:
- if (nft_trans_elem_set(trans)->bound) {
+ if (nft_trans_elem_set_bound(trans)) {
nft_trans_destroy(trans);
break;
}
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index d3c4c9c88bc8..3c2725ade61b 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -103,10 +103,11 @@ void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
}
static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
- __be16 proto,
- struct netlink_ext_ack *extack)
+ __be16 proto, int priority,
+ struct netlink_ext_ack *extack)
{
common->protocol = proto;
+ common->prio = priority;
common->extack = extack;
}
@@ -124,6 +125,15 @@ static int nft_setup_cb_call(struct nft_base_chain *basechain,
return 0;
}
+int nft_chain_offload_priority(struct nft_base_chain *basechain)
+{
+ if (basechain->ops.priority <= 0 ||
+ basechain->ops.priority > USHRT_MAX)
+ return -1;
+
+ return 0;
+}
+
static int nft_flow_offload_rule(struct nft_trans *trans,
enum flow_cls_command command)
{
@@ -142,7 +152,8 @@ static int nft_flow_offload_rule(struct nft_trans *trans,
if (flow)
proto = flow->proto;
- nft_flow_offload_common_init(&cls_flow.common, proto, &extack);
+ nft_flow_offload_common_init(&cls_flow.common, proto,
+ basechain->ops.priority, &extack);
cls_flow.command = command;
cls_flow.cookie = (unsigned long) rule;
if (flow)
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index aa5f571d4361..060a4ed46d5e 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -72,11 +72,11 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
{
struct nft_flow_offload *priv = nft_expr_priv(expr);
struct nf_flowtable *flowtable = &priv->flowtable->data;
+ struct tcphdr _tcph, *tcph = NULL;
enum ip_conntrack_info ctinfo;
struct nf_flow_route route;
struct flow_offload *flow;
enum ip_conntrack_dir dir;
- bool is_tcp = false;
struct nf_conn *ct;
int ret;
@@ -89,7 +89,10 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
case IPPROTO_TCP:
- is_tcp = true;
+ tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff,
+ sizeof(_tcph), &_tcph);
+ if (unlikely(!tcph || tcph->fin || tcph->rst))
+ goto out;
break;
case IPPROTO_UDP:
break;
@@ -115,7 +118,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
if (!flow)
goto err_flow_alloc;
- if (is_tcp) {
+ if (tcph) {
ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 8d54f3047768..e2742b006d25 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2618,6 +2618,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
mutex_lock(&po->pg_vec_lock);
+ /* packet_sendmsg() check on tx_ring.pg_vec was lockless,
+ * we need to confirm it under protection of pg_vec_lock.
+ */
+ if (unlikely(!po->tx_ring.pg_vec)) {
+ err = -EBUSY;
+ goto out;
+ }
if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
proto = po->num;
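
The af_packet fix is the classic double-checked pattern: packet_sendmsg()
tested tx_ring.pg_vec without the lock as a fast-path hint, so the value
must be re-validated once pg_vec_lock is held. A minimal userspace
analogue with a pthread mutex (all names here are illustrative):

    #include <pthread.h>
    #include <stddef.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static void *ring; /* set and cleared elsewhere under the same lock */

    static int send_via_ring(void)
    {
        if (!ring)                  /* lockless early check: a hint only */
            return -1;
        pthread_mutex_lock(&lock);
        if (!ring) {                /* must recheck: it may be gone now */
            pthread_mutex_unlock(&lock);
            return -1;              /* the kernel returns -EBUSY here */
        }
        /* ... safe to use ring while the lock is held ... */
        pthread_mutex_unlock(&lock);
        return 0;
    }

    int main(void) { return send_via_ring() == -1 ? 0 : 1; }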
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index d09eaf153544..0dbbfd1b6487 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -193,7 +193,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
service_in_use:
write_unlock(&local->services_lock);
- rxrpc_put_local(local);
+ rxrpc_unuse_local(local);
ret = -EADDRINUSE;
error_unlock:
release_sock(&rx->sk);
@@ -402,7 +402,7 @@ EXPORT_SYMBOL(rxrpc_kernel_check_life);
*/
void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call)
{
- rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
rxrpc_propose_ack_ping_for_check_life);
rxrpc_send_ack_packet(call, true, NULL);
}
@@ -901,7 +901,7 @@ static int rxrpc_release_sock(struct sock *sk)
rxrpc_queue_work(&rxnet->service_conn_reaper);
rxrpc_queue_work(&rxnet->client_conn_reaper);
- rxrpc_put_local(rx->local);
+ rxrpc_unuse_local(rx->local);
rx->local = NULL;
key_put(rx->key);
rx->key = NULL;
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 63b26baa108a..fa5b030acaa8 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -257,7 +257,8 @@ struct rxrpc_security {
*/
struct rxrpc_local {
struct rcu_head rcu;
- atomic_t usage;
+ atomic_t active_users; /* Number of users of the local endpoint */
+ atomic_t usage; /* Number of references to the structure */
struct rxrpc_net *rxnet; /* The network ns in which this resides */
struct list_head link;
struct socket *socket; /* my UDP socket */
@@ -653,7 +654,6 @@ struct rxrpc_call {
/* receive-phase ACK management */
u8 ackr_reason; /* reason to ACK */
- u16 ackr_skew; /* skew on packet being ACK'd */
rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */
rxrpc_serial_t ackr_first_seq; /* first sequence number received */
rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */
@@ -747,7 +747,7 @@ int rxrpc_reject_call(struct rxrpc_sock *);
/*
* call_event.c
*/
-void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
+void rxrpc_propose_ACK(struct rxrpc_call *, u8, u32, bool, bool,
enum rxrpc_propose_ack_trace);
void rxrpc_process_call(struct work_struct *);
@@ -1006,6 +1006,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
void rxrpc_put_local(struct rxrpc_local *);
+struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *);
+void rxrpc_unuse_local(struct rxrpc_local *);
void rxrpc_queue_local(struct rxrpc_local *);
void rxrpc_destroy_all_locals(struct rxrpc_net *);
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index bc2adeb3acb9..c767679bfa5d 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -43,8 +43,7 @@ static void rxrpc_propose_ping(struct rxrpc_call *call,
* propose an ACK be sent
*/
static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
- u16 skew, u32 serial, bool immediate,
- bool background,
+ u32 serial, bool immediate, bool background,
enum rxrpc_propose_ack_trace why)
{
enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
@@ -69,14 +68,12 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
if (RXRPC_ACK_UPDATEABLE & (1 << ack_reason)) {
outcome = rxrpc_propose_ack_update;
call->ackr_serial = serial;
- call->ackr_skew = skew;
}
if (!immediate)
goto trace;
} else if (prior > rxrpc_ack_priority[call->ackr_reason]) {
call->ackr_reason = ack_reason;
call->ackr_serial = serial;
- call->ackr_skew = skew;
} else {
outcome = rxrpc_propose_ack_subsume;
}
@@ -137,11 +134,11 @@ trace:
* propose an ACK be sent, locking the call structure
*/
void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
- u16 skew, u32 serial, bool immediate, bool background,
+ u32 serial, bool immediate, bool background,
enum rxrpc_propose_ack_trace why)
{
spin_lock_bh(&call->lock);
- __rxrpc_propose_ACK(call, ack_reason, skew, serial,
+ __rxrpc_propose_ACK(call, ack_reason, serial,
immediate, background, why);
spin_unlock_bh(&call->lock);
}
@@ -239,7 +236,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
ack_ts = ktime_sub(now, call->acks_latest_ts);
if (ktime_to_ns(ack_ts) < call->peer->rtt)
goto out;
- rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
rxrpc_propose_ack_ping_for_lost_ack);
rxrpc_send_ack_packet(call, true, NULL);
goto out;
@@ -372,7 +369,7 @@ recheck_state:
if (time_after_eq(now, t)) {
trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
- rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, true,
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, true,
rxrpc_propose_ack_ping_for_keepalive);
set_bit(RXRPC_CALL_EV_PING, &call->events);
}
@@ -407,7 +404,7 @@ recheck_state:
send_ack = NULL;
if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
call->acks_lost_top = call->tx_top;
- rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
rxrpc_propose_ack_ping_for_lost_ack);
send_ack = &call->acks_lost_ping;
}
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 5bd6f1546e5c..dd47d465d1d3 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -196,15 +196,14 @@ send_extra_data:
* Ping the other end to fill our RTT cache and to retrieve the rwind
* and MTU parameters.
*/
-static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
- int skew)
+static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
ktime_t now = skb->tstamp;
if (call->peer->rtt_usage < 3 ||
ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
- rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
true, true,
rxrpc_propose_ack_ping_for_params);
}
@@ -419,8 +418,7 @@ static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
/*
* Process a DATA packet, adding the packet to the Rx ring.
*/
-static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
- u16 skew)
+static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
enum rxrpc_call_state state;
@@ -600,11 +598,11 @@ skip:
ack:
if (ack)
- rxrpc_propose_ACK(call, ack, skew, ack_serial,
+ rxrpc_propose_ACK(call, ack, ack_serial,
immediate_ack, true,
rxrpc_propose_ack_input_data);
else
- rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, skew, serial,
+ rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
false, true,
rxrpc_propose_ack_input_data);
@@ -822,8 +820,7 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
* soft-ACK means that the packet may be discarded and retransmission
* requested. A phase is complete when all packets are hard-ACK'd.
*/
-static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
- u16 skew)
+static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
{
struct rxrpc_ack_summary summary = { 0 };
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
@@ -867,11 +864,11 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
if (buf.ack.reason == RXRPC_ACK_PING) {
_proto("Rx ACK %%%u PING Request", sp->hdr.serial);
rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
- skew, sp->hdr.serial, true, true,
+ sp->hdr.serial, true, true,
rxrpc_propose_ack_respond_to_ping);
} else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
- skew, sp->hdr.serial, true, true,
+ sp->hdr.serial, true, true,
rxrpc_propose_ack_respond_to_ack);
}
@@ -948,7 +945,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
RXRPC_TX_ANNO_LAST &&
summary.nr_acks == call->tx_top - hard_ack &&
rxrpc_is_client_call(call))
- rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
false, true,
rxrpc_propose_ack_ping_for_lost_reply);
@@ -1004,7 +1001,7 @@ static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
* Process an incoming call packet.
*/
static void rxrpc_input_call_packet(struct rxrpc_call *call,
- struct sk_buff *skb, u16 skew)
+ struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
unsigned long timo;
@@ -1023,11 +1020,11 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_DATA:
- rxrpc_input_data(call, skb, skew);
+ rxrpc_input_data(call, skb);
break;
case RXRPC_PACKET_TYPE_ACK:
- rxrpc_input_ack(call, skb, skew);
+ rxrpc_input_ack(call, skb);
break;
case RXRPC_PACKET_TYPE_BUSY:
@@ -1108,8 +1105,12 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
{
_enter("%p,%p", local, skb);
- skb_queue_tail(&local->event_queue, skb);
- rxrpc_queue_local(local);
+ if (rxrpc_get_local_maybe(local)) {
+ skb_queue_tail(&local->event_queue, skb);
+ rxrpc_queue_local(local);
+ } else {
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+ }
}
/*
@@ -1119,8 +1120,12 @@ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
{
CHECK_SLAB_OKAY(&local->usage);
- skb_queue_tail(&local->reject_queue, skb);
- rxrpc_queue_local(local);
+ if (rxrpc_get_local_maybe(local)) {
+ skb_queue_tail(&local->reject_queue, skb);
+ rxrpc_queue_local(local);
+ } else {
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+ }
}
/*
@@ -1173,7 +1178,6 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
struct rxrpc_peer *peer = NULL;
struct rxrpc_sock *rx = NULL;
unsigned int channel;
- int skew = 0;
_enter("%p", udp_sk);
@@ -1301,15 +1305,8 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
goto out;
}
- /* Note the serial number skew here */
- skew = (int)sp->hdr.serial - (int)conn->hi_serial;
- if (skew >= 0) {
- if (skew > 0)
- conn->hi_serial = sp->hdr.serial;
- } else {
- skew = -skew;
- skew = min(skew, 65535);
- }
+ if ((int)sp->hdr.serial - (int)conn->hi_serial > 0)
+ conn->hi_serial = sp->hdr.serial;
/* Call-bound packets are routed by connection channel. */
channel = sp->hdr.cid & RXRPC_CHANNELMASK;
@@ -1372,11 +1369,11 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
call = rxrpc_new_incoming_call(local, rx, skb);
if (!call)
goto reject_packet;
- rxrpc_send_ping(call, skb, skew);
+ rxrpc_send_ping(call, skb);
mutex_unlock(&call->user_mutex);
}
- rxrpc_input_call_packet(call, skb, skew);
+ rxrpc_input_call_packet(call, skb);
goto discard;
discard:
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index b1c71bad510b..72a6e12a9304 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -79,6 +79,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
if (local) {
atomic_set(&local->usage, 1);
+ atomic_set(&local->active_users, 1);
local->rxnet = rxnet;
INIT_LIST_HEAD(&local->link);
INIT_WORK(&local->processor, rxrpc_local_processor);
@@ -92,7 +93,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
local->debug_id = atomic_inc_return(&rxrpc_debug_id);
memcpy(&local->srx, srx, sizeof(*srx));
local->srx.srx_service = 0;
- trace_rxrpc_local(local, rxrpc_local_new, 1, NULL);
+ trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL);
}
_leave(" = %p", local);
@@ -266,11 +267,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
* bind the transport socket may still fail if we're attempting
* to use a local address that the dying object is still using.
*/
- if (!rxrpc_get_local_maybe(local)) {
- cursor = cursor->next;
- list_del_init(&local->link);
+ if (!rxrpc_use_local(local))
break;
- }
age = "old";
goto found;
@@ -284,7 +282,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
if (ret < 0)
goto sock_error;
- list_add_tail(&local->link, cursor);
+ if (cursor != &rxnet->local_endpoints)
+ list_replace_init(cursor, &local->link);
+ else
+ list_add_tail(&local->link, cursor);
age = "new";
found:
@@ -320,7 +321,7 @@ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
int n;
n = atomic_inc_return(&local->usage);
- trace_rxrpc_local(local, rxrpc_local_got, n, here);
+ trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here);
return local;
}
@@ -334,7 +335,8 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
if (local) {
int n = atomic_fetch_add_unless(&local->usage, 1, 0);
if (n > 0)
- trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
+ trace_rxrpc_local(local->debug_id, rxrpc_local_got,
+ n + 1, here);
else
local = NULL;
}
@@ -342,24 +344,18 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
}
/*
- * Queue a local endpoint.
+ * Queue a local endpoint and pass the caller's reference to the work item.
*/
void rxrpc_queue_local(struct rxrpc_local *local)
{
const void *here = __builtin_return_address(0);
+ unsigned int debug_id = local->debug_id;
+ int n = atomic_read(&local->usage);
if (rxrpc_queue_work(&local->processor))
- trace_rxrpc_local(local, rxrpc_local_queued,
- atomic_read(&local->usage), here);
-}
-
-/*
- * A local endpoint reached its end of life.
- */
-static void __rxrpc_put_local(struct rxrpc_local *local)
-{
- _enter("%d", local->debug_id);
- rxrpc_queue_work(&local->processor);
+ trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here);
+ else
+ rxrpc_put_local(local);
}
/*
@@ -372,10 +368,47 @@ void rxrpc_put_local(struct rxrpc_local *local)
if (local) {
n = atomic_dec_return(&local->usage);
- trace_rxrpc_local(local, rxrpc_local_put, n, here);
+ trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here);
if (n == 0)
- __rxrpc_put_local(local);
+ call_rcu(&local->rcu, rxrpc_local_rcu);
+ }
+}
+
+/*
+ * Start using a local endpoint.
+ */
+struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
+{
+ unsigned int au;
+
+ local = rxrpc_get_local_maybe(local);
+ if (!local)
+ return NULL;
+
+ au = atomic_fetch_add_unless(&local->active_users, 1, 0);
+ if (au == 0) {
+ rxrpc_put_local(local);
+ return NULL;
+ }
+
+ return local;
+}
+
+/*
+ * Cease using a local endpoint. Once the number of active users reaches 0, we
+ * start the closure of the transport in the work processor.
+ */
+void rxrpc_unuse_local(struct rxrpc_local *local)
+{
+ unsigned int au;
+
+ if (local) {
+ au = atomic_dec_return(&local->active_users);
+ if (au == 0)
+ rxrpc_queue_local(local);
+ else
+ rxrpc_put_local(local);
}
}
@@ -393,16 +426,6 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
_enter("%d", local->debug_id);
- /* We can get a race between an incoming call packet queueing the
- * processor again and the work processor starting the destruction
- * process which will shut down the UDP socket.
- */
- if (local->dead) {
- _leave(" [already dead]");
- return;
- }
- local->dead = true;
-
mutex_lock(&rxnet->local_mutex);
list_del_init(&local->link);
mutex_unlock(&rxnet->local_mutex);
@@ -422,13 +445,11 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
*/
rxrpc_purge_queue(&local->reject_queue);
rxrpc_purge_queue(&local->event_queue);
-
- _debug("rcu local %d", local->debug_id);
- call_rcu(&local->rcu, rxrpc_local_rcu);
}
/*
- * Process events on an endpoint
+ * Process events on an endpoint. The work item carries a ref which
+ * we must release.
*/
static void rxrpc_local_processor(struct work_struct *work)
{
@@ -436,13 +457,15 @@ static void rxrpc_local_processor(struct work_struct *work)
container_of(work, struct rxrpc_local, processor);
bool again;
- trace_rxrpc_local(local, rxrpc_local_processing,
+ trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
atomic_read(&local->usage), NULL);
do {
again = false;
- if (atomic_read(&local->usage) == 0)
- return rxrpc_local_destroyer(local);
+ if (atomic_read(&local->active_users) == 0) {
+ rxrpc_local_destroyer(local);
+ break;
+ }
if (!skb_queue_empty(&local->reject_queue)) {
rxrpc_reject_packets(local);
@@ -454,6 +477,8 @@ static void rxrpc_local_processor(struct work_struct *work)
again = true;
}
} while (again);
+
+ rxrpc_put_local(local);
}
/*
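
The rxrpc rework separates two lifetimes: usage keeps the structure's
memory alive (the final put frees it via RCU), while active_users tracks
parties actively using the endpoint (the final unuse starts teardown in
the work item, which may run while the memory lives on). A simplified
sketch of that split with C11 atomics, not the rxrpc API:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct endpoint {
        atomic_int usage;        /* refs keeping the memory alive */
        atomic_int active_users; /* parties actively using it     */
    };

    /* Add a unless v == u; returns the old value (u means refused). */
    static int fetch_add_unless(atomic_int *v, int a, int u)
    {
        int c = atomic_load(v);

        do {
            if (c == u)
                return c;
        } while (!atomic_compare_exchange_weak(v, &c, c + a));
        return c;
    }

    static void ep_put(struct endpoint *ep)
    {
        if (atomic_fetch_sub(&ep->usage, 1) == 1)
            free(ep);       /* the kernel defers this via call_rcu() */
    }

    static struct endpoint *ep_use(struct endpoint *ep)
    {
        if (fetch_add_unless(&ep->usage, 1, 0) == 0)
            return NULL;    /* memory already on its way out */
        if (fetch_add_unless(&ep->active_users, 1, 0) == 0) {
            ep_put(ep);     /* alive, but no longer in service */
            return NULL;
        }
        return ep;
    }

    static void ep_unuse(struct endpoint *ep)
    {
        if (atomic_fetch_sub(&ep->active_users, 1) == 1) {
            /* last active user: queue teardown of the transport;
             * the kernel hands its ref to the work item here */
        }
        ep_put(ep);
    }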
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 948e3fe249ec..369e516c4bdf 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -87,7 +87,7 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
*_top = top;
pkt->ack.bufferSpace = htons(8);
- pkt->ack.maxSkew = htons(call->ackr_skew);
+ pkt->ack.maxSkew = htons(0);
pkt->ack.firstPacket = htonl(hard_ack + 1);
pkt->ack.previousPacket = htonl(call->ackr_prev_seq);
pkt->ack.serial = htonl(serial);
@@ -228,7 +228,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
if (ping)
clear_bit(RXRPC_CALL_PINGING, &call->flags);
rxrpc_propose_ACK(call, pkt->ack.reason,
- ntohs(pkt->ack.maxSkew),
ntohl(pkt->ack.serial),
false, true,
rxrpc_propose_ack_retry_tx);
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 5abf46cf9e6c..9a7e1bc9791d 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -141,7 +141,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);
if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
- rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, false, true,
+ rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, serial, false, true,
rxrpc_propose_ack_terminal_ack);
//rxrpc_send_ack_packet(call, false, NULL);
}
@@ -159,7 +159,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
write_unlock_bh(&call->state_lock);
- rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true,
+ rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false, true,
rxrpc_propose_ack_processing_op);
break;
default:
@@ -212,7 +212,7 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
if (after_eq(hard_ack, call->ackr_consumed + 2) ||
after_eq(top, call->ackr_seen + 2) ||
(hard_ack == top && after(hard_ack, call->ackr_consumed)))
- rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial,
+ rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
true, true,
rxrpc_propose_ack_rotate_rx);
if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY)
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index b100870f02a6..37dced00b63d 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -307,6 +307,17 @@ static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index)
return tcf_idr_search(tn, a, index);
}
+static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
+{
+ return nla_total_size(sizeof(struct tc_skbedit))
+ + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_PRIORITY */
+ + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING */
+ + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MARK */
+ + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_PTYPE */
+ + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MASK */
+ + nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */
+}
+
static struct tc_action_ops act_skbedit_ops = {
.kind = "skbedit",
.id = TCA_ID_SKBEDIT,
@@ -316,6 +327,7 @@ static struct tc_action_ops act_skbedit_ops = {
.init = tcf_skbedit_init,
.cleanup = tcf_skbedit_cleanup,
.walk = tcf_skbedit_walker,
+ .get_fill_size = tcf_skbedit_get_fill_size,
.lookup = tcf_skbedit_search,
.size = sizeof(struct tcf_skbedit),
};
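
The new .get_fill_size hook lets the action core size the netlink dump skb
up front instead of growing it on demand. Each nla_total_size(payload) is
a 4-byte attribute header plus the payload, rounded up to 4-byte alignment
(the 64-bit variant may additionally reserve room for a pad attribute).
The arithmetic as a standalone C program, with an assumed payload size for
struct tc_skbedit:

    #include <stdio.h>

    #define NLA_ALIGNTO    4
    #define NLA_ALIGN(len) (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
    #define NLA_HDRLEN     4  /* struct nlattr: u16 len + u16 type */

    /* Space one attribute occupies: header + padded payload. */
    static int nla_total_size(int payload)
    {
        return NLA_ALIGN(NLA_HDRLEN + payload);
    }

    int main(void)
    {
        int sz = nla_total_size(20)      /* assumed struct tc_skbedit   */
               + 3 * nla_total_size(4)   /* PRIORITY, MARK, MASK: u32   */
               + 2 * nla_total_size(2)   /* QUEUE_MAPPING, PTYPE: u16,
                                            each padded up to 8 bytes   */
               + nla_total_size(8);      /* FLAGS: u64 (64-bit variant
                                            may add a pad attribute)    */

        printf("preallocate at least %d bytes of attribute space\n", sz);
        return 0;
    }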
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 046fd2c102b4..540bde009ea5 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1191,7 +1191,8 @@ unlock:
spin_unlock_bh(qdisc_lock(sch));
free_sched:
- kfree(new_admin);
+ if (new_admin)
+ call_rcu(&new_admin->rcu, taprio_free_sched_cb);
return err;
}
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index a554d6d15d1b..1cf5bb5b73c4 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -546,7 +546,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
*/
if (net->sctp.pf_enable &&
(transport->state == SCTP_ACTIVE) &&
- (asoc->pf_retrans < transport->pathmaxrxt) &&
+ (transport->error_count < transport->pathmaxrxt) &&
(transport->error_count > asoc->pf_retrans)) {
sctp_assoc_control_transport(asoc, transport,
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 25946604af85..e83cdaa2ab76 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -316,6 +316,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
nstr_list[i] = htons(str_list[i]);
if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
+ kfree(nstr_list);
retval = -EAGAIN;
goto out;
}
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index b88d48d00913..0f1eaed1bd1b 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -75,6 +75,7 @@ void tipc_set_node_addr(struct net *net, u32 addr)
tipc_set_node_id(net, node_id);
}
tn->trial_addr = addr;
+ tn->addr_trial_end = jiffies;
pr_info("32-bit node address hash set to %x\n", addr);
}
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 289e848084ac..6cc75ffd9e2c 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -106,8 +106,6 @@ struct tipc_stats {
* @transmitq: queue for sent, non-acked messages
* @backlogq: queue for messages waiting to be sent
* @snt_nxt: next sequence number to use for outbound messages
- * @prev_from: sequence number of most previous retransmission request
- * @stale_limit: time when repeated identical retransmits must force link reset
* @ackers: # of peers that needs to ack each packet before it can be released
* @acked: # last packet acked by a certain peer. Used for broadcast.
* @rcv_nxt: next sequence number to expect for inbound messages
@@ -164,9 +162,7 @@ struct tipc_link {
u16 limit;
} backlog[5];
u16 snd_nxt;
- u16 prev_from;
u16 window;
- unsigned long stale_limit;
/* Reception */
u16 rcv_nxt;
@@ -1063,47 +1059,53 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
* link_retransmit_failure() - Detect repeated retransmit failures
* @l: tipc link sender
* @r: tipc link receiver (= l in case of unicast)
- * @from: seqno of the 1st packet in retransmit request
* @rc: returned code
*
* Return: true if repeated retransmit failures have happened, otherwise
* false
*/
static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
- u16 from, int *rc)
+ int *rc)
{
struct sk_buff *skb = skb_peek(&l->transmq);
struct tipc_msg *hdr;
if (!skb)
return false;
- hdr = buf_msg(skb);
- /* Detect repeated retransmit failures on same packet */
- if (r->prev_from != from) {
- r->prev_from = from;
- r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
- } else if (time_after(jiffies, r->stale_limit)) {
- pr_warn("Retransmission failure on link <%s>\n", l->name);
- link_print(l, "State of link ");
- pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
- msg_user(hdr), msg_type(hdr), msg_size(hdr),
- msg_errcode(hdr));
- pr_info("sqno %u, prev: %x, src: %x\n",
- msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
-
- trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
- trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
- trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
+ if (!TIPC_SKB_CB(skb)->retr_cnt)
+ return false;
- if (link_is_bc_sndlink(l))
- *rc = TIPC_LINK_DOWN_EVT;
+ if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
+ msecs_to_jiffies(r->tolerance)))
+ return false;
+
+ hdr = buf_msg(skb);
+ if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))
+ return false;
+ pr_warn("Retransmission failure on link <%s>\n", l->name);
+ link_print(l, "State of link ");
+ pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
+ msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
+ pr_info("sqno %u, prev: %x, dest: %x\n",
+ msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr));
+ pr_info("retr_stamp %d, retr_cnt %d\n",
+ jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
+ TIPC_SKB_CB(skb)->retr_cnt);
+
+ trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
+ trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
+ trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
+
+ if (link_is_bc_sndlink(l)) {
+ r->state = LINK_RESET;
+ *rc = TIPC_LINK_DOWN_EVT;
+ } else {
*rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
- return true;
}
- return false;
+ return true;
}
/* tipc_link_bc_retrans() - retransmit zero or more packets
@@ -1129,7 +1131,7 @@ static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
trace_tipc_link_retrans(r, from, to, &l->transmq);
- if (link_retransmit_failure(l, r, from, &rc))
+ if (link_retransmit_failure(l, r, &rc))
return rc;
skb_queue_walk(&l->transmq, skb) {
@@ -1138,11 +1140,10 @@ static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
continue;
if (more(msg_seqno(hdr), to))
break;
- if (link_is_bc_sndlink(l)) {
- if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
- continue;
- TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
- }
+
+ if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
+ continue;
+ TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
_skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE, GFP_ATOMIC);
if (!_skb)
return 0;
@@ -1152,6 +1153,10 @@ static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
_skb->priority = TC_PRIO_CONTROL;
__skb_queue_tail(xmitq, _skb);
l->stats.retransmitted++;
+
+ /* Increase actual retrans counter & mark first time */
+ if (!TIPC_SKB_CB(skb)->retr_cnt++)
+ TIPC_SKB_CB(skb)->retr_stamp = jiffies;
}
return 0;
}
@@ -1393,12 +1398,10 @@ static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
struct tipc_msg *hdr;
u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
u16 ack = l->rcv_nxt - 1;
+ bool passed = false;
u16 seqno, n = 0;
int rc = 0;
- if (gap && link_retransmit_failure(l, l, acked + 1, &rc))
- return rc;
-
skb_queue_walk_safe(&l->transmq, skb, tmp) {
seqno = buf_seqno(skb);
@@ -1408,12 +1411,17 @@ next_gap_ack:
__skb_unlink(skb, &l->transmq);
kfree_skb(skb);
} else if (less_eq(seqno, acked + gap)) {
- /* retransmit skb */
+ /* First, check if repeated retrans failures occur */
+ if (!passed && link_retransmit_failure(l, l, &rc))
+ return rc;
+ passed = true;
+
+ /* retransmit skb if unrestricted */
if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
continue;
TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
-
- _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
+ _skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE,
+ GFP_ATOMIC);
if (!_skb)
continue;
hdr = buf_msg(_skb);
@@ -1422,6 +1430,10 @@ next_gap_ack:
_skb->priority = TC_PRIO_CONTROL;
__skb_queue_tail(xmitq, _skb);
l->stats.retransmitted++;
+
+ /* Increase actual retrans counter & mark first time */
+ if (!TIPC_SKB_CB(skb)->retr_cnt++)
+ TIPC_SKB_CB(skb)->retr_stamp = jiffies;
} else {
/* retry with Gap ACK blocks if any */
if (!ga || n >= ga->gack_cnt)
@@ -2681,7 +2693,7 @@ int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
- i += scnprintf(buf + i, sz - i, " %u", l->prev_from);
+ i += scnprintf(buf + i, sz - i, " %u", 0);
i += scnprintf(buf + i, sz - i, " %u", 0);
i += scnprintf(buf + i, sz - i, " %u", l->acked);
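
The per-link prev_from/stale_limit pair is replaced by per-packet state in
the skb control block: retr_cnt counts actual retransmissions and
retr_stamp records when the first one happened, so a failure is declared
only when the head-of-queue packet has really been retried and has stayed
unacked beyond the link tolerance. The predicate, reduced to illustrative
types rather than the TIPC structs:

    #include <stdbool.h>
    #include <stdint.h>

    struct pkt_state {
        uint32_t retr_cnt;   /* how many times this packet was resent */
        uint64_t retr_stamp; /* time of the first retransmission, ms  */
    };

    /* True when the head-of-queue packet has been retried and has sat
     * unacknowledged for longer than the link tolerance. */
    static bool retransmit_failure(const struct pkt_state *head,
                                   uint64_t now_ms, uint64_t tolerance_ms)
    {
        if (!head)            /* nothing queued */
            return false;
        if (!head->retr_cnt)  /* never retransmitted yet */
            return false;
        return now_ms > head->retr_stamp + tolerance_ms;
    }

    /* On each retransmission: count it, and stamp the first one. */
    static void note_retransmit(struct pkt_state *p, uint64_t now_ms)
    {
        if (!p->retr_cnt++)
            p->retr_stamp = now_ms;
    }

    int main(void)
    {
        struct pkt_state head = { 0, 0 };
        note_retransmit(&head, 1000);
        note_retransmit(&head, 1500);  /* stamp stays at first retry */
        return !(head.retr_stamp == 1000 &&
                 !retransmit_failure(&head, 1200, 500) &&
                 retransmit_failure(&head, 1600, 500));
    }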
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 1c8c8dd32a4e..0daa6f04ca81 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -102,13 +102,15 @@ struct plist;
#define TIPC_MEDIA_INFO_OFFSET 5
struct tipc_skb_cb {
- u32 bytes_read;
- u32 orig_member;
struct sk_buff *tail;
unsigned long nxt_retr;
- bool validated;
+ unsigned long retr_stamp;
+ u32 bytes_read;
+ u32 orig_member;
u16 chain_imp;
u16 ackers;
+ u16 retr_cnt;
+ bool validated;
};
#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
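
The field reordering in tipc_skb_cb puts pointers and longs ahead of the
u32/u16/bool tail so the new retr_stamp and retr_cnt members fit without
padding growth; whatever the layout, the struct must stay within the 48
bytes of skb->cb that the stack gives each protocol. A compile-time check
in the same spirit (userspace stand-in; the kernel expresses such
invariants with BUILD_BUG_ON):

    #include <stdbool.h>
    #include <stdint.h>

    struct sk_buff_stub { char cb[48]; }; /* skb->cb is 48 bytes */

    struct tipc_skb_cb {
        void *tail;
        unsigned long nxt_retr;
        unsigned long retr_stamp;
        uint32_t bytes_read;
        uint32_t orig_member;
        uint16_t chain_imp;
        uint16_t ackers;
        uint16_t retr_cnt;
        bool validated;
    };

    /* Largest-to-smallest ordering avoids internal padding; the control
     * block must not outgrow the 48-byte cb[] area. */
    _Static_assert(sizeof(struct tipc_skb_cb) <=
                   sizeof(((struct sk_buff_stub *)0)->cb),
                   "tipc_skb_cb must fit in skb->cb");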
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index d184230665eb..a470df7ffcf9 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -373,9 +373,9 @@ static int tls_push_data(struct sock *sk,
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
- int tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
struct tls_record_info *record = ctx->open_record;
+ int tls_push_record_flags;
struct page_frag *pfrag;
size_t orig_size = size;
u32 max_open_record_len;
@@ -390,6 +390,9 @@ static int tls_push_data(struct sock *sk,
if (sk->sk_err)
return -sk->sk_err;
+ flags |= MSG_SENDPAGE_DECRYPTED;
+ tls_push_record_flags = flags | MSG_SENDPAGE_NOTLAST;
+
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
if (tls_is_partially_sent_record(tls_ctx)) {
rc = tls_push_partial_record(sk, tls_ctx, flags);
@@ -576,7 +579,9 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
gfp_t sk_allocation = sk->sk_allocation;
sk->sk_allocation = GFP_ATOMIC;
- tls_push_partial_record(sk, ctx, MSG_DONTWAIT | MSG_NOSIGNAL);
+ tls_push_partial_record(sk, ctx,
+ MSG_DONTWAIT | MSG_NOSIGNAL |
+ MSG_SENDPAGE_DECRYPTED);
sk->sk_allocation = sk_allocation;
}
}
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 9cbbae606ced..43252a801c3f 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -308,6 +308,8 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
if (free_ctx)
icsk->icsk_ulp_data = NULL;
sk->sk_prot = ctx->sk_proto;
+ if (sk->sk_write_space == tls_write_space)
+ sk->sk_write_space = ctx->sk_write_space;
write_unlock_bh(&sk->sk_callback_lock);
release_sock(sk);
if (ctx->tx_conf == TLS_SW)
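
The tls_main.c hunk restores sk_write_space on close, but only if it still
points at tls_write_space, so a callback installed later by another layer
is not clobbered. The save/conditional-restore shape, as a hedged sketch
with stub types in place of struct sock and the TLS context:

    typedef void (*write_space_fn)(void *sk);

    struct sock_stub { write_space_fn write_space; };
    struct ctx_stub  { write_space_fn saved_write_space; };

    static void tls_write_space_stub(void *sk) { (void)sk; /* override */ }

    static void ulp_close(struct sock_stub *sk, struct ctx_stub *ctx)
    {
        /* Restore only if our hook is still installed; another layer
         * may have replaced it since, and that newer value must
         * survive our teardown. */
        if (sk->write_space == tls_write_space_stub)
            sk->write_space = ctx->saved_write_space;
    }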