author		Mat Martineau <mathew.j.martineau@linux.intel.com>	2022-05-18 15:04:44 -0700
committer	Jakub Kicinski <kuba@kernel.org>			2022-05-19 20:05:07 -0700
commit		d42f9e4e2384febf9cb2d19ffa0cfac96189517a (patch)
tree		d78e18e690a92b61a557f4a0cea1fb2cffd56820 /net
parent		7b16871f9932d8a371488d2967b033387870a747 (diff)
download	linux-d42f9e4e2384febf9cb2d19ffa0cfac96189517a.tar.bz2
mptcp: Check for orphaned subflow before handling MP_FAIL timer
MP_FAIL timeout (waiting for a peer to respond to an MP_FAIL with another
MP_FAIL) is implemented using the MPTCP socket's sk_timer. That timer is also
used at MPTCP socket close, so it's important to not have the two timer users
interfere with each other.

At MPTCP socket close, all subflows are orphaned before sk_timer is
manipulated. By checking the SOCK_DEAD flag on the subflows, each subflow can
determine if the timer is safe to alter without acquiring any MPTCP-level
lock. This replaces code that was using the mptcp_data_lock and MPTCP-level
socket state checks that did not correctly protect the timer.

Fixes: 49fa1919d6bc ("mptcp: reset subflow when MP_FAIL doesn't respond")
Reviewed-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
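As a rough illustration of the pattern the patch settles on (paraphrased from
the hunks below; mp_fail_stop_timer() is a hypothetical wrapper used only for
this sketch, not a helper added by the patch):

#include <net/sock.h>

/* Sketch: stop the shared sk_timer only when the subflow has not been
 * orphaned. At MPTCP socket close every subflow is orphaned (SOCK_DEAD set)
 * before sk_timer is touched, so this one flag check is enough to keep the
 * MP_FAIL timeout path from racing with the close path, without taking
 * mptcp_data_lock() or re-reading the MPTCP socket state.
 */
static void mp_fail_stop_timer(struct sock *ssk, struct sock *sk)
{
	if (!sock_flag(ssk, SOCK_DEAD))
		sk_stop_timer(sk, &sk->sk_timer);
}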
Diffstat (limited to 'net')
-rw-r--r--	net/mptcp/pm.c      |  7 ++-----
-rw-r--r--	net/mptcp/subflow.c | 12 ++++--------
2 files changed, 6 insertions(+), 13 deletions(-)
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index 8ba51120f35b..59a85220edc9 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -312,13 +312,10 @@ void mptcp_pm_mp_fail_received(struct sock *sk, u64 fail_seq)
 		subflow->send_mp_fail = 1;
 		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPFAILTX);
 		subflow->send_infinite_map = 1;
-	} else if (s && inet_sk_state_load(s) != TCP_CLOSE) {
+	} else if (!sock_flag(sk, SOCK_DEAD)) {
 		pr_debug("MP_FAIL response received");
-		mptcp_data_lock(s);
-		if (inet_sk_state_load(s) != TCP_CLOSE)
-			sk_stop_timer(s, &s->sk_timer);
-		mptcp_data_unlock(s);
+		sk_stop_timer(s, &s->sk_timer);
 	}
 }
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 27273cf091db..8841e8cd9ad8 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1016,12 +1016,9 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
pr_debug("infinite mapping received");
MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
subflow->map_data_len = 0;
- if (sk && inet_sk_state_load(sk) != TCP_CLOSE) {
- mptcp_data_lock(sk);
- if (inet_sk_state_load(sk) != TCP_CLOSE)
- sk_stop_timer(sk, &sk->sk_timer);
- mptcp_data_unlock(sk);
- }
+ if (!sock_flag(ssk, SOCK_DEAD))
+ sk_stop_timer(sk, &sk->sk_timer);
+
return MAPPING_INVALID;
}
@@ -1241,9 +1238,8 @@ fallback:
 		tcp_send_active_reset(ssk, GFP_ATOMIC);
 		while ((skb = skb_peek(&ssk->sk_receive_queue)))
 			sk_eat_skb(ssk, skb);
-	} else {
+	} else if (!sock_flag(ssk, SOCK_DEAD)) {
 		WRITE_ONCE(subflow->mp_fail_response_expect, true);
-		/* The data lock is acquired in __mptcp_move_skbs() */
 		sk_reset_timer((struct sock *)msk,
 			       &((struct sock *)msk)->sk_timer,
 			       jiffies + TCP_RTO_MAX);