Diffstat (limited to 'net/mptcp/protocol.c')
 net/mptcp/protocol.c | 55 ++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 36 insertions(+), 19 deletions(-)
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index c2a8392254dc..a57f3eab7b6a 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -363,8 +363,6 @@ static void mptcp_check_data_fin_ack(struct sock *sk)
/* Look for an acknowledged DATA_FIN */
if (mptcp_pending_data_fin_ack(sk)) {
- mptcp_stop_timer(sk);
-
WRITE_ONCE(msk->snd_data_fin_enable, 0);
switch (sk->sk_state) {
@@ -458,7 +456,18 @@ static bool mptcp_subflow_cleanup_rbuf(struct sock *ssk)
static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
{
struct sock *ack_hint = READ_ONCE(msk->ack_hint);
+ int old_space = READ_ONCE(msk->old_wspace);
struct mptcp_subflow_context *subflow;
+ struct sock *sk = (struct sock *)msk;
+ bool cleanup;
+
+ /* this is a simple superset of what tcp_cleanup_rbuf() implements
+	 * so that we don't have to acquire the ssk socket lock most of the
+	 * time just to find there is nothing to do
+	 */
+	cleanup = __mptcp_space(sk) - old_space >= max(0, old_space);
+	if (!cleanup)
+		return;
/* if the hinted ssk is still active, try to use it */
if (likely(ack_hint)) {
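The comment above describes the new early return: the subflow locks are only worth taking when the msk-level receive space has grown by at least the previously advertised value, i.e. roughly doubled. A minimal stand-alone C sketch of that predicate, with hypothetical values (should_cleanup() is an illustration, not a kernel helper):

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch of the predicate added above: only bother acquiring the
     * subflow locks when the current receive space has grown by at
     * least the last advertised value, or when old_space was <= 0.
     */
    static bool should_cleanup(int cur_space, int old_space)
    {
            int lower_bound = old_space > 0 ? old_space : 0;

            return cur_space - old_space >= lower_bound;
    }

    int main(void)
    {
            printf("%d\n", should_cleanup(4096, 2048)); /* 1: space doubled */
            printf("%d\n", should_cleanup(3000, 2048)); /* 0: growth too small */
            printf("%d\n", should_cleanup(1024, -512)); /* 1: was under pressure */
            return 0;
    }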
@@ -1565,6 +1574,9 @@ out:
mptcp_set_timeout(sk, ssk);
tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
info.size_goal);
+ if (!mptcp_timer_pending(sk))
+ mptcp_reset_timer(sk);
+
if (msk->snd_data_fin_enable &&
msk->snd_nxt + 1 == msk->write_seq)
mptcp_schedule_work(sk);
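Note how this hunk pairs with the first one: mptcp_check_data_fin_ack() no longer stops the retransmission timer, and the push path instead guarantees a live timer whenever data may be in flight. A hypothetical user-space model of that rearm-if-idle pattern (the timer type and helpers are stand-ins, not kernel APIs):

    #include <stdbool.h>
    #include <stdio.h>

    /* Model of the rearm-if-idle pattern added after tcp_push():
     * pushing data must leave a pending retransmission timer behind.
     */
    struct timer { bool pending; };

    static void reset_timer(struct timer *t)
    {
            t->pending = true; /* arm with the current timeout */
            printf("timer armed\n");
    }

    static void after_push(struct timer *t)
    {
            if (!t->pending)
                    reset_timer(t);
    }

    int main(void)
    {
            struct timer t = { .pending = false };

            after_push(&t); /* idle: arms the timer */
            after_push(&t); /* already pending: nothing to do */
            return 0;
    }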
@@ -1868,7 +1880,7 @@ static void __mptcp_splice_receive_queue(struct sock *sk)
skb_queue_splice_tail_init(&sk->sk_receive_queue, &msk->receive_queue);
}
-static bool __mptcp_move_skbs(struct mptcp_sock *msk, unsigned int rcv)
+static bool __mptcp_move_skbs(struct mptcp_sock *msk)
{
struct sock *sk = (struct sock *)msk;
unsigned int moved = 0;
@@ -1888,13 +1900,10 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk, unsigned int rcv)
slowpath = lock_sock_fast(ssk);
mptcp_data_lock(sk);
+ __mptcp_update_rmem(sk);
done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
mptcp_data_unlock(sk);
- if (moved && rcv) {
- WRITE_ONCE(msk->rmem_pending, min(rcv, moved));
- tcp_cleanup_rbuf(ssk, 1);
- WRITE_ONCE(msk->rmem_pending, 0);
- }
+ tcp_cleanup_rbuf(ssk, moved);
unlock_sock_fast(ssk, slowpath);
} while (!done);
@@ -1907,6 +1916,7 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk, unsigned int rcv)
ret |= __mptcp_ofo_queue(msk);
__mptcp_splice_receive_queue(sk);
mptcp_data_unlock(sk);
+ mptcp_cleanup_rbuf(msk);
}
if (ret)
mptcp_check_data_fin((struct sock *)msk);
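With the rcv argument gone, the receive path no longer publishes a transient msk->rmem_pending hint around tcp_cleanup_rbuf(); it updates the forward-allocated accounting first (__mptcp_update_rmem()) and passes the consumed byte count directly. A hedged stand-alone model of the old and new flows (every name below is an illustrative stand-in):

    #include <stdio.h>

    static unsigned int rmem_pending; /* stand-in for msk->rmem_pending */

    /* In the old scheme 'copied' was 1 and the hint carried the size. */
    static void tcp_cleanup_rbuf_model(unsigned int copied)
    {
            printf("ack decision based on %u bytes\n", copied + rmem_pending);
    }

    static void old_flow(unsigned int rcv, unsigned int moved)
    {
            if (moved && rcv) {
                    rmem_pending = moved < rcv ? moved : rcv;
                    tcp_cleanup_rbuf_model(1);
                    rmem_pending = 0;
            }
    }

    static void new_flow(unsigned int moved)
    {
            tcp_cleanup_rbuf_model(moved);
    }

    int main(void)
    {
            old_flow(4096, 1500); /* hint published around the call */
            new_flow(1500);       /* byte count passed directly */
            return 0;
    }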
@@ -1936,7 +1946,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
while (copied < len) {
- int bytes_read, old_space;
+ int bytes_read;
bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied);
if (unlikely(bytes_read < 0)) {
@@ -1947,14 +1957,11 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
copied += bytes_read;
- if (skb_queue_empty(&msk->receive_queue) &&
- __mptcp_move_skbs(msk, len - copied))
- continue;
-
/* be sure to advertise window change */
- old_space = READ_ONCE(msk->old_wspace);
- if ((tcp_space(sk) - old_space) >= old_space)
- mptcp_cleanup_rbuf(msk);
+ mptcp_cleanup_rbuf(msk);
+
+ if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk))
+ continue;
/* only the master socket status is relevant here. The exit
* conditions mirror closely tcp_recvmsg()
@@ -1982,7 +1989,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
/* race breaker: the shutdown could be after the
* previous receive queue check
*/
- if (__mptcp_move_skbs(msk, len - copied))
+ if (__mptcp_move_skbs(msk))
continue;
break;
}
@@ -2015,7 +2022,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
/* .. race-breaker: ssk might have gotten new data
* after last __mptcp_move_skbs() returned false.
*/
- if (unlikely(__mptcp_move_skbs(msk, 0)))
+ if (unlikely(__mptcp_move_skbs(msk)))
set_bit(MPTCP_DATA_READY, &msk->flags);
} else if (unlikely(!test_bit(MPTCP_DATA_READY, &msk->flags))) {
/* data to read but mptcp_wait_data() cleared DATA_READY */
@@ -2275,6 +2282,7 @@ static void mptcp_worker(struct work_struct *work)
if (!test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
goto unlock;
+ __mptcp_clean_una(sk);
dfrag = mptcp_rtx_head(sk);
if (!dfrag)
goto unlock;
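Calling __mptcp_clean_una() before mptcp_rtx_head() drops fragments that were fully acknowledged since the last cleanup, so the worker never picks already-acked data as the retransmission head. A small illustrative sketch of that invariant (types and values are hypothetical):

    #include <stdio.h>

    /* Fragments whose end sequence is at or below snd_una are fully
     * acknowledged and must be dropped before picking the rtx head.
     */
    struct frag { unsigned long long end_seq; };

    static struct frag rtx_queue[] = { { 1000 }, { 2000 }, { 3000 } };
    static unsigned int head; /* index of the first unacked fragment */

    static void clean_una(unsigned long long snd_una)
    {
            unsigned int n = sizeof(rtx_queue) / sizeof(rtx_queue[0]);

            while (head < n && rtx_queue[head].end_seq <= snd_una)
                    head++;
    }

    int main(void)
    {
            clean_una(2000); /* first two fragments are fully acked */
            printf("rtx head ends at %llu\n", rtx_queue[head].end_seq); /* 3000 */
            return 0;
    }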
@@ -2943,6 +2951,8 @@ static void mptcp_release_cb(struct sock *sk)
mptcp_push_pending(sk, 0);
spin_lock_bh(&sk->sk_lock.slock);
}
+ if (test_and_clear_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags))
+ __mptcp_error_report(sk);
/* clear any wmem reservation and errors */
__mptcp_update_wmem(sk);
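The new MPTCP_ERROR_REPORT handling follows the usual deferred-work pattern: a context that cannot process the event directly (e.g. softirq) sets a flag, and the release callback runs the handler once the socket lock owner lets go. A user-space sketch of that pattern, with C11 atomics standing in for the kernel bit operations (names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint flags;
    #define ERROR_REPORT_BIT 0x1u

    /* e.g. set from a context that cannot run the handler right away */
    static void mark_error_pending(void)
    {
            atomic_fetch_or(&flags, ERROR_REPORT_BIT);
    }

    /* runs when the lock owner releases the socket */
    static void release_cb(void)
    {
            if (atomic_fetch_and(&flags, ~ERROR_REPORT_BIT) & ERROR_REPORT_BIT)
                    printf("reporting deferred error\n");
    }

    int main(void)
    {
            mark_error_pending();
            release_cb();
            return 0;
    }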
@@ -3319,7 +3329,7 @@ static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
struct sock *sk = (struct sock *)msk;
if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
- return 0;
+ return EPOLLOUT | EPOLLWRNORM;
if (sk_stream_is_writeable(sk))
return EPOLLOUT | EPOLLWRNORM;
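Returning EPOLLOUT | EPOLLWRNORM after SEND_SHUTDOWN matches tcp_poll() semantics: the socket reports itself writable so the application's next write fails fast (EPIPE) instead of the application sleeping in poll() forever. A compact model of the fixed check (the constant and helper are stand-ins mirroring the kernel's values):

    #define _XOPEN_SOURCE 600
    #include <poll.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define SEND_SHUTDOWN 2 /* stand-in mirroring the kernel constant */

    /* A send-shutdown socket still reports itself writable. */
    static int check_writeable(int sk_shutdown, bool stream_writeable)
    {
            if (sk_shutdown & SEND_SHUTDOWN)
                    return POLLOUT | POLLWRNORM;

            return stream_writeable ? POLLOUT | POLLWRNORM : 0;
    }

    int main(void)
    {
            printf("after SHUT_WR: %#x\n",
                   (unsigned int)check_writeable(SEND_SHUTDOWN, false));
            printf("open, full buffer: %#x\n",
                   (unsigned int)check_writeable(0, false));
            return 0;
    }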
@@ -3352,9 +3362,16 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
mask |= mptcp_check_readable(msk);
mask |= mptcp_check_writeable(msk);
}
+ if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
+	mask |= EPOLLHUP;
if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+ /* This barrier is coupled with smp_wmb() in tcp_reset() */
+ smp_rmb();
+ if (sk->sk_err)
+ mask |= EPOLLERR;
+
return mask;
}
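The smp_rmb() pairs with the smp_wmb() in tcp_reset(): the writer stores sk_err before the state change that wakes pollers, and the reader must order its sk_err load after observing that change, otherwise it could see the wake-up but a stale zero sk_err. A user-space sketch with C11 release/acquire standing in for the explicit barriers (names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    static int sk_err;
    static _Atomic int dead; /* stand-in for the state pollers observe */

    static void tcp_reset_model(void)
    {
            sk_err = 104; /* ECONNRESET */
            /* publish: release plays the role of smp_wmb() */
            atomic_store_explicit(&dead, 1, memory_order_release);
    }

    static int poll_model(void)
    {
            /* acquire plays the role of smp_rmb() */
            if (!atomic_load_explicit(&dead, memory_order_acquire))
                    return 0;

            return sk_err; /* ordered after the 'dead' observation */
    }

    int main(void)
    {
            tcp_reset_model();
            printf("sk_err=%d\n", poll_model());
            return 0;
    }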