author     Yuchung Cheng <ycheng@google.com>    2017-01-12 22:11:36 -0800
committer  David S. Miller <davem@davemloft.net>    2017-01-13 22:37:16 -0500
commit     a0370b3f3f2cfb8b424b04c0545414abaa53f5ee (patch)
tree       3b5e28b49001af777d99edfa784d9c8f1256616c /net/ipv4/tcp_recovery.c
parent     98e36d449cc681f1bb2ce2230243f7f977a7da1b (diff)
download   linux-a0370b3f3f2cfb8b424b04c0545414abaa53f5ee.tar.bz2
tcp: enable RACK loss detection to trigger recovery
This patch changes two things:

1. Start fast recovery with RACK in addition to other heuristics
   (e.g., DUPACK threshold, FACK). Prior to this change RACK is
   enabled to detect losses only after the recovery has started
   by other algorithms.

2. Disable TCP early retransmit. RACK subsumes the early retransmit
   with the new reordering timer feature. A later patch in this
   series removes the early retransmit code.

Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
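To illustrate the idea (this is not the kernel implementation): RACK declares a packet lost once some packet sent after it has been (s)acked and a small settling delay has also elapsed. Below is a minimal standalone C sketch; the names (struct pkt, rack_is_lost) are invented for the example, plain microsecond counters stand in for the kernel's skb_mstamp, and the raw sequence comparison is not wraparound-safe like the kernel's after() helper.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified per-packet state consulted by the RACK check. */
struct pkt {
	uint64_t xmit_us;	/* time this packet was (re)sent */
	uint32_t end_seq;	/* used only to break send-time ties */
};

/* A packet is deemed lost once some packet sent after it has been
 * (s)acked, and reo_wnd_us microseconds of settling delay have
 * elapsed past its send time, to absorb mild reordering.
 */
static bool rack_is_lost(const struct pkt *p,
			 uint64_t sacked_xmit_us, uint32_t sacked_end_seq,
			 uint64_t now_us, uint64_t reo_wnd_us)
{
	bool sent_after = sacked_xmit_us > p->xmit_us ||
			  (sacked_xmit_us == p->xmit_us &&
			   sacked_end_seq > p->end_seq);

	return sent_after && now_us >= p->xmit_us + reo_wnd_us;
}

int main(void)
{
	struct pkt p = { .xmit_us = 1000, .end_seq = 100 };

	/* A packet sent at t=2000us was sacked; with reo_wnd = 500us,
	 * p is declared lost once now >= 1500us.
	 */
	printf("%d\n", rack_is_lost(&p, 2000, 200, 1600, 500));	/* 1 */
	return 0;
}

Before this patch the equivalent kernel check only ran once another heuristic had already put the connection in recovery; after it, a positive result can itself start recovery, via tcp_rack_reo_timeout() or tcp_time_to_recover().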
Diffstat (limited to 'net/ipv4/tcp_recovery.c')
-rw-r--r--  net/ipv4/tcp_recovery.c | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index 1e330a2f913d..4ecb38ae8504 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -1,7 +1,7 @@
 #include <linux/tcp.h>
 #include <net/tcp.h>
 
-int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOST_RETRANS;
+int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOSS_DETECTION;
 
 static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
 {
@@ -24,7 +24,9 @@ static bool tcp_rack_sent_after(const struct skb_mstamp *t1,
 	       (t1->v64 == t2->v64 && after(seq1, seq2));
 }
 
-/* Marks a packet lost, if some packet sent later has been (s)acked.
+/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
+ *
+ * Marks a packet lost, if some packet sent later has been (s)acked.
  * The underlying idea is similar to the traditional dupthresh and FACK
  * but they look at different metrics:
  *
@@ -37,8 +39,10 @@ static bool tcp_rack_sent_after(const struct skb_mstamp *t1,
  * is being more resilient to reordering by simply allowing some
  * "settling delay", instead of tweaking the dupthresh.
  *
- * The current version is only used after recovery starts but can be
- * easily extended to detect the first loss.
+ * When tcp_rack_detect_loss() detects some packets are lost and we
+ * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
+ * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
+ * make us enter the CA_Recovery state.
  */
 static void tcp_rack_detect_loss(struct sock *sk, const struct skb_mstamp *now,
 				 u32 *reo_timeout)
@@ -54,7 +58,7 @@ static void tcp_rack_detect_loss(struct sock *sk, const struct skb_mstamp *now,
 	 * to queuing or delayed ACKs.
 	 */
 	reo_wnd = 1000;
-	if (tp->rack.reord && tcp_min_rtt(tp) != ~0U)
+	if ((tp->rack.reord || !tp->lost_out) && tcp_min_rtt(tp) != ~0U)
 		reo_wnd = max(tcp_min_rtt(tp) >> 2, reo_wnd);
 
 	tcp_for_write_queue(skb, sk) {
@@ -105,7 +109,7 @@ void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now)
 	struct tcp_sock *tp = tcp_sk(sk);
 	u32 timeout;
 
-	if (inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery || !tp->rack.advanced)
+	if (!tp->rack.advanced)
 		return;
 
 	/* Reset the advanced flag to avoid unnecessary queue scanning */
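For a concrete feel of the reo_wnd change in the tcp_rack_detect_loss() hunk above: the window is min_rtt/4, floored at 1000us, and after this patch the RTT-based value is used not only when reordering has been observed (tp->rack.reord) but also when nothing is currently marked lost (!tp->lost_out), i.e. when RACK may be the first detector to fire. A standalone sketch of that arithmetic follows; rack_reo_wnd_us is a name invented for this example, not a kernel function.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the reo_wnd computation: min_rtt/4, lower-bounded to
 * 1000us; ~0U means no min_rtt sample is available yet.
 */
static uint32_t rack_reo_wnd_us(uint32_t min_rtt_us, int reord_seen,
				uint32_t lost_out)
{
	uint32_t reo_wnd = 1000;

	if ((reord_seen || !lost_out) && min_rtt_us != ~0U &&
	    (min_rtt_us >> 2) > reo_wnd)
		reo_wnd = min_rtt_us >> 2;
	return reo_wnd;
}

int main(void)
{
	printf("%u\n", rack_reo_wnd_us(20000, 0, 0));	/* min_rtt 20ms -> 5000us */
	printf("%u\n", rack_reo_wnd_us(2000, 0, 0));	/* min_rtt 2ms -> 1000us floor */
	printf("%u\n", rack_reo_wnd_us(20000, 0, 3));	/* losses pending, no reordering seen -> 1000us */
	return 0;
}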