diff options
author | Baruch Even <baruch@ev-en.org> | 2006-03-20 22:22:47 -0800 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2006-03-20 22:22:47 -0800 |
commit | 0bc6d90b82775113bbbe371f5d9fcffefa5fa94d (patch) | |
tree | 5597a249a1ad015c226f2c94192de5efec9092d3 /net | |
parent | c33ad6e476e4cdc245215f3eb5b3df353df1b370 (diff) | |
download | linux-0bc6d90b82775113bbbe371f5d9fcffefa5fa94d.tar.bz2 |
[TCP] H-TCP: Account for delayed-ACKs
Account for delayed-ACKs in H-TCP.
Delayed-ACKs cause H-TCP to be less aggressive than its design calls
for. It is especially true when the receiver is a Linux machine where
the average delayed ack is over 3 packets with values of 7 not unheard
of.
Signed-off-by: Baruch Even <baruch@ev-en.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r-- | net/ipv4/tcp_htcp.c | 26 |
1 file changed, 18 insertions, 8 deletions
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c index fda2f873599e..ac19252e34ce 100644 --- a/net/ipv4/tcp_htcp.c +++ b/net/ipv4/tcp_htcp.c @@ -29,7 +29,8 @@ struct htcp { u8 modeswitch; /* Delay modeswitch until we had at least one congestion event */ u8 ccount; /* Number of RTTs since last congestion event */ u8 undo_ccount; - u16 packetcount; + u16 pkts_acked; + u32 packetcount; u32 minRTT; u32 maxRTT; u32 snd_cwnd_cnt2; @@ -92,6 +93,12 @@ static void measure_achieved_throughput(struct sock *sk, u32 pkts_acked) struct htcp *ca = inet_csk_ca(sk); u32 now = tcp_time_stamp; + if (icsk->icsk_ca_state == TCP_CA_Open) + ca->pkts_acked = pkts_acked; + + if (!use_bandwidth_switch) + return; + /* achieved throughput calculations */ if (icsk->icsk_ca_state != TCP_CA_Open && icsk->icsk_ca_state != TCP_CA_Disorder) { @@ -217,20 +224,24 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt, measure_rtt(sk); /* keep track of number of round-trip times since last backoff event */ - if (ca->snd_cwnd_cnt2++ > tp->snd_cwnd) { + if (ca->snd_cwnd_cnt2 >= tp->snd_cwnd) { ca->ccount++; - ca->snd_cwnd_cnt2 = 0; + ca->snd_cwnd_cnt2 -= tp->snd_cwnd; htcp_alpha_update(ca); - } + } else + ca->snd_cwnd_cnt2 += ca->pkts_acked; /* In dangerous area, increase slowly. 
* In theory this is tp->snd_cwnd += alpha / tp->snd_cwnd */ - if ((tp->snd_cwnd_cnt++ * ca->alpha)>>7 >= tp->snd_cwnd) { + if ((tp->snd_cwnd_cnt * ca->alpha)>>7 >= tp->snd_cwnd) { if (tp->snd_cwnd < tp->snd_cwnd_clamp) tp->snd_cwnd++; tp->snd_cwnd_cnt = 0; - } + } else + tp->snd_cwnd_cnt += ca->pkts_acked; + + ca->pkts_acked = 1; } } @@ -249,6 +260,7 @@ static void htcp_init(struct sock *sk) memset(ca, 0, sizeof(struct htcp)); ca->alpha = ALPHA_BASE; ca->beta = BETA_MIN; + ca->pkts_acked = 1; } static void htcp_state(struct sock *sk, u8 new_state) @@ -278,8 +290,6 @@ static int __init htcp_register(void) { BUG_ON(sizeof(struct htcp) > ICSK_CA_PRIV_SIZE); BUILD_BUG_ON(BETA_MIN >= BETA_MAX); - if (!use_bandwidth_switch) - htcp.pkts_acked = NULL; return tcp_register_congestion_control(&htcp); } |