author | Eric Dumazet <edumazet@google.com> | 2012-07-20 05:45:50 +0000
committer | David S. Miller <davem@davemloft.net> | 2012-07-20 10:59:41 -0700
commit | 6f458dfb409272082c9bfa412f77ff2fc21c626f (patch)
tree | 7475cd12eb01023b9852cbc957080b9aa7cfdc64 /net/ipv4/tcp_output.c
parent | 9dc274151a548ffd215caecec5a8872db8799447 (diff)
download | linux-6f458dfb409272082c9bfa412f77ff2fc21c626f.tar.bz2
tcp: improve latencies of timer triggered events
The modern TCP stack depends heavily on tcp_write_timer() having low
latency, but the current implementation doesn't quite meet that
expectation.
When a timer fires but finds the socket owned by the user, it rearms
itself for an additional delay, hoping the next run will be more
successful.
tcp_write_timer(), for example, uses a 50ms delay for the next try,
which defeats many attempts to get predictable TCP behavior in terms
of latency.
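For context, the rearm path in question looks roughly like this (a
paraphrase of the era's tcp_write_timer() in net/ipv4/tcp_timer.c,
reconstructed here for illustration, not quoted from this patch):

/* Illustrative paraphrase of the pre-patch timer, not verbatim kernel code. */
static void tcp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* Socket is locked by the user: retry in 50ms (HZ / 20
		 * jiffies) and hope the lock is free by then. */
		sk_reset_timer(sk, &inet_csk(sk)->icsk_retransmit_timer,
			       jiffies + (HZ / 20));
		goto out_unlock;
	}
	/* ... normal retransmit timer processing runs here ... */
out_unlock:
	bh_unlock_sock(sk);
	sock_put(sk);
}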
Use the recently introduced tcp_release_cb() so that the user owning
the socket calls the various deferred handlers right before releasing
it. This permits a followup patch addressing the tcp_tso_should_defer()
syndrome (some deferred packets have to wait for the RTO timer to be
transmitted, even though cwnd would allow us to send them sooner).
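The timer-side half of the change lives in net/ipv4/tcp_timer.c and is
not shown in this diffstat-limited view. Sketched from the mechanism
described above (details may differ from the actual patch), the 50ms
rearm becomes a deferred-work bit that tcp_release_cb() later consumes:

/* Sketch of the post-patch timer path; the tcp_timer.c hunk is not
 * shown on this page, so treat this as an approximation. */
static void tcp_write_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);	/* run the work right away */
	} else {
		/* delegate the work to tcp_release_cb() */
		set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}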
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Tom Herbert <therbert@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Nandita Dukkipati <nanditad@google.com>
Cc: H.K. Jerry Chu <hkchu@google.com>
Cc: John Heffner <johnwheffner@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r-- | net/ipv4/tcp_output.c | 46
1 file changed, 29 insertions, 17 deletions
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 27a32acfdb62..950aebfd9967 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -837,6 +837,13 @@ struct tsq_tasklet {
 };
 static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);
 
+static void tcp_tsq_handler(struct sock *sk)
+{
+	if ((1 << sk->sk_state) &
+	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
+	     TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
+		tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
+}
 /*
  * One tasklest per cpu tries to send more skbs.
  * We run in tasklet context but need to disable irqs when
@@ -864,16 +871,10 @@ static void tcp_tasklet_func(unsigned long data)
 
 		bh_lock_sock(sk);
 
 		if (!sock_owned_by_user(sk)) {
-			if ((1 << sk->sk_state) &
-			    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 |
-			     TCPF_CLOSING | TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
-				tcp_write_xmit(sk,
-					       tcp_current_mss(sk),
-					       0, 0,
-					       GFP_ATOMIC);
+			tcp_tsq_handler(sk);
 		} else {
 			/* defer the work to tcp_release_cb() */
-			set_bit(TSQ_OWNED, &tp->tsq_flags);
+			set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
 		}
 		bh_unlock_sock(sk);
 
@@ -882,6 +883,9 @@ static void tcp_tasklet_func(unsigned long data)
 	}
 }
 
+#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) |		\
+			  (1UL << TCP_WRITE_TIMER_DEFERRED) |	\
+			  (1UL << TCP_DELACK_TIMER_DEFERRED))
 /**
  * tcp_release_cb - tcp release_sock() callback
  * @sk: socket
@@ -892,16 +896,24 @@ static void tcp_tasklet_func(unsigned long data)
 void tcp_release_cb(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	unsigned long flags, nflags;
 
-	if (test_and_clear_bit(TSQ_OWNED, &tp->tsq_flags)) {
-		if ((1 << sk->sk_state) &
-		    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 |
-		     TCPF_CLOSING | TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
-			tcp_write_xmit(sk,
-				       tcp_current_mss(sk),
-				       0, 0,
-				       GFP_ATOMIC);
-	}
+	/* perform an atomic operation only if at least one flag is set */
+	do {
+		flags = tp->tsq_flags;
+		if (!(flags & TCP_DEFERRED_ALL))
+			return;
+		nflags = flags & ~TCP_DEFERRED_ALL;
+	} while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);
+
+	if (flags & (1UL << TCP_TSQ_DEFERRED))
+		tcp_tsq_handler(sk);
+
+	if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED))
+		tcp_write_timer_handler(sk);
+
+	if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED))
+		tcp_delack_timer_handler(sk);
 }
 EXPORT_SYMBOL(tcp_release_cb);
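The heart of the new tcp_release_cb() is the lock-free grab-and-clear
of tsq_flags: the cmpxchg() loop takes the atomic path only when at
least one deferred bit is set, then runs each pending handler exactly
once. A minimal, self-contained userspace analogue of that pattern,
using C11 atomics and made-up names in place of the kernel primitives:

#include <stdatomic.h>
#include <stdio.h>

enum {
	TSQ_DEFERRED_BIT,		/* stands in for TCP_TSQ_DEFERRED */
	WRITE_TIMER_DEFERRED_BIT,	/* .. TCP_WRITE_TIMER_DEFERRED */
	DELACK_TIMER_DEFERRED_BIT,	/* .. TCP_DELACK_TIMER_DEFERRED */
};

#define DEFERRED_ALL ((1UL << TSQ_DEFERRED_BIT) |	  \
		      (1UL << WRITE_TIMER_DEFERRED_BIT) | \
		      (1UL << DELACK_TIMER_DEFERRED_BIT))

static atomic_ulong deferred_flags;

/* "Timer" context: the lock is busy, so record the work for later. */
static void defer_work(int bit)
{
	atomic_fetch_or(&deferred_flags, 1UL << bit);
}

/* "Lock release" context: atomically grab-and-clear every pending bit,
 * then dispatch each handler once, outside the atomic section. */
static void release_cb(void)
{
	unsigned long flags, nflags;

	flags = atomic_load(&deferred_flags);
	do {
		if (!(flags & DEFERRED_ALL))
			return;		/* fast path: nothing deferred */
		nflags = flags & ~DEFERRED_ALL;
		/* on failure, compare_exchange reloads flags for us */
	} while (!atomic_compare_exchange_weak(&deferred_flags,
					       &flags, nflags));

	if (flags & (1UL << TSQ_DEFERRED_BIT))
		puts("run tsq handler");
	if (flags & (1UL << WRITE_TIMER_DEFERRED_BIT))
		puts("run write timer handler");
	if (flags & (1UL << DELACK_TIMER_DEFERRED_BIT))
		puts("run delack timer handler");
}

int main(void)
{
	defer_work(WRITE_TIMER_DEFERRED_BIT);
	defer_work(DELACK_TIMER_DEFERRED_BIT);
	release_cb();	/* runs both deferred handlers */
	release_cb();	/* no-op: the bits were already consumed */
	return 0;
}

As in the kernel version, the handlers run in the context of the thread
that releases the lock, so work from a timer that fired while the lock
was held is serviced as soon as the lock drops rather than after a
fixed retry delay.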