author     Eric Dumazet <edumazet@google.com>    2022-08-30 11:56:56 -0700
committer  Jakub Kicinski <kuba@kernel.org>      2022-08-31 19:56:48 -0700
commit     79e3602caa6f9d59c4f66a268407080496dae408 (patch)
tree       639b61f605e41a495d774f43c4e3062296bd3836 /net
parent     8c70521238b7863c2af607e20bcba20f974c969b (diff)
download   linux-79e3602caa6f9d59c4f66a268407080496dae408.tar.bz2
tcp: make global challenge ack rate limitation per net-ns and default disabled
Because per host rate limiting has been proven problematic (side channel
attacks can be based on it), per host rate limiting of challenge acks
ideally should be per netns and turned off by default.

This is a long due followup of following commits:

083ae308280d ("tcp: enable per-socket rate limiting of all 'challenge acks'")
f2b2c582e824 ("tcp: mitigate ACK loops for connections as tcp_sock")
75ff39ccc1bd ("tcp: make challenge acks less predictable")

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Jason Baron <jbaron@akamai.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
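With this change the limit is read from the receiving socket's own namespace
(net->ipv4.sysctl_tcp_challenge_ack_limit, exposed as
/proc/sys/net/ipv4/tcp_challenge_ack_limit), and the new default of INT_MAX
makes the host-wide check a no-op. A minimal userspace sketch, not part of the
patch, that reports whether the throttle is active in the current namespace:

/* Illustrative only: read the per-netns challenge ACK limit and report
 * whether the RFC 5961 host-wide throttle is effectively disabled.
 */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        const char *path = "/proc/sys/net/ipv4/tcp_challenge_ack_limit";
        FILE *f = fopen(path, "r");
        long limit;

        if (!f || fscanf(f, "%ld", &limit) != 1) {
                perror(path);
                return EXIT_FAILURE;
        }
        fclose(f);

        if (limit == INT_MAX)
                printf("challenge ACK throttling disabled (limit = INT_MAX)\n");
        else
                printf("challenge ACKs limited to ~%ld per second in this netns\n", limit);
        return EXIT_SUCCESS;
}

Writing a finite value back to the same file (for example the old host-wide
default of 1000) re-enables throttling for that namespace only.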
Diffstat (limited to 'net')
-rw-r--r--  net/ipv4/tcp_input.c  21
-rw-r--r--  net/ipv4/tcp_ipv4.c    6
2 files changed, 15 insertions, 12 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c184e15397a2..b85a9f755da4 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3614,12 +3614,9 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
 /* RFC 5961 7 [ACK Throttling] */
 static void tcp_send_challenge_ack(struct sock *sk)
 {
-        /* unprotected vars, we dont care of overwrites */
-        static u32 challenge_timestamp;
-        static unsigned int challenge_count;
         struct tcp_sock *tp = tcp_sk(sk);
         struct net *net = sock_net(sk);
-        u32 count, now;
+        u32 count, now, ack_limit;

         /* First check our per-socket dupack rate limit. */
         if (__tcp_oow_rate_limited(net,
@@ -3627,18 +3624,22 @@ static void tcp_send_challenge_ack(struct sock *sk)
                                    &tp->last_oow_ack_time))
                 return;

+        ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
+        if (ack_limit == INT_MAX)
+                goto send_ack;
+
         /* Then check host-wide RFC 5961 rate limit. */
         now = jiffies / HZ;
-        if (now != READ_ONCE(challenge_timestamp)) {
-                u32 ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
+        if (now != READ_ONCE(net->ipv4.tcp_challenge_timestamp)) {
                 u32 half = (ack_limit + 1) >> 1;

-                WRITE_ONCE(challenge_timestamp, now);
-                WRITE_ONCE(challenge_count, half + prandom_u32_max(ack_limit));
+                WRITE_ONCE(net->ipv4.tcp_challenge_timestamp, now);
+                WRITE_ONCE(net->ipv4.tcp_challenge_count, half + prandom_u32_max(ack_limit));
         }
-        count = READ_ONCE(challenge_count);
+        count = READ_ONCE(net->ipv4.tcp_challenge_count);
         if (count > 0) {
-                WRITE_ONCE(challenge_count, count - 1);
+                WRITE_ONCE(net->ipv4.tcp_challenge_count, count - 1);
+send_ack:
                 NET_INC_STATS(net, LINUX_MIB_TCPCHALLENGEACK);
                 tcp_send_ack(sk);
         }
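The tcp_input.c hunk above keeps the original throttling scheme but moves its
state from function-local statics into struct netns_ipv4: once per second the
budget is refilled to half the limit plus a random share of up to the limit
(so the exact cutoff stays unpredictable, per 75ff39ccc1bd), each challenge
ACK consumes one token, and ack_limit == INT_MAX jumps straight to send_ack.
A simplified single-threaded userspace sketch of the same scheme; struct
netns_like, time() and rand() are stand-ins for the netns fields, jiffies/HZ
and prandom_u32_max(), and the kernel additionally uses READ_ONCE()/
WRITE_ONCE() because the fields are updated locklessly:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Stand-in for the fields this patch adds to struct netns_ipv4. */
struct netns_like {
        unsigned int challenge_ack_limit;   /* sysctl value; INT_MAX == limiter off */
        unsigned long challenge_timestamp;  /* second the budget was last refilled */
        unsigned int challenge_count;       /* remaining budget for this second */
};

static bool may_send_challenge_ack(struct netns_like *ns)
{
        unsigned long now = (unsigned long)time(NULL); /* ~ jiffies / HZ */
        unsigned int limit = ns->challenge_ack_limit;

        if (limit == INT_MAX)       /* default: no per-netns throttling */
                return true;

        if (now != ns->challenge_timestamp) {
                /* New second: refill to half .. half + limit - 1 tokens. */
                unsigned int half = (limit + 1) / 2;
                unsigned int jitter = limit ? (unsigned int)rand() % limit : 0;

                ns->challenge_timestamp = now;
                ns->challenge_count = half + jitter;
        }
        if (ns->challenge_count > 0) {
                ns->challenge_count--;  /* consume one token, send the ACK */
                return true;
        }
        return false;                   /* budget exhausted for this second */
}

int main(void)
{
        struct netns_like ns = { .challenge_ack_limit = 1000 };
        int i, sent = 0;

        srand((unsigned int)time(NULL));
        for (i = 0; i < 2000; i++)
                sent += may_send_challenge_ack(&ns);
        printf("sent %d of 2000 challenge ACKs within one second\n", sent);
        return 0;
}

Keeping the counter per namespace means one container can no longer observe,
or exhaust, another container's challenge ACK budget, which is the side
channel the commit message refers to.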
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0c83780dc9bf..5b019ba2b9d2 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -3139,8 +3139,10 @@ static int __net_init tcp_sk_init(struct net *net)
         net->ipv4.sysctl_tcp_tso_win_divisor = 3;
         /* Default TSQ limit of 16 TSO segments */
         net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
-        /* rfc5961 challenge ack rate limiting */
-        net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
+
+        /* rfc5961 challenge ack rate limiting, per net-ns, disabled by default. */
+        net->ipv4.sysctl_tcp_challenge_ack_limit = INT_MAX;
+
         net->ipv4.sysctl_tcp_min_tso_segs = 2;
         net->ipv4.sysctl_tcp_tso_rtt_log = 9; /* 2^9 = 512 usec */
         net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
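tcp_sk_init() runs for every new network namespace, so with the INT_MAX
default above each netns starts with the throttle disabled and must opt back
in explicitly. A hedged C equivalent of
"sysctl -w net.ipv4.tcp_challenge_ack_limit=1000" (1000 was the previous
host-wide default; needs sufficient privileges in the target namespace):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        const char *path = "/proc/sys/net/ipv4/tcp_challenge_ack_limit";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return EXIT_FAILURE;
        }
        /* 1000 was the host-wide default before this patch. */
        if (fprintf(f, "1000\n") < 0 || fclose(f) == EOF) {
                perror(path);
                return EXIT_FAILURE;
        }
        puts("challenge ACK limit restored to 1000 for this netns");
        return EXIT_SUCCESS;
}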