author	Eric Dumazet <edumazet@google.com>	2016-04-13 22:05:40 -0700
committer	David S. Miller <davem@davemloft.net>	2016-04-15 16:45:44 -0400
commit	8804b2722dc5d6f9b7ba0a9e812eae9ee5ce95bc (patch)
tree	fd1010fca7c152ca547f02d80a02dc9d1b9d7b20
parent	b3d051477cf94e9d71d6acadb8a90de15237b9c1 (diff)
download	linux-8804b2722dc5d6f9b7ba0a9e812eae9ee5ce95bc.tar.bz2
tcp: remove false sharing in tcp_rcv_state_process()
The last known hot point during a SYNFLOOD attack is the clearing of
rx_opt.saw_tstamp in tcp_rcv_state_process(). It is not needed for a
listener, so we move it to where it matters.

Performance while a SYNFLOOD hits a single listener socket went from
5 Mpps to 6 Mpps on my test server (24 cores, 8 NIC RX queues).

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 net/ipv4/tcp_input.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
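For illustration only, a minimal user-space C sketch (not part of the patch; the names fake_sock, worker_stat, worker and the ALWAYS_CLEAR switch are hypothetical) of the contention pattern the commit message describes. With ALWAYS_CLEAR defined, every thread unconditionally stores to a field of one shared object, analogous to every CPU clearing tp->rx_opt.saw_tstamp on the single listener socket, so that cache line bounces between cores. Without it, each thread only touches its own cache-line-aligned counter, analogous to deferring the write to the code paths that actually consume the timestamp option.

/*
 * Hypothetical sketch of the false-sharing pattern the commit removes.
 * Build:  cc -O2 -pthread false_sharing_demo.c        (write moved away)
 *         cc -O2 -pthread -DALWAYS_CLEAR false_sharing_demo.c  (old behaviour)
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NTHREADS 8
#define ITERS    (50 * 1000 * 1000UL)

struct fake_sock {
	volatile int saw_tstamp;	/* shared field, written by every thread if ALWAYS_CLEAR */
} __attribute__((aligned(64)));		/* one cache line */

struct worker_stat {
	volatile uint64_t handled;	/* private per-thread work counter */
} __attribute__((aligned(64)));		/* keep each counter on its own cache line */

static struct fake_sock listener;	/* single listener-like object */
static struct worker_stat stats[NTHREADS];

static void *worker(void *arg)
{
	long id = (long)arg;

	for (uint64_t i = 0; i < ITERS; i++) {
#ifdef ALWAYS_CLEAR
		/* old behaviour: unconditional write to shared state on every "packet" */
		listener.saw_tstamp = 0;
#endif
		/* per-thread work: private cache line, no contention */
		stats[id].handled++;
	}
	return NULL;
}

int main(void)
{
	pthread_t tid[NTHREADS];
	uint64_t total = 0;

	for (long i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, worker, (void *)i);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);
	for (int i = 0; i < NTHREADS; i++)
		total += stats[i].handled;

	printf("handled %llu fake packets, saw_tstamp=%d\n",
	       (unsigned long long)total, listener.saw_tstamp);
	return 0;
}

Timing the two builds on a multi-core machine is expected to show the ALWAYS_CLEAR variant running noticeably slower; that contended write on the real listener path is what the 5 Mpps -> 6 Mpps numbers above reflect.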
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 7ea7034af83f..90e0d9256b74 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5796,8 +5796,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 	int queued = 0;
 	bool acceptable;
 
-	tp->rx_opt.saw_tstamp = 0;
-
 	switch (sk->sk_state) {
 	case TCP_CLOSE:
 		goto discard;
@@ -5838,6 +5836,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 		goto discard;
 
 	case TCP_SYN_SENT:
+		tp->rx_opt.saw_tstamp = 0;
 		queued = tcp_rcv_synsent_state_process(sk, skb, th);
 		if (queued >= 0)
 			return queued;
@@ -5849,6 +5848,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 		return 0;
 	}
 
+	tp->rx_opt.saw_tstamp = 0;
 	req = tp->fastopen_rsk;
 	if (req) {
 		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&