author		Eric Dumazet <edumazet@google.com>	2016-04-27 16:44:27 -0700
committer	David S. Miller <davem@davemloft.net>	2016-04-27 22:48:22 -0400
commit		6aef70a851ac77967992340faaff33f44598f60a (patch)
tree		fe2e05554f7901d9ff1349b403e11d1e95874fbc /include/net/tcp.h
parent		2995aea5b68b6850e76aadd95be777cb36949e62 (diff)
net: snmp: kill various STATS_USER() helpers
In the old days (before linux-3.0), SNMP counters were duplicated, one for user context, and one for BH context.

After commit 8f0ea0fe3a03 ("snmp: reduce percpu needs by 50%") we have a single copy, and what really matters is preemption being enabled or disabled, since we use this_cpu_inc() or __this_cpu_inc() respectively.

We therefore kill SNMP_INC_STATS_USER(), SNMP_ADD_STATS_USER(), NET_INC_STATS_USER(), NET_ADD_STATS_USER(), SCTP_INC_STATS_USER(), SNMP_INC_STATS64_USER(), SNMP_ADD_STATS64_USER(), TCP_ADD_STATS_USER(), UDP_INC_STATS_USER(), UDP6_INC_STATS_USER(), and XFRM_INC_STATS_USER().

Following patches will rename the __BH helpers to make clear their usage is not tied to BH being disabled.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
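For readers following along, the whole change hinges on which per-cpu primitive a helper expands to. Below is a minimal, simplified sketch of the two surviving flavours, assuming the usual per-cpu MIB array layout; it is not the literal include/net/snmp.h code, only an illustration of the this_cpu_inc()/__this_cpu_inc() split described above:

    /* Simplified sketch, not the exact include/net/snmp.h definitions. */
    #define SNMP_INC_STATS(mib, field)		\
    	this_cpu_inc((mib)->mibs[field])	/* preemption may be enabled */

    #define SNMP_INC_STATS_BH(mib, field)	\
    	__this_cpu_inc((mib)->mibs[field])	/* caller has preemption/BH disabled */

    /*
     * Since commit 8f0ea0fe3a03 merged the duplicated user/BH counters, the
     * *_USER() variants expanded to the same this_cpu_inc() as the plain
     * helpers, so removing them loses no information.
     */

In other words, TCP_ADD_STATS() in process context does exactly what TCP_ADD_STATS_USER() used to do, which is why tcp_mib_init() in the second hunk below can switch over without any functional change.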
Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--	include/net/tcp.h | 9
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 7f2553da10d1..cfe15f712164 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -334,7 +334,6 @@ extern struct proto tcp_prot;
 #define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field)
 #define TCP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
 #define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
-#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
 #define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
 
 void tcp_tasklet_init(void);
@@ -1298,10 +1297,10 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
 static inline void tcp_mib_init(struct net *net)
 {
 	/* See RFC 2012 */
-	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
-	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
-	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
-	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
+	TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
+	TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
+	TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
+	TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
 }
 
 /* from STCP */