author    Eric Dumazet <edumazet@google.com>    2021-09-29 18:03:32 -0700
committer David S. Miller <davem@davemloft.net> 2021-09-30 14:17:10 +0100
commit    59f09ae8fac4a990070fc6bdc889d0e0118664ea (patch)
tree      b2a2ae1c36876b2ef1aea66848d36b9b51cc41f2 /include
parent    dee3b2d0fa4b51a079f7d12159b42240f795bf64 (diff)
net: snmp: inline snmp_get_cpu_field()
This trivial function is called ~90,000 times on 256-cpu hosts
when reading /proc/net/netstat, and that number keeps growing.

Inlining it saves many cycles.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
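
For context, the call count comes from folding: each read of /proc/net/netstat
sums every exported counter over every possible CPU, so the accessor runs
(counters * CPUs) times per read. A minimal sketch of that folding loop,
modeled on the kernel's snmp_fold_field() (the in-tree body may differ in
detail):

```c
/*
 * Sketch, not verbatim kernel source: sum one per-cpu counter across
 * all possible CPUs.  snmp_get_cpu_field() is invoked once per CPU per
 * counter, which is where the ~90,000 calls per read come from.
 */
unsigned long snmp_fold_field(void __percpu *mib, int offt)
{
	unsigned long res = 0;
	int i;

	for_each_possible_cpu(i)
		res += snmp_get_cpu_field(mib, i, offt);
	return res;
}
```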
Diffstat (limited to 'include')
-rw-r--r--   include/net/ip.h   6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/include/net/ip.h b/include/net/ip.h
index 9192444f2964..cf229a531194 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -291,7 +291,11 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
 #define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
 #define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
 
-u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
+static inline u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offt)
+{
+	return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt);
+}
+
 unsigned long snmp_fold_field(void __percpu *mib, int offt);
 #if BITS_PER_LONG==32
 u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
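
To make the cost concrete, here is a self-contained userspace model of the
same access pattern (an editor's sketch, not kernel code); NR_FIELDS is a
hypothetical value chosen only so that counters * CPUs lands near the
~90,000 figure in the commit message:

```c
#include <stdio.h>

#define NR_CPUS   256 /* host size cited in the commit message */
#define NR_FIELDS 350 /* hypothetical: ~90,000 calls / 256 CPUs */

/* stand-in for the kernel's per-cpu MIB storage */
static unsigned long mib[NR_CPUS][NR_FIELDS];

/* the accessor the patch turns into a header inline: pointer math plus a load */
static inline unsigned long get_cpu_field(int cpu, int offt)
{
	return mib[cpu][offt];
}

int main(void)
{
	unsigned long sum = 0, calls = 0;

	/* one /proc/net/netstat read: fold every counter over every CPU */
	for (int field = 0; field < NR_FIELDS; field++)
		for (int cpu = 0; cpu < NR_CPUS; cpu++) {
			sum += get_cpu_field(cpu, field);
			calls++;
		}

	printf("%lu accessor calls per read (sum=%lu)\n", calls, sum);
	return 0;
}
```

With the accessor out of line, each of those iterations pays a function-call
round trip; as a static inline in ip.h it compiles down to the pointer
arithmetic and load itself, which is the cycle saving the message refers to.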