author     Eric W. Biederman <ebiederm@xmission.com>    2014-03-13 21:26:42 -0700
committer  David S. Miller <davem@davemloft.net>        2014-03-14 22:41:36 -0400
commit     57a7744e09867ebcfa0ccf1d6d529caa7728d552 (patch)
tree       6407fee7138787a24bf9251abfeeae69a239028a /net/ipv4
parent     85dcce7a73f1cc59f7a96fe52713b1630f4ca272 (diff)
download   linux-57a7744e09867ebcfa0ccf1d6d529caa7728d552.tar.bz2
net: Replace u64_stats_fetch_begin_bh with u64_stats_fetch_begin_irq
Replace the bh safe variant with the hard irq safe variant.
We need a hard irq safe variant to deal with netpoll transmitting
packets from hard irq context, and we need it in most if not all of
the places using the bh safe variant.
Except on 32-bit uniprocessor kernels the code is exactly the same, so
don't bother with a bh variant; just have a hard irq safe variant that
everyone can use.
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
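For context, the reader pattern being converted looks like the sketch below.
It is a minimal, hypothetical helper (example_fold_packets and struct
example_pcpu_stats are not from this patch, only the u64_stats_* calls are
real kernel API): it folds a per-CPU 64-bit counter using the hard irq safe
fetch, so a writer that runs from hard irq context (e.g. netpoll transmit)
is never observed mid-update on 32-bit kernels.

#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical per-CPU stats block protected by a u64_stats_sync. */
struct example_pcpu_stats {
	u64 packets;
	struct u64_stats_sync syncp;
};

/* Sum the per-CPU counters; retry any CPU whose writer raced with us. */
static u64 example_fold_packets(struct example_pcpu_stats __percpu *stats)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct example_pcpu_stats *s = per_cpu_ptr(stats, cpu);
		unsigned int start;
		u64 packets;

		do {
			/* Consistent even against a writer in hard irq context. */
			start = u64_stats_fetch_begin_irq(&s->syncp);
			packets = s->packets;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));

		total += packets;
	}
	return total;
}

Except on 32-bit uniprocessor builds, where the irq variant disables
interrupts rather than just bottom halves, the generated code is identical
to the old _bh calls, which is why a single hard irq safe variant is enough.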
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/af_inet.c         | 4 ++--
-rw-r--r--  net/ipv4/ip_tunnel_core.c  | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 19ab78aca547..8c54870db792 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1505,9 +1505,9 @@ u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
 		bhptr = per_cpu_ptr(mib[0], cpu);
 		syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
 		do {
-			start = u64_stats_fetch_begin_bh(syncp);
+			start = u64_stats_fetch_begin_irq(syncp);
 			v = *(((u64 *) bhptr) + offt);
-		} while (u64_stats_fetch_retry_bh(syncp, start));
+		} while (u64_stats_fetch_retry_irq(syncp, start));

 		res += v;
 	}
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 6f847dd56dbc..b86f0a37fa7c 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -161,12 +161,12 @@ struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
 		unsigned int start;

 		do {
-			start = u64_stats_fetch_begin_bh(&tstats->syncp);
+			start = u64_stats_fetch_begin_irq(&tstats->syncp);
 			rx_packets = tstats->rx_packets;
 			tx_packets = tstats->tx_packets;
 			rx_bytes = tstats->rx_bytes;
 			tx_bytes = tstats->tx_bytes;
-		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));

 		tot->rx_packets += rx_packets;
 		tot->tx_packets += tx_packets;
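The writer side that motivates the change can be pictured as below. This is
a hedged sketch rather than code from this patch (example_start_xmit,
struct example_priv and struct example_tx_stats are hypothetical; the
u64_stats_update_* calls are real kernel API): a driver's transmit handler
bumps its per-CPU counters inside u64_stats_update_begin()/end(). Because
netpoll may invoke the transmit path from hard irq context, a reader paired
only with the _bh fetch could still observe a torn 64-bit value on 32-bit;
hence the _irq fetch variants used in the hunks above.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical per-CPU TX stats, one instance per possible CPU. */
struct example_tx_stats {
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
};

/* Hypothetical driver private data holding the per-CPU stats pointer. */
struct example_priv {
	struct example_tx_stats __percpu *stats;
};

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);
	struct example_tx_stats *stats = this_cpu_ptr(priv->stats);
	unsigned int len = skb->len;

	/* ... hand the skb to the hardware here ... */

	/* May run in hard irq context when invoked via netpoll. */
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += len;
	u64_stats_update_end(&stats->syncp);

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

Note that the patch only touches the fetch side; the update side keeps
using u64_stats_update_begin()/end() exactly as before.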