author	Linus Torvalds <torvalds@linux-foundation.org>	2015-05-29 16:45:45 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-05-29 16:45:45 -0700
commit	1be44e234b672eadbf1d96eb172ef21f5ff6a2c9 (patch)
tree	99bb11c447d5c1e8159f9ec9774c1fbdbf43152e /lib
parent	2a6451718627eb60e85691053cb9820ae7ed3913 (diff)
parent	22419ac9fe5e79483596cebdbd1d1209c18bac1a (diff)
download	linux-1be44e234b672eadbf1d96eb172ef21f5ff6a2c9.tar.bz2
Merge tag 'xfs-for-linus-4.1-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/dgc/linux-xfs
Pull xfs fixes from Dave Chinner:
 "This is a little larger than I'd like late in the release cycle, but
  all the fixes are for regressions introduced in the 4.1-rc1 merge, or
  are needed back in -stable kernels fairly quickly as they are
  filesystem corruption or userspace visible correctness issues.

  Changes in this update:

   - regression fix for new rename whiteout code

   - regression fixes for new superblock generic per-cpu counter code

   - fix for incorrect error return sign introduced in 3.17

   - metadata corruption fixes that need to go back to -stable kernels"

* tag 'xfs-for-linus-4.1-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/dgc/linux-xfs:
  xfs: fix broken i_nlink accounting for whiteout tmpfile inode
  xfs: xfs_iozero can return positive errno
  xfs: xfs_attr_inactive leaves inconsistent attr fork state behind
  xfs: extent size hints can round up extents past MAXEXTLEN
  xfs: inode and free block counters need to use __percpu_counter_compare
  percpu_counter: batch size aware __percpu_counter_compare()
  xfs: use percpu_counter_read_positive for mp->m_icount
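The fix named "percpu_counter: batch size aware __percpu_counter_compare()" in the shortlog above is the one touching lib/: a counter that is updated with a large batch can lag its precise value by up to batch * num_online_cpus(), so a comparison against a limit has to use that same batch or the fast-path comparison can give the wrong answer near the limit. A minimal caller sketch under assumed names (MY_LARGE_BATCH and reserve_blocks() are illustrative only, not part of this merge):

#include <linux/errno.h>
#include <linux/percpu_counter.h>

/* Hypothetical batch size: a caller that updates the counter with a large
 * batch must pass the same value when comparing near a limit. */
#define MY_LARGE_BATCH	1024

/* Take 'nblocks' from a free-space counter, but only if at least 'reserve'
 * blocks would remain. */
static int reserve_blocks(struct percpu_counter *free_blocks, s64 nblocks,
			  s64 reserve)
{
	/* Batched update: per-cpu deltas may lag the global count by up to
	 * MY_LARGE_BATCH * num_online_cpus(). */
	__percpu_counter_add(free_blocks, -nblocks, MY_LARGE_BATCH);

	/* Compare with the same batch, so the cheap per-cpu read is trusted
	 * only when the counter is far from 'reserve'; otherwise the helper
	 * falls back to an exact percpu_counter_sum() internally. */
	if (__percpu_counter_compare(free_blocks, reserve, MY_LARGE_BATCH) >= 0)
		return 0;

	/* Not enough space: undo the speculative subtraction. */
	__percpu_counter_add(free_blocks, nblocks, MY_LARGE_BATCH);
	return -ENOSPC;
}

This mirrors the pattern the XFS free block counter fix relies on: the update and the comparison agree on the batch, so the rough-count shortcut cannot claim space that is not actually there.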
Diffstat (limited to 'lib')
-rw-r--r--	lib/percpu_counter.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 48144cdae819..f051d69f0910 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -197,13 +197,13 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
  * Compare counter against given value.
  * Return 1 if greater, 0 if equal and -1 if less
  */
-int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
+int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
 {
 	s64 count;
 
 	count = percpu_counter_read(fbc);
 	/* Check to see if rough count will be sufficient for comparison */
-	if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) {
+	if (abs(count - rhs) > (batch * num_online_cpus())) {
 		if (count > rhs)
 			return 1;
 		else
@@ -218,7 +218,7 @@ int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
 	else
 		return 0;
 }
-EXPORT_SYMBOL(percpu_counter_compare);
+EXPORT_SYMBOL(__percpu_counter_compare);
 
 static int __init percpu_counter_startup(void)
 {
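Only the lib/ side of the change is shown above because the diffstat is limited to 'lib'. The companion header change keeps existing percpu_counter_compare() callers working; roughly, the old name becomes a thin wrapper that forwards the default batch (a sketch, not the verbatim header diff):

/* include/linux/percpu_counter.h (sketch): the old entry point forwards the
 * global default batch size to the new batch-aware comparison. */
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}

Callers that update their counters with bigger batches, such as the XFS inode and free block counters fixed in this merge, call __percpu_counter_compare() directly with their own batch value.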