author		Dave Chinner <dchinner@redhat.com>	2015-05-29 07:39:34 +1000
committer	Ben Hutchings <ben@decadent.org.uk>	2018-12-16 22:08:36 +0000
commit		5f2c8b3ceecd49d0a8a4b048e07e5f8c61e20be2
tree		8b8e2b4aafff43252b946deb7d0f32a953181ce9 /include
parent		faeb75a4ae227efccb09ec2cc673999460b0e176
percpu_counter: batch size aware __percpu_counter_compare()
commit 80188b0d77d7426b494af739ac129e0e684acb84 upstream.

XFS uses non-standard batch sizes to avoid frequent global counter updates on its allocated inode counters, as they increment or decrement in batches of 64 inodes. Hence the standard percpu counter batch of 32 means that the counter is effectively a global counter. Currently XFS uses a batch size of 128 so that it doesn't take the global lock on every single modification.

However, XFS also needs to compare accurately against zero, which means we need to use percpu_counter_compare(), and that has a hard-coded batch size of 32, and hence will spuriously fail to detect when it is supposed to use precise comparisons and hence the accounting goes wrong.

Add __percpu_counter_compare() to take a custom batch size so we can use it sanely in XFS, and factor percpu_counter_compare() to use it.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Dave Chinner <david@fromorbit.com>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
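For illustration, a minimal sketch of how a filesystem-style caller might pair a larger modification batch with the new comparison primitive. MY_BATCH and my_reserve_space() are hypothetical names invented for this sketch; this is not the actual XFS code, only the usage pattern the commit message describes.

#include <linux/errno.h>
#include <linux/percpu_counter.h>

#define MY_BATCH	128	/* hypothetical batch size, larger than the default percpu_counter_batch of 32 */

/* Reserve 'nr' units from a free-space style counter, failing if it would go negative. */
static int my_reserve_space(struct percpu_counter *counter, s64 nr)
{
	/* Apply the delta with the same batch size used for the comparison below. */
	__percpu_counter_add(counter, -nr, MY_BATCH);

	/*
	 * Compare against zero with a matching batch size, so the comparison
	 * falls back to a precise sum whenever the fast per-cpu estimate
	 * cannot rule out having gone below zero.
	 */
	if (__percpu_counter_compare(counter, 0, MY_BATCH) < 0) {
		/* Overdrawn: undo the reservation and report no space. */
		__percpu_counter_add(counter, nr, MY_BATCH);
		return -ENOSPC;
	}
	return 0;
}

The point is the one made in the commit message: the batch used for modifications and the batch used for the zero comparison must match, otherwise percpu_counter_compare()'s hard-coded batch of 32 can trust the per-cpu estimate while much larger per-cpu deltas are still outstanding.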
Diffstat (limited to 'include')
-rw-r--r--	include/linux/percpu_counter.h	13
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index d5dd4657c8d6..607de28af726 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -40,7 +40,12 @@ void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
-int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs);
+int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
+
+static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
+{
+ return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
+}

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
@@ -114,6 +119,12 @@ static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
return 0;
}

+static inline int
+__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
+{
+ return percpu_counter_compare(fbc, rhs);
+}
+
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{