Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug     1
-rw-r--r--  lib/Kconfig.kasan     2
-rw-r--r--  lib/maple_tree.c     13
-rw-r--r--  lib/percpu_counter.c 29
4 files changed, 34 insertions(+), 11 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 9dc5066654fd..1b2bdc02abf4 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2260,6 +2260,7 @@ config TEST_XARRAY
tristate "Test the XArray code at runtime"
config TEST_MAPLE_TREE
+ depends on DEBUG_KERNEL
select DEBUG_MAPLE_TREE
tristate "Test the Maple Tree code at runtime"
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 836f70393e22..be6ee6020290 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -181,7 +181,7 @@ config KASAN_VMALLOC
 
 config KASAN_KUNIT_TEST
 	tristate "KUnit-compatible tests of KASAN bug detection capabilities" if !KUNIT_ALL_TESTS
-	depends on KASAN && KUNIT
+	depends on KASAN && KUNIT && TRACEPOINTS
 	default KUNIT_ALL_TESTS
 	help
 	  A KUnit-based KASAN test suite. Triggers different kinds of
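For context: KASAN_KUNIT_TEST builds on the KUnit framework, and the new TRACEPOINTS dependency presumably reflects the suite detecting KASAN reports via tracepoints rather than the older report hooks. A minimal sketch of a KUnit suite in the same mold; all example_* names are hypothetical, this is not the actual KASAN test code in mm/kasan/:

/* A skeletal KUnit test module. */
#include <kunit/test.h>
#include <linux/module.h>
#include <linux/slab.h>

static void example_alloc_test(struct kunit *test)
{
	/* Test-managed allocation, freed automatically when the test ends. */
	char *p = kunit_kmalloc(test, 8, GFP_KERNEL);

	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p);
	p[0] = 'x';	/* in bounds; writing p[8] is what KASAN would catch */
	KUNIT_EXPECT_EQ(test, p[0], 'x');
}

static struct kunit_case example_test_cases[] = {
	KUNIT_CASE(example_alloc_test),
	{}
};

static struct kunit_suite example_test_suite = {
	.name = "example",
	.test_cases = example_test_cases,
};
kunit_test_suite(example_test_suite);

MODULE_LICENSE("GPL");

Built as a module, such a suite is executed by the KUnit runner when the module loads and reports pass/fail through the KUnit framework.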
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index df352f6ccc24..3fe1491d2bf9 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -323,14 +323,19 @@ static inline void *mte_safe_root(const struct maple_enode *node)
 	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
 }
 
-static inline void mte_set_full(const struct maple_enode *node)
+static inline void *mte_set_full(const struct maple_enode *node)
 {
-	node = (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
+	return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
 }
 
-static inline void mte_clear_full(const struct maple_enode *node)
+static inline void *mte_clear_full(const struct maple_enode *node)
 {
-	node = (void *)((unsigned long)node | MAPLE_ENODE_NULL);
+	return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
+}
+
+static inline bool mte_has_null(const struct maple_enode *node)
+{
+	return (unsigned long)node & MAPLE_ENODE_NULL;
 }
 
 static inline bool ma_is_root(struct maple_node *node)
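The hunk above fixes two helpers that previously assigned the retagged value to their own (const-qualified) parameter, a no-op for every caller; they now return the new value, and an mte_has_null() predicate is added. The underlying technique is ordinary low-bit pointer tagging: maple nodes are aligned, so the MAPLE_ENODE_NULL bit of an encoded node pointer is free for metadata. A standalone userspace sketch of the same idea; all names here are illustrative stand-ins, not the kernel's:

/* Low-bit pointer tagging, as in the mte_*() helpers above. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#define NODE_NULL_FLAG 0x1UL	/* stand-in for MAPLE_ENODE_NULL */

/* Clear the flag: the node is "full" (carries no NULL entries). */
static inline void *tag_set_full(const void *node)
{
	return (void *)((uintptr_t)node & ~NODE_NULL_FLAG);
}

/* Set the flag: the node may contain NULL entries. */
static inline void *tag_clear_full(const void *node)
{
	return (void *)((uintptr_t)node | NODE_NULL_FLAG);
}

static inline bool tag_has_null(const void *node)
{
	return (uintptr_t)node & NODE_NULL_FLAG;
}

int main(void)
{
	/* Alignment guarantees the low bits of the pointer are zero. */
	void *node = aligned_alloc(64, 64);
	void *tagged = tag_clear_full(node);

	assert(tag_has_null(tagged));
	assert(!tag_has_null(tag_set_full(tagged)));

	free(tag_set_full(tagged));	/* strip the tag before freeing */
	return 0;
}

Note that returning the tagged value, as the fixed helpers do, is the only way such an inline can have any effect: mutating the parameter, as the removed lines did, changes a local copy and nothing else.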
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index ed610b75dc32..42f729c8e56c 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -117,11 +117,8 @@ void percpu_counter_sync(struct percpu_counter *fbc)
 }
 EXPORT_SYMBOL(percpu_counter_sync);
 
-/*
- * Add up all the per-cpu counts, return the result. This is a more accurate
- * but much slower version of percpu_counter_read_positive()
- */
-s64 __percpu_counter_sum(struct percpu_counter *fbc)
+static s64 __percpu_counter_sum_mask(struct percpu_counter *fbc,
+				     const struct cpumask *cpu_mask)
 {
 	s64 ret;
 	int cpu;
@@ -129,15 +126,35 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&fbc->lock, flags);
 	ret = fbc->count;
-	for_each_online_cpu(cpu) {
+	for_each_cpu(cpu, cpu_mask) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
 	}
 	raw_spin_unlock_irqrestore(&fbc->lock, flags);
 	return ret;
 }
+
+/*
+ * Add up all the per-cpu counts, return the result. This is a more accurate
+ * but much slower version of percpu_counter_read_positive()
+ */
+s64 __percpu_counter_sum(struct percpu_counter *fbc)
+{
+	return __percpu_counter_sum_mask(fbc, cpu_online_mask);
+}
 EXPORT_SYMBOL(__percpu_counter_sum);
+
+/*
+ * This is a slower version of percpu_counter_sum(), as it traverses all
+ * possible cpus. Use it only where accurate data is needed in the presence
+ * of CPUs getting offlined.
+ */
+s64 percpu_counter_sum_all(struct percpu_counter *fbc)
+{
+	return __percpu_counter_sum_mask(fbc, cpu_possible_mask);
+}
+EXPORT_SYMBOL(percpu_counter_sum_all);
+
 int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
 			  struct lock_class_key *key)
 {
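Taken together, the refactor funnels both summations through one helper that walks an arbitrary cpumask: __percpu_counter_sum() walks the online mask as before, while the new percpu_counter_sum_all() walks the possible mask so deltas parked on CPUs being offlined are not missed. A standalone userspace sketch of the scheme; "pc", NSLOTS and the boolean masks are hypothetical stand-ins for struct percpu_counter and the kernel's cpumasks, and no locking is shown:

/* Per-CPU counter summation over a mask of slots. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NSLOTS 4	/* stand-in for the number of possible CPUs */

struct pc {
	int64_t count;		/* consolidated total (fbc->count) */
	int32_t pcpu[NSLOTS];	/* per-slot deltas (fbc->counters) */
};

/* Walk one mask of slots, like __percpu_counter_sum_mask(). */
static int64_t pc_sum_mask(const struct pc *pc, const bool mask[NSLOTS])
{
	int64_t ret = pc->count;

	for (int i = 0; i < NSLOTS; i++)
		if (mask[i])
			ret += pc->pcpu[i];
	return ret;
}

int main(void)
{
	struct pc pc = { .count = 100, .pcpu = { 3, -1, 0, 7 } };
	bool online[NSLOTS]   = { true, true, true, false };	/* slot 3 "offline" */
	bool possible[NSLOTS] = { true, true, true, true };

	/* percpu_counter_sum() vs percpu_counter_sum_all(), respectively. */
	printf("online sum:   %lld\n", (long long)pc_sum_mask(&pc, online));
	printf("possible sum: %lld\n", (long long)pc_sum_mask(&pc, possible));
	return 0;
}

In the kernel file itself a CPU-hotplug callback folds a dead CPU's delta into fbc->count, so the online walk is normally accurate; per the comment above, percpu_counter_sum_all() covers the window where accurate data is needed while a CPU is still going offline.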