author     Matthew Wilcox <willy@infradead.org>  2017-11-10 15:15:08 -0500
committer  Matthew Wilcox <willy@infradead.org>  2018-10-21 10:45:57 -0400
commit     58d6ea3085f2e53714810a513c61629f6d2be0a6 (patch)
tree       b4b73ea07cd720063fd3a2f8b063422c5a0e698a /tools/include
parent     9b89a0355144685a787b0dc5bcf7bdd6f2d02968 (diff)
download   linux-58d6ea3085f2e53714810a513c61629f6d2be0a6.tar.bz2
xarray: Add XArray unconditional store operations
xa_store() differs from radix_tree_insert() in that it will overwrite an existing element in the array rather than returning an error. This is the behaviour which most users want, and those that want more complex behaviour generally want to use the xas family of routines anyway.

For memory allocation, xa_store() will first attempt to request memory from the slab allocator; if memory is not immediately available, it will drop the xa_lock and allocate memory, keeping a pointer in the xa_state. It does not use the per-CPU cache, although those will continue to exist until all radix tree users are converted to the xarray.

This patch also includes xa_erase() and __xa_erase() for a streamlined way to store NULL. Since there is no need to allocate memory in order to store a NULL in the XArray, we do not need to trouble the user with deciding what memory allocation flags to use.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
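As a rough illustration of the semantics described above, a caller might use the new API like this. This is a minimal sketch, not part of this patch; the array name, index and item pointers are hypothetical.

#include <linux/xarray.h>

static DEFINE_XARRAY(example_array);	/* hypothetical array */

static int example_store(void *item, void *replacement)
{
	void *old;

	/* Unlike radix_tree_insert(), storing at an occupied index
	 * simply overwrites it; the previous entry is returned. */
	old = xa_store(&example_array, 5, item, GFP_KERNEL);
	if (xa_is_err(old))
		return xa_err(old);

	old = xa_store(&example_array, 5, replacement, GFP_KERNEL);
	/* old is now 'item' */

	/* Storing NULL never needs to allocate, so xa_erase() takes
	 * no GFP flags. */
	old = xa_erase(&example_array, 5);
	/* old is now 'replacement'; index 5 is empty */

	return 0;
}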
Diffstat (limited to 'tools/include')
-rw-r--r--  tools/include/linux/bitmap.h   | 1 +
-rw-r--r--  tools/include/linux/spinlock.h | 2 ++
2 files changed, 3 insertions(+), 0 deletions(-)
diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index e63662db131b..05dca5c203f3 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -15,6 +15,7 @@ void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
 		 const unsigned long *bitmap2, int bits);
 int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
 		 const unsigned long *bitmap2, unsigned int bits);
+void bitmap_clear(unsigned long *map, unsigned int start, int len);
 
 #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
diff --git a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h
index 622266b197d0..c934572d935c 100644
--- a/tools/include/linux/spinlock.h
+++ b/tools/include/linux/spinlock.h
@@ -37,4 +37,6 @@ static inline bool arch_spin_is_locked(arch_spinlock_t *mutex)
 	return true;
 }
 
+#include <linux/lockdep.h>
+
 #endif