Diffstat (limited to 'lib')
-rw-r--r--	lib/Kconfig	3
-rw-r--r--	lib/Kconfig.debug	10
-rw-r--r--	lib/Makefile	4
-rw-r--r--	lib/chacha.c (renamed from lib/chacha20.c)	59
-rw-r--r--	lib/cordic.c	23
-rw-r--r--	lib/gcd.c	2
-rw-r--r--	lib/objagg.c	501
-rw-r--r--	lib/percpu-refcount.c	2
-rw-r--r--	lib/radix-tree.c	4
-rw-r--r--	lib/raid6/Makefile	15
-rw-r--r--	lib/rhashtable.c	8
-rw-r--r--	lib/test_bpf.c	14
-rw-r--r--	lib/test_debug_virtual.c	1
-rw-r--r--	lib/test_objagg.c	836
-rw-r--r--	lib/test_rhashtable.c	32
-rw-r--r--	lib/test_xarray.c	155
-rw-r--r--	lib/xarray.c	8
17 files changed, 1578 insertions, 99 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index a9965f4af4dd..7dbbcfe9cd90 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -624,3 +624,6 @@ config GENERIC_LIB_CMPDI2
 
 config GENERIC_LIB_UCMPDI2
 	bool
+
+config OBJAGG
+	tristate "objagg" if COMPILE_TEST
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1af29b8224fd..b3c91b9e32f8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1976,6 +1976,16 @@ config TEST_MEMCAT_P
 
 	  If unsure, say N.
 
+config TEST_OBJAGG
+	tristate "Perform selftest on object aggregation manager"
+	default n
+	depends on OBJAGG
+	help
+	  Enable this option to test object aggregation manager on boot
+	  (or module load).
+
+	  If unsure, say N.
+
 endif # RUNTIME_TESTING_MENU
 
 config MEMTEST
diff --git a/lib/Makefile b/lib/Makefile
index db06d1237898..e1b59da71418 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -20,7 +20,7 @@ KCOV_INSTRUMENT_dynamic_debug.o := n
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o timerqueue.o xarray.o \
 	 idr.o int_sqrt.o extable.o \
-	 sha1.o chacha20.o irq_regs.o argv_split.o \
+	 sha1.o chacha.o irq_regs.o argv_split.o \
 	 flex_proportions.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
 	 earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
@@ -75,6 +75,7 @@ obj-$(CONFIG_TEST_PARMAN) += test_parman.o
 obj-$(CONFIG_TEST_KMOD) += test_kmod.o
 obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
 obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
+obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -274,3 +275,4 @@ obj-$(CONFIG_GENERIC_LIB_LSHRDI3) += lshrdi3.o
 obj-$(CONFIG_GENERIC_LIB_MULDI3) += muldi3.o
 obj-$(CONFIG_GENERIC_LIB_CMPDI2) += cmpdi2.o
 obj-$(CONFIG_GENERIC_LIB_UCMPDI2) += ucmpdi2.o
+obj-$(CONFIG_OBJAGG) += objagg.o
diff --git a/lib/chacha20.c b/lib/chacha.c
index d907fec6a9ed..a46d2832dbab 100644
--- a/lib/chacha20.c
+++ b/lib/chacha.c
@@ -1,5 +1,5 @@
 /*
- * ChaCha20 256-bit cipher algorithm, RFC7539
+ * The "hash function" used as the core of the ChaCha stream cipher (RFC7539)
  *
  * Copyright (C) 2015 Martin Willi
  *
@@ -14,17 +14,16 @@
 #include <linux/bitops.h>
 #include <linux/cryptohash.h>
 #include <asm/unaligned.h>
-#include <crypto/chacha20.h>
+#include <crypto/chacha.h>
 
-void chacha20_block(u32 *state, u8 *stream)
+static void chacha_permute(u32 *x, int nrounds)
 {
-	u32 x[16];
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(x); i++)
-		x[i] = state[i];
+	/* whitelist the allowed round counts */
+	WARN_ON_ONCE(nrounds != 20 && nrounds != 12);
 
-	for (i = 0; i < 20; i += 2) {
+	for (i = 0; i < nrounds; i += 2) {
 		x[0] += x[4];   x[12] = rol32(x[12] ^ x[0], 16);
 		x[1] += x[5];   x[13] = rol32(x[13] ^ x[1], 16);
 		x[2] += x[6];   x[14] = rol32(x[14] ^ x[2], 16);
@@ -65,10 +64,54 @@ void chacha20_block(u32 *state, u8 *stream)
 		x[8] += x[13];  x[7] = rol32(x[7] ^ x[8], 7);
 		x[9] += x[14];  x[4] = rol32(x[4] ^ x[9], 7);
 	}
+}
+
+/**
+ * chacha_block - generate one keystream block and increment block counter
+ * @state: input state matrix (16 32-bit words)
+ * @stream: output keystream block (64 bytes)
+ * @nrounds: number of rounds (20 or 12; 20 is recommended)
+ *
+ * This is the ChaCha core, a function from 64-byte strings to 64-byte strings.
+ * The caller has already converted the endianness of the input. This function
+ * also handles incrementing the block counter in the input matrix.
+ */
+void chacha_block(u32 *state, u8 *stream, int nrounds)
+{
+	u32 x[16];
+	int i;
+
+	memcpy(x, state, 64);
+
+	chacha_permute(x, nrounds);
 
 	for (i = 0; i < ARRAY_SIZE(x); i++)
 		put_unaligned_le32(x[i] + state[i], &stream[i * sizeof(u32)]);
 
 	state[12]++;
 }
-EXPORT_SYMBOL(chacha20_block);
+EXPORT_SYMBOL(chacha_block);
+
+/**
+ * hchacha_block - abbreviated ChaCha core, for XChaCha
+ * @in: input state matrix (16 32-bit words)
+ * @out: output (8 32-bit words)
+ * @nrounds: number of rounds (20 or 12; 20 is recommended)
+ *
+ * HChaCha is the ChaCha equivalent of HSalsa and is an intermediate step
+ * towards XChaCha (see https://cr.yp.to/snuffle/xsalsa-20081128.pdf). HChaCha
+ * skips the final addition of the initial state, and outputs only certain
+ * words of the state. It should not be used for streaming directly.
+ */
+void hchacha_block(const u32 *in, u32 *out, int nrounds)
+{
+	u32 x[16];
+
+	memcpy(x, in, 64);
+
+	chacha_permute(x, nrounds);
+
+	memcpy(&out[0], &x[0], 16);
+	memcpy(&out[4], &x[12], 16);
+}
+EXPORT_SYMBOL(hchacha_block);
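Note: the refactored core is round-count-agnostic; chacha_block() emits one 64-byte keystream block and bumps the block counter in state[12]. A minimal caller sketch (not part of this patch; it assumes the RFC 7539 state layout and the CHACHA_BLOCK_SIZE constant from <crypto/chacha.h>):

	/* Hypothetical helper: XOR one 64-byte buffer with keystream.
	 * Assumes state[] was initialized per RFC 7539: the four
	 * "expand 32-byte k" constant words, the 256-bit key in words
	 * 4..11, the block counter in word 12, the nonce in words 13..15.
	 */
	static void xor_one_block(u32 state[16], u8 buf[CHACHA_BLOCK_SIZE])
	{
		u8 stream[CHACHA_BLOCK_SIZE];
		int i;

		chacha_block(state, stream, 20); /* increments state[12] */
		for (i = 0; i < CHACHA_BLOCK_SIZE; i++)
			buf[i] ^= stream[i];
	}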
diff --git a/lib/cordic.c b/lib/cordic.c
index 6cf477839ebd..8ef27c12956f 100644
--- a/lib/cordic.c
+++ b/lib/cordic.c
@@ -16,15 +16,6 @@
 #include <linux/module.h>
 #include <linux/cordic.h>
 
-#define CORDIC_ANGLE_GEN	39797
-#define CORDIC_PRECISION_SHIFT	16
-#define CORDIC_NUM_ITER		(CORDIC_PRECISION_SHIFT + 2)
-
-#define FIXED(X)	((s32)((X) << CORDIC_PRECISION_SHIFT))
-#define FLOAT(X)	(((X) >= 0) \
-		? ((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \
-		: -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1))
-
 static const s32 arctan_table[] = {
 	2949120,
 	1740967,
@@ -64,16 +55,16 @@ struct cordic_iq cordic_calc_iq(s32 theta)
 	coord.q = 0;
 	angle = 0;
 
-	theta = FIXED(theta);
+	theta = CORDIC_FIXED(theta);
 	signtheta = (theta < 0) ? -1 : 1;
-	theta = ((theta + FIXED(180) * signtheta) % FIXED(360)) -
-		FIXED(180) * signtheta;
+	theta = ((theta + CORDIC_FIXED(180) * signtheta) % CORDIC_FIXED(360)) -
+		CORDIC_FIXED(180) * signtheta;
 
-	if (FLOAT(theta) > 90) {
-		theta -= FIXED(180);
+	if (CORDIC_FLOAT(theta) > 90) {
+		theta -= CORDIC_FIXED(180);
 		signx = -1;
-	} else if (FLOAT(theta) < -90) {
-		theta += FIXED(180);
+	} else if (CORDIC_FLOAT(theta) < -90) {
+		theta += CORDIC_FIXED(180);
 		signx = -1;
 	}
diff --git a/lib/gcd.c b/lib/gcd.c
index 227dea924425..7948ab27f0a4 100644
--- a/lib/gcd.c
+++ b/lib/gcd.c
@@ -10,7 +10,7 @@
  * has decent hardware division.
  */
 
-#if !defined(CONFIG_CPU_NO_EFFICIENT_FFS) && !defined(CPU_NO_EFFICIENT_FFS)
+#if !defined(CONFIG_CPU_NO_EFFICIENT_FFS)
 
 /* If __ffs is available, the even/odd algorithm benchmarks slower. */
 
diff --git a/lib/objagg.c b/lib/objagg.c
new file mode 100644
index 000000000000..c9b457a91153
--- /dev/null
+++ b/lib/objagg.c
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/rhashtable.h>
+#include <linux/list.h>
+#include <linux/sort.h>
+#include <linux/objagg.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/objagg.h>
+
+struct objagg {
+	const struct objagg_ops *ops;
+	void *priv;
+	struct rhashtable obj_ht;
+	struct rhashtable_params ht_params;
+	struct list_head obj_list;
+	unsigned int obj_count;
+};
+
+struct objagg_obj {
+	struct rhash_head ht_node; /* member of objagg->obj_ht */
+	struct list_head list; /* member of objagg->obj_list */
+	struct objagg_obj *parent; /* if the object is nested, this
+				    * holds pointer to parent, otherwise NULL
+				    */
+	union {
+		void *delta_priv; /* user delta private */
+		void *root_priv; /* user root private */
+	};
+	unsigned int refcount; /* counts number of users of this object
+				* including nested objects
+				*/
+	struct objagg_obj_stats stats;
+	unsigned long obj[0];
+};
+
+static unsigned int objagg_obj_ref_inc(struct objagg_obj *objagg_obj)
+{
+	return ++objagg_obj->refcount;
+}
+
+static unsigned int objagg_obj_ref_dec(struct objagg_obj *objagg_obj)
+{
+	return --objagg_obj->refcount;
+}
+
+static void objagg_obj_stats_inc(struct objagg_obj *objagg_obj)
+{
+	objagg_obj->stats.user_count++;
+	objagg_obj->stats.delta_user_count++;
+	if (objagg_obj->parent)
+		objagg_obj->parent->stats.delta_user_count++;
+}
+
+static void objagg_obj_stats_dec(struct objagg_obj *objagg_obj)
+{
+	objagg_obj->stats.user_count--;
+	objagg_obj->stats.delta_user_count--;
+	if (objagg_obj->parent)
+		objagg_obj->parent->stats.delta_user_count--;
+}
+
+static bool objagg_obj_is_root(const struct objagg_obj *objagg_obj)
+{
+	/* Nesting is not supported, so we can use ->parent
+	 * to figure out if the object is root.
+	 */
+	return !objagg_obj->parent;
+}
+
+/**
+ * objagg_obj_root_priv - obtains root private for an object
+ * @objagg_obj: objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Either the object is a root itself, in which case its private
+ * is returned directly, or the parent is the root and its private
+ * is returned instead.
+ *
+ * Returns a user private root pointer.
+ */
+const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj)
+{
+	if (objagg_obj_is_root(objagg_obj))
+		return objagg_obj->root_priv;
+	WARN_ON(!objagg_obj_is_root(objagg_obj->parent));
+	return objagg_obj->parent->root_priv;
+}
+EXPORT_SYMBOL(objagg_obj_root_priv);
+
+/**
+ * objagg_obj_delta_priv - obtains delta private for an object
+ * @objagg_obj: objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Returns user private delta pointer or NULL in case the passed
+ * object is root.
+ */
+const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj)
+{
+	if (objagg_obj_is_root(objagg_obj))
+		return NULL;
+	return objagg_obj->delta_priv;
+}
+EXPORT_SYMBOL(objagg_obj_delta_priv);
+
+/**
+ * objagg_obj_raw - obtains object user private pointer
+ * @objagg_obj: objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Returns the user private pointer as it was passed to objagg_obj_get()
+ * via the "obj" arg.
+ */ +const void *objagg_obj_raw(const struct objagg_obj *objagg_obj) +{ + return objagg_obj->obj; +} +EXPORT_SYMBOL(objagg_obj_raw); + +static struct objagg_obj *objagg_obj_lookup(struct objagg *objagg, void *obj) +{ + return rhashtable_lookup_fast(&objagg->obj_ht, obj, objagg->ht_params); +} + +static int objagg_obj_parent_assign(struct objagg *objagg, + struct objagg_obj *objagg_obj, + struct objagg_obj *parent) +{ + void *delta_priv; + + delta_priv = objagg->ops->delta_create(objagg->priv, parent->obj, + objagg_obj->obj); + if (IS_ERR(delta_priv)) + return PTR_ERR(delta_priv); + + /* User returned a delta private, that means that + * our object can be aggregated into the parent. + */ + objagg_obj->parent = parent; + objagg_obj->delta_priv = delta_priv; + objagg_obj_ref_inc(objagg_obj->parent); + trace_objagg_obj_parent_assign(objagg, objagg_obj, + parent, + parent->refcount); + return 0; +} + +static int objagg_obj_parent_lookup_assign(struct objagg *objagg, + struct objagg_obj *objagg_obj) +{ + struct objagg_obj *objagg_obj_cur; + int err; + + list_for_each_entry(objagg_obj_cur, &objagg->obj_list, list) { + /* Nesting is not supported. In case the object + * is not root, it cannot be assigned as parent. + */ + if (!objagg_obj_is_root(objagg_obj_cur)) + continue; + err = objagg_obj_parent_assign(objagg, objagg_obj, + objagg_obj_cur); + if (!err) + return 0; + } + return -ENOENT; +} + +static void __objagg_obj_put(struct objagg *objagg, + struct objagg_obj *objagg_obj); + +static void objagg_obj_parent_unassign(struct objagg *objagg, + struct objagg_obj *objagg_obj) +{ + trace_objagg_obj_parent_unassign(objagg, objagg_obj, + objagg_obj->parent, + objagg_obj->parent->refcount); + objagg->ops->delta_destroy(objagg->priv, objagg_obj->delta_priv); + __objagg_obj_put(objagg, objagg_obj->parent); +} + +static int objagg_obj_root_create(struct objagg *objagg, + struct objagg_obj *objagg_obj) +{ + objagg_obj->root_priv = objagg->ops->root_create(objagg->priv, + objagg_obj->obj); + if (IS_ERR(objagg_obj->root_priv)) + return PTR_ERR(objagg_obj->root_priv); + + trace_objagg_obj_root_create(objagg, objagg_obj); + return 0; +} + +static void objagg_obj_root_destroy(struct objagg *objagg, + struct objagg_obj *objagg_obj) +{ + trace_objagg_obj_root_destroy(objagg, objagg_obj); + objagg->ops->root_destroy(objagg->priv, objagg_obj->root_priv); +} + +static int objagg_obj_init(struct objagg *objagg, + struct objagg_obj *objagg_obj) +{ + int err; + + /* Try to find if the object can be aggregated under an existing one. */ + err = objagg_obj_parent_lookup_assign(objagg, objagg_obj); + if (!err) + return 0; + /* If aggregation is not possible, make the object a root. 
*/
+	return objagg_obj_root_create(objagg, objagg_obj);
+}
+
+static void objagg_obj_fini(struct objagg *objagg,
+			    struct objagg_obj *objagg_obj)
+{
+	if (!objagg_obj_is_root(objagg_obj))
+		objagg_obj_parent_unassign(objagg, objagg_obj);
+	else
+		objagg_obj_root_destroy(objagg, objagg_obj);
+}
+
+static struct objagg_obj *objagg_obj_create(struct objagg *objagg, void *obj)
+{
+	struct objagg_obj *objagg_obj;
+	int err;
+
+	objagg_obj = kzalloc(sizeof(*objagg_obj) + objagg->ops->obj_size,
+			     GFP_KERNEL);
+	if (!objagg_obj)
+		return ERR_PTR(-ENOMEM);
+	objagg_obj_ref_inc(objagg_obj);
+	memcpy(objagg_obj->obj, obj, objagg->ops->obj_size);
+
+	err = objagg_obj_init(objagg, objagg_obj);
+	if (err)
+		goto err_obj_init;
+
+	err = rhashtable_insert_fast(&objagg->obj_ht, &objagg_obj->ht_node,
+				     objagg->ht_params);
+	if (err)
+		goto err_ht_insert;
+	list_add(&objagg_obj->list, &objagg->obj_list);
+	objagg->obj_count++;
+	trace_objagg_obj_create(objagg, objagg_obj);
+
+	return objagg_obj;
+
+err_ht_insert:
+	objagg_obj_fini(objagg, objagg_obj);
+err_obj_init:
+	kfree(objagg_obj);
+	return ERR_PTR(err);
+}
+
+static struct objagg_obj *__objagg_obj_get(struct objagg *objagg, void *obj)
+{
+	struct objagg_obj *objagg_obj;
+
+	/* First, try to find the object exactly as user passed it,
+	 * perhaps it is already in use.
+	 */
+	objagg_obj = objagg_obj_lookup(objagg, obj);
+	if (objagg_obj) {
+		objagg_obj_ref_inc(objagg_obj);
+		return objagg_obj;
+	}
+
+	return objagg_obj_create(objagg, obj);
+}
+
+/**
+ * objagg_obj_get - gets an object within objagg instance
+ * @objagg: objagg instance
+ * @obj: user-specific private object pointer
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Size of the "obj" memory is specified in "objagg->ops".
+ *
+ * There are 3 main options this function wraps:
+ * 1) The object according to "obj" already exists. In that case
+ *    the reference counter is incremented and the object is returned.
+ * 2) The object does not exist, but it can be aggregated within
+ *    another object. In that case, user ops->delta_create() is called
+ *    to obtain delta data and a new object is created with returned
+ *    user-delta private pointer.
+ * 3) The object does not exist and cannot be aggregated into
+ *    any of the existing objects. In that case, user ops->root_create()
+ *    is called to create the root and a new object is created with
+ *    returned user-root private pointer.
+ *
+ * Returns a pointer to objagg object instance in case of success,
+ * otherwise it returns pointer error using ERR_PTR macro.
+ */
+struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj)
+{
+	struct objagg_obj *objagg_obj;
+
+	objagg_obj = __objagg_obj_get(objagg, obj);
+	if (IS_ERR(objagg_obj))
+		return objagg_obj;
+	objagg_obj_stats_inc(objagg_obj);
+	trace_objagg_obj_get(objagg, objagg_obj, objagg_obj->refcount);
+	return objagg_obj;
+}
+EXPORT_SYMBOL(objagg_obj_get);
+
+static void objagg_obj_destroy(struct objagg *objagg,
+			       struct objagg_obj *objagg_obj)
+{
+	trace_objagg_obj_destroy(objagg, objagg_obj);
+	--objagg->obj_count;
+	list_del(&objagg_obj->list);
+	rhashtable_remove_fast(&objagg->obj_ht, &objagg_obj->ht_node,
+			       objagg->ht_params);
+	objagg_obj_fini(objagg, objagg_obj);
+	kfree(objagg_obj);
+}
+
+static void __objagg_obj_put(struct objagg *objagg,
+			     struct objagg_obj *objagg_obj)
+{
+	if (!objagg_obj_ref_dec(objagg_obj))
+		objagg_obj_destroy(objagg, objagg_obj);
+}
+
+/**
+ * objagg_obj_put - puts an object within objagg instance
+ * @objagg: objagg instance
+ * @objagg_obj: objagg object instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * Symmetric to objagg_obj_get().
+ */
+void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj)
+{
+	trace_objagg_obj_put(objagg, objagg_obj, objagg_obj->refcount);
+	objagg_obj_stats_dec(objagg_obj);
+	__objagg_obj_put(objagg, objagg_obj);
+}
+EXPORT_SYMBOL(objagg_obj_put);
+
+/**
+ * objagg_create - creates a new objagg instance
+ * @ops: user-specific callbacks
+ * @priv: pointer to private data passed to the ops
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * The purpose of the library is to provide an infrastructure to
+ * aggregate user-specified objects. The library does not care about
+ * the type of the object. The user fills up ops, which take care of
+ * the specific user object manipulation.
+ *
+ * As a trivial example, consider integer numbers. For example
+ * number 8 as a root object. That can aggregate number 9 with delta 1,
+ * number 10 with delta 2, etc. This example is implemented as
+ * a part of a testing module in test_objagg.c file.
+ *
+ * Each objagg instance contains multiple trees. Each tree node is
+ * represented by "an object". In the current implementation there can be
+ * only root and leaf nodes. Leaf nodes are called deltas.
+ * But in general, this can be easily extended for intermediate nodes.
+ * In that extension, a delta would be associated with all non-root
+ * nodes.
+ *
+ * Returns a pointer to newly created objagg instance in case of success,
+ * otherwise it returns pointer error using ERR_PTR macro.
+ */
+struct objagg *objagg_create(const struct objagg_ops *ops, void *priv)
+{
+	struct objagg *objagg;
+	int err;
+
+	if (WARN_ON(!ops || !ops->root_create || !ops->root_destroy ||
+		    !ops->delta_create || !ops->delta_destroy))
+		return ERR_PTR(-EINVAL);
+	objagg = kzalloc(sizeof(*objagg), GFP_KERNEL);
+	if (!objagg)
+		return ERR_PTR(-ENOMEM);
+	objagg->ops = ops;
+	objagg->priv = priv;
+	INIT_LIST_HEAD(&objagg->obj_list);
+
+	objagg->ht_params.key_len = ops->obj_size;
+	objagg->ht_params.key_offset = offsetof(struct objagg_obj, obj);
+	objagg->ht_params.head_offset = offsetof(struct objagg_obj, ht_node);
+
+	err = rhashtable_init(&objagg->obj_ht, &objagg->ht_params);
+	if (err)
+		goto err_rhashtable_init;
+
+	trace_objagg_create(objagg);
+	return objagg;
+
+err_rhashtable_init:
+	kfree(objagg);
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL(objagg_create);
+
+/**
+ * objagg_destroy - destroys an objagg instance
+ * @objagg: objagg instance
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void objagg_destroy(struct objagg *objagg)
+{
+	trace_objagg_destroy(objagg);
+	WARN_ON(!list_empty(&objagg->obj_list));
+	rhashtable_destroy(&objagg->obj_ht);
+	kfree(objagg);
+}
+EXPORT_SYMBOL(objagg_destroy);
+
+static int objagg_stats_info_sort_cmp_func(const void *a, const void *b)
+{
+	const struct objagg_obj_stats_info *stats_info1 = a;
+	const struct objagg_obj_stats_info *stats_info2 = b;
+
+	if (stats_info1->is_root != stats_info2->is_root)
+		return stats_info2->is_root - stats_info1->is_root;
+	if (stats_info1->stats.delta_user_count !=
+	    stats_info2->stats.delta_user_count)
+		return stats_info2->stats.delta_user_count -
+		       stats_info1->stats.delta_user_count;
+	return stats_info2->stats.user_count - stats_info1->stats.user_count;
+}
+
+/**
+ * objagg_stats_get - obtains stats of the objagg instance
+ * @objagg: objagg instance
+ *
+ * Note: all locking must be provided by the caller.
+ *
+ * The returned structure contains statistics of all objects
+ * currently in use, ordered by the following rules:
+ * 1) Root objects are always on lower indexes than the rest.
+ * 2) Objects with higher delta user count are always on lower
+ *    indexes.
+ * 3) In case more objects have the same delta user count,
+ *    the objects are ordered by user count.
+ *
+ * Returns a pointer to stats instance in case of success,
+ * otherwise it returns pointer error using ERR_PTR macro.
+ */
+const struct objagg_stats *objagg_stats_get(struct objagg *objagg)
+{
+	struct objagg_stats *objagg_stats;
+	struct objagg_obj *objagg_obj;
+	size_t alloc_size;
+	int i;
+
+	alloc_size = sizeof(*objagg_stats) +
+		     sizeof(objagg_stats->stats_info[0]) * objagg->obj_count;
+	objagg_stats = kzalloc(alloc_size, GFP_KERNEL);
+	if (!objagg_stats)
+		return ERR_PTR(-ENOMEM);
+
+	i = 0;
+	list_for_each_entry(objagg_obj, &objagg->obj_list, list) {
+		memcpy(&objagg_stats->stats_info[i].stats, &objagg_obj->stats,
+		       sizeof(objagg_stats->stats_info[0].stats));
+		objagg_stats->stats_info[i].objagg_obj = objagg_obj;
+		objagg_stats->stats_info[i].is_root =
+					objagg_obj_is_root(objagg_obj);
+		i++;
+	}
+	objagg_stats->stats_info_count = i;
+
+	sort(objagg_stats->stats_info, objagg_stats->stats_info_count,
+	     sizeof(struct objagg_obj_stats_info),
+	     objagg_stats_info_sort_cmp_func, NULL);
+
+	return objagg_stats;
+}
+EXPORT_SYMBOL(objagg_stats_get);
+
+/**
+ * objagg_stats_put - puts stats of the objagg instance
+ * @objagg_stats: objagg instance stats
+ *
+ * Note: all locking must be provided by the caller.
+ */
+void objagg_stats_put(const struct objagg_stats *objagg_stats)
+{
+	kfree(objagg_stats);
+}
+EXPORT_SYMBOL(objagg_stats_put);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Object aggregation manager");
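Note: a compact sketch of wiring up the objagg API for the integer example the comments above describe (hypothetical names; the in-tree test_objagg.c below is the complete version). A key within a small range of an existing root becomes a delta of that root; refusing with an error makes objagg try the next root, and finally ops->root_create():

	struct my_key {
		unsigned int id;
	};

	static void *my_delta_create(void *priv, void *parent_obj, void *obj)
	{
		struct my_key *parent_key = parent_obj;
		struct my_key *key = obj;
		int diff = key->id - parent_key->id;

		if (diff < 0 || diff > 5)
			return ERR_PTR(-EINVAL); /* refuse; try another root */
		/* any non-error pointer is the "delta private" */
		return kzalloc(sizeof(int), GFP_KERNEL) ?: ERR_PTR(-ENOMEM);
	}

	static const struct objagg_ops my_ops = {
		.obj_size	= sizeof(struct my_key),
		.delta_create	= my_delta_create,
		.delta_destroy	= my_delta_destroy,	/* frees the delta */
		.root_create	= my_root_create,	/* allocs root priv */
		.root_destroy	= my_root_destroy,
	};

	/* usage (caller provides locking):
	 *	objagg = objagg_create(&my_ops, my_priv);
	 *	objagg_obj = objagg_obj_get(objagg, &key);
	 *	...
	 *	objagg_obj_put(objagg, objagg_obj);
	 *	objagg_destroy(objagg);
	 */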
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index de10b8c0bff6..9877682e49c7 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -181,7 +181,7 @@ static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
 	ref->confirm_switch = confirm_switch ?:
 		percpu_ref_noop_confirm_switch;
 
 	percpu_ref_get(ref);	/* put after confirmation */
-	call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
+	call_rcu(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
 }
 
 static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 1106bb6aa01e..14d51548bea6 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -784,11 +784,11 @@ void *__radix_tree_lookup(const struct radix_tree_root *root,
 	while (radix_tree_is_internal_node(node)) {
 		unsigned offset;
 
-		if (node == RADIX_TREE_RETRY)
-			goto restart;
 		parent = entry_to_node(node);
 		offset = radix_tree_descend(parent, &node, index);
 		slot = parent->slots + offset;
+		if (node == RADIX_TREE_RETRY)
+			goto restart;
 		if (parent->shift == 0)
 			break;
 	}
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 2f8b61dfd9b0..7ed43eaa02ef 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -18,6 +18,20 @@ quiet_cmd_unroll = UNROLL  $@
 
 ifeq ($(CONFIG_ALTIVEC),y)
 altivec_flags := -maltivec $(call cc-option,-mabi=altivec)
+
+ifdef CONFIG_CC_IS_CLANG
+# clang ppc port does not yet support -maltivec when -msoft-float is
+# enabled. A future release of clang will resolve this
+# https://bugs.llvm.org/show_bug.cgi?id=31177
+CFLAGS_REMOVE_altivec1.o  += -msoft-float
+CFLAGS_REMOVE_altivec2.o  += -msoft-float
+CFLAGS_REMOVE_altivec4.o  += -msoft-float
+CFLAGS_REMOVE_altivec8.o  += -msoft-float
+CFLAGS_REMOVE_vpermxor1.o += -msoft-float
+CFLAGS_REMOVE_vpermxor2.o += -msoft-float
+CFLAGS_REMOVE_vpermxor4.o += -msoft-float
+CFLAGS_REMOVE_vpermxor8.o += -msoft-float
+endif
 endif
 
 # The GCC option -ffreestanding is required in order to compile code containing
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 30526afa8343..852ffa5160f1 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -1179,8 +1179,7 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
 					    unsigned int hash)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
-	static struct rhash_head __rcu *rhnull =
-		(struct rhash_head __rcu *)NULLS_MARKER(0);
+	static struct rhash_head __rcu *rhnull;
 	unsigned int index = hash & ((1 << tbl->nest) - 1);
 	unsigned int size = tbl->size >> tbl->nest;
 	unsigned int subhash = hash;
@@ -1198,8 +1197,11 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
 		subhash >>= shift;
 	}
 
-	if (!ntbl)
+	if (!ntbl) {
+		if (!rhnull)
+			INIT_RHT_NULLS_HEAD(rhnull);
 		return &rhnull;
+	}
 
 	return &ntbl[subhash].bucket;
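Note: the test_bpf.c hunks below track a UAPI-visible sk_buff change: the tag-present flag moved out of skb->vlan_tci into a dedicated skb->vlan_present bit, so the tests stop masking with VLAN_TAG_PRESENT. A sketch of the two encodings (illustrative only, variable names are not from the patch):

	/* before: presence was folded into the TCI word */
	present = !!(skb->vlan_tci & VLAN_TAG_PRESENT);
	tci     = skb->vlan_tci & ~VLAN_TAG_PRESENT;

	/* after: the TCI word carries only the 16-bit TCI; presence is
	 * a separate flag, which populate_skb() now sets explicitly
	 */
	present = skb->vlan_present;
	tci     = skb->vlan_tci;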
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index aa22bcaec1dc..f3e570722a7e 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -39,6 +39,7 @@
 #define SKB_HASH	0x1234aaab
 #define SKB_QUEUE_MAP	123
 #define SKB_VLAN_TCI	0xffff
+#define SKB_VLAN_PRESENT	1
 #define SKB_DEV_IFINDEX	577
 #define SKB_DEV_TYPE	588
 
@@ -725,8 +726,8 @@ static struct bpf_test tests[] = {
 		CLASSIC,
 		{ },
 		{
-			{ 1, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT },
-			{ 10, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT }
+			{ 1, SKB_VLAN_TCI },
+			{ 10, SKB_VLAN_TCI }
 		},
 	},
 	{
@@ -739,8 +740,8 @@ static struct bpf_test tests[] = {
 		CLASSIC,
 		{ },
 		{
-			{ 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
-			{ 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
+			{ 1, SKB_VLAN_PRESENT },
+			{ 10, SKB_VLAN_PRESENT }
 		},
 	},
 	{
@@ -5289,8 +5290,8 @@ static struct bpf_test tests[] = {
 #endif
 		{ },
 		{
-			{ 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
-			{ 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
+			{ 1, SKB_VLAN_PRESENT },
+			{ 10, SKB_VLAN_PRESENT }
 		},
 		.fill_helper = bpf_fill_maxinsns6,
 		.expected_errcode = -ENOTSUPP,
@@ -6493,6 +6494,7 @@ static struct sk_buff *populate_skb(char *buf, int size)
 	skb->hash = SKB_HASH;
 	skb->queue_mapping = SKB_QUEUE_MAP;
 	skb->vlan_tci = SKB_VLAN_TCI;
+	skb->vlan_present = SKB_VLAN_PRESENT;
 	skb->vlan_proto = htons(ETH_P_IP);
 	dev_net_set(&dev, &init_net);
 	skb->dev = &dev;
diff --git a/lib/test_debug_virtual.c b/lib/test_debug_virtual.c
index d5a06addeb27..bf864c73e462 100644
--- a/lib/test_debug_virtual.c
+++ b/lib/test_debug_virtual.c
@@ -5,6 +5,7 @@
 #include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/sizes.h>
+#include <linux/io.h>
 #include <asm/page.h>
 
 #ifdef CONFIG_MIPS
diff --git a/lib/test_objagg.c b/lib/test_objagg.c
new file mode 100644
index 000000000000..ab57144bb0cd
--- /dev/null
+++ b/lib/test_objagg.c
@@ -0,0 +1,836 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/objagg.h>
+
+struct tokey {
+	unsigned int id;
+};
+
+#define NUM_KEYS 32
+
+static int key_id_index(unsigned int key_id)
+{
+	if (key_id >= NUM_KEYS) {
+		WARN_ON(1);
+		return 0;
+	}
+	return key_id;
+}
+
+#define BUF_LEN 128
+
+struct world {
+	unsigned int root_count;
+	unsigned int delta_count;
+	char next_root_buf[BUF_LEN];
+	struct objagg_obj *objagg_objs[NUM_KEYS];
+	unsigned int key_refs[NUM_KEYS];
+};
+
+struct root {
+	struct tokey key;
+	char buf[BUF_LEN];
+};
+
+struct delta {
+	unsigned int key_id_diff;
+};
+
+static struct objagg_obj *world_obj_get(struct world *world,
+					struct objagg *objagg,
+					unsigned int key_id)
+{
+	struct objagg_obj *objagg_obj;
+	struct tokey key;
+	int err;
+
+	key.id = key_id;
+	objagg_obj = objagg_obj_get(objagg, &key);
+	if (IS_ERR(objagg_obj)) {
+		pr_err("Key %u: Failed to get object.\n", key_id);
+		return objagg_obj;
+	}
+	if (!world->key_refs[key_id_index(key_id)]) {
+		world->objagg_objs[key_id_index(key_id)] = objagg_obj;
+	} else if (world->objagg_objs[key_id_index(key_id)] != objagg_obj) {
+		pr_err("Key %u: Got another object for the same key.\n",
+		       key_id);
+		err = -EINVAL;
+		goto err_key_id_check;
+	}
+	world->key_refs[key_id_index(key_id)]++;
+	return objagg_obj;
+
+err_key_id_check:
+	objagg_obj_put(objagg, objagg_obj);
+	return ERR_PTR(err);
+}
+
+static void world_obj_put(struct world *world, struct objagg *objagg,
+			  unsigned int key_id)
+{
+	struct objagg_obj *objagg_obj;
+
+	if (!world->key_refs[key_id_index(key_id)])
+		return;
+	objagg_obj = world->objagg_objs[key_id_index(key_id)];
+	objagg_obj_put(objagg, objagg_obj);
+	world->key_refs[key_id_index(key_id)]--;
+}
+
+#define MAX_KEY_ID_DIFF 5
+
+static void *delta_create(void *priv, void *parent_obj, void *obj)
+{
+	struct tokey *parent_key = parent_obj;
+	struct world *world = priv;
+	struct tokey *key = obj;
+	int diff = key->id - parent_key->id;
+	struct delta
*delta; + + if (diff < 0 || diff > MAX_KEY_ID_DIFF) + return ERR_PTR(-EINVAL); + + delta = kzalloc(sizeof(*delta), GFP_KERNEL); + if (!delta) + return ERR_PTR(-ENOMEM); + delta->key_id_diff = diff; + world->delta_count++; + return delta; +} + +static void delta_destroy(void *priv, void *delta_priv) +{ + struct delta *delta = delta_priv; + struct world *world = priv; + + world->delta_count--; + kfree(delta); +} + +static void *root_create(void *priv, void *obj) +{ + struct world *world = priv; + struct tokey *key = obj; + struct root *root; + + root = kzalloc(sizeof(*root), GFP_KERNEL); + if (!root) + return ERR_PTR(-ENOMEM); + memcpy(&root->key, key, sizeof(root->key)); + memcpy(root->buf, world->next_root_buf, sizeof(root->buf)); + world->root_count++; + return root; +} + +static void root_destroy(void *priv, void *root_priv) +{ + struct root *root = root_priv; + struct world *world = priv; + + world->root_count--; + kfree(root); +} + +static int test_nodelta_obj_get(struct world *world, struct objagg *objagg, + unsigned int key_id, bool should_create_root) +{ + unsigned int orig_root_count = world->root_count; + struct objagg_obj *objagg_obj; + const struct root *root; + int err; + + if (should_create_root) + prandom_bytes(world->next_root_buf, + sizeof(world->next_root_buf)); + + objagg_obj = world_obj_get(world, objagg, key_id); + if (IS_ERR(objagg_obj)) { + pr_err("Key %u: Failed to get object.\n", key_id); + return PTR_ERR(objagg_obj); + } + if (should_create_root) { + if (world->root_count != orig_root_count + 1) { + pr_err("Key %u: Root was not created\n", key_id); + err = -EINVAL; + goto err_check_root_count; + } + } else { + if (world->root_count != orig_root_count) { + pr_err("Key %u: Root was incorrectly created\n", + key_id); + err = -EINVAL; + goto err_check_root_count; + } + } + root = objagg_obj_root_priv(objagg_obj); + if (root->key.id != key_id) { + pr_err("Key %u: Root has unexpected key id\n", key_id); + err = -EINVAL; + goto err_check_key_id; + } + if (should_create_root && + memcmp(world->next_root_buf, root->buf, sizeof(root->buf))) { + pr_err("Key %u: Buffer does not match the expected content\n", + key_id); + err = -EINVAL; + goto err_check_buf; + } + return 0; + +err_check_buf: +err_check_key_id: +err_check_root_count: + objagg_obj_put(objagg, objagg_obj); + return err; +} + +static int test_nodelta_obj_put(struct world *world, struct objagg *objagg, + unsigned int key_id, bool should_destroy_root) +{ + unsigned int orig_root_count = world->root_count; + + world_obj_put(world, objagg, key_id); + + if (should_destroy_root) { + if (world->root_count != orig_root_count - 1) { + pr_err("Key %u: Root was not destroyed\n", key_id); + return -EINVAL; + } + } else { + if (world->root_count != orig_root_count) { + pr_err("Key %u: Root was incorrectly destroyed\n", + key_id); + return -EINVAL; + } + } + return 0; +} + +static int check_stats_zero(struct objagg *objagg) +{ + const struct objagg_stats *stats; + int err = 0; + + stats = objagg_stats_get(objagg); + if (IS_ERR(stats)) + return PTR_ERR(stats); + + if (stats->stats_info_count != 0) { + pr_err("Stats: Object count is not zero while it should be\n"); + err = -EINVAL; + } + + objagg_stats_put(stats); + return err; +} + +static int check_stats_nodelta(struct objagg *objagg) +{ + const struct objagg_stats *stats; + int i; + int err; + + stats = objagg_stats_get(objagg); + if (IS_ERR(stats)) + return PTR_ERR(stats); + + if (stats->stats_info_count != NUM_KEYS) { + pr_err("Stats: Unexpected object count (%u expected, %u 
returned)\n", + NUM_KEYS, stats->stats_info_count); + err = -EINVAL; + goto stats_put; + } + + for (i = 0; i < stats->stats_info_count; i++) { + if (stats->stats_info[i].stats.user_count != 2) { + pr_err("Stats: incorrect user count\n"); + err = -EINVAL; + goto stats_put; + } + if (stats->stats_info[i].stats.delta_user_count != 2) { + pr_err("Stats: incorrect delta user count\n"); + err = -EINVAL; + goto stats_put; + } + } + err = 0; + +stats_put: + objagg_stats_put(stats); + return err; +} + +static void *delta_create_dummy(void *priv, void *parent_obj, void *obj) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static void delta_destroy_dummy(void *priv, void *delta_priv) +{ +} + +static const struct objagg_ops nodelta_ops = { + .obj_size = sizeof(struct tokey), + .delta_create = delta_create_dummy, + .delta_destroy = delta_destroy_dummy, + .root_create = root_create, + .root_destroy = root_destroy, +}; + +static int test_nodelta(void) +{ + struct world world = {}; + struct objagg *objagg; + int i; + int err; + + objagg = objagg_create(&nodelta_ops, &world); + if (IS_ERR(objagg)) + return PTR_ERR(objagg); + + err = check_stats_zero(objagg); + if (err) + goto err_stats_first_zero; + + /* First round of gets, the root objects should be created */ + for (i = 0; i < NUM_KEYS; i++) { + err = test_nodelta_obj_get(&world, objagg, i, true); + if (err) + goto err_obj_first_get; + } + + /* Do the second round of gets, all roots are already created, + * make sure that no new root is created + */ + for (i = 0; i < NUM_KEYS; i++) { + err = test_nodelta_obj_get(&world, objagg, i, false); + if (err) + goto err_obj_second_get; + } + + err = check_stats_nodelta(objagg); + if (err) + goto err_stats_nodelta; + + for (i = NUM_KEYS - 1; i >= 0; i--) { + err = test_nodelta_obj_put(&world, objagg, i, false); + if (err) + goto err_obj_first_put; + } + for (i = NUM_KEYS - 1; i >= 0; i--) { + err = test_nodelta_obj_put(&world, objagg, i, true); + if (err) + goto err_obj_second_put; + } + + err = check_stats_zero(objagg); + if (err) + goto err_stats_second_zero; + + objagg_destroy(objagg); + return 0; + +err_stats_nodelta: +err_obj_first_put: +err_obj_second_get: + for (i--; i >= 0; i--) + world_obj_put(&world, objagg, i); + + i = NUM_KEYS; +err_obj_first_get: +err_obj_second_put: + for (i--; i >= 0; i--) + world_obj_put(&world, objagg, i); +err_stats_first_zero: +err_stats_second_zero: + objagg_destroy(objagg); + return err; +} + +static const struct objagg_ops delta_ops = { + .obj_size = sizeof(struct tokey), + .delta_create = delta_create, + .delta_destroy = delta_destroy, + .root_create = root_create, + .root_destroy = root_destroy, +}; + +enum action { + ACTION_GET, + ACTION_PUT, +}; + +enum expect_delta { + EXPECT_DELTA_SAME, + EXPECT_DELTA_INC, + EXPECT_DELTA_DEC, +}; + +enum expect_root { + EXPECT_ROOT_SAME, + EXPECT_ROOT_INC, + EXPECT_ROOT_DEC, +}; + +struct expect_stats_info { + struct objagg_obj_stats stats; + bool is_root; + unsigned int key_id; +}; + +struct expect_stats { + unsigned int info_count; + struct expect_stats_info info[NUM_KEYS]; +}; + +struct action_item { + unsigned int key_id; + enum action action; + enum expect_delta expect_delta; + enum expect_root expect_root; + struct expect_stats expect_stats; +}; + +#define EXPECT_STATS(count, ...) 
\ +{ \ + .info_count = count, \ + .info = { __VA_ARGS__ } \ +} + +#define ROOT(key_id, user_count, delta_user_count) \ + {{user_count, delta_user_count}, true, key_id} + +#define DELTA(key_id, user_count) \ + {{user_count, user_count}, false, key_id} + +static const struct action_item action_items[] = { + { + 1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC, + EXPECT_STATS(1, ROOT(1, 1, 1)), + }, /* r: 1 d: */ + { + 7, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC, + EXPECT_STATS(2, ROOT(1, 1, 1), ROOT(7, 1, 1)), + }, /* r: 1, 7 d: */ + { + 3, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME, + EXPECT_STATS(3, ROOT(1, 1, 2), ROOT(7, 1, 1), + DELTA(3, 1)), + }, /* r: 1, 7 d: 3^1 */ + { + 5, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME, + EXPECT_STATS(4, ROOT(1, 1, 3), ROOT(7, 1, 1), + DELTA(3, 1), DELTA(5, 1)), + }, /* r: 1, 7 d: 3^1, 5^1 */ + { + 3, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(4, ROOT(1, 1, 4), ROOT(7, 1, 1), + DELTA(3, 2), DELTA(5, 1)), + }, /* r: 1, 7 d: 3^1, 3^1, 5^1 */ + { + 1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(4, ROOT(1, 2, 5), ROOT(7, 1, 1), + DELTA(3, 2), DELTA(5, 1)), + }, /* r: 1, 1, 7 d: 3^1, 3^1, 5^1 */ + { + 30, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC, + EXPECT_STATS(5, ROOT(1, 2, 5), ROOT(7, 1, 1), ROOT(30, 1, 1), + DELTA(3, 2), DELTA(5, 1)), + }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1 */ + { + 8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME, + EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 2), ROOT(30, 1, 1), + DELTA(3, 2), DELTA(5, 1), DELTA(8, 1)), + }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1, 8^7 */ + { + 8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 3), ROOT(30, 1, 1), + DELTA(3, 2), DELTA(8, 2), DELTA(5, 1)), + }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1, 8^7, 8^7 */ + { + 3, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(6, ROOT(1, 2, 4), ROOT(7, 1, 3), ROOT(30, 1, 1), + DELTA(8, 2), DELTA(3, 1), DELTA(5, 1)), + }, /* r: 1, 1, 7, 30 d: 3^1, 5^1, 8^7, 8^7 */ + { + 3, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME, + EXPECT_STATS(5, ROOT(1, 2, 3), ROOT(7, 1, 3), ROOT(30, 1, 1), + DELTA(8, 2), DELTA(5, 1)), + }, /* r: 1, 1, 7, 30 d: 5^1, 8^7, 8^7 */ + { + 1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(1, 1, 2), ROOT(30, 1, 1), + DELTA(8, 2), DELTA(5, 1)), + }, /* r: 1, 7, 30 d: 5^1, 8^7, 8^7 */ + { + 1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(1, 0, 1), + DELTA(8, 2), DELTA(5, 1)), + }, /* r: 7, 30 d: 5^1, 8^7, 8^7 */ + { + 5, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC, + EXPECT_STATS(3, ROOT(7, 1, 3), ROOT(30, 1, 1), + DELTA(8, 2)), + }, /* r: 7, 30 d: 8^7, 8^7 */ + { + 5, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC, + EXPECT_STATS(4, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(5, 1, 1), + DELTA(8, 2)), + }, /* r: 7, 30, 5 d: 8^7, 8^7 */ + { + 6, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME, + EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1), + DELTA(8, 2), DELTA(6, 1)), + }, /* r: 7, 30, 5 d: 8^7, 8^7, 6^5 */ + { + 8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(5, ROOT(7, 1, 4), ROOT(5, 1, 2), ROOT(30, 1, 1), + DELTA(8, 3), DELTA(6, 1)), + }, /* r: 7, 30, 5 d: 8^7, 8^7, 8^7, 6^5 */ + { + 8, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1), + DELTA(8, 2), DELTA(6, 1)), + }, /* r: 7, 30, 5 d: 8^7, 8^7, 6^5 */ + { + 8, ACTION_PUT, EXPECT_DELTA_SAME, 
EXPECT_ROOT_SAME, + EXPECT_STATS(5, ROOT(7, 1, 2), ROOT(5, 1, 2), ROOT(30, 1, 1), + DELTA(8, 1), DELTA(6, 1)), + }, /* r: 7, 30, 5 d: 8^7, 6^5 */ + { + 8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME, + EXPECT_STATS(4, ROOT(5, 1, 2), ROOT(7, 1, 1), ROOT(30, 1, 1), + DELTA(6, 1)), + }, /* r: 7, 30, 5 d: 6^5 */ + { + 8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME, + EXPECT_STATS(5, ROOT(5, 1, 3), ROOT(7, 1, 1), ROOT(30, 1, 1), + DELTA(6, 1), DELTA(8, 1)), + }, /* r: 7, 30, 5 d: 6^5, 8^5 */ + { + 7, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC, + EXPECT_STATS(4, ROOT(5, 1, 3), ROOT(30, 1, 1), + DELTA(6, 1), DELTA(8, 1)), + }, /* r: 30, 5 d: 6^5, 8^5 */ + { + 30, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC, + EXPECT_STATS(3, ROOT(5, 1, 3), + DELTA(6, 1), DELTA(8, 1)), + }, /* r: 5 d: 6^5, 8^5 */ + { + 5, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME, + EXPECT_STATS(3, ROOT(5, 0, 2), + DELTA(6, 1), DELTA(8, 1)), + }, /* r: d: 6^5, 8^5 */ + { + 6, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME, + EXPECT_STATS(2, ROOT(5, 0, 1), + DELTA(8, 1)), + }, /* r: d: 6^5 */ + { + 8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC, + EXPECT_STATS(0, ), + }, /* r: d: */ +}; + +static int check_expect(struct world *world, + const struct action_item *action_item, + unsigned int orig_delta_count, + unsigned int orig_root_count) +{ + unsigned int key_id = action_item->key_id; + + switch (action_item->expect_delta) { + case EXPECT_DELTA_SAME: + if (orig_delta_count != world->delta_count) { + pr_err("Key %u: Delta count changed while expected to remain the same.\n", + key_id); + return -EINVAL; + } + break; + case EXPECT_DELTA_INC: + if (WARN_ON(action_item->action == ACTION_PUT)) + return -EINVAL; + if (orig_delta_count + 1 != world->delta_count) { + pr_err("Key %u: Delta count was not incremented.\n", + key_id); + return -EINVAL; + } + break; + case EXPECT_DELTA_DEC: + if (WARN_ON(action_item->action == ACTION_GET)) + return -EINVAL; + if (orig_delta_count - 1 != world->delta_count) { + pr_err("Key %u: Delta count was not decremented.\n", + key_id); + return -EINVAL; + } + break; + } + + switch (action_item->expect_root) { + case EXPECT_ROOT_SAME: + if (orig_root_count != world->root_count) { + pr_err("Key %u: Root count changed while expected to remain the same.\n", + key_id); + return -EINVAL; + } + break; + case EXPECT_ROOT_INC: + if (WARN_ON(action_item->action == ACTION_PUT)) + return -EINVAL; + if (orig_root_count + 1 != world->root_count) { + pr_err("Key %u: Root count was not incremented.\n", + key_id); + return -EINVAL; + } + break; + case EXPECT_ROOT_DEC: + if (WARN_ON(action_item->action == ACTION_GET)) + return -EINVAL; + if (orig_root_count - 1 != world->root_count) { + pr_err("Key %u: Root count was not decremented.\n", + key_id); + return -EINVAL; + } + } + + return 0; +} + +static unsigned int obj_to_key_id(struct objagg_obj *objagg_obj) +{ + const struct tokey *root_key; + const struct delta *delta; + unsigned int key_id; + + root_key = objagg_obj_root_priv(objagg_obj); + key_id = root_key->id; + delta = objagg_obj_delta_priv(objagg_obj); + if (delta) + key_id += delta->key_id_diff; + return key_id; +} + +static int +check_expect_stats_nums(const struct objagg_obj_stats_info *stats_info, + const struct expect_stats_info *expect_stats_info, + const char **errmsg) +{ + if (stats_info->is_root != expect_stats_info->is_root) { + if (errmsg) + *errmsg = "Incorrect root/delta indication"; + return -EINVAL; + } + if (stats_info->stats.user_count != + expect_stats_info->stats.user_count) { + 
if (errmsg) + *errmsg = "Incorrect user count"; + return -EINVAL; + } + if (stats_info->stats.delta_user_count != + expect_stats_info->stats.delta_user_count) { + if (errmsg) + *errmsg = "Incorrect delta user count"; + return -EINVAL; + } + return 0; +} + +static int +check_expect_stats_key_id(const struct objagg_obj_stats_info *stats_info, + const struct expect_stats_info *expect_stats_info, + const char **errmsg) +{ + if (obj_to_key_id(stats_info->objagg_obj) != + expect_stats_info->key_id) { + if (errmsg) + *errmsg = "incorrect key id"; + return -EINVAL; + } + return 0; +} + +static int check_expect_stats_neigh(const struct objagg_stats *stats, + const struct expect_stats *expect_stats, + int pos) +{ + int i; + int err; + + for (i = pos - 1; i >= 0; i--) { + err = check_expect_stats_nums(&stats->stats_info[i], + &expect_stats->info[pos], NULL); + if (err) + break; + err = check_expect_stats_key_id(&stats->stats_info[i], + &expect_stats->info[pos], NULL); + if (!err) + return 0; + } + for (i = pos + 1; i < stats->stats_info_count; i++) { + err = check_expect_stats_nums(&stats->stats_info[i], + &expect_stats->info[pos], NULL); + if (err) + break; + err = check_expect_stats_key_id(&stats->stats_info[i], + &expect_stats->info[pos], NULL); + if (!err) + return 0; + } + return -EINVAL; +} + +static int __check_expect_stats(const struct objagg_stats *stats, + const struct expect_stats *expect_stats, + const char **errmsg) +{ + int i; + int err; + + if (stats->stats_info_count != expect_stats->info_count) { + *errmsg = "Unexpected object count"; + return -EINVAL; + } + + for (i = 0; i < stats->stats_info_count; i++) { + err = check_expect_stats_nums(&stats->stats_info[i], + &expect_stats->info[i], errmsg); + if (err) + return err; + err = check_expect_stats_key_id(&stats->stats_info[i], + &expect_stats->info[i], errmsg); + if (err) { + /* It is possible that one of the neighbor stats with + * same numbers have the correct key id, so check it + */ + err = check_expect_stats_neigh(stats, expect_stats, i); + if (err) + return err; + } + } + return 0; +} + +static int check_expect_stats(struct objagg *objagg, + const struct expect_stats *expect_stats, + const char **errmsg) +{ + const struct objagg_stats *stats; + int err; + + stats = objagg_stats_get(objagg); + if (IS_ERR(stats)) + return PTR_ERR(stats); + err = __check_expect_stats(stats, expect_stats, errmsg); + objagg_stats_put(stats); + return err; +} + +static int test_delta_action_item(struct world *world, + struct objagg *objagg, + const struct action_item *action_item, + bool inverse) +{ + unsigned int orig_delta_count = world->delta_count; + unsigned int orig_root_count = world->root_count; + unsigned int key_id = action_item->key_id; + enum action action = action_item->action; + struct objagg_obj *objagg_obj; + const char *errmsg; + int err; + + if (inverse) + action = action == ACTION_GET ? ACTION_PUT : ACTION_GET; + + switch (action) { + case ACTION_GET: + objagg_obj = world_obj_get(world, objagg, key_id); + if (IS_ERR(objagg_obj)) + return PTR_ERR(objagg_obj); + break; + case ACTION_PUT: + world_obj_put(world, objagg, key_id); + break; + } + + if (inverse) + return 0; + err = check_expect(world, action_item, + orig_delta_count, orig_root_count); + if (err) + goto errout; + + errmsg = NULL; + err = check_expect_stats(objagg, &action_item->expect_stats, &errmsg); + if (err) { + pr_err("Key %u: Stats: %s\n", action_item->key_id, errmsg); + goto errout; + } + + return 0; + +errout: + /* This can only happen when action is not inversed. 
+	 * So in case of an error, cleanup by doing inverse action.
+	 */
+	test_delta_action_item(world, objagg, action_item, true);
+	return err;
+}
+
+static int test_delta(void)
+{
+	struct world world = {};
+	struct objagg *objagg;
+	int i;
+	int err;
+
+	objagg = objagg_create(&delta_ops, &world);
+	if (IS_ERR(objagg))
+		return PTR_ERR(objagg);
+
+	for (i = 0; i < ARRAY_SIZE(action_items); i++) {
+		err = test_delta_action_item(&world, objagg,
+					     &action_items[i], false);
+		if (err)
+			goto err_do_action_item;
+	}
+
+	objagg_destroy(objagg);
+	return 0;
+
+err_do_action_item:
+	for (i--; i >= 0; i--)
+		test_delta_action_item(&world, objagg, &action_items[i], true);
+
+	objagg_destroy(objagg);
+	return err;
+}
+
+static int __init test_objagg_init(void)
+{
+	int err;
+
+	err = test_nodelta();
+	if (err)
+		return err;
+	return test_delta();
+}
+
+static void __exit test_objagg_exit(void)
+{
+}
+
+module_init(test_objagg_init);
+module_exit(test_objagg_exit);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
+MODULE_DESCRIPTION("Test module for objagg");
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 82ac39ce5310..6a8ac7626797 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -20,11 +20,11 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/rhashtable.h>
-#include <linux/semaphore.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/random.h>
 #include <linux/vmalloc.h>
+#include <linux/wait.h>
 
 #define MAX_ENTRIES	1000000
 #define TEST_INSERT_FAIL INT_MAX
@@ -112,8 +112,8 @@ static struct rhashtable_params test_rht_params_dup = {
 	.automatic_shrinking = false,
 };
 
-static struct semaphore prestart_sem;
-static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
+static atomic_t startup_count;
+static DECLARE_WAIT_QUEUE_HEAD(startup_wait);
 
 static int insert_retry(struct rhashtable *ht, struct test_obj *obj,
 			const struct rhashtable_params params)
@@ -634,9 +634,12 @@ static int threadfunc(void *data)
 	int i, step, err = 0, insert_retries = 0;
 	struct thread_data *tdata = data;
 
-	up(&prestart_sem);
-	if (down_interruptible(&startup_sem))
-		pr_err("  thread[%d]: down_interruptible failed\n", tdata->id);
+	if (atomic_dec_and_test(&startup_count))
+		wake_up(&startup_wait);
+	if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == -1)) {
+		pr_err("  thread[%d]: interrupted\n", tdata->id);
+		goto out;
+	}
 
 	for (i = 0; i < tdata->entries; i++) {
 		tdata->objs[i].value.id = i;
@@ -755,7 +758,7 @@ static int __init test_rht_init(void)
 	pr_info("Testing concurrent rhashtable access from %d threads\n",
 		tcount);
-	sema_init(&prestart_sem, 1 - tcount);
+	atomic_set(&startup_count, tcount);
 	tdata = vzalloc(array_size(tcount, sizeof(struct thread_data)));
 	if (!tdata)
 		return -ENOMEM;
@@ -781,15 +784,18 @@ static int __init test_rht_init(void)
 		tdata[i].objs = objs + i * entries;
 		tdata[i].task = kthread_run(threadfunc, &tdata[i],
 					    "rhashtable_thread[%d]", i);
-		if (IS_ERR(tdata[i].task))
+		if (IS_ERR(tdata[i].task)) {
 			pr_err(" kthread_run failed for thread %d\n", i);
-		else
+			atomic_dec(&startup_count);
+		} else {
 			started_threads++;
+		}
 	}
-	if (down_interruptible(&prestart_sem))
-		pr_err(" down interruptible failed\n");
-	for (i = 0; i < tcount; i++)
-		up(&startup_sem);
+	if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == 0))
+		pr_err(" wait_event interruptible failed\n");
+	/* count is 0 now, set it to -1 and wake up all threads together */
+	atomic_dec(&startup_count);
+	wake_up_all(&startup_wait);
 	for (i = 0; i < tcount; i++) {
 		if (IS_ERR(tdata[i].task))
 			continue;
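Note: the test_rhashtable.c hunk above replaces two semaphores with one atomic counter plus a single wait queue. The resulting startup gate, condensed from the diff itself:

	/* worker: announce readiness, then sleep until released */
	if (atomic_dec_and_test(&startup_count))
		wake_up(&startup_wait);
	wait_event_interruptible(startup_wait,
				 atomic_read(&startup_count) == -1);

	/* launcher, after spawning all workers: wait for the count to
	 * reach 0, then drive it to -1 and release every worker at once
	 */
	wait_event_interruptible(startup_wait,
				 atomic_read(&startup_count) == 0);
	atomic_dec(&startup_count);	/* 0 -> -1 */
	wake_up_all(&startup_wait);

Failed kthread_run() calls decrement the count themselves, so the launcher is not left waiting for threads that never started.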
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 0598e86af8fc..4676c0a1eeca 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -28,23 +28,28 @@ void xa_dump(const struct xarray *xa) { }
 	} while (0)
 #endif
 
+static void *xa_mk_index(unsigned long index)
+{
+	return xa_mk_value(index & LONG_MAX);
+}
+
 static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
-	return xa_store(xa, index, xa_mk_value(index & LONG_MAX), gfp);
+	return xa_store(xa, index, xa_mk_index(index), gfp);
 }
 
 static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
 	u32 id = 0;
 
-	XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_value(index & LONG_MAX),
+	XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(index),
 				gfp) != 0);
 	XA_BUG_ON(xa, id != index);
 }
 
 static void xa_erase_index(struct xarray *xa, unsigned long index)
 {
-	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_value(index & LONG_MAX));
+	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
 	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
 }
 
@@ -118,7 +123,7 @@ static noinline void check_xas_retry(struct xarray *xa)
 	xas_set(&xas, 0);
 	xas_for_each(&xas, entry, ULONG_MAX) {
-		xas_store(&xas, xa_mk_value(xas.xa_index));
+		xas_store(&xas, xa_mk_index(xas.xa_index));
 	}
 	xas_unlock(&xas);
 
@@ -196,7 +201,7 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
 		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
 		xa_set_mark(xa, index + 2, XA_MARK_1);
 		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
-		xa_store_order(xa, index, order, xa_mk_value(index),
+		xa_store_order(xa, index, order, xa_mk_index(index),
 				GFP_KERNEL);
 		for (i = base; i < next; i++) {
 			XA_STATE(xas, xa, i);
@@ -405,7 +410,7 @@ static noinline void check_xas_erase(struct xarray *xa)
 			xas_set(&xas, j);
 			do {
 				xas_lock(&xas);
-				xas_store(&xas, xa_mk_value(j));
+				xas_store(&xas, xa_mk_index(j));
 				xas_unlock(&xas);
 			} while (xas_nomem(&xas, GFP_KERNEL));
 		}
@@ -423,7 +428,7 @@ static noinline void check_xas_erase(struct xarray *xa)
 		xas_set(&xas, 0);
 		j = i;
 		xas_for_each(&xas, entry, ULONG_MAX) {
-			XA_BUG_ON(xa, entry != xa_mk_value(j));
+			XA_BUG_ON(xa, entry != xa_mk_index(j));
 			xas_store(&xas, NULL);
 			j++;
 		}
@@ -440,17 +445,17 @@ static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
 	unsigned long min = index & ~((1UL << order) - 1);
 	unsigned long max = min + (1UL << order);
 
-	xa_store_order(xa, index, order, xa_mk_value(index), GFP_KERNEL);
-	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(index));
-	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(index));
+	xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
+	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
+	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
 	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
 	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
 
 	xas_lock(&xas);
-	XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(min)) != xa_mk_value(index));
+	XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
 	xas_unlock(&xas);
-	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(min));
-	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(min));
+	XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
+	XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
 	XA_BUG_ON(xa, xa_load(xa, max) != NULL);
 	XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
 
@@ -471,6 +476,32 @@ static noinline void check_multi_store_2(struct
xarray *xa, unsigned long index, xas_unlock(&xas); XA_BUG_ON(xa, !xa_empty(xa)); } + +static noinline void check_multi_store_3(struct xarray *xa, unsigned long index, + unsigned int order) +{ + XA_STATE(xas, xa, 0); + void *entry; + int n = 0; + + xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL); + + xas_lock(&xas); + xas_for_each(&xas, entry, ULONG_MAX) { + XA_BUG_ON(xa, entry != xa_mk_index(index)); + n++; + } + XA_BUG_ON(xa, n != 1); + xas_set(&xas, index + 1); + xas_for_each(&xas, entry, ULONG_MAX) { + XA_BUG_ON(xa, entry != xa_mk_index(index)); + n++; + } + XA_BUG_ON(xa, n != 2); + xas_unlock(&xas); + + xa_destroy(xa); +} #endif static noinline void check_multi_store(struct xarray *xa) @@ -523,15 +554,15 @@ static noinline void check_multi_store(struct xarray *xa) for (i = 0; i < max_order; i++) { for (j = 0; j < max_order; j++) { - xa_store_order(xa, 0, i, xa_mk_value(i), GFP_KERNEL); - xa_store_order(xa, 0, j, xa_mk_value(j), GFP_KERNEL); + xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL); + xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL); for (k = 0; k < max_order; k++) { void *entry = xa_load(xa, (1UL << k) - 1); if ((i < k) && (j < k)) XA_BUG_ON(xa, entry != NULL); else - XA_BUG_ON(xa, entry != xa_mk_value(j)); + XA_BUG_ON(xa, entry != xa_mk_index(j)); } xa_erase(xa, 0); @@ -545,6 +576,11 @@ static noinline void check_multi_store(struct xarray *xa) check_multi_store_1(xa, (1UL << i) + 1, i); } check_multi_store_2(xa, 4095, 9); + + for (i = 1; i < 20; i++) { + check_multi_store_3(xa, 0, i); + check_multi_store_3(xa, 1UL << i, i); + } #endif } @@ -587,16 +623,25 @@ static noinline void check_xa_alloc(void) xa_destroy(&xa0); id = 0xfffffffeU; - XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0), + XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id), GFP_KERNEL) != 0); XA_BUG_ON(&xa0, id != 0xfffffffeU); - XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0), + XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id), GFP_KERNEL) != 0); XA_BUG_ON(&xa0, id != 0xffffffffU); - XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0), + XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id), GFP_KERNEL) != -ENOSPC); XA_BUG_ON(&xa0, id != 0xffffffffU); xa_destroy(&xa0); + + id = 10; + XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id), + GFP_KERNEL) != -ENOSPC); + XA_BUG_ON(&xa0, xa_store_index(&xa0, 3, GFP_KERNEL) != 0); + XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id), + GFP_KERNEL) != -ENOSPC); + xa_erase_index(&xa0, 3); + XA_BUG_ON(&xa0, !xa_empty(&xa0)); } static noinline void __check_store_iter(struct xarray *xa, unsigned long start, @@ -610,11 +655,11 @@ retry: xas_lock(&xas); xas_for_each_conflict(&xas, entry) { XA_BUG_ON(xa, !xa_is_value(entry)); - XA_BUG_ON(xa, entry < xa_mk_value(start)); - XA_BUG_ON(xa, entry > xa_mk_value(start + (1UL << order) - 1)); + XA_BUG_ON(xa, entry < xa_mk_index(start)); + XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1)); count++; } - xas_store(&xas, xa_mk_value(start)); + xas_store(&xas, xa_mk_index(start)); xas_unlock(&xas); if (xas_nomem(&xas, GFP_KERNEL)) { count = 0; @@ -622,9 +667,9 @@ retry: } XA_BUG_ON(xa, xas_error(&xas)); XA_BUG_ON(xa, count != present); - XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_value(start)); + XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start)); XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) != - xa_mk_value(start)); + xa_mk_index(start)); xa_erase_index(xa, start); } @@ -703,7 +748,7 @@ static noinline void 
check_multi_find_2(struct xarray *xa) for (j = 0; j < index; j++) { XA_STATE(xas, xa, j + index); xa_store_index(xa, index - 1, GFP_KERNEL); - xa_store_order(xa, index, i, xa_mk_value(index), + xa_store_order(xa, index, i, xa_mk_index(index), GFP_KERNEL); rcu_read_lock(); xas_for_each(&xas, entry, ULONG_MAX) { @@ -778,7 +823,7 @@ static noinline void check_find_2(struct xarray *xa) j = 0; index = 0; xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) { - XA_BUG_ON(xa, xa_mk_value(index) != entry); + XA_BUG_ON(xa, xa_mk_index(index) != entry); XA_BUG_ON(xa, index != j++); } } @@ -786,10 +831,34 @@ static noinline void check_find_2(struct xarray *xa) xa_destroy(xa); } +static noinline void check_find_3(struct xarray *xa) +{ + XA_STATE(xas, xa, 0); + unsigned long i, j, k; + void *entry; + + for (i = 0; i < 100; i++) { + for (j = 0; j < 100; j++) { + for (k = 0; k < 100; k++) { + xas_set(&xas, j); + xas_for_each_marked(&xas, entry, k, XA_MARK_0) + ; + if (j > k) + XA_BUG_ON(xa, + xas.xa_node != XAS_RESTART); + } + } + xa_store_index(xa, i, GFP_KERNEL); + xa_set_mark(xa, i, XA_MARK_0); + } + xa_destroy(xa); +} + static noinline void check_find(struct xarray *xa) { check_find_1(xa); check_find_2(xa); + check_find_3(xa); check_multi_find(xa); check_multi_find_2(xa); } @@ -829,11 +898,11 @@ static noinline void check_find_entry(struct xarray *xa) for (index = 0; index < (1UL << (order + 5)); index += (1UL << order)) { xa_store_order(xa, index, order, - xa_mk_value(index), GFP_KERNEL); + xa_mk_index(index), GFP_KERNEL); XA_BUG_ON(xa, xa_load(xa, index) != - xa_mk_value(index)); + xa_mk_index(index)); XA_BUG_ON(xa, xa_find_entry(xa, - xa_mk_value(index)) != index); + xa_mk_index(index)) != index); } XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); xa_destroy(xa); @@ -844,7 +913,7 @@ static noinline void check_find_entry(struct xarray *xa) XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); xa_store_index(xa, ULONG_MAX, GFP_KERNEL); XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); - XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_value(LONG_MAX)) != -1); + XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1); xa_erase_index(xa, ULONG_MAX); XA_BUG_ON(xa, !xa_empty(xa)); } @@ -864,7 +933,7 @@ static noinline void check_move_small(struct xarray *xa, unsigned long idx) XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); XA_BUG_ON(xa, xas.xa_index != i); if (i == 0 || i == idx) - XA_BUG_ON(xa, entry != xa_mk_value(i)); + XA_BUG_ON(xa, entry != xa_mk_index(i)); else XA_BUG_ON(xa, entry != NULL); } @@ -878,7 +947,7 @@ static noinline void check_move_small(struct xarray *xa, unsigned long idx) XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); XA_BUG_ON(xa, xas.xa_index != i); if (i == 0 || i == idx) - XA_BUG_ON(xa, entry != xa_mk_value(i)); + XA_BUG_ON(xa, entry != xa_mk_index(i)); else XA_BUG_ON(xa, entry != NULL); } while (i > 0); @@ -909,7 +978,7 @@ static noinline void check_move(struct xarray *xa) do { void *entry = xas_prev(&xas); i--; - XA_BUG_ON(xa, entry != xa_mk_value(i)); + XA_BUG_ON(xa, entry != xa_mk_index(i)); XA_BUG_ON(xa, i != xas.xa_index); } while (i != 0); @@ -918,7 +987,7 @@ static noinline void check_move(struct xarray *xa) do { void *entry = xas_next(&xas); - XA_BUG_ON(xa, entry != xa_mk_value(i)); + XA_BUG_ON(xa, entry != xa_mk_index(i)); XA_BUG_ON(xa, i != xas.xa_index); i++; } while (i < (1 << 16)); @@ -934,7 +1003,7 @@ static noinline void check_move(struct xarray *xa) void *entry = xas_prev(&xas); i--; if ((i < (1 << 8)) || (i >= (1 << 15))) - XA_BUG_ON(xa, entry != xa_mk_value(i)); + XA_BUG_ON(xa, entry != 
xa_mk_index(i)); else XA_BUG_ON(xa, entry != NULL); XA_BUG_ON(xa, i != xas.xa_index); @@ -946,7 +1015,7 @@ static noinline void check_move(struct xarray *xa) do { void *entry = xas_next(&xas); if ((i < (1 << 8)) || (i >= (1 << 15))) - XA_BUG_ON(xa, entry != xa_mk_value(i)); + XA_BUG_ON(xa, entry != xa_mk_index(i)); else XA_BUG_ON(xa, entry != NULL); XA_BUG_ON(xa, i != xas.xa_index); @@ -976,7 +1045,7 @@ static noinline void xa_store_many_order(struct xarray *xa, if (xas_error(&xas)) goto unlock; for (i = 0; i < (1U << order); i++) { - XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(index + i))); + XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i))); xas_next(&xas); } unlock: @@ -1031,9 +1100,9 @@ static noinline void check_create_range_4(struct xarray *xa, if (xas_error(&xas)) goto unlock; for (i = 0; i < (1UL << order); i++) { - void *old = xas_store(&xas, xa_mk_value(base + i)); + void *old = xas_store(&xas, xa_mk_index(base + i)); if (xas.xa_index == index) - XA_BUG_ON(xa, old != xa_mk_value(base + i)); + XA_BUG_ON(xa, old != xa_mk_index(base + i)); else XA_BUG_ON(xa, old != NULL); xas_next(&xas); @@ -1085,10 +1154,10 @@ static noinline void __check_store_range(struct xarray *xa, unsigned long first, unsigned long last) { #ifdef CONFIG_XARRAY_MULTI - xa_store_range(xa, first, last, xa_mk_value(first), GFP_KERNEL); + xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL); - XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_value(first)); - XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_value(first)); + XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first)); + XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first)); XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL); XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL); @@ -1195,7 +1264,7 @@ static noinline void check_account(struct xarray *xa) XA_BUG_ON(xa, xas.xa_node->nr_values != 0); rcu_read_unlock(); - xa_store_order(xa, 1 << order, order, xa_mk_value(1 << order), + xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order), GFP_KERNEL); XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2); diff --git a/lib/xarray.c b/lib/xarray.c index bbacca576593..5f3f9311de89 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1131,7 +1131,7 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark) entry = xa_head(xas->xa); xas->xa_node = NULL; if (xas->xa_index > max_index(entry)) - goto bounds; + goto out; if (!xa_is_node(entry)) { if (xa_marked(xas->xa, mark)) return entry; @@ -1180,11 +1180,9 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark) } out: - if (!max) + if (xas->xa_index > max) goto max; -bounds: - xas->xa_node = XAS_BOUNDS; - return NULL; + return set_bounds(xas); max: xas->xa_node = XAS_RESTART; return NULL; |
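Note on the recurring test_xarray.c change above: value entries can only encode non-negative longs (xa_mk_value() shifts its argument into a tagged pointer, so the top bit of an unsigned long does not fit), which is why every comparison for indices that may exceed LONG_MAX must apply the same mask. The new xa_mk_index() helper centralizes that masking; a sketch of the pattern the tests now rely on (illustrative, assuming xa_mk_value()'s LONG_MAX limit):

	static void *xa_mk_index(unsigned long index)
	{
		return xa_mk_value(index & LONG_MAX);
	}

	/* e.g. for index == ULONG_MAX, the stored entry is the truncated
	 * value, so the assertion must truncate identically:
	 */
	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	XA_BUG_ON(xa, xa_load(xa, ULONG_MAX) != xa_mk_index(ULONG_MAX));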