Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig | 12
-rw-r--r--  lib/Kconfig.debug | 66
-rw-r--r--  lib/Kconfig.kasan | 4
-rw-r--r--  lib/Kconfig.kcsan | 6
-rw-r--r--  lib/Makefile | 7
-rw-r--r--  lib/crypto/Kconfig | 9
-rw-r--r--  lib/crypto/Makefile | 5
-rw-r--r--  lib/crypto/aesgcm.c | 727
-rw-r--r--  lib/crypto/gf128mul.c | 436
-rw-r--r--  lib/debugobjects.c | 14
-rw-r--r--  lib/fault-inject.c | 15
-rw-r--r--  lib/find_bit_benchmark.c | 4
-rw-r--r--  lib/fonts/fonts.c | 4
-rw-r--r--  lib/fortify_kunit.c | 255
-rw-r--r--  lib/interval_tree.c | 132
-rw-r--r--  lib/iov_iter.c | 82
-rw-r--r--  lib/is_signed_type_kunit.c | 4
-rw-r--r--  lib/kobject.c | 2
-rw-r--r--  lib/kunit/assert.c | 62
-rw-r--r--  lib/kunit/debugfs.c | 2
-rw-r--r--  lib/kunit/executor.c | 6
-rw-r--r--  lib/kunit/kunit-example-test.c | 7
-rw-r--r--  lib/kunit/string-stream.c | 5
-rw-r--r--  lib/kunit/test.c | 15
-rw-r--r--  lib/llist.c | 4
-rw-r--r--  lib/lru_cache.c | 59
-rw-r--r--  lib/maple_tree.c | 13
-rw-r--r--  lib/math/div64.c | 15
-rw-r--r--  lib/memcpy_kunit.c | 227
-rw-r--r--  lib/net_utils.c | 3
-rw-r--r--  lib/nlattr.c | 2
-rw-r--r--  lib/notifier-error-inject.c | 2
-rw-r--r--  lib/oid_registry.c | 1
-rw-r--r--  lib/overflow_kunit.c | 381
-rw-r--r--  lib/packing.c | 16
-rw-r--r--  lib/percpu-refcount.c | 3
-rw-r--r--  lib/percpu_counter.c | 29
-rw-r--r--  lib/radix-tree.c | 2
-rw-r--r--  lib/raid6/algos.c | 2
-rw-r--r--  lib/raid6/s390vx.uc | 3
-rw-r--r--  lib/reed_solomon/test_rslib.c | 6
-rw-r--r--  lib/rhashtable.c | 16
-rw-r--r--  lib/sbitmap.c | 148
-rw-r--r--  lib/scatterlist.c | 25
-rw-r--r--  lib/siphash_kunit.c (renamed from lib/test_siphash.c) | 165
-rw-r--r--  lib/slub_kunit.c | 58
-rw-r--r--  lib/string.c | 82
-rw-r--r--  lib/strscpy_kunit.c | 142
-rw-r--r--  lib/test-string_helpers.c | 2
-rw-r--r--  lib/test_bpf.c | 1
-rw-r--r--  lib/test_fprobe.c | 5
-rw-r--r--  lib/test_hexdump.c | 10
-rw-r--r--  lib/test_kprobes.c | 5
-rw-r--r--  lib/test_linear_ranges.c | 13
-rw-r--r--  lib/test_list_sort.c | 2
-rw-r--r--  lib/test_printf.c | 40
-rw-r--r--  lib/test_rhashtable.c | 12
-rw-r--r--  lib/test_strscpy.c | 150
-rw-r--r--  lib/test_vmalloc.c | 8
-rw-r--r--  lib/ubsan.c | 3
-rw-r--r--  lib/vdso/Makefile | 2
-rw-r--r--  lib/vsprintf.c | 16
62 files changed, 2823 insertions, 731 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 9bbf8a4b2108..ce2abffb9ed8 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -24,6 +24,7 @@ config LINEAR_RANGES
config PACKING
bool "Generic bitfield packing and unpacking"
+ select BITREVERSE
default n
help
This option provides the packing() helper function, which permits
@@ -479,6 +480,10 @@ config INTERVAL_TREE
for more information.
+config INTERVAL_TREE_SPAN_ITER
+ bool
+ depends on INTERVAL_TREE
+
config XARRAY_MULTI
bool
help
@@ -529,8 +534,8 @@ config CPUMASK_OFFSTACK
stack overflow.
config FORCE_NR_CPUS
- bool "NR_CPUS is set to an actual number of CPUs"
- depends on SMP
+ bool "Set number of CPUs at compile time"
+ depends on SMP && EXPERT && !COMPILE_TEST
help
Say Yes if you have NR_CPUS set to an actual number of possible
CPUs in your system, not to a default value. This forces the core
@@ -672,6 +677,9 @@ config ARCH_HAS_PMEM_API
config MEMREGION
bool
+config ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
+ bool
+
config ARCH_HAS_MEMREMAP_COMPAT_ALIGN
bool
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c3c0b077ade3..a93c45ebc320 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -399,6 +399,7 @@ config FRAME_WARN
default 2048 if GCC_PLUGIN_LATENT_ENTROPY
default 2048 if PARISC
default 1536 if (!64BIT && XTENSA)
+ default 1280 if KASAN && !64BIT
default 1024 if !64BIT
default 2048 if 64BIT
help
@@ -468,6 +469,7 @@ config SECTION_MISMATCH_WARN_ONLY
config DEBUG_FORCE_FUNCTION_ALIGN_64B
bool "Force all function address 64B aligned"
depends on EXPERT && (X86_64 || ARM64 || PPC32 || PPC64 || ARC)
+ select FUNCTION_ALIGNMENT_64B
help
There are cases that a commit from one domain changes the function
address alignment of other domains, and cause magic performance
@@ -1716,6 +1718,16 @@ config LATENCYTOP
Enable this option if you want to use the LatencyTOP tool
to find out which userspace is blocking on what kernel operations.
+config DEBUG_CGROUP_REF
+ bool "Disable inlining of cgroup css reference count functions"
+ depends on DEBUG_KERNEL
+ depends on CGROUPS
+ depends on KPROBES
+ default n
+ help
+ Force cgroup css reference count functions to not be inlined so
+ that they can be kprobed for debugging.
+
source "kernel/trace/Kconfig"
config PROVIDE_OHCI1394_DMA_INIT
@@ -1874,8 +1886,14 @@ config NETDEV_NOTIFIER_ERROR_INJECT
If unsure, say N.
config FUNCTION_ERROR_INJECTION
- def_bool y
+ bool "Fault-injections of functions"
depends on HAVE_FUNCTION_ERROR_INJECTION && KPROBES
+ help
+ Add fault injections into various functions that are annotated with
+ ALLOW_ERROR_INJECTION() in the kernel. BPF may also modify the return
+ value of these functions. This is useful to test error paths of code.
+
+ If unsure, say N.
config FAULT_INJECTION
bool "Fault-injection framework"
@@ -2107,6 +2125,7 @@ config KPROBES_SANITY_TEST
depends on DEBUG_KERNEL
depends on KPROBES
depends on KUNIT
+ select STACKTRACE if ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
default KUNIT_ALL_TESTS
help
This option provides for testing basic kprobes functionality on
@@ -2216,9 +2235,6 @@ config STRING_SELFTEST
config TEST_STRING_HELPERS
tristate "Test functions located in the string_helpers module at runtime"
-config TEST_STRSCPY
- tristate "Test strscpy*() family of functions at runtime"
-
config TEST_KSTRTOX
tristate "Test kstrto*() family of functions at runtime"
@@ -2242,6 +2258,7 @@ config TEST_XARRAY
tristate "Test the XArray code at runtime"
config TEST_MAPLE_TREE
+ depends on DEBUG_KERNEL
select DEBUG_MAPLE_TREE
tristate "Test the Maple Tree code at runtime"
@@ -2252,15 +2269,6 @@ config TEST_RHASHTABLE
If unsure, say N.
-config TEST_SIPHASH
- tristate "Perform selftest on siphash functions"
- help
- Enable this option to test the kernel's siphash (<linux/siphash.h>) hash
- functions on boot (or module load).
-
- This is intended to help people writing architecture-specific
- optimized versions. If unsure, say N.
-
config TEST_IDA
tristate "Perform selftest on IDA functions"
@@ -2588,6 +2596,22 @@ config HW_BREAKPOINT_KUNIT_TEST
If unsure, say N.
+config STRSCPY_KUNIT_TEST
+ tristate "Test strscpy*() family of functions at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+
+config SIPHASH_KUNIT_TEST
+ tristate "Perform selftest on siphash functions" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to test the kernel's siphash (<linux/siphash.h>) hash
+ functions on boot (or module load).
+
+ This is intended to help people writing architecture-specific
+ optimized versions. If unsure, say N.
+
config TEST_UDELAY
tristate "udelay test driver"
help
@@ -2805,6 +2829,22 @@ config RUST_OVERFLOW_CHECKS
If unsure, say Y.
+config RUST_BUILD_ASSERT_ALLOW
+ bool "Allow unoptimized build-time assertions"
+ depends on RUST
+ help
+ Controls how `build_error!` and `build_assert!` are handled during build.
+
+ If calls to them exist in the binary, it may indicate a violated invariant
+ or that the optimizer failed to verify the invariant during compilation.
+
+ This should not happen, thus by default the build is aborted. However,
+ as an escape hatch, you can choose Y here to ignore them during build
+ and let the check be carried out at runtime (with `panic!` being called if
+ the check fails).
+
+ If unsure, say N.
+
endmenu # "Rust"
source "Documentation/Kconfig"
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index ca09b1cf8ee9..be6ee6020290 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -37,7 +37,7 @@ menuconfig KASAN
(HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)) && \
CC_HAS_WORKING_NOSANITIZE_ADDRESS) || \
HAVE_ARCH_KASAN_HW_TAGS
- depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
+ depends on (SLUB && SYSFS && !SLUB_TINY) || (SLAB && !DEBUG_SLAB)
select STACKDEPOT_ALWAYS_INIT
help
Enables KASAN (Kernel Address Sanitizer) - a dynamic memory safety
@@ -181,7 +181,7 @@ config KASAN_VMALLOC
config KASAN_KUNIT_TEST
tristate "KUnit-compatible tests of KASAN bug detection capabilities" if !KUNIT_ALL_TESTS
- depends on KASAN && KUNIT
+ depends on KASAN && KUNIT && TRACEPOINTS
default KUNIT_ALL_TESTS
help
A KUnit-based KASAN test suite. Triggers different kinds of
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index 47a693c45864..375575a5a0e3 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -125,7 +125,7 @@ config KCSAN_SKIP_WATCH
default 4000
help
The number of per-CPU memory operations to skip, before another
- watchpoint is set up, i.e. one in KCSAN_WATCH_SKIP per-CPU
+ watchpoint is set up, i.e. one in KCSAN_SKIP_WATCH per-CPU
memory operations are used to set up a watchpoint. A smaller value
results in more aggressive race detection, whereas a larger value
improves system performance at the cost of missing some races.
@@ -135,8 +135,8 @@ config KCSAN_SKIP_WATCH_RANDOMIZE
default y
help
If instruction skip count should be randomized, where the maximum is
- KCSAN_WATCH_SKIP. If false, the chosen value is always
- KCSAN_WATCH_SKIP.
+ KCSAN_SKIP_WATCH. If false, the chosen value is always
+ KCSAN_SKIP_WATCH.
config KCSAN_INTERRUPT_WATCHER
bool "Interruptible watchers" if !KCSAN_STRICT
diff --git a/lib/Makefile b/lib/Makefile
index 59bd7c2f793a..4d9461bfea42 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -62,7 +62,6 @@ obj-$(CONFIG_TEST_BITOPS) += test_bitops.o
CFLAGS_test_bitops.o += -Werror
obj-$(CONFIG_CPUMASK_KUNIT_TEST) += cpumask_kunit.o
obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
-obj-$(CONFIG_TEST_SIPHASH) += test_siphash.o
obj-$(CONFIG_HASH_KUNIT_TEST) += test_hash.o
obj-$(CONFIG_TEST_IDA) += test_ida.o
obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
@@ -82,7 +81,6 @@ obj-$(CONFIG_TEST_DYNAMIC_DEBUG) += test_dynamic_debug.o
obj-$(CONFIG_TEST_PRINTF) += test_printf.o
obj-$(CONFIG_TEST_SCANF) += test_scanf.o
obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
-obj-$(CONFIG_TEST_STRSCPY) += test_strscpy.o
obj-$(CONFIG_TEST_UUID) += test_uuid.o
obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o
@@ -377,10 +375,15 @@ obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o
obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o
obj-$(CONFIG_IS_SIGNED_TYPE_KUNIT_TEST) += is_signed_type_kunit.o
+CFLAGS_overflow_kunit.o = $(call cc-disable-warning, tautological-constant-out-of-range-compare)
obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable)
obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o
+CFLAGS_fortify_kunit.o += $(call cc-disable-warning, unsequenced)
+CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN)
obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
+obj-$(CONFIG_STRSCPY_KUNIT_TEST) += strscpy_kunit.o
+obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o
obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 7e9683e9f5c6..45436bfc6dff 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -8,9 +8,18 @@ config CRYPTO_LIB_UTILS
config CRYPTO_LIB_AES
tristate
+config CRYPTO_LIB_AESGCM
+ tristate
+ select CRYPTO_LIB_AES
+ select CRYPTO_LIB_GF128MUL
+ select CRYPTO_LIB_UTILS
+
config CRYPTO_LIB_ARC4
tristate
+config CRYPTO_LIB_GF128MUL
+ tristate
+
config CRYPTO_ARCH_HAVE_LIB_BLAKE2S
bool
help
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index c852f067ab06..6ec2d4543d9c 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -10,9 +10,14 @@ obj-$(CONFIG_CRYPTO_LIB_CHACHA_GENERIC) += libchacha.o
obj-$(CONFIG_CRYPTO_LIB_AES) += libaes.o
libaes-y := aes.o
+obj-$(CONFIG_CRYPTO_LIB_AESGCM) += libaesgcm.o
+libaesgcm-y := aesgcm.o
+
obj-$(CONFIG_CRYPTO_LIB_ARC4) += libarc4.o
libarc4-y := arc4.o
+obj-$(CONFIG_CRYPTO_LIB_GF128MUL) += gf128mul.o
+
# blake2s is used by the /dev/random driver which is always builtin
obj-y += libblake2s.o
libblake2s-y := blake2s.o
diff --git a/lib/crypto/aesgcm.c b/lib/crypto/aesgcm.c
new file mode 100644
index 000000000000..c632d6e17af8
--- /dev/null
+++ b/lib/crypto/aesgcm.c
@@ -0,0 +1,727 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Minimal library implementation of GCM
+ *
+ * Copyright 2022 Google LLC
+ */
+
+#include <linux/module.h>
+
+#include <crypto/algapi.h>
+#include <crypto/gcm.h>
+#include <crypto/ghash.h>
+
+#include <asm/irqflags.h>
+
+static void aesgcm_encrypt_block(const struct crypto_aes_ctx *ctx, void *dst,
+ const void *src)
+{
+ unsigned long flags;
+
+ /*
+ * In AES-GCM, both the GHASH key derivation and the CTR mode
+ * encryption operate on known plaintext, making them susceptible to
+ * timing attacks on the encryption key. The AES library already
+ * mitigates this risk to some extent by pulling the entire S-box into
+ * the caches before doing any substitutions, but this strategy is more
+ * effective when running with interrupts disabled.
+ */
+ local_irq_save(flags);
+ aes_encrypt(ctx, dst, src);
+ local_irq_restore(flags);
+}
+
+/**
+ * aesgcm_expandkey - Expands the AES and GHASH keys for the AES-GCM key
+ * schedule
+ *
+ * @ctx: The data structure that will hold the AES-GCM key schedule
+ * @key: The AES encryption input key
+ * @keysize: The length in bytes of the input key
+ * @authsize: The size in bytes of the GCM authentication tag
+ *
+ * Returns: 0 on success, or -EINVAL if @keysize or @authsize contain values
+ * that are not permitted by the GCM specification.
+ */
+int aesgcm_expandkey(struct aesgcm_ctx *ctx, const u8 *key,
+ unsigned int keysize, unsigned int authsize)
+{
+ u8 kin[AES_BLOCK_SIZE] = {};
+ int ret;
+
+ ret = crypto_gcm_check_authsize(authsize) ?:
+ aes_expandkey(&ctx->aes_ctx, key, keysize);
+ if (ret)
+ return ret;
+
+ ctx->authsize = authsize;
+ aesgcm_encrypt_block(&ctx->aes_ctx, &ctx->ghash_key, kin);
+
+ return 0;
+}
+EXPORT_SYMBOL(aesgcm_expandkey);
+
+static void aesgcm_ghash(be128 *ghash, const be128 *key, const void *src,
+ int len)
+{
+ while (len > 0) {
+ crypto_xor((u8 *)ghash, src, min(len, GHASH_BLOCK_SIZE));
+ gf128mul_lle(ghash, key);
+
+ src += GHASH_BLOCK_SIZE;
+ len -= GHASH_BLOCK_SIZE;
+ }
+}
+
+static void aesgcm_mac(const struct aesgcm_ctx *ctx, const u8 *src, int src_len,
+ const u8 *assoc, int assoc_len, __be32 *ctr, u8 *authtag)
+{
+ be128 tail = { cpu_to_be64(assoc_len * 8), cpu_to_be64(src_len * 8) };
+ u8 buf[AES_BLOCK_SIZE];
+ be128 ghash = {};
+
+ aesgcm_ghash(&ghash, &ctx->ghash_key, assoc, assoc_len);
+ aesgcm_ghash(&ghash, &ctx->ghash_key, src, src_len);
+ aesgcm_ghash(&ghash, &ctx->ghash_key, &tail, sizeof(tail));
+
+ ctr[3] = cpu_to_be32(1);
+ aesgcm_encrypt_block(&ctx->aes_ctx, buf, ctr);
+ crypto_xor_cpy(authtag, buf, (u8 *)&ghash, ctx->authsize);
+
+ memzero_explicit(&ghash, sizeof(ghash));
+ memzero_explicit(buf, sizeof(buf));
+}
+
+static void aesgcm_crypt(const struct aesgcm_ctx *ctx, u8 *dst, const u8 *src,
+ int len, __be32 *ctr)
+{
+ u8 buf[AES_BLOCK_SIZE];
+ unsigned int n = 2;
+
+ while (len > 0) {
+ /*
+ * The counter increment below must not result in overflow or
+ * carry into the next 32-bit word, as this could result in
+ * inadvertent IV reuse, which must be avoided at all cost for
+ * stream ciphers such as AES-CTR. Given the range of 'int
+ * len', this cannot happen, so no explicit test is necessary.
+ */
+ ctr[3] = cpu_to_be32(n++);
+ aesgcm_encrypt_block(&ctx->aes_ctx, buf, ctr);
+ crypto_xor_cpy(dst, src, buf, min(len, AES_BLOCK_SIZE));
+
+ dst += AES_BLOCK_SIZE;
+ src += AES_BLOCK_SIZE;
+ len -= AES_BLOCK_SIZE;
+ }
+ memzero_explicit(buf, sizeof(buf));
+}
+
+/**
+ * aesgcm_encrypt - Perform AES-GCM encryption on a block of data
+ *
+ * @ctx: The AES-GCM key schedule
+ * @dst: Pointer to the ciphertext output buffer
+ * @src: Pointer to the plaintext (may equal @dst for encryption in place)
+ * @crypt_len: The size in bytes of the plaintext and ciphertext.
+ * @assoc: Pointer to the associated data
+ * @assoc_len: The size in bytes of the associated data
+ * @iv: The initialization vector (IV) to use for this block of data
+ * (must be 12 bytes in size as per the GCM spec recommendation)
+ * @authtag: The address of the buffer in memory where the authentication
+ * tag should be stored. The buffer is assumed to have space for
+ * @ctx->authsize bytes.
+ */
+void aesgcm_encrypt(const struct aesgcm_ctx *ctx, u8 *dst, const u8 *src,
+ int crypt_len, const u8 *assoc, int assoc_len,
+ const u8 iv[GCM_AES_IV_SIZE], u8 *authtag)
+{
+ __be32 ctr[4];
+
+ memcpy(ctr, iv, GCM_AES_IV_SIZE);
+
+ aesgcm_crypt(ctx, dst, src, crypt_len, ctr);
+ aesgcm_mac(ctx, dst, crypt_len, assoc, assoc_len, ctr, authtag);
+}
+EXPORT_SYMBOL(aesgcm_encrypt);
+
+/**
+ * aesgcm_decrypt - Perform AES-GCM decryption on a block of data
+ *
+ * @ctx: The AES-GCM key schedule
+ * @dst: Pointer to the plaintext output buffer
+ * @src: Pointer to the ciphertext (may equal @dst for decryption in place)
+ * @crypt_len: The size in bytes of the plaintext and ciphertext.
+ * @assoc: Pointer to the associated data
+ * @assoc_len: The size in bytes of the associated data
+ * @iv: The initialization vector (IV) to use for this block of data
+ * (must be 12 bytes in size as per the GCM spec recommendation)
+ * @authtag: The address of the buffer in memory where the authentication
+ * tag is stored.
+ *
+ * Returns: true on success, or false if the ciphertext failed authentication.
+ * On failure, no plaintext will be returned.
+ */
+bool __must_check aesgcm_decrypt(const struct aesgcm_ctx *ctx, u8 *dst,
+ const u8 *src, int crypt_len, const u8 *assoc,
+ int assoc_len, const u8 iv[GCM_AES_IV_SIZE],
+ const u8 *authtag)
+{
+ u8 tagbuf[AES_BLOCK_SIZE];
+ __be32 ctr[4];
+
+ memcpy(ctr, iv, GCM_AES_IV_SIZE);
+
+ aesgcm_mac(ctx, src, crypt_len, assoc, assoc_len, ctr, tagbuf);
+ if (crypto_memneq(authtag, tagbuf, ctx->authsize)) {
+ memzero_explicit(tagbuf, sizeof(tagbuf));
+ return false;
+ }
+ aesgcm_crypt(ctx, dst, src, crypt_len, ctr);
+ return true;
+}
+EXPORT_SYMBOL(aesgcm_decrypt);
+
+MODULE_DESCRIPTION("Generic AES-GCM library");
+MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>");
+MODULE_LICENSE("GPL");
+
+#ifndef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+
+/*
+ * Test code below. Vectors taken from crypto/testmgr.h
+ */
+
+static const u8 __initconst ctext0[16] =
+ "\x58\xe2\xfc\xce\xfa\x7e\x30\x61"
+ "\x36\x7f\x1d\x57\xa4\xe7\x45\x5a";
+
+static const u8 __initconst ptext1[16];
+
+static const u8 __initconst ctext1[32] =
+ "\x03\x88\xda\xce\x60\xb6\xa3\x92"
+ "\xf3\x28\xc2\xb9\x71\xb2\xfe\x78"
+ "\xab\x6e\x47\xd4\x2c\xec\x13\xbd"
+ "\xf5\x3a\x67\xb2\x12\x57\xbd\xdf";
+
+static const u8 __initconst ptext2[64] =
+ "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
+ "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
+ "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
+ "\xba\x63\x7b\x39\x1a\xaf\xd2\x55";
+
+static const u8 __initconst ctext2[80] =
+ "\x42\x83\x1e\xc2\x21\x77\x74\x24"
+ "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c"
+ "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0"
+ "\x35\xc1\x7e\x23\x29\xac\xa1\x2e"
+ "\x21\xd5\x14\xb2\x54\x66\x93\x1c"
+ "\x7d\x8f\x6a\x5a\xac\x84\xaa\x05"
+ "\x1b\xa3\x0b\x39\x6a\x0a\xac\x97"
+ "\x3d\x58\xe0\x91\x47\x3f\x59\x85"
+ "\x4d\x5c\x2a\xf3\x27\xcd\x64\xa6"
+ "\x2c\xf3\x5a\xbd\x2b\xa6\xfa\xb4";
+
+static const u8 __initconst ptext3[60] =
+ "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
+ "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
+ "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
+ "\xba\x63\x7b\x39";
+
+static const u8 __initconst ctext3[76] =
+ "\x42\x83\x1e\xc2\x21\x77\x74\x24"
+ "\x4b\x72\x21\xb7\x84\xd0\xd4\x9c"
+ "\xe3\xaa\x21\x2f\x2c\x02\xa4\xe0"
+ "\x35\xc1\x7e\x23\x29\xac\xa1\x2e"
+ "\x21\xd5\x14\xb2\x54\x66\x93\x1c"
+ "\x7d\x8f\x6a\x5a\xac\x84\xaa\x05"
+ "\x1b\xa3\x0b\x39\x6a\x0a\xac\x97"
+ "\x3d\x58\xe0\x91"
+ "\x5b\xc9\x4f\xbc\x32\x21\xa5\xdb"
+ "\x94\xfa\xe9\x5a\xe7\x12\x1a\x47";
+
+static const u8 __initconst ctext4[16] =
+ "\xcd\x33\xb2\x8a\xc7\x73\xf7\x4b"
+ "\xa0\x0e\xd1\xf3\x12\x57\x24\x35";
+
+static const u8 __initconst ctext5[32] =
+ "\x98\xe7\x24\x7c\x07\xf0\xfe\x41"
+ "\x1c\x26\x7e\x43\x84\xb0\xf6\x00"
+ "\x2f\xf5\x8d\x80\x03\x39\x27\xab"
+ "\x8e\xf4\xd4\x58\x75\x14\xf0\xfb";
+
+static const u8 __initconst ptext6[64] =
+ "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
+ "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
+ "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
+ "\xba\x63\x7b\x39\x1a\xaf\xd2\x55";
+
+static const u8 __initconst ctext6[80] =
+ "\x39\x80\xca\x0b\x3c\x00\xe8\x41"
+ "\xeb\x06\xfa\xc4\x87\x2a\x27\x57"
+ "\x85\x9e\x1c\xea\xa6\xef\xd9\x84"
+ "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c"
+ "\x7d\x77\x3d\x00\xc1\x44\xc5\x25"
+ "\xac\x61\x9d\x18\xc8\x4a\x3f\x47"
+ "\x18\xe2\x44\x8b\x2f\xe3\x24\xd9"
+ "\xcc\xda\x27\x10\xac\xad\xe2\x56"
+ "\x99\x24\xa7\xc8\x58\x73\x36\xbf"
+ "\xb1\x18\x02\x4d\xb8\x67\x4a\x14";
+
+static const u8 __initconst ctext7[16] =
+ "\x53\x0f\x8a\xfb\xc7\x45\x36\xb9"
+ "\xa9\x63\xb4\xf1\xc4\xcb\x73\x8b";
+
+static const u8 __initconst ctext8[32] =
+ "\xce\xa7\x40\x3d\x4d\x60\x6b\x6e"
+ "\x07\x4e\xc5\xd3\xba\xf3\x9d\x18"
+ "\xd0\xd1\xc8\xa7\x99\x99\x6b\xf0"
+ "\x26\x5b\x98\xb5\xd4\x8a\xb9\x19";
+
+static const u8 __initconst ptext9[64] =
+ "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
+ "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
+ "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
+ "\xba\x63\x7b\x39\x1a\xaf\xd2\x55";
+
+static const u8 __initconst ctext9[80] =
+ "\x52\x2d\xc1\xf0\x99\x56\x7d\x07"
+ "\xf4\x7f\x37\xa3\x2a\x84\x42\x7d"
+ "\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9"
+ "\x75\x98\xa2\xbd\x25\x55\xd1\xaa"
+ "\x8c\xb0\x8e\x48\x59\x0d\xbb\x3d"
+ "\xa7\xb0\x8b\x10\x56\x82\x88\x38"
+ "\xc5\xf6\x1e\x63\x93\xba\x7a\x0a"
+ "\xbc\xc9\xf6\x62\x89\x80\x15\xad"
+ "\xb0\x94\xda\xc5\xd9\x34\x71\xbd"
+ "\xec\x1a\x50\x22\x70\xe3\xcc\x6c";
+
+static const u8 __initconst ptext10[60] =
+ "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
+ "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
+ "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
+ "\xba\x63\x7b\x39";
+
+static const u8 __initconst ctext10[76] =
+ "\x52\x2d\xc1\xf0\x99\x56\x7d\x07"
+ "\xf4\x7f\x37\xa3\x2a\x84\x42\x7d"
+ "\x64\x3a\x8c\xdc\xbf\xe5\xc0\xc9"
+ "\x75\x98\xa2\xbd\x25\x55\xd1\xaa"
+ "\x8c\xb0\x8e\x48\x59\x0d\xbb\x3d"
+ "\xa7\xb0\x8b\x10\x56\x82\x88\x38"
+ "\xc5\xf6\x1e\x63\x93\xba\x7a\x0a"
+ "\xbc\xc9\xf6\x62"
+ "\x76\xfc\x6e\xce\x0f\x4e\x17\x68"
+ "\xcd\xdf\x88\x53\xbb\x2d\x55\x1b";
+
+static const u8 __initconst ptext11[60] =
+ "\xd9\x31\x32\x25\xf8\x84\x06\xe5"
+ "\xa5\x59\x09\xc5\xaf\xf5\x26\x9a"
+ "\x86\xa7\xa9\x53\x15\x34\xf7\xda"
+ "\x2e\x4c\x30\x3d\x8a\x31\x8a\x72"
+ "\x1c\x3c\x0c\x95\x95\x68\x09\x53"
+ "\x2f\xcf\x0e\x24\x49\xa6\xb5\x25"
+ "\xb1\x6a\xed\xf5\xaa\x0d\xe6\x57"
+ "\xba\x63\x7b\x39";
+
+static const u8 __initconst ctext11[76] =
+ "\x39\x80\xca\x0b\x3c\x00\xe8\x41"
+ "\xeb\x06\xfa\xc4\x87\x2a\x27\x57"
+ "\x85\x9e\x1c\xea\xa6\xef\xd9\x84"
+ "\x62\x85\x93\xb4\x0c\xa1\xe1\x9c"
+ "\x7d\x77\x3d\x00\xc1\x44\xc5\x25"
+ "\xac\x61\x9d\x18\xc8\x4a\x3f\x47"
+ "\x18\xe2\x44\x8b\x2f\xe3\x24\xd9"
+ "\xcc\xda\x27\x10"
+ "\x25\x19\x49\x8e\x80\xf1\x47\x8f"
+ "\x37\xba\x55\xbd\x6d\x27\x61\x8c";
+
+static const u8 __initconst ptext12[719] =
+ "\x42\xc1\xcc\x08\x48\x6f\x41\x3f"
+ "\x2f\x11\x66\x8b\x2a\x16\xf0\xe0"
+ "\x58\x83\xf0\xc3\x70\x14\xc0\x5b"
+ "\x3f\xec\x1d\x25\x3c\x51\xd2\x03"
+ "\xcf\x59\x74\x1f\xb2\x85\xb4\x07"
+ "\xc6\x6a\x63\x39\x8a\x5b\xde\xcb"
+ "\xaf\x08\x44\xbd\x6f\x91\x15\xe1"
+ "\xf5\x7a\x6e\x18\xbd\xdd\x61\x50"
+ "\x59\xa9\x97\xab\xbb\x0e\x74\x5c"
+ "\x00\xa4\x43\x54\x04\x54\x9b\x3b"
+ "\x77\xec\xfd\x5c\xa6\xe8\x7b\x08"
+ "\xae\xe6\x10\x3f\x32\x65\xd1\xfc"
+ "\xa4\x1d\x2c\x31\xfb\x33\x7a\xb3"
+ "\x35\x23\xf4\x20\x41\xd4\xad\x82"
+ "\x8b\xa4\xad\x96\x1c\x20\x53\xbe"
+ "\x0e\xa6\xf4\xdc\x78\x49\x3e\x72"
+ "\xb1\xa9\xb5\x83\xcb\x08\x54\xb7"
+ "\xad\x49\x3a\xae\x98\xce\xa6\x66"
+ "\x10\x30\x90\x8c\x55\x83\xd7\x7c"
+ "\x8b\xe6\x53\xde\xd2\x6e\x18\x21"
+ "\x01\x52\xd1\x9f\x9d\xbb\x9c\x73"
+ "\x57\xcc\x89\x09\x75\x9b\x78\x70"
+ "\xed\x26\x97\x4d\xb4\xe4\x0c\xa5"
+ "\xfa\x70\x04\x70\xc6\x96\x1c\x7d"
+ "\x54\x41\x77\xa8\xe3\xb0\x7e\x96"
+ "\x82\xd9\xec\xa2\x87\x68\x55\xf9"
+ "\x8f\x9e\x73\x43\x47\x6a\x08\x36"
+ "\x93\x67\xa8\x2d\xde\xac\x41\xa9"
+ "\x5c\x4d\x73\x97\x0f\x70\x68\xfa"
+ "\x56\x4d\x00\xc2\x3b\x1f\xc8\xb9"
+ "\x78\x1f\x51\x07\xe3\x9a\x13\x4e"
+ "\xed\x2b\x2e\xa3\xf7\x44\xb2\xe7"
+ "\xab\x19\x37\xd9\xba\x76\x5e\xd2"
+ "\xf2\x53\x15\x17\x4c\x6b\x16\x9f"
+ "\x02\x66\x49\xca\x7c\x91\x05\xf2"
+ "\x45\x36\x1e\xf5\x77\xad\x1f\x46"
+ "\xa8\x13\xfb\x63\xb6\x08\x99\x63"
+ "\x82\xa2\xed\xb3\xac\xdf\x43\x19"
+ "\x45\xea\x78\x73\xd9\xb7\x39\x11"
+ "\xa3\x13\x7c\xf8\x3f\xf7\xad\x81"
+ "\x48\x2f\xa9\x5c\x5f\xa0\xf0\x79"
+ "\xa4\x47\x7d\x80\x20\x26\xfd\x63"
+ "\x0a\xc7\x7e\x6d\x75\x47\xff\x76"
+ "\x66\x2e\x8a\x6c\x81\x35\xaf\x0b"
+ "\x2e\x6a\x49\x60\xc1\x10\xe1\xe1"
+ "\x54\x03\xa4\x09\x0c\x37\x7a\x15"
+ "\x23\x27\x5b\x8b\x4b\xa5\x64\x97"
+ "\xae\x4a\x50\x73\x1f\x66\x1c\x5c"
+ "\x03\x25\x3c\x8d\x48\x58\x71\x34"
+ "\x0e\xec\x4e\x55\x1a\x03\x6a\xe5"
+ "\xb6\x19\x2b\x84\x2a\x20\xd1\xea"
+ "\x80\x6f\x96\x0e\x05\x62\xc7\x78"
+ "\x87\x79\x60\x38\x46\xb4\x25\x57"
+ "\x6e\x16\x63\xf8\xad\x6e\xd7\x42"
+ "\x69\xe1\x88\xef\x6e\xd5\xb4\x9a"
+ "\x3c\x78\x6c\x3b\xe5\xa0\x1d\x22"
+ "\x86\x5c\x74\x3a\xeb\x24\x26\xc7"
+ "\x09\xfc\x91\x96\x47\x87\x4f\x1a"
+ "\xd6\x6b\x2c\x18\x47\xc0\xb8\x24"
+ "\xa8\x5a\x4a\x9e\xcb\x03\xe7\x2a"
+ "\x09\xe6\x4d\x9c\x6d\x86\x60\xf5"
+ "\x2f\x48\x69\x37\x9f\xf2\xd2\xcb"
+ "\x0e\x5a\xdd\x6e\x8a\xfb\x6a\xfe"
+ "\x0b\x63\xde\x87\x42\x79\x8a\x68"
+ "\x51\x28\x9b\x7a\xeb\xaf\xb8\x2f"
+ "\x9d\xd1\xc7\x45\x90\x08\xc9\x83"
+ "\xe9\x83\x84\xcb\x28\x69\x09\x69"
+ "\xce\x99\x46\x00\x54\xcb\xd8\x38"
+ "\xf9\x53\x4a\xbf\x31\xce\x57\x15"
+ "\x33\xfa\x96\x04\x33\x42\xe3\xc0"
+ "\xb7\x54\x4a\x65\x7a\x7c\x02\xe6"
+ "\x19\x95\xd0\x0e\x82\x07\x63\xf9"
+ "\xe1\x2b\x2a\xfc\x55\x92\x52\xc9"
+ "\xb5\x9f\x23\x28\x60\xe7\x20\x51"
+ "\x10\xd3\xed\x6d\x9b\xab\xb8\xe2"
+ "\x5d\x9a\x34\xb3\xbe\x9c\x64\xcb"
+ "\x78\xc6\x91\x22\x40\x91\x80\xbe"
+ "\xd7\x78\x5c\x0e\x0a\xdc\x08\xe9"
+ "\x67\x10\xa4\x83\x98\x79\x23\xe7"
+ "\x92\xda\xa9\x22\x16\xb1\xe7\x78"
+ "\xa3\x1c\x6c\x8f\x35\x7c\x4d\x37"
+ "\x2f\x6e\x0b\x50\x5c\x34\xb9\xf9"
+ "\xe6\x3d\x91\x0d\x32\x95\xaa\x3d"
+ "\x48\x11\x06\xbb\x2d\xf2\x63\x88"
+ "\x3f\x73\x09\xe2\x45\x56\x31\x51"
+ "\xfa\x5e\x4e\x62\xf7\x90\xf9\xa9"
+ "\x7d\x7b\x1b\xb1\xc8\x26\x6e\x66"
+ "\xf6\x90\x9a\x7f\xf2\x57\xcc\x23"
+ "\x59\xfa\xfa\xaa\x44\x04\x01\xa7"
+ "\xa4\x78\xdb\x74\x3d\x8b\xb5";
+
+static const u8 __initconst ctext12[735] =
+ "\x84\x0b\xdb\xd5\xb7\xa8\xfe\x20"
+ "\xbb\xb1\x12\x7f\x41\xea\xb3\xc0"
+ "\xa2\xb4\x37\x19\x11\x58\xb6\x0b"
+ "\x4c\x1d\x38\x05\x54\xd1\x16\x73"
+ "\x8e\x1c\x20\x90\xa2\x9a\xb7\x74"
+ "\x47\xe6\xd8\xfc\x18\x3a\xb4\xea"
+ "\xd5\x16\x5a\x2c\x53\x01\x46\xb3"
+ "\x18\x33\x74\x6c\x50\xf2\xe8\xc0"
+ "\x73\xda\x60\x22\xeb\xe3\xe5\x9b"
+ "\x20\x93\x6c\x4b\x37\x99\xb8\x23"
+ "\x3b\x4e\xac\xe8\x5b\xe8\x0f\xb7"
+ "\xc3\x8f\xfb\x4a\x37\xd9\x39\x95"
+ "\x34\xf1\xdb\x8f\x71\xd9\xc7\x0b"
+ "\x02\xf1\x63\xfc\x9b\xfc\xc5\xab"
+ "\xb9\x14\x13\x21\xdf\xce\xaa\x88"
+ "\x44\x30\x1e\xce\x26\x01\x92\xf8"
+ "\x9f\x00\x4b\x0c\x4b\xf7\x5f\xe0"
+ "\x89\xca\x94\x66\x11\x21\x97\xca"
+ "\x3e\x83\x74\x2d\xdb\x4d\x11\xeb"
+ "\x97\xc2\x14\xff\x9e\x1e\xa0\x6b"
+ "\x08\xb4\x31\x2b\x85\xc6\x85\x6c"
+ "\x90\xec\x39\xc0\xec\xb3\xb5\x4e"
+ "\xf3\x9c\xe7\x83\x3a\x77\x0a\xf4"
+ "\x56\xfe\xce\x18\x33\x6d\x0b\x2d"
+ "\x33\xda\xc8\x05\x5c\xb4\x09\x2a"
+ "\xde\x6b\x52\x98\x01\xef\x36\x3d"
+ "\xbd\xf9\x8f\xa8\x3e\xaa\xcd\xd1"
+ "\x01\x2d\x42\x49\xc3\xb6\x84\xbb"
+ "\x48\x96\xe0\x90\x93\x6c\x48\x64"
+ "\xd4\xfa\x7f\x93\x2c\xa6\x21\xc8"
+ "\x7a\x23\x7b\xaa\x20\x56\x12\xae"
+ "\x16\x9d\x94\x0f\x54\xa1\xec\xca"
+ "\x51\x4e\xf2\x39\xf4\xf8\x5f\x04"
+ "\x5a\x0d\xbf\xf5\x83\xa1\x15\xe1"
+ "\xf5\x3c\xd8\x62\xa3\xed\x47\x89"
+ "\x85\x4c\xe5\xdb\xac\x9e\x17\x1d"
+ "\x0c\x09\xe3\x3e\x39\x5b\x4d\x74"
+ "\x0e\xf5\x34\xee\x70\x11\x4c\xfd"
+ "\xdb\x34\xb1\xb5\x10\x3f\x73\xb7"
+ "\xf5\xfa\xed\xb0\x1f\xa5\xcd\x3c"
+ "\x8d\x35\x83\xd4\x11\x44\x6e\x6c"
+ "\x5b\xe0\x0e\x69\xa5\x39\xe5\xbb"
+ "\xa9\x57\x24\x37\xe6\x1f\xdd\xcf"
+ "\x16\x2a\x13\xf9\x6a\x2d\x90\xa0"
+ "\x03\x60\x7a\xed\x69\xd5\x00\x8b"
+ "\x7e\x4f\xcb\xb9\xfa\x91\xb9\x37"
+ "\xc1\x26\xce\x90\x97\x22\x64\x64"
+ "\xc1\x72\x43\x1b\xf6\xac\xc1\x54"
+ "\x8a\x10\x9c\xdd\x8d\xd5\x8e\xb2"
+ "\xe4\x85\xda\xe0\x20\x5f\xf4\xb4"
+ "\x15\xb5\xa0\x8d\x12\x74\x49\x23"
+ "\x3a\xdf\x4a\xd3\xf0\x3b\x89\xeb"
+ "\xf8\xcc\x62\x7b\xfb\x93\x07\x41"
+ "\x61\x26\x94\x58\x70\xa6\x3c\xe4"
+ "\xff\x58\xc4\x13\x3d\xcb\x36\x6b"
+ "\x32\xe5\xb2\x6d\x03\x74\x6f\x76"
+ "\x93\x77\xde\x48\xc4\xfa\x30\x4a"
+ "\xda\x49\x80\x77\x0f\x1c\xbe\x11"
+ "\xc8\x48\xb1\xe5\xbb\xf2\x8a\xe1"
+ "\x96\x2f\x9f\xd1\x8e\x8a\x5c\xe2"
+ "\xf7\xd7\xd8\x54\xf3\x3f\xc4\x91"
+ "\xb8\xfb\x86\xdc\x46\x24\x91\x60"
+ "\x6c\x2f\xc9\x41\x37\x51\x49\x54"
+ "\x09\x81\x21\xf3\x03\x9f\x2b\xe3"
+ "\x1f\x39\x63\xaf\xf4\xd7\x53\x60"
+ "\xa7\xc7\x54\xf9\xee\xb1\xb1\x7d"
+ "\x75\x54\x65\x93\xfe\xb1\x68\x6b"
+ "\x57\x02\xf9\xbb\x0e\xf9\xf8\xbf"
+ "\x01\x12\x27\xb4\xfe\xe4\x79\x7a"
+ "\x40\x5b\x51\x4b\xdf\x38\xec\xb1"
+ "\x6a\x56\xff\x35\x4d\x42\x33\xaa"
+ "\x6f\x1b\xe4\xdc\xe0\xdb\x85\x35"
+ "\x62\x10\xd4\xec\xeb\xc5\x7e\x45"
+ "\x1c\x6f\x17\xca\x3b\x8e\x2d\x66"
+ "\x4f\x4b\x36\x56\xcd\x1b\x59\xaa"
+ "\xd2\x9b\x17\xb9\x58\xdf\x7b\x64"
+ "\x8a\xff\x3b\x9c\xa6\xb5\x48\x9e"
+ "\xaa\xe2\x5d\x09\x71\x32\x5f\xb6"
+ "\x29\xbe\xe7\xc7\x52\x7e\x91\x82"
+ "\x6b\x6d\x33\xe1\x34\x06\x36\x21"
+ "\x5e\xbe\x1e\x2f\x3e\xc1\xfb\xea"
+ "\x49\x2c\xb5\xca\xf7\xb0\x37\xea"
+ "\x1f\xed\x10\x04\xd9\x48\x0d\x1a"
+ "\x1c\xfb\xe7\x84\x0e\x83\x53\x74"
+ "\xc7\x65\xe2\x5c\xe5\xba\x73\x4c"
+ "\x0e\xe1\xb5\x11\x45\x61\x43\x46"
+ "\xaa\x25\x8f\xbd\x85\x08\xfa\x4c"
+ "\x15\xc1\xc0\xd8\xf5\xdc\x16\xbb"
+ "\x7b\x1d\xe3\x87\x57\xa7\x2a\x1d"
+ "\x38\x58\x9e\x8a\x43\xdc\x57"
+ "\xd1\x81\x7d\x2b\xe9\xff\x99\x3a"
+ "\x4b\x24\x52\x58\x55\xe1\x49\x14";
+
+static struct {
+ const u8 *ptext;
+ const u8 *ctext;
+
+ u8 key[AES_MAX_KEY_SIZE];
+ u8 iv[GCM_AES_IV_SIZE];
+ u8 assoc[20];
+
+ int klen;
+ int clen;
+ int plen;
+ int alen;
+} const aesgcm_tv[] __initconst = {
+ { /* From McGrew & Viega - http://citeseer.ist.psu.edu/656989.html */
+ .klen = 16,
+ .ctext = ctext0,
+ .clen = sizeof(ctext0),
+ }, {
+ .klen = 16,
+ .ptext = ptext1,
+ .plen = sizeof(ptext1),
+ .ctext = ctext1,
+ .clen = sizeof(ctext1),
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
+ .klen = 16,
+ .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
+ "\xde\xca\xf8\x88",
+ .ptext = ptext2,
+ .plen = sizeof(ptext2),
+ .ctext = ctext2,
+ .clen = sizeof(ctext2),
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
+ .klen = 16,
+ .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
+ "\xde\xca\xf8\x88",
+ .ptext = ptext3,
+ .plen = sizeof(ptext3),
+ .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xab\xad\xda\xd2",
+ .alen = 20,
+ .ctext = ctext3,
+ .clen = sizeof(ctext3),
+ }, {
+ .klen = 24,
+ .ctext = ctext4,
+ .clen = sizeof(ctext4),
+ }, {
+ .klen = 24,
+ .ptext = ptext1,
+ .plen = sizeof(ptext1),
+ .ctext = ctext5,
+ .clen = sizeof(ctext5),
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\xfe\xff\xe9\x92\x86\x65\x73\x1c",
+ .klen = 24,
+ .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
+ "\xde\xca\xf8\x88",
+ .ptext = ptext6,
+ .plen = sizeof(ptext6),
+ .ctext = ctext6,
+ .clen = sizeof(ctext6),
+ }, {
+ .klen = 32,
+ .ctext = ctext7,
+ .clen = sizeof(ctext7),
+ }, {
+ .klen = 32,
+ .ptext = ptext1,
+ .plen = sizeof(ptext1),
+ .ctext = ctext8,
+ .clen = sizeof(ctext8),
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
+ .klen = 32,
+ .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
+ "\xde\xca\xf8\x88",
+ .ptext = ptext9,
+ .plen = sizeof(ptext9),
+ .ctext = ctext9,
+ .clen = sizeof(ctext9),
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08",
+ .klen = 32,
+ .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
+ "\xde\xca\xf8\x88",
+ .ptext = ptext10,
+ .plen = sizeof(ptext10),
+ .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xab\xad\xda\xd2",
+ .alen = 20,
+ .ctext = ctext10,
+ .clen = sizeof(ctext10),
+ }, {
+ .key = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+ "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+ "\xfe\xff\xe9\x92\x86\x65\x73\x1c",
+ .klen = 24,
+ .iv = "\xca\xfe\xba\xbe\xfa\xce\xdb\xad"
+ "\xde\xca\xf8\x88",
+ .ptext = ptext11,
+ .plen = sizeof(ptext11),
+ .assoc = "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xfe\xed\xfa\xce\xde\xad\xbe\xef"
+ "\xab\xad\xda\xd2",
+ .alen = 20,
+ .ctext = ctext11,
+ .clen = sizeof(ctext11),
+ }, {
+ .key = "\x62\x35\xf8\x95\xfc\xa5\xeb\xf6"
+ "\x0e\x92\x12\x04\xd3\xa1\x3f\x2e"
+ "\x8b\x32\xcf\xe7\x44\xed\x13\x59"
+ "\x04\x38\x77\xb0\xb9\xad\xb4\x38",
+ .klen = 32,
+ .iv = "\x00\xff\xff\xff\xff\x00\x00\xff"
+ "\xff\xff\x00\xff",
+ .ptext = ptext12,
+ .plen = sizeof(ptext12),
+ .ctext = ctext12,
+ .clen = sizeof(ctext12),
+ }
+};
+
+static int __init libaesgcm_init(void)
+{
+ for (int i = 0; i < ARRAY_SIZE(aesgcm_tv); i++) {
+ u8 tagbuf[AES_BLOCK_SIZE];
+ int plen = aesgcm_tv[i].plen;
+ struct aesgcm_ctx ctx;
+ u8 buf[sizeof(ptext12)];
+
+ if (aesgcm_expandkey(&ctx, aesgcm_tv[i].key, aesgcm_tv[i].klen,
+ aesgcm_tv[i].clen - plen)) {
+ pr_err("aesgcm_expandkey() failed on vector %d\n", i);
+ return -ENODEV;
+ }
+
+ if (!aesgcm_decrypt(&ctx, buf, aesgcm_tv[i].ctext, plen,
+ aesgcm_tv[i].assoc, aesgcm_tv[i].alen,
+ aesgcm_tv[i].iv, aesgcm_tv[i].ctext + plen)
+ || memcmp(buf, aesgcm_tv[i].ptext, plen)) {
+ pr_err("aesgcm_decrypt() #1 failed on vector %d\n", i);
+ return -ENODEV;
+ }
+
+ /* encrypt in place */
+ aesgcm_encrypt(&ctx, buf, buf, plen, aesgcm_tv[i].assoc,
+ aesgcm_tv[i].alen, aesgcm_tv[i].iv, tagbuf);
+ if (memcmp(buf, aesgcm_tv[i].ctext, plen)) {
+ pr_err("aesgcm_encrypt() failed on vector %d\n", i);
+ return -ENODEV;
+ }
+
+ /* decrypt in place */
+ if (!aesgcm_decrypt(&ctx, buf, buf, plen, aesgcm_tv[i].assoc,
+ aesgcm_tv[i].alen, aesgcm_tv[i].iv, tagbuf)
+ || memcmp(buf, aesgcm_tv[i].ptext, plen)) {
+ pr_err("aesgcm_decrypt() #2 failed on vector %d\n", i);
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+module_init(libaesgcm_init);
+
+static void __exit libaesgcm_exit(void)
+{
+}
+module_exit(libaesgcm_exit);
+#endif
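
The kernel-doc comments above describe the whole public interface of the new library: aesgcm_expandkey(), aesgcm_encrypt() and aesgcm_decrypt(). The following caller sketch is a reading aid only, not part of the patch; it assumes the declarations (struct aesgcm_ctx, GCM_AES_IV_SIZE) are made available via <crypto/gcm.h>, the header already included by aesgcm.c, and all buffer names, sizes and error codes are purely illustrative.

    #include <crypto/gcm.h>		/* assumed home of struct aesgcm_ctx and GCM_AES_IV_SIZE */
    #include <linux/errno.h>
    #include <linux/string.h>

    static int aesgcm_usage_sketch(const u8 *key, unsigned int keylen)
    {
    	struct aesgcm_ctx ctx;
    	u8 iv[GCM_AES_IV_SIZE] = { 1 };	/* must never repeat for a given key */
    	u8 assoc[4] = "hdr";		/* authenticated but not encrypted */
    	u8 pt[32] = "thirty-two bytes of plaintext!";
    	u8 ct[32], out[32], tag[16];

    	/* 16-byte tag; returns -EINVAL for key/tag sizes GCM does not permit */
    	if (aesgcm_expandkey(&ctx, key, keylen, sizeof(tag)))
    		return -EINVAL;

    	aesgcm_encrypt(&ctx, ct, pt, sizeof(pt), assoc, sizeof(assoc), iv, tag);

    	/* authentication happens before decryption; no plaintext on failure */
    	if (!aesgcm_decrypt(&ctx, out, ct, sizeof(ct), assoc, sizeof(assoc), iv, tag))
    		return -EBADMSG;

    	return memcmp(out, pt, sizeof(pt)) ? -EIO : 0;
    }

As the counter-increment comment in aesgcm_crypt() stresses, the 12-byte IV must be unique per key; the sketch hard-codes one only for brevity.
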
diff --git a/lib/crypto/gf128mul.c b/lib/crypto/gf128mul.c
new file mode 100644
index 000000000000..8f8c45e0cdcf
--- /dev/null
+++ b/lib/crypto/gf128mul.c
@@ -0,0 +1,436 @@
+/* gf128mul.c - GF(2^128) multiplication functions
+ *
+ * Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.
+ * Copyright (c) 2006, Rik Snel <rsnel@cube.dyndns.org>
+ *
+ * Based on Dr Brian Gladman's (GPL'd) work published at
+ * http://gladman.plushost.co.uk/oldsite/cryptography_technology/index.php
+ * See the original copyright notice below.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ ---------------------------------------------------------------------------
+ Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. All rights reserved.
+
+ LICENSE TERMS
+
+ The free distribution and use of this software in both source and binary
+ form is allowed (with or without changes) provided that:
+
+ 1. distributions of this source code include the above copyright
+ notice, this list of conditions and the following disclaimer;
+
+ 2. distributions in binary form include the above copyright
+ notice, this list of conditions and the following disclaimer
+ in the documentation and/or other associated materials;
+
+ 3. the copyright holder's name is not used to endorse products
+ built using this software without specific written permission.
+
+ ALTERNATIVELY, provided that this notice is retained in full, this product
+ may be distributed under the terms of the GNU General Public License (GPL),
+ in which case the provisions of the GPL apply INSTEAD OF those given above.
+
+ DISCLAIMER
+
+ This software is provided 'as is' with no explicit or implied warranties
+ in respect of its properties, including, but not limited to, correctness
+ and/or fitness for purpose.
+ ---------------------------------------------------------------------------
+ Issue 31/01/2006
+
+ This file provides fast multiplication in GF(2^128) as required by several
+ cryptographic authentication modes
+*/
+
+#include <crypto/gf128mul.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#define gf128mul_dat(q) { \
+ q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\
+ q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f),\
+ q(0x10), q(0x11), q(0x12), q(0x13), q(0x14), q(0x15), q(0x16), q(0x17),\
+ q(0x18), q(0x19), q(0x1a), q(0x1b), q(0x1c), q(0x1d), q(0x1e), q(0x1f),\
+ q(0x20), q(0x21), q(0x22), q(0x23), q(0x24), q(0x25), q(0x26), q(0x27),\
+ q(0x28), q(0x29), q(0x2a), q(0x2b), q(0x2c), q(0x2d), q(0x2e), q(0x2f),\
+ q(0x30), q(0x31), q(0x32), q(0x33), q(0x34), q(0x35), q(0x36), q(0x37),\
+ q(0x38), q(0x39), q(0x3a), q(0x3b), q(0x3c), q(0x3d), q(0x3e), q(0x3f),\
+ q(0x40), q(0x41), q(0x42), q(0x43), q(0x44), q(0x45), q(0x46), q(0x47),\
+ q(0x48), q(0x49), q(0x4a), q(0x4b), q(0x4c), q(0x4d), q(0x4e), q(0x4f),\
+ q(0x50), q(0x51), q(0x52), q(0x53), q(0x54), q(0x55), q(0x56), q(0x57),\
+ q(0x58), q(0x59), q(0x5a), q(0x5b), q(0x5c), q(0x5d), q(0x5e), q(0x5f),\
+ q(0x60), q(0x61), q(0x62), q(0x63), q(0x64), q(0x65), q(0x66), q(0x67),\
+ q(0x68), q(0x69), q(0x6a), q(0x6b), q(0x6c), q(0x6d), q(0x6e), q(0x6f),\
+ q(0x70), q(0x71), q(0x72), q(0x73), q(0x74), q(0x75), q(0x76), q(0x77),\
+ q(0x78), q(0x79), q(0x7a), q(0x7b), q(0x7c), q(0x7d), q(0x7e), q(0x7f),\
+ q(0x80), q(0x81), q(0x82), q(0x83), q(0x84), q(0x85), q(0x86), q(0x87),\
+ q(0x88), q(0x89), q(0x8a), q(0x8b), q(0x8c), q(0x8d), q(0x8e), q(0x8f),\
+ q(0x90), q(0x91), q(0x92), q(0x93), q(0x94), q(0x95), q(0x96), q(0x97),\
+ q(0x98), q(0x99), q(0x9a), q(0x9b), q(0x9c), q(0x9d), q(0x9e), q(0x9f),\
+ q(0xa0), q(0xa1), q(0xa2), q(0xa3), q(0xa4), q(0xa5), q(0xa6), q(0xa7),\
+ q(0xa8), q(0xa9), q(0xaa), q(0xab), q(0xac), q(0xad), q(0xae), q(0xaf),\
+ q(0xb0), q(0xb1), q(0xb2), q(0xb3), q(0xb4), q(0xb5), q(0xb6), q(0xb7),\
+ q(0xb8), q(0xb9), q(0xba), q(0xbb), q(0xbc), q(0xbd), q(0xbe), q(0xbf),\
+ q(0xc0), q(0xc1), q(0xc2), q(0xc3), q(0xc4), q(0xc5), q(0xc6), q(0xc7),\
+ q(0xc8), q(0xc9), q(0xca), q(0xcb), q(0xcc), q(0xcd), q(0xce), q(0xcf),\
+ q(0xd0), q(0xd1), q(0xd2), q(0xd3), q(0xd4), q(0xd5), q(0xd6), q(0xd7),\
+ q(0xd8), q(0xd9), q(0xda), q(0xdb), q(0xdc), q(0xdd), q(0xde), q(0xdf),\
+ q(0xe0), q(0xe1), q(0xe2), q(0xe3), q(0xe4), q(0xe5), q(0xe6), q(0xe7),\
+ q(0xe8), q(0xe9), q(0xea), q(0xeb), q(0xec), q(0xed), q(0xee), q(0xef),\
+ q(0xf0), q(0xf1), q(0xf2), q(0xf3), q(0xf4), q(0xf5), q(0xf6), q(0xf7),\
+ q(0xf8), q(0xf9), q(0xfa), q(0xfb), q(0xfc), q(0xfd), q(0xfe), q(0xff) \
+}
+
+/*
+ * Given a value i in 0..255 as the byte overflow when a field element
+ * in GF(2^128) is multiplied by x^8, the following macro returns the
+ * 16-bit value that must be XOR-ed into the low-degree end of the
+ * product to reduce it modulo the polynomial x^128 + x^7 + x^2 + x + 1.
+ *
+ * There are two versions of the macro, and hence two tables: one for
+ * the "be" convention where the highest-order bit is the coefficient of
+ * the highest-degree polynomial term, and one for the "le" convention
+ * where the highest-order bit is the coefficient of the lowest-degree
+ * polynomial term. In both cases the values are stored in CPU byte
+ * endianness such that the coefficients are ordered consistently across
+ * bytes, i.e. in the "be" table bits 15..0 of the stored value
+ * correspond to the coefficients of x^15..x^0, and in the "le" table
+ * bits 15..0 correspond to the coefficients of x^0..x^15.
+ *
+ * Therefore, provided that the appropriate byte endianness conversions
+ * are done by the multiplication functions (and these must be in place
+ * anyway to support both little endian and big endian CPUs), the "be"
+ * table can be used for multiplications of both "bbe" and "ble"
+ * elements, and the "le" table can be used for multiplications of both
+ * "lle" and "lbe" elements.
+ */
+
+#define xda_be(i) ( \
+ (i & 0x80 ? 0x4380 : 0) ^ (i & 0x40 ? 0x21c0 : 0) ^ \
+ (i & 0x20 ? 0x10e0 : 0) ^ (i & 0x10 ? 0x0870 : 0) ^ \
+ (i & 0x08 ? 0x0438 : 0) ^ (i & 0x04 ? 0x021c : 0) ^ \
+ (i & 0x02 ? 0x010e : 0) ^ (i & 0x01 ? 0x0087 : 0) \
+)
+
+#define xda_le(i) ( \
+ (i & 0x80 ? 0xe100 : 0) ^ (i & 0x40 ? 0x7080 : 0) ^ \
+ (i & 0x20 ? 0x3840 : 0) ^ (i & 0x10 ? 0x1c20 : 0) ^ \
+ (i & 0x08 ? 0x0e10 : 0) ^ (i & 0x04 ? 0x0708 : 0) ^ \
+ (i & 0x02 ? 0x0384 : 0) ^ (i & 0x01 ? 0x01c2 : 0) \
+)
+
+static const u16 gf128mul_table_le[256] = gf128mul_dat(xda_le);
+static const u16 gf128mul_table_be[256] = gf128mul_dat(xda_be);
+
+/*
+ * The following functions multiply a field element by x^8 in
+ * the polynomial field representation. They use 64-bit word operations
+ * to gain speed but compensate for machine endianness and hence work
+ * correctly on both styles of machine.
+ */
+
+static void gf128mul_x8_lle(be128 *x)
+{
+ u64 a = be64_to_cpu(x->a);
+ u64 b = be64_to_cpu(x->b);
+ u64 _tt = gf128mul_table_le[b & 0xff];
+
+ x->b = cpu_to_be64((b >> 8) | (a << 56));
+ x->a = cpu_to_be64((a >> 8) ^ (_tt << 48));
+}
+
+/* time invariant version of gf128mul_x8_lle */
+static void gf128mul_x8_lle_ti(be128 *x)
+{
+ u64 a = be64_to_cpu(x->a);
+ u64 b = be64_to_cpu(x->b);
+ u64 _tt = xda_le(b & 0xff); /* avoid table lookup */
+
+ x->b = cpu_to_be64((b >> 8) | (a << 56));
+ x->a = cpu_to_be64((a >> 8) ^ (_tt << 48));
+}
+
+static void gf128mul_x8_bbe(be128 *x)
+{
+ u64 a = be64_to_cpu(x->a);
+ u64 b = be64_to_cpu(x->b);
+ u64 _tt = gf128mul_table_be[a >> 56];
+
+ x->a = cpu_to_be64((a << 8) | (b >> 56));
+ x->b = cpu_to_be64((b << 8) ^ _tt);
+}
+
+void gf128mul_x8_ble(le128 *r, const le128 *x)
+{
+ u64 a = le64_to_cpu(x->a);
+ u64 b = le64_to_cpu(x->b);
+ u64 _tt = gf128mul_table_be[a >> 56];
+
+ r->a = cpu_to_le64((a << 8) | (b >> 56));
+ r->b = cpu_to_le64((b << 8) ^ _tt);
+}
+EXPORT_SYMBOL(gf128mul_x8_ble);
+
+void gf128mul_lle(be128 *r, const be128 *b)
+{
+ /*
+ * The p array should be aligned to twice the size of its element type,
+ * so that every even/odd pair is guaranteed to share a cacheline
+ * (assuming a cacheline size of 32 bytes or more, which is by far the
+ * most common). This ensures that each be128_xor() call in the loop
+ * takes the same amount of time regardless of the value of 'ch', which
+ * is derived from function parameter 'b', which is commonly used as a
+ * key, e.g., for GHASH. The odd array elements are all set to zero,
+ * making each be128_xor() a NOP if its associated bit in 'ch' is not
+ * set, and this is equivalent to calling be128_xor() conditionally.
+ * This approach aims to avoid leaking information about such keys
+ * through execution time variances.
+ *
+ * Unfortunately, __aligned(16) or higher does not work on x86 for
+ * variables on the stack so we need to perform the alignment by hand.
+ */
+ be128 array[16 + 3] = {};
+ be128 *p = PTR_ALIGN(&array[0], 2 * sizeof(be128));
+ int i;
+
+ p[0] = *r;
+ for (i = 0; i < 7; ++i)
+ gf128mul_x_lle(&p[2 * i + 2], &p[2 * i]);
+
+ memset(r, 0, sizeof(*r));
+ for (i = 0;;) {
+ u8 ch = ((u8 *)b)[15 - i];
+
+ be128_xor(r, r, &p[ 0 + !(ch & 0x80)]);
+ be128_xor(r, r, &p[ 2 + !(ch & 0x40)]);
+ be128_xor(r, r, &p[ 4 + !(ch & 0x20)]);
+ be128_xor(r, r, &p[ 6 + !(ch & 0x10)]);
+ be128_xor(r, r, &p[ 8 + !(ch & 0x08)]);
+ be128_xor(r, r, &p[10 + !(ch & 0x04)]);
+ be128_xor(r, r, &p[12 + !(ch & 0x02)]);
+ be128_xor(r, r, &p[14 + !(ch & 0x01)]);
+
+ if (++i >= 16)
+ break;
+
+ gf128mul_x8_lle_ti(r); /* use the time invariant version */
+ }
+}
+EXPORT_SYMBOL(gf128mul_lle);
+
+void gf128mul_bbe(be128 *r, const be128 *b)
+{
+ be128 p[8];
+ int i;
+
+ p[0] = *r;
+ for (i = 0; i < 7; ++i)
+ gf128mul_x_bbe(&p[i + 1], &p[i]);
+
+ memset(r, 0, sizeof(*r));
+ for (i = 0;;) {
+ u8 ch = ((u8 *)b)[i];
+
+ if (ch & 0x80)
+ be128_xor(r, r, &p[7]);
+ if (ch & 0x40)
+ be128_xor(r, r, &p[6]);
+ if (ch & 0x20)
+ be128_xor(r, r, &p[5]);
+ if (ch & 0x10)
+ be128_xor(r, r, &p[4]);
+ if (ch & 0x08)
+ be128_xor(r, r, &p[3]);
+ if (ch & 0x04)
+ be128_xor(r, r, &p[2]);
+ if (ch & 0x02)
+ be128_xor(r, r, &p[1]);
+ if (ch & 0x01)
+ be128_xor(r, r, &p[0]);
+
+ if (++i >= 16)
+ break;
+
+ gf128mul_x8_bbe(r);
+ }
+}
+EXPORT_SYMBOL(gf128mul_bbe);
+
+/* This version uses 64k bytes of table space.
+ A 16 byte buffer has to be multiplied by a 16 byte key
+ value in GF(2^128). If we consider a GF(2^128) value in
+ the buffer's lowest byte, we can construct a table of
+ the 256 16 byte values that result from the 256 values
+ of this byte. This requires 4096 bytes. But we also
+ need tables for each of the 16 higher bytes in the
+ buffer as well, which makes 64 kbytes in total.
+*/
+/* additional explanation
+ * t[0][BYTE] contains g*BYTE
+ * t[1][BYTE] contains g*x^8*BYTE
+ * ..
+ * t[15][BYTE] contains g*x^120*BYTE */
+struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g)
+{
+ struct gf128mul_64k *t;
+ int i, j, k;
+
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ goto out;
+
+ for (i = 0; i < 16; i++) {
+ t->t[i] = kzalloc(sizeof(*t->t[i]), GFP_KERNEL);
+ if (!t->t[i]) {
+ gf128mul_free_64k(t);
+ t = NULL;
+ goto out;
+ }
+ }
+
+ t->t[0]->t[1] = *g;
+ for (j = 1; j <= 64; j <<= 1)
+ gf128mul_x_bbe(&t->t[0]->t[j + j], &t->t[0]->t[j]);
+
+ for (i = 0;;) {
+ for (j = 2; j < 256; j += j)
+ for (k = 1; k < j; ++k)
+ be128_xor(&t->t[i]->t[j + k],
+ &t->t[i]->t[j], &t->t[i]->t[k]);
+
+ if (++i >= 16)
+ break;
+
+ for (j = 128; j > 0; j >>= 1) {
+ t->t[i]->t[j] = t->t[i - 1]->t[j];
+ gf128mul_x8_bbe(&t->t[i]->t[j]);
+ }
+ }
+
+out:
+ return t;
+}
+EXPORT_SYMBOL(gf128mul_init_64k_bbe);
+
+void gf128mul_free_64k(struct gf128mul_64k *t)
+{
+ int i;
+
+ for (i = 0; i < 16; i++)
+ kfree_sensitive(t->t[i]);
+ kfree_sensitive(t);
+}
+EXPORT_SYMBOL(gf128mul_free_64k);
+
+void gf128mul_64k_bbe(be128 *a, const struct gf128mul_64k *t)
+{
+ u8 *ap = (u8 *)a;
+ be128 r[1];
+ int i;
+
+ *r = t->t[0]->t[ap[15]];
+ for (i = 1; i < 16; ++i)
+ be128_xor(r, r, &t->t[i]->t[ap[15 - i]]);
+ *a = *r;
+}
+EXPORT_SYMBOL(gf128mul_64k_bbe);
+
+/* This version uses 4k bytes of table space.
+ A 16 byte buffer has to be multiplied by a 16 byte key
+ value in GF(2^128). If we consider a GF(2^128) value in a
+ single byte, we can construct a table of the 256 16 byte
+ values that result from the 256 values of this byte.
+ This requires 4096 bytes. If we take the highest byte in
+ the buffer and use this table to get the result, we then
+ have to multiply by x^120 to get the final value. For the
+ next highest byte the result has to be multiplied by x^112
+ and so on. But we can do this by accumulating the result
+ in an accumulator starting with the result for the top
+ byte. We repeatedly multiply the accumulator value by
+ x^8 and then add in (i.e. xor) the 16 bytes of the next
+ lower byte in the buffer, stopping when we reach the
+ lowest byte. This requires a 4096 byte table.
+*/
+struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g)
+{
+ struct gf128mul_4k *t;
+ int j, k;
+
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ goto out;
+
+ t->t[128] = *g;
+ for (j = 64; j > 0; j >>= 1)
+ gf128mul_x_lle(&t->t[j], &t->t[j+j]);
+
+ for (j = 2; j < 256; j += j)
+ for (k = 1; k < j; ++k)
+ be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
+
+out:
+ return t;
+}
+EXPORT_SYMBOL(gf128mul_init_4k_lle);
+
+struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g)
+{
+ struct gf128mul_4k *t;
+ int j, k;
+
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ goto out;
+
+ t->t[1] = *g;
+ for (j = 1; j <= 64; j <<= 1)
+ gf128mul_x_bbe(&t->t[j + j], &t->t[j]);
+
+ for (j = 2; j < 256; j += j)
+ for (k = 1; k < j; ++k)
+ be128_xor(&t->t[j + k], &t->t[j], &t->t[k]);
+
+out:
+ return t;
+}
+EXPORT_SYMBOL(gf128mul_init_4k_bbe);
+
+void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t)
+{
+ u8 *ap = (u8 *)a;
+ be128 r[1];
+ int i = 15;
+
+ *r = t->t[ap[15]];
+ while (i--) {
+ gf128mul_x8_lle(r);
+ be128_xor(r, r, &t->t[ap[i]]);
+ }
+ *a = *r;
+}
+EXPORT_SYMBOL(gf128mul_4k_lle);
+
+void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t)
+{
+ u8 *ap = (u8 *)a;
+ be128 r[1];
+ int i = 0;
+
+ *r = t->t[ap[0]];
+ while (++i < 16) {
+ gf128mul_x8_bbe(r);
+ be128_xor(r, r, &t->t[ap[i]]);
+ }
+ *a = *r;
+}
+EXPORT_SYMBOL(gf128mul_4k_bbe);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Functions for multiplying elements of GF(2^128)");
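
The table-driven variants above (gf128mul_init_4k_lle/bbe and gf128mul_init_64k_bbe) trade memory for speed when many blocks are multiplied by the same field element, while gf128mul_lle() is the constant-time routine the AES-GCM library uses for GHASH. Below is a minimal, hedged sketch of the 4 KiB "bbe" table API, not part of the patch; the wrapper function name and the block loop are illustrative, and freeing with kfree_sensitive() mirrors what gf128mul_free_64k() does for the larger tables.

    #include <crypto/gf128mul.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    /* Multiply an array of 16-byte blocks by the fixed element 'g'. */
    static int gf128mul_4k_sketch(const be128 *g, be128 *blocks, int nblocks)
    {
    	struct gf128mul_4k *t;
    	int i;

    	t = gf128mul_init_4k_bbe(g);	/* builds the 256-entry table with kzalloc() */
    	if (!t)
    		return -ENOMEM;

    	for (i = 0; i < nblocks; i++)
    		gf128mul_4k_bbe(&blocks[i], t);	/* blocks[i] *= g in GF(2^128), "bbe" convention */

    	kfree_sensitive(t);		/* the table is derived from key material */
    	return 0;
    }
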
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 337d797a7141..df86e649d8be 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -437,6 +437,7 @@ static int object_cpu_offline(unsigned int cpu)
struct debug_percpu_free *percpu_pool;
struct hlist_node *tmp;
struct debug_obj *obj;
+ unsigned long flags;
/* Remote access is safe as the CPU is dead already */
percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
@@ -444,6 +445,12 @@ static int object_cpu_offline(unsigned int cpu)
hlist_del(&obj->node);
kmem_cache_free(obj_cache, obj);
}
+
+ raw_spin_lock_irqsave(&pool_lock, flags);
+ obj_pool_used -= percpu_pool->obj_free;
+ debug_objects_freed += percpu_pool->obj_free;
+ raw_spin_unlock_irqrestore(&pool_lock, flags);
+
percpu_pool->obj_free = 0;
return 0;
@@ -500,9 +507,9 @@ static void debug_print_object(struct debug_obj *obj, char *msg)
descr->debug_hint(obj->object) : NULL;
limit++;
WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
- "object type: %s hint: %pS\n",
+ "object: %p object type: %s hint: %pS\n",
msg, obj_states[obj->state], obj->astate,
- descr->name, hint);
+ obj->object, descr->name, hint);
}
debug_objects_warnings++;
}
@@ -1318,6 +1325,8 @@ static int __init debug_objects_replace_static_objects(void)
hlist_add_head(&obj->node, &objects);
}
+ debug_objects_allocated += i;
+
/*
* debug_objects_mem_init() is now called early that only one CPU is up
* and interrupts have been disabled, so it is safe to replace the
@@ -1386,6 +1395,7 @@ void __init debug_objects_mem_init(void)
debug_objects_enabled = 0;
kmem_cache_destroy(obj_cache);
pr_warn("out of memory.\n");
+ return;
} else
debug_objects_selftest();
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 96e092de5b72..1421818c9ef7 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -41,9 +41,6 @@ EXPORT_SYMBOL_GPL(setup_fault_attr);
static void fail_dump(struct fault_attr *attr)
{
- if (attr->no_warn)
- return;
-
if (attr->verbose > 0 && __ratelimit(&attr->ratelimit_state)) {
printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure.\n"
"name %pd, interval %lu, probability %lu, "
@@ -103,7 +100,7 @@ static inline bool fail_stacktrace(struct fault_attr *attr)
* http://www.nongnu.org/failmalloc/
*/
-bool should_fail(struct fault_attr *attr, ssize_t size)
+bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags)
{
if (in_task()) {
unsigned int fail_nth = READ_ONCE(current->fail_nth);
@@ -139,20 +136,26 @@ bool should_fail(struct fault_attr *attr, ssize_t size)
return false;
}
- if (attr->probability <= prandom_u32_max(100))
+ if (attr->probability <= get_random_u32_below(100))
return false;
if (!fail_stacktrace(attr))
return false;
fail:
- fail_dump(attr);
+ if (!(flags & FAULT_NOWARN))
+ fail_dump(attr);
if (atomic_read(&attr->times) != -1)
atomic_dec_not_zero(&attr->times);
return true;
}
+
+bool should_fail(struct fault_attr *attr, ssize_t size)
+{
+ return should_fail_ex(attr, size, 0);
+}
EXPORT_SYMBOL_GPL(should_fail);
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
diff --git a/lib/find_bit_benchmark.c b/lib/find_bit_benchmark.c
index 7c3c011abd29..d3fb09e6eff1 100644
--- a/lib/find_bit_benchmark.c
+++ b/lib/find_bit_benchmark.c
@@ -174,8 +174,8 @@ static int __init find_bit_test(void)
bitmap_zero(bitmap2, BITMAP_LEN);
while (nbits--) {
- __set_bit(prandom_u32_max(BITMAP_LEN), bitmap);
- __set_bit(prandom_u32_max(BITMAP_LEN), bitmap2);
+ __set_bit(get_random_u32_below(BITMAP_LEN), bitmap);
+ __set_bit(get_random_u32_below(BITMAP_LEN), bitmap2);
}
test_find_next_bit(bitmap, BITMAP_LEN);
diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c
index 5f4b07b56cd9..973866438608 100644
--- a/lib/fonts/fonts.c
+++ b/lib/fonts/fonts.c
@@ -135,8 +135,8 @@ const struct font_desc *get_default_font(int xres, int yres, u32 font_w,
if (res > 20)
c += 20 - res;
- if ((font_w & (1 << (f->width - 1))) &&
- (font_h & (1 << (f->height - 1))))
+ if ((font_w & (1U << (f->width - 1))) &&
+ (font_h & (1U << (f->height - 1))))
c += 1000;
if (c > cc) {
diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
index 409af07f340a..c8c33cbaae9e 100644
--- a/lib/fortify_kunit.c
+++ b/lib/fortify_kunit.c
@@ -16,7 +16,10 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <kunit/test.h>
+#include <linux/device.h>
+#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/vmalloc.h>
static const char array_of_10[] = "this is 10";
static const char *ptr_of_11 = "this is 11!";
@@ -60,9 +63,261 @@ static void control_flow_split_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, want_minus_one(pick), SIZE_MAX);
}
+#define KUNIT_EXPECT_BOS(test, p, expected, name) \
+ KUNIT_EXPECT_EQ_MSG(test, __builtin_object_size(p, 1), \
+ expected, \
+ "__alloc_size() not working with __bos on " name "\n")
+
+#if !__has_builtin(__builtin_dynamic_object_size)
+#define KUNIT_EXPECT_BDOS(test, p, expected, name) \
+ /* Silence "unused variable 'expected'" warning. */ \
+ KUNIT_EXPECT_EQ(test, expected, expected)
+#else
+#define KUNIT_EXPECT_BDOS(test, p, expected, name) \
+ KUNIT_EXPECT_EQ_MSG(test, __builtin_dynamic_object_size(p, 1), \
+ expected, \
+ "__alloc_size() not working with __bdos on " name "\n")
+#endif
+
+/* If the expected size is a constant value, __bos can see it. */
+#define check_const(_expected, alloc, free) do { \
+ size_t expected = (_expected); \
+ void *p = alloc; \
+ KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n"); \
+ KUNIT_EXPECT_BOS(test, p, expected, #alloc); \
+ KUNIT_EXPECT_BDOS(test, p, expected, #alloc); \
+ free; \
+} while (0)
+
+/* If the expected size is NOT a constant value, __bos CANNOT see it. */
+#define check_dynamic(_expected, alloc, free) do { \
+ size_t expected = (_expected); \
+ void *p = alloc; \
+ KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n"); \
+ KUNIT_EXPECT_BOS(test, p, SIZE_MAX, #alloc); \
+ KUNIT_EXPECT_BDOS(test, p, expected, #alloc); \
+ free; \
+} while (0)
+
+/* Assortment of constant-value kinda-edge cases. */
+#define CONST_TEST_BODY(TEST_alloc) do { \
+ /* Special-case vmalloc()-family to skip 0-sized allocs. */ \
+ if (strcmp(#TEST_alloc, "TEST_vmalloc") != 0) \
+ TEST_alloc(check_const, 0, 0); \
+ TEST_alloc(check_const, 1, 1); \
+ TEST_alloc(check_const, 128, 128); \
+ TEST_alloc(check_const, 1023, 1023); \
+ TEST_alloc(check_const, 1025, 1025); \
+ TEST_alloc(check_const, 4096, 4096); \
+ TEST_alloc(check_const, 4097, 4097); \
+} while (0)
+
+static volatile size_t zero_size;
+static volatile size_t unknown_size = 50;
+
+#if !__has_builtin(__builtin_dynamic_object_size)
+#define DYNAMIC_TEST_BODY(TEST_alloc) \
+ kunit_skip(test, "Compiler is missing __builtin_dynamic_object_size() support\n")
+#else
+#define DYNAMIC_TEST_BODY(TEST_alloc) do { \
+ size_t size = unknown_size; \
+ \
+ /* \
+ * Expected size is "size" in each test, before it is then \
+ * internally incremented in each test. Requires we disable \
+ * -Wunsequenced. \
+ */ \
+ TEST_alloc(check_dynamic, size, size++); \
+ /* Make sure incrementing actually happened. */ \
+ KUNIT_EXPECT_NE(test, size, unknown_size); \
+} while (0)
+#endif
+
+#define DEFINE_ALLOC_SIZE_TEST_PAIR(allocator) \
+static void alloc_size_##allocator##_const_test(struct kunit *test) \
+{ \
+ CONST_TEST_BODY(TEST_##allocator); \
+} \
+static void alloc_size_##allocator##_dynamic_test(struct kunit *test) \
+{ \
+ DYNAMIC_TEST_BODY(TEST_##allocator); \
+}
+
+#define TEST_kmalloc(checker, expected_size, alloc_size) do { \
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
+ void *orig; \
+ size_t len; \
+ \
+ checker(expected_size, kmalloc(alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ kmalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, kzalloc(alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ kzalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, kcalloc(1, alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, kcalloc(alloc_size, 1, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ kcalloc_node(1, alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, \
+ kcalloc_node(alloc_size, 1, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, kmalloc_array(1, alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, kmalloc_array(alloc_size, 1, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ kmalloc_array_node(1, alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, \
+ kmalloc_array_node(alloc_size, 1, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ checker(expected_size, __kmalloc(alloc_size, gfp), \
+ kfree(p)); \
+ checker(expected_size, \
+ __kmalloc_node(alloc_size, gfp, NUMA_NO_NODE), \
+ kfree(p)); \
+ \
+ orig = kmalloc(alloc_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker((expected_size) * 2, \
+ krealloc(orig, (alloc_size) * 2, gfp), \
+ kfree(p)); \
+ orig = kmalloc(alloc_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker((expected_size) * 2, \
+ krealloc_array(orig, 1, (alloc_size) * 2, gfp), \
+ kfree(p)); \
+ orig = kmalloc(alloc_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker((expected_size) * 2, \
+ krealloc_array(orig, (alloc_size) * 2, 1, gfp), \
+ kfree(p)); \
+ \
+ len = 11; \
+ /* Using memdup() with fixed size, so force unknown length. */ \
+ if (!__builtin_constant_p(expected_size)) \
+ len += zero_size; \
+ checker(len, kmemdup("hello there", len, gfp), kfree(p)); \
+} while (0)
+DEFINE_ALLOC_SIZE_TEST_PAIR(kmalloc)
+
+/* Sizes are in pages, not bytes. */
+#define TEST_vmalloc(checker, expected_pages, alloc_pages) do { \
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
+ checker((expected_pages) * PAGE_SIZE, \
+ vmalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ vzalloc((alloc_pages) * PAGE_SIZE), vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ __vmalloc((alloc_pages) * PAGE_SIZE, gfp), vfree(p)); \
+} while (0)
+DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc)
+
+/* Sizes are in pages (and open-coded for side-effects), not bytes. */
+#define TEST_kvmalloc(checker, expected_pages, alloc_pages) do { \
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
+ size_t prev_size; \
+ void *orig; \
+ \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc((alloc_pages) * PAGE_SIZE, gfp), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvzalloc((alloc_pages) * PAGE_SIZE, gfp), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvzalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp), \
+ vfree(p)); \
+ checker((expected_pages) * PAGE_SIZE, \
+ kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp), \
+ vfree(p)); \
+ \
+ prev_size = (expected_pages) * PAGE_SIZE; \
+ orig = kvmalloc(prev_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker(((expected_pages) * PAGE_SIZE) * 2, \
+ kvrealloc(orig, prev_size, \
+ ((alloc_pages) * PAGE_SIZE) * 2, gfp), \
+ kvfree(p)); \
+} while (0)
+DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)
+
+#define TEST_devm_kmalloc(checker, expected_size, alloc_size) do { \
+ gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; \
+ const char dev_name[] = "fortify-test"; \
+ struct device *dev; \
+ void *orig; \
+ size_t len; \
+ \
+ /* Create dummy device for devm_kmalloc()-family tests. */ \
+ dev = root_device_register(dev_name); \
+ KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev), \
+ "Cannot register test device\n"); \
+ \
+ checker(expected_size, devm_kmalloc(dev, alloc_size, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, devm_kzalloc(dev, alloc_size, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, \
+ devm_kmalloc_array(dev, 1, alloc_size, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, \
+ devm_kmalloc_array(dev, alloc_size, 1, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, \
+ devm_kcalloc(dev, 1, alloc_size, gfp), \
+ devm_kfree(dev, p)); \
+ checker(expected_size, \
+ devm_kcalloc(dev, alloc_size, 1, gfp), \
+ devm_kfree(dev, p)); \
+ \
+ orig = devm_kmalloc(dev, alloc_size, gfp); \
+ KUNIT_EXPECT_TRUE(test, orig != NULL); \
+ checker((expected_size) * 2, \
+ devm_krealloc(dev, orig, (alloc_size) * 2, gfp), \
+ devm_kfree(dev, p)); \
+ \
+ len = 4; \
+ /* Using memdup() with fixed size, so force unknown length. */ \
+ if (!__builtin_constant_p(expected_size)) \
+ len += zero_size; \
+ checker(len, devm_kmemdup(dev, "Ohai", len, gfp), \
+ devm_kfree(dev, p)); \
+ \
+ device_unregister(dev); \
+} while (0)
+DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc)
+
static struct kunit_case fortify_test_cases[] = {
KUNIT_CASE(known_sizes_test),
KUNIT_CASE(control_flow_split_test),
+ KUNIT_CASE(alloc_size_kmalloc_const_test),
+ KUNIT_CASE(alloc_size_kmalloc_dynamic_test),
+ KUNIT_CASE(alloc_size_vmalloc_const_test),
+ KUNIT_CASE(alloc_size_vmalloc_dynamic_test),
+ KUNIT_CASE(alloc_size_kvmalloc_const_test),
+ KUNIT_CASE(alloc_size_kvmalloc_dynamic_test),
+ KUNIT_CASE(alloc_size_devm_kmalloc_const_test),
+ KUNIT_CASE(alloc_size_devm_kmalloc_dynamic_test),
{}
};
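
For readers comparing the two macros above: check_const() expects both builtins to see the allocation size, while check_dynamic() expects only __builtin_dynamic_object_size() to. A minimal sketch of that distinction, assuming a compiler with __alloc_size() and __bdos support (the helper below is illustrative, not part of the patch):

static void __maybe_unused bos_demo(size_t runtime_len)
{
	void *a = kmalloc(64, GFP_KERNEL);          /* constant size */
	void *b = kmalloc(runtime_len, GFP_KERNEL); /* runtime size */

	/* A constant size is visible to both builtins via __alloc_size(). */
	pr_info("bos(a)=%zu bdos(a)=%zu\n",
		__builtin_object_size(a, 1),
		__builtin_dynamic_object_size(a, 1));
	/* A runtime size: __bos reports SIZE_MAX, __bdos can still track it. */
	pr_info("bos(b)=%zu bdos(b)=%zu\n",
		__builtin_object_size(b, 1),
		__builtin_dynamic_object_size(b, 1));

	kfree(a);
	kfree(b);
}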
diff --git a/lib/interval_tree.c b/lib/interval_tree.c
index 593ce56ece50..3412737ff365 100644
--- a/lib/interval_tree.c
+++ b/lib/interval_tree.c
@@ -15,3 +15,135 @@ EXPORT_SYMBOL_GPL(interval_tree_insert);
EXPORT_SYMBOL_GPL(interval_tree_remove);
EXPORT_SYMBOL_GPL(interval_tree_iter_first);
EXPORT_SYMBOL_GPL(interval_tree_iter_next);
+
+#ifdef CONFIG_INTERVAL_TREE_SPAN_ITER
+/*
+ * Roll nodes[1] into nodes[0] by advancing nodes[1] to the end of a contiguous
+ * span of nodes. This makes nodes[0]->last the end of that contiguous used
+ * span of indexes that started at the original nodes[1]->start. nodes[1] is now the
+ * first node starting the next used span. A hole span is between nodes[0]->last
+ * and nodes[1]->start. nodes[1] must be !NULL.
+ */
+static void
+interval_tree_span_iter_next_gap(struct interval_tree_span_iter *state)
+{
+ struct interval_tree_node *cur = state->nodes[1];
+
+ state->nodes[0] = cur;
+ do {
+ if (cur->last > state->nodes[0]->last)
+ state->nodes[0] = cur;
+ cur = interval_tree_iter_next(cur, state->first_index,
+ state->last_index);
+ } while (cur && (state->nodes[0]->last >= cur->start ||
+ state->nodes[0]->last + 1 == cur->start));
+ state->nodes[1] = cur;
+}
+
+void interval_tree_span_iter_first(struct interval_tree_span_iter *iter,
+ struct rb_root_cached *itree,
+ unsigned long first_index,
+ unsigned long last_index)
+{
+ iter->first_index = first_index;
+ iter->last_index = last_index;
+ iter->nodes[0] = NULL;
+ iter->nodes[1] =
+ interval_tree_iter_first(itree, first_index, last_index);
+ if (!iter->nodes[1]) {
+ /* No nodes intersect the span, whole span is hole */
+ iter->start_hole = first_index;
+ iter->last_hole = last_index;
+ iter->is_hole = 1;
+ return;
+ }
+ if (iter->nodes[1]->start > first_index) {
+ /* Leading hole on first iteration */
+ iter->start_hole = first_index;
+ iter->last_hole = iter->nodes[1]->start - 1;
+ iter->is_hole = 1;
+ interval_tree_span_iter_next_gap(iter);
+ return;
+ }
+
+	/* Starting inside a used range */
+ iter->start_used = first_index;
+ iter->is_hole = 0;
+ interval_tree_span_iter_next_gap(iter);
+ iter->last_used = iter->nodes[0]->last;
+ if (iter->last_used >= last_index) {
+ iter->last_used = last_index;
+ iter->nodes[0] = NULL;
+ iter->nodes[1] = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(interval_tree_span_iter_first);
+
+void interval_tree_span_iter_next(struct interval_tree_span_iter *iter)
+{
+ if (!iter->nodes[0] && !iter->nodes[1]) {
+ iter->is_hole = -1;
+ return;
+ }
+
+ if (iter->is_hole) {
+ iter->start_used = iter->last_hole + 1;
+ iter->last_used = iter->nodes[0]->last;
+ if (iter->last_used >= iter->last_index) {
+ iter->last_used = iter->last_index;
+ iter->nodes[0] = NULL;
+ iter->nodes[1] = NULL;
+ }
+ iter->is_hole = 0;
+ return;
+ }
+
+ if (!iter->nodes[1]) {
+ /* Trailing hole */
+ iter->start_hole = iter->nodes[0]->last + 1;
+ iter->last_hole = iter->last_index;
+ iter->nodes[0] = NULL;
+ iter->is_hole = 1;
+ return;
+ }
+
+ /* must have both nodes[0] and [1], interior hole */
+ iter->start_hole = iter->nodes[0]->last + 1;
+ iter->last_hole = iter->nodes[1]->start - 1;
+ iter->is_hole = 1;
+ interval_tree_span_iter_next_gap(iter);
+}
+EXPORT_SYMBOL_GPL(interval_tree_span_iter_next);
+
+/*
+ * Advance the iterator index to a specific position. The returned used/hole is
+ * updated to start at new_index. This is faster than calling
+ * interval_tree_span_iter_first() as it can avoid full searches in several
+ * cases where the iterator is already set.
+ */
+void interval_tree_span_iter_advance(struct interval_tree_span_iter *iter,
+ struct rb_root_cached *itree,
+ unsigned long new_index)
+{
+ if (iter->is_hole == -1)
+ return;
+
+ iter->first_index = new_index;
+ if (new_index > iter->last_index) {
+ iter->is_hole = -1;
+ return;
+ }
+
+ /* Rely on the union aliasing hole/used */
+ if (iter->start_hole <= new_index && new_index <= iter->last_hole) {
+ iter->start_hole = new_index;
+ return;
+ }
+ if (new_index == iter->last_hole + 1)
+ interval_tree_span_iter_next(iter);
+ else
+ interval_tree_span_iter_first(iter, itree, new_index,
+ iter->last_index);
+}
+EXPORT_SYMBOL_GPL(interval_tree_span_iter_advance);
+#endif
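
A minimal caller sketch for the span iterator added above, using only the exported functions and the iterator fields shown in this hunk (the wrapper function itself is illustrative):

static void __maybe_unused span_walk(struct rb_root_cached *itree,
				     unsigned long last)
{
	struct interval_tree_span_iter span;

	for (interval_tree_span_iter_first(&span, itree, 0, last);
	     span.is_hole != -1;	/* -1 means the walk is finished */
	     interval_tree_span_iter_next(&span)) {
		if (span.is_hole)
			pr_info("hole: %lu-%lu\n",
				span.start_hole, span.last_hole);
		else
			pr_info("used: %lu-%lu\n",
				span.start_used, span.last_used);
	}
}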
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index c3ca28ca68a6..f9a3ff37ecd1 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -520,6 +520,8 @@ static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
+ if (WARN_ON_ONCE(i->data_source))
+ return 0;
if (unlikely(iov_iter_is_pipe(i)))
return copy_pipe_to_iter(addr, bytes, i);
if (user_backed_iter(i))
@@ -606,6 +608,8 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
*/
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
+ if (WARN_ON_ONCE(i->data_source))
+ return 0;
if (unlikely(iov_iter_is_pipe(i)))
return copy_mc_pipe_to_iter(addr, bytes, i);
if (user_backed_iter(i))
@@ -622,10 +626,9 @@ EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(iov_iter_is_pipe(i))) {
- WARN_ON(1);
+ if (WARN_ON_ONCE(!i->data_source))
return 0;
- }
+
if (user_backed_iter(i))
might_fault();
iterate_and_advance(i, bytes, base, len, off,
@@ -639,10 +642,9 @@ EXPORT_SYMBOL(_copy_from_iter);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(iov_iter_is_pipe(i))) {
- WARN_ON(1);
+ if (WARN_ON_ONCE(!i->data_source))
return 0;
- }
+
iterate_and_advance(i, bytes, base, len, off,
__copy_from_user_inatomic_nocache(addr + off, base, len),
memcpy(addr + off, base, len)
@@ -671,10 +673,9 @@ EXPORT_SYMBOL(_copy_from_iter_nocache);
*/
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(iov_iter_is_pipe(i))) {
- WARN_ON(1);
+ if (WARN_ON_ONCE(!i->data_source))
return 0;
- }
+
iterate_and_advance(i, bytes, base, len, off,
__copy_from_user_flushcache(addr + off, base, len),
memcpy_flushcache(addr + off, base, len)
@@ -703,17 +704,18 @@ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
head = compound_head(page);
v += (page - head) << PAGE_SHIFT;
- if (likely(n <= v && v <= (page_size(head))))
- return true;
- WARN_ON(1);
- return false;
+ if (WARN_ON(n > v || v > page_size(head)))
+ return false;
+ return true;
}
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
size_t res = 0;
- if (unlikely(!page_copy_sane(page, offset, bytes)))
+ if (!page_copy_sane(page, offset, bytes))
+ return 0;
+ if (WARN_ON_ONCE(i->data_source))
return 0;
if (unlikely(iov_iter_is_pipe(i)))
return copy_page_to_iter_pipe(page, offset, bytes, i);
@@ -808,13 +810,12 @@ size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t byt
struct iov_iter *i)
{
char *kaddr = kmap_atomic(page), *p = kaddr + offset;
- if (unlikely(!page_copy_sane(page, offset, bytes))) {
+ if (!page_copy_sane(page, offset, bytes)) {
kunmap_atomic(kaddr);
return 0;
}
- if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
+ if (WARN_ON_ONCE(!i->data_source)) {
kunmap_atomic(kaddr);
- WARN_ON(1);
return 0;
}
iterate_and_advance(i, bytes, base, len, off,
@@ -1430,7 +1431,8 @@ static struct page *first_bvec_segment(const struct iov_iter *i,
static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
struct page ***pages, size_t maxsize,
- unsigned int maxpages, size_t *start)
+ unsigned int maxpages, size_t *start,
+ unsigned int gup_flags)
{
unsigned int n;
@@ -1442,7 +1444,6 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
maxsize = MAX_RW_COUNT;
if (likely(user_backed_iter(i))) {
- unsigned int gup_flags = 0;
unsigned long addr;
int res;
@@ -1492,33 +1493,49 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
return -EFAULT;
}
-ssize_t iov_iter_get_pages2(struct iov_iter *i,
+ssize_t iov_iter_get_pages(struct iov_iter *i,
struct page **pages, size_t maxsize, unsigned maxpages,
- size_t *start)
+ size_t *start, unsigned gup_flags)
{
if (!maxpages)
return 0;
BUG_ON(!pages);
- return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start);
+ return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages,
+ start, gup_flags);
+}
+EXPORT_SYMBOL_GPL(iov_iter_get_pages);
+
+ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
+ size_t maxsize, unsigned maxpages, size_t *start)
+{
+ return iov_iter_get_pages(i, pages, maxsize, maxpages, start, 0);
}
EXPORT_SYMBOL(iov_iter_get_pages2);
-ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
+ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
struct page ***pages, size_t maxsize,
- size_t *start)
+ size_t *start, unsigned gup_flags)
{
ssize_t len;
*pages = NULL;
- len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start);
+ len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start,
+ gup_flags);
if (len <= 0) {
kvfree(*pages);
*pages = NULL;
}
return len;
}
+EXPORT_SYMBOL_GPL(iov_iter_get_pages_alloc);
+
+ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
+ struct page ***pages, size_t maxsize, size_t *start)
+{
+ return iov_iter_get_pages_alloc(i, pages, maxsize, start, 0);
+}
EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
@@ -1526,10 +1543,9 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
{
__wsum sum, next;
sum = *csum;
- if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
- WARN_ON(1);
+ if (WARN_ON_ONCE(!i->data_source))
return 0;
- }
+
iterate_and_advance(i, bytes, base, len, off, ({
next = csum_and_copy_from_user(base, addr + off, len);
sum = csum_block_add(sum, next, off);
@@ -1549,9 +1565,15 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
struct csum_state *csstate = _csstate;
__wsum sum, next;
- if (unlikely(iov_iter_is_discard(i))) {
- WARN_ON(1); /* for now */
+ if (WARN_ON_ONCE(i->data_source))
return 0;
+ if (unlikely(iov_iter_is_discard(i))) {
+ // can't use csum_memcpy() for that one - data is not copied
+ csstate->csum = csum_block_add(csstate->csum,
+ csum_partial(addr, bytes, 0),
+ csstate->off);
+ csstate->off += bytes;
+ return bytes;
}
sum = csum_shift(csstate->csum, csstate->off);
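
Since the re-exported iov_iter_get_pages() above now takes the GUP flags directly, a brief hedged sketch of a caller that pins a single page for writing (the function name and buffer handling are illustrative):

static ssize_t __maybe_unused pin_one_page(struct iov_iter *iter,
					   struct page **page)
{
	size_t offset;

	/* FOLL_* flags are forwarded to the GUP path for user-backed iters. */
	return iov_iter_get_pages(iter, page, PAGE_SIZE, 1, &offset,
				  FOLL_WRITE);
}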
diff --git a/lib/is_signed_type_kunit.c b/lib/is_signed_type_kunit.c
index 207207522925..0a7f6ae62839 100644
--- a/lib/is_signed_type_kunit.c
+++ b/lib/is_signed_type_kunit.c
@@ -21,11 +21,7 @@ static void is_signed_type_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, is_signed_type(bool), false);
KUNIT_EXPECT_EQ(test, is_signed_type(signed char), true);
KUNIT_EXPECT_EQ(test, is_signed_type(unsigned char), false);
-#ifdef __CHAR_UNSIGNED__
KUNIT_EXPECT_EQ(test, is_signed_type(char), false);
-#else
- KUNIT_EXPECT_EQ(test, is_signed_type(char), true);
-#endif
KUNIT_EXPECT_EQ(test, is_signed_type(int), true);
KUNIT_EXPECT_EQ(test, is_signed_type(unsigned int), false);
KUNIT_EXPECT_EQ(test, is_signed_type(long), true);
diff --git a/lib/kobject.c b/lib/kobject.c
index a0b2dbfcfa23..af1f5f2954d4 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -694,7 +694,7 @@ static void kobject_release(struct kref *kref)
{
struct kobject *kobj = container_of(kref, struct kobject, kref);
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
- unsigned long delay = HZ + HZ * prandom_u32_max(4);
+ unsigned long delay = HZ + HZ * get_random_u32_below(4);
pr_info("kobject: '%s' (%p): %s, parent %p (delayed %ld)\n",
kobject_name(kobj), kobj, __func__, kobj->parent, delay);
INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup);
diff --git a/lib/kunit/assert.c b/lib/kunit/assert.c
index d00d6d181ee8..f5b50babe38d 100644
--- a/lib/kunit/assert.c
+++ b/lib/kunit/assert.c
@@ -127,13 +127,15 @@ void kunit_binary_assert_format(const struct kunit_assert *assert,
binary_assert->text->right_text);
if (!is_literal(stream->test, binary_assert->text->left_text,
binary_assert->left_value, stream->gfp))
- string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld\n",
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld (0x%llx)\n",
binary_assert->text->left_text,
+ binary_assert->left_value,
binary_assert->left_value);
if (!is_literal(stream->test, binary_assert->text->right_text,
binary_assert->right_value, stream->gfp))
- string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld",
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s == %lld (0x%llx)",
binary_assert->text->right_text,
+ binary_assert->right_value,
binary_assert->right_value);
kunit_assert_print_msg(message, stream);
}
@@ -204,3 +206,59 @@ void kunit_binary_str_assert_format(const struct kunit_assert *assert,
kunit_assert_print_msg(message, stream);
}
EXPORT_SYMBOL_GPL(kunit_binary_str_assert_format);
+
+/* Adds a hexdump of a buffer to a string_stream comparing it with
+ * a second buffer. The differing bytes are marked with <>.
+ */
+static void kunit_assert_hexdump(struct string_stream *stream,
+ const void *buf,
+ const void *compared_buf,
+ const size_t len)
+{
+ size_t i;
+ const u8 *buf1 = buf;
+ const u8 *buf2 = compared_buf;
+
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT);
+
+ for (i = 0; i < len; ++i) {
+ if (!(i % 16) && i)
+ string_stream_add(stream, "\n" KUNIT_SUBSUBTEST_INDENT);
+
+ if (buf1[i] != buf2[i])
+ string_stream_add(stream, "<%02x>", buf1[i]);
+ else
+ string_stream_add(stream, " %02x ", buf1[i]);
+ }
+}
+
+void kunit_mem_assert_format(const struct kunit_assert *assert,
+ const struct va_format *message,
+ struct string_stream *stream)
+{
+ struct kunit_mem_assert *mem_assert;
+
+ mem_assert = container_of(assert, struct kunit_mem_assert,
+ assert);
+
+ string_stream_add(stream,
+ KUNIT_SUBTEST_INDENT "Expected %s %s %s, but\n",
+ mem_assert->text->left_text,
+ mem_assert->text->operation,
+ mem_assert->text->right_text);
+
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n",
+ mem_assert->text->left_text);
+ kunit_assert_hexdump(stream, mem_assert->left_value,
+ mem_assert->right_value, mem_assert->size);
+
+ string_stream_add(stream, "\n");
+
+ string_stream_add(stream, KUNIT_SUBSUBTEST_INDENT "%s ==\n",
+ mem_assert->text->right_text);
+ kunit_assert_hexdump(stream, mem_assert->right_value,
+ mem_assert->left_value, mem_assert->size);
+
+ kunit_assert_print_msg(message, stream);
+}
+EXPORT_SYMBOL_GPL(kunit_mem_assert_format);
diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
index 1048ef1b8d6e..de0ee2e03ed6 100644
--- a/lib/kunit/debugfs.c
+++ b/lib/kunit/debugfs.c
@@ -63,7 +63,7 @@ static int debugfs_print_results(struct seq_file *seq, void *v)
kunit_suite_for_each_test_case(suite, test_case)
debugfs_print_result(seq, suite, test_case);
- seq_printf(seq, "%s %d - %s\n",
+ seq_printf(seq, "%s %d %s\n",
kunit_status_to_ok_not_ok(success), 1, suite->name);
return 0;
}
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 9bbc422c284b..74982b83707c 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -166,7 +166,7 @@ static void kunit_exec_run_tests(struct suite_set *suite_set)
{
size_t num_suites = suite_set->end - suite_set->start;
- pr_info("TAP version 14\n");
+ pr_info("KTAP version 1\n");
pr_info("1..%zu\n", num_suites);
__kunit_test_suites_init(suite_set->start, num_suites);
@@ -177,8 +177,8 @@ static void kunit_exec_list_tests(struct suite_set *suite_set)
struct kunit_suite * const *suites;
struct kunit_case *test_case;
- /* Hack: print a tap header so kunit.py can find the start of KUnit output. */
- pr_info("TAP version 14\n");
+ /* Hack: print a ktap header so kunit.py can find the start of KUnit output. */
+ pr_info("KTAP version 1\n");
for (suites = suite_set->start; suites < suite_set->end; suites++)
kunit_suite_for_each_test_case((*suites), test_case) {
diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c
index f8fe582c9e36..66cc4e2365ec 100644
--- a/lib/kunit/kunit-example-test.c
+++ b/lib/kunit/kunit-example-test.c
@@ -86,6 +86,9 @@ static void example_mark_skipped_test(struct kunit *test)
*/
static void example_all_expect_macros_test(struct kunit *test)
{
+ const u32 array1[] = { 0x0F, 0xFF };
+ const u32 array2[] = { 0x1F, 0xFF };
+
/* Boolean assertions */
KUNIT_EXPECT_TRUE(test, true);
KUNIT_EXPECT_FALSE(test, false);
@@ -109,6 +112,10 @@ static void example_all_expect_macros_test(struct kunit *test)
KUNIT_EXPECT_STREQ(test, "hi", "hi");
KUNIT_EXPECT_STRNEQ(test, "hi", "bye");
+ /* Memory block assertions */
+ KUNIT_EXPECT_MEMEQ(test, array1, array1, sizeof(array1));
+ KUNIT_EXPECT_MEMNEQ(test, array1, array2, sizeof(array1));
+
/*
* There are also ASSERT variants of all of the above that abort test
* execution if they fail. Useful for memory allocations, etc.
diff --git a/lib/kunit/string-stream.c b/lib/kunit/string-stream.c
index a608746020a9..f5f51166d8c2 100644
--- a/lib/kunit/string-stream.c
+++ b/lib/kunit/string-stream.c
@@ -131,11 +131,6 @@ bool string_stream_is_empty(struct string_stream *stream)
return list_empty(&stream->fragments);
}
-struct string_stream_alloc_context {
- struct kunit *test;
- gfp_t gfp;
-};
-
struct string_stream *alloc_string_stream(struct kunit *test, gfp_t gfp)
{
struct string_stream *stream;
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 2a6992fe7c3e..c9ebf975e56b 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -20,6 +20,8 @@
#include "string-stream.h"
#include "try-catch-impl.h"
+DEFINE_STATIC_KEY_FALSE(kunit_running);
+
#if IS_BUILTIN(CONFIG_KUNIT)
/*
* Fail the current test and print an error message to the log.
@@ -149,6 +151,7 @@ EXPORT_SYMBOL_GPL(kunit_suite_num_test_cases);
static void kunit_print_suite_start(struct kunit_suite *suite)
{
+ kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "KTAP version 1\n");
kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "# Subtest: %s",
suite->name);
kunit_log(KERN_INFO, suite, KUNIT_SUBTEST_INDENT "1..%zd",
@@ -175,13 +178,13 @@ static void kunit_print_ok_not_ok(void *test_or_suite,
* representation.
*/
if (suite)
- pr_info("%s %zd - %s%s%s\n",
+ pr_info("%s %zd %s%s%s\n",
kunit_status_to_ok_not_ok(status),
test_number, description, directive_header,
(status == KUNIT_SKIPPED) ? directive : "");
else
kunit_log(KERN_INFO, test,
- KUNIT_SUBTEST_INDENT "%s %zd - %s%s%s",
+ KUNIT_SUBTEST_INDENT "%s %zd %s%s%s",
kunit_status_to_ok_not_ok(status),
test_number, description, directive_header,
(status == KUNIT_SKIPPED) ? directive : "");
@@ -543,6 +546,8 @@ int kunit_run_tests(struct kunit_suite *suite)
param_desc[0] = '\0';
test.param_value = test_case->generate_params(NULL, param_desc);
kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
+ "KTAP version 1\n");
+ kunit_log(KERN_INFO, &test, KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
"# Subtest: %s", test_case->name);
while (test.param_value) {
@@ -555,7 +560,7 @@ int kunit_run_tests(struct kunit_suite *suite)
kunit_log(KERN_INFO, &test,
KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT
- "%s %d - %s",
+ "%s %d %s",
kunit_status_to_ok_not_ok(test.status),
test.param_index + 1, param_desc);
@@ -612,10 +617,14 @@ int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_
return 0;
}
+ static_branch_inc(&kunit_running);
+
for (i = 0; i < num_suites; i++) {
kunit_init_suite(suites[i]);
kunit_run_tests(suites[i]);
}
+
+ static_branch_dec(&kunit_running);
return 0;
}
EXPORT_SYMBOL_GPL(__kunit_test_suites_init);
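
The kunit_running static key flipped on around the suite loop above is intended to keep hooks in non-test code cheap; a hedged sketch of how such a hook site might guard itself (the helper below is hypothetical):

DECLARE_STATIC_KEY_FALSE(kunit_running);

static inline void maybe_notify_kunit(void)
{
	/* Compiles to a patched-out branch whenever no KUnit tests run. */
	if (!static_branch_unlikely(&kunit_running))
		return;
	/* ... e.g. look up current->kunit_test and report to it ... */
}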
diff --git a/lib/llist.c b/lib/llist.c
index 7d78b736e8af..6e668fa5a2c6 100644
--- a/lib/llist.c
+++ b/lib/llist.c
@@ -26,10 +26,10 @@
bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last,
struct llist_head *head)
{
- struct llist_node *first;
+ struct llist_node *first = READ_ONCE(head->first);
do {
- new_last->next = first = READ_ONCE(head->first);
+ new_last->next = first;
} while (!try_cmpxchg(&head->first, &first, new_first));
return !first;
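
The rewrite above relies on try_cmpxchg() writing the freshly observed head->first back into "first" on failure, so the explicit re-read inside the loop is no longer needed. For comparison only, the equivalent open-coded cmpxchg() form would be:

/* Comparison only; the patched llist_add_batch() already does this. */
first = READ_ONCE(head->first);
for (;;) {
	struct llist_node *seen;

	new_last->next = first;
	seen = cmpxchg(&head->first, first, new_first);
	if (seen == first)
		break;
	first = seen;	/* try_cmpxchg() performs this update implicitly */
}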
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index dc35464216d3..b3d9187611de 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -60,17 +60,6 @@ int lc_try_lock(struct lru_cache *lc)
} while (unlikely (val == LC_PARANOIA));
/* Spin until no-one is inside a PARANOIA_ENTRY()/RETURN() section. */
return 0 == val;
-#if 0
- /* Alternative approach, spin in case someone enters or leaves a
- * PARANOIA_ENTRY()/RETURN() section. */
- unsigned long old, new, val;
- do {
- old = lc->flags & LC_PARANOIA;
- new = old | LC_LOCKED;
- val = cmpxchg(&lc->flags, old, new);
- } while (unlikely (val == (old ^ LC_PARANOIA)));
- return old == val;
-#endif
}
/**
@@ -364,7 +353,7 @@ static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, unsig
struct lc_element *e;
PARANOIA_ENTRY();
- if (lc->flags & LC_STARVING) {
+ if (test_bit(__LC_STARVING, &lc->flags)) {
++lc->starving;
RETURN(NULL);
}
@@ -417,7 +406,7 @@ static struct lc_element *__lc_get(struct lru_cache *lc, unsigned int enr, unsig
* the LRU element, we have to wait ...
*/
if (!lc_unused_element_available(lc)) {
- __set_bit(__LC_STARVING, &lc->flags);
+ set_bit(__LC_STARVING, &lc->flags);
RETURN(NULL);
}
@@ -586,48 +575,6 @@ struct lc_element *lc_element_by_index(struct lru_cache *lc, unsigned i)
}
/**
- * lc_index_of
- * @lc: the lru cache to operate on
- * @e: the element to query for its index position in lc->element
- */
-unsigned int lc_index_of(struct lru_cache *lc, struct lc_element *e)
-{
- PARANOIA_LC_ELEMENT(lc, e);
- return e->lc_index;
-}
-
-/**
- * lc_set - associate index with label
- * @lc: the lru cache to operate on
- * @enr: the label to set
- * @index: the element index to associate label with.
- *
- * Used to initialize the active set to some previously recorded state.
- */
-void lc_set(struct lru_cache *lc, unsigned int enr, int index)
-{
- struct lc_element *e;
- struct list_head *lh;
-
- if (index < 0 || index >= lc->nr_elements)
- return;
-
- e = lc_element_by_index(lc, index);
- BUG_ON(e->lc_number != e->lc_new_number);
- BUG_ON(e->refcnt != 0);
-
- e->lc_number = e->lc_new_number = enr;
- hlist_del_init(&e->colision);
- if (enr == LC_FREE)
- lh = &lc->free;
- else {
- hlist_add_head(&e->colision, lc_hash_slot(lc, enr));
- lh = &lc->lru;
- }
- list_move(&e->list, lh);
-}
-
-/**
* lc_seq_dump_details - Dump a complete LRU cache to seq in textual form.
* @lc: the lru cache to operate on
* @seq: the &struct seq_file pointer to seq_printf into
@@ -661,7 +608,6 @@ void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext
EXPORT_SYMBOL(lc_create);
EXPORT_SYMBOL(lc_reset);
EXPORT_SYMBOL(lc_destroy);
-EXPORT_SYMBOL(lc_set);
EXPORT_SYMBOL(lc_del);
EXPORT_SYMBOL(lc_try_get);
EXPORT_SYMBOL(lc_find);
@@ -669,7 +615,6 @@ EXPORT_SYMBOL(lc_get);
EXPORT_SYMBOL(lc_put);
EXPORT_SYMBOL(lc_committed);
EXPORT_SYMBOL(lc_element_by_index);
-EXPORT_SYMBOL(lc_index_of);
EXPORT_SYMBOL(lc_seq_printf_stats);
EXPORT_SYMBOL(lc_seq_dump_details);
EXPORT_SYMBOL(lc_try_lock);
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index df352f6ccc24..3fe1491d2bf9 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -323,14 +323,19 @@ static inline void *mte_safe_root(const struct maple_enode *node)
return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}
-static inline void mte_set_full(const struct maple_enode *node)
+static inline void *mte_set_full(const struct maple_enode *node)
{
- node = (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
+ return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}
-static inline void mte_clear_full(const struct maple_enode *node)
+static inline void *mte_clear_full(const struct maple_enode *node)
{
- node = (void *)((unsigned long)node | MAPLE_ENODE_NULL);
+ return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
+}
+
+static inline bool mte_has_null(const struct maple_enode *node)
+{
+ return (unsigned long)node & MAPLE_ENODE_NULL;
}
static inline bool ma_is_root(struct maple_node *node)
diff --git a/lib/math/div64.c b/lib/math/div64.c
index 46866394fc84..55a81782e271 100644
--- a/lib/math/div64.c
+++ b/lib/math/div64.c
@@ -63,12 +63,6 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
EXPORT_SYMBOL(__div64_32);
#endif
-/**
- * div_s64_rem - signed 64bit divide with 64bit divisor and remainder
- * @dividend: 64bit dividend
- * @divisor: 64bit divisor
- * @remainder: 64bit remainder
- */
#ifndef div_s64_rem
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
@@ -89,7 +83,7 @@ s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
EXPORT_SYMBOL(div_s64_rem);
#endif
-/**
+/*
* div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
* @dividend: 64bit dividend
* @divisor: 64bit divisor
@@ -129,7 +123,7 @@ u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
EXPORT_SYMBOL(div64_u64_rem);
#endif
-/**
+/*
* div64_u64 - unsigned 64bit divide with 64bit divisor
* @dividend: 64bit dividend
* @divisor: 64bit divisor
@@ -163,11 +157,6 @@ u64 div64_u64(u64 dividend, u64 divisor)
EXPORT_SYMBOL(div64_u64);
#endif
-/**
- * div64_s64 - signed 64bit divide with 64bit divisor
- * @dividend: 64bit dividend
- * @divisor: 64bit divisor
- */
#ifndef div64_s64
s64 div64_s64(s64 dividend, s64 divisor)
{
diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
index 2b5cc70ac53f..89128551448d 100644
--- a/lib/memcpy_kunit.c
+++ b/lib/memcpy_kunit.c
@@ -105,6 +105,8 @@ static void memcpy_test(struct kunit *test)
#undef TEST_OP
}
+static unsigned char larger_array [2048];
+
static void memmove_test(struct kunit *test)
{
#define TEST_OP "memmove"
@@ -179,6 +181,26 @@ static void memmove_test(struct kunit *test)
ptr = &overlap.data[2];
memmove(ptr, overlap.data, 5);
compare("overlapping write", overlap, overlap_expected);
+
+ /* Verify larger overlapping moves. */
+ larger_array[256] = 0xAAu;
+ /*
+ * Test a backwards overlapping memmove first. 256 and 1024 are
+ * important for i386 to use rep movsl.
+ */
+ memmove(larger_array, larger_array + 256, 1024);
+ KUNIT_ASSERT_EQ(test, larger_array[0], 0xAAu);
+ KUNIT_ASSERT_EQ(test, larger_array[256], 0x00);
+ KUNIT_ASSERT_NULL(test,
+ memchr(larger_array + 1, 0xaa, ARRAY_SIZE(larger_array) - 1));
+ /* Test a forwards overlapping memmove. */
+ larger_array[0] = 0xBBu;
+ memmove(larger_array + 256, larger_array, 1024);
+ KUNIT_ASSERT_EQ(test, larger_array[0], 0xBBu);
+ KUNIT_ASSERT_EQ(test, larger_array[256], 0xBBu);
+ KUNIT_ASSERT_NULL(test, memchr(larger_array + 1, 0xBBu, 256 - 1));
+ KUNIT_ASSERT_NULL(test,
+ memchr(larger_array + 257, 0xBBu, ARRAY_SIZE(larger_array) - 257));
#undef TEST_OP
}
@@ -270,6 +292,208 @@ static void memset_test(struct kunit *test)
#undef TEST_OP
}
+static u8 large_src[1024];
+static u8 large_dst[2048];
+static const u8 large_zero[2048];
+
+static void set_random_nonzero(struct kunit *test, u8 *byte)
+{
+ int failed_rng = 0;
+
+ while (*byte == 0) {
+ get_random_bytes(byte, 1);
+ KUNIT_ASSERT_LT_MSG(test, failed_rng++, 100,
+ "Is the RNG broken?");
+ }
+}
+
+static void init_large(struct kunit *test)
+{
+
+ /* Get many bit patterns. */
+ get_random_bytes(large_src, ARRAY_SIZE(large_src));
+
+ /* Make sure we have non-zero edges. */
+ set_random_nonzero(test, &large_src[0]);
+ set_random_nonzero(test, &large_src[ARRAY_SIZE(large_src) - 1]);
+
+ /* Explicitly zero the entire destination. */
+ memset(large_dst, 0, ARRAY_SIZE(large_dst));
+}
+
+/*
+ * Instead of an indirect function call for "copy" or a giant macro,
+ * use a bool to pick memcpy or memmove.
+ */
+static void copy_large_test(struct kunit *test, bool use_memmove)
+{
+ init_large(test);
+
+ /* Copy a growing number of non-overlapping bytes ... */
+ for (int bytes = 1; bytes <= ARRAY_SIZE(large_src); bytes++) {
+ /* Over a shifting destination window ... */
+ for (int offset = 0; offset < ARRAY_SIZE(large_src); offset++) {
+ int right_zero_pos = offset + bytes;
+ int right_zero_size = ARRAY_SIZE(large_dst) - right_zero_pos;
+
+ /* Copy! */
+ if (use_memmove)
+ memmove(large_dst + offset, large_src, bytes);
+ else
+ memcpy(large_dst + offset, large_src, bytes);
+
+ /* Did we touch anything before the copy area? */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(large_dst, large_zero, offset), 0,
+ "with size %d at offset %d", bytes, offset);
+ /* Did we touch anything after the copy area? */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(&large_dst[right_zero_pos], large_zero, right_zero_size), 0,
+ "with size %d at offset %d", bytes, offset);
+
+ /* Are we byte-for-byte exact across the copy? */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(large_dst + offset, large_src, bytes), 0,
+ "with size %d at offset %d", bytes, offset);
+
+ /* Zero out what we copied for the next cycle. */
+ memset(large_dst + offset, 0, bytes);
+ }
+ /* Avoid stall warnings if this loop gets slow. */
+ cond_resched();
+ }
+}
+
+static void memcpy_large_test(struct kunit *test)
+{
+ copy_large_test(test, false);
+}
+
+static void memmove_large_test(struct kunit *test)
+{
+ copy_large_test(test, true);
+}
+
+/*
+ * On the assumption that boundary conditions are going to be the most
+ * sensitive, instead of taking a full step (inc) each iteration,
+ * take single index steps for at least the first "inc"-many indexes
+ * from the "start" and at least the last "inc"-many indexes before
+ * the "end". When in the middle, take full "inc"-wide steps. For
+ * example, calling next_step(idx, 1, 15, 3) with idx starting at 0
+ * would see the following pattern: 1 2 3 4 7 10 11 12 13 14 15.
+ */
+static int next_step(int idx, int start, int end, int inc)
+{
+ start += inc;
+ end -= inc;
+
+ if (idx < start || idx + inc > end)
+ inc = 1;
+ return idx + inc;
+}
+
+static void inner_loop(struct kunit *test, int bytes, int d_off, int s_off)
+{
+ int left_zero_pos, left_zero_size;
+ int right_zero_pos, right_zero_size;
+ int src_pos, src_orig_pos, src_size;
+ int pos;
+
+ /* Place the source in the destination buffer. */
+ memcpy(&large_dst[s_off], large_src, bytes);
+
+ /* Copy to destination offset. */
+ memmove(&large_dst[d_off], &large_dst[s_off], bytes);
+
+ /* Make sure destination entirely matches. */
+ KUNIT_ASSERT_EQ_MSG(test, memcmp(&large_dst[d_off], large_src, bytes), 0,
+ "with size %d at src offset %d and dest offset %d",
+ bytes, s_off, d_off);
+
+ /* Calculate the expected zero spans. */
+ if (s_off < d_off) {
+ left_zero_pos = 0;
+ left_zero_size = s_off;
+
+ right_zero_pos = d_off + bytes;
+ right_zero_size = ARRAY_SIZE(large_dst) - right_zero_pos;
+
+ src_pos = s_off;
+ src_orig_pos = 0;
+ src_size = d_off - s_off;
+ } else {
+ left_zero_pos = 0;
+ left_zero_size = d_off;
+
+ right_zero_pos = s_off + bytes;
+ right_zero_size = ARRAY_SIZE(large_dst) - right_zero_pos;
+
+ src_pos = d_off + bytes;
+ src_orig_pos = src_pos - s_off;
+ src_size = right_zero_pos - src_pos;
+ }
+
+ /* Check non-overlapping source is unchanged.*/
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(&large_dst[src_pos], &large_src[src_orig_pos], src_size), 0,
+ "with size %d at src offset %d and dest offset %d",
+ bytes, s_off, d_off);
+
+ /* Check leading buffer contents are zero. */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(&large_dst[left_zero_pos], large_zero, left_zero_size), 0,
+ "with size %d at src offset %d and dest offset %d",
+ bytes, s_off, d_off);
+ /* Check trailing buffer contents are zero. */
+ KUNIT_ASSERT_EQ_MSG(test,
+ memcmp(&large_dst[right_zero_pos], large_zero, right_zero_size), 0,
+ "with size %d at src offset %d and dest offset %d",
+ bytes, s_off, d_off);
+
+ /* Zero out everything not already zeroed.*/
+ pos = left_zero_pos + left_zero_size;
+ memset(&large_dst[pos], 0, right_zero_pos - pos);
+}
+
+static void memmove_overlap_test(struct kunit *test)
+{
+ /*
+ * Running all possible offset and overlap combinations takes a
+ * very long time. Instead, only check up to 128 bytes offset
+ * into the destination buffer (which should result in crossing
+ * cachelines), with a step size of 1 through 7 to try to skip some
+ * redundancy.
+ */
+ static const int offset_max = 128; /* less than ARRAY_SIZE(large_src); */
+ static const int bytes_step = 7;
+ static const int window_step = 7;
+
+ static const int bytes_start = 1;
+ static const int bytes_end = ARRAY_SIZE(large_src) + 1;
+
+ init_large(test);
+
+ /* Copy a growing number of overlapping bytes ... */
+ for (int bytes = bytes_start; bytes < bytes_end;
+ bytes = next_step(bytes, bytes_start, bytes_end, bytes_step)) {
+
+ /* Over a shifting destination window ... */
+ for (int d_off = 0; d_off < offset_max; d_off++) {
+ int s_start = max(d_off - bytes, 0);
+ int s_end = min_t(int, d_off + bytes, ARRAY_SIZE(large_src));
+
+ /* Over a shifting source window ... */
+ for (int s_off = s_start; s_off < s_end;
+ s_off = next_step(s_off, s_start, s_end, window_step))
+ inner_loop(test, bytes, d_off, s_off);
+
+ /* Avoid stall warnings. */
+ cond_resched();
+ }
+ }
+}
+
static void strtomem_test(struct kunit *test)
{
static const char input[sizeof(unsigned long)] = "hi";
@@ -325,7 +549,10 @@ static void strtomem_test(struct kunit *test)
static struct kunit_case memcpy_test_cases[] = {
KUNIT_CASE(memset_test),
KUNIT_CASE(memcpy_test),
+ KUNIT_CASE(memcpy_large_test),
KUNIT_CASE(memmove_test),
+ KUNIT_CASE(memmove_large_test),
+ KUNIT_CASE(memmove_overlap_test),
KUNIT_CASE(strtomem_test),
{}
};
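
To make next_step()'s boundary stepping concrete, a small stand-alone sketch that reproduces the index pattern described in its comment (kept outside the test cases, for illustration only):

/* Prints " 1 2 3 4 7 10 11 12 13 14 15", matching the next_step() comment. */
static void __maybe_unused show_next_step_pattern(void)
{
	int idx;

	for (idx = next_step(0, 1, 15, 3); idx <= 15;
	     idx = next_step(idx, 1, 15, 3))
		pr_cont(" %d", idx);
	pr_cont("\n");
}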
diff --git a/lib/net_utils.c b/lib/net_utils.c
index af525353395d..c17201df3d08 100644
--- a/lib/net_utils.c
+++ b/lib/net_utils.c
@@ -6,10 +6,11 @@
bool mac_pton(const char *s, u8 *mac)
{
+ size_t maxlen = 3 * ETH_ALEN - 1;
int i;
/* XX:XX:XX:XX:XX:XX */
- if (strlen(s) < 3 * ETH_ALEN - 1)
+ if (strnlen(s, maxlen) < maxlen)
return false;
/* Don't dirty result unless string is valid MAC. */
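
The caller-visible contract of mac_pton() is unchanged by the strnlen() conversion; a minimal hedged usage sketch:

static bool __maybe_unused parse_example_mac(void)
{
	u8 mac[ETH_ALEN];

	/* Returns false, leaving "mac" untouched, unless all six octets parse. */
	if (!mac_pton("00:11:22:33:44:55", mac))
		return false;
	pr_info("parsed %pM\n", mac);
	return true;
}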
diff --git a/lib/nlattr.c b/lib/nlattr.c
index b67a53e29b8f..9055e8b4d144 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -646,7 +646,7 @@ EXPORT_SYMBOL(__nla_validate);
/**
* nla_policy_len - Determine the max. length of a policy
- * @policy: policy to use
+ * @p: policy to use
* @n: number of policies
*
* Determines the max. length of the policy. It is currently used
diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c
index 21016b32d313..2b24ea6c9497 100644
--- a/lib/notifier-error-inject.c
+++ b/lib/notifier-error-inject.c
@@ -15,7 +15,7 @@ static int debugfs_errno_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_errno, debugfs_errno_get, debugfs_errno_set,
+DEFINE_SIMPLE_ATTRIBUTE_SIGNED(fops_errno, debugfs_errno_get, debugfs_errno_set,
"%lld\n");
static struct dentry *debugfs_create_errno(const char *name, umode_t mode,
diff --git a/lib/oid_registry.c b/lib/oid_registry.c
index e592d48b1974..fe6705cfd780 100644
--- a/lib/oid_registry.c
+++ b/lib/oid_registry.c
@@ -146,7 +146,6 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
bufsize -= count;
while (v < end) {
- num = 0;
n = *v++;
if (!(n & 0x80)) {
num = n;
diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c
index b8556a2e7bb1..dcd3ba102db6 100644
--- a/lib/overflow_kunit.c
+++ b/lib/overflow_kunit.c
@@ -736,6 +736,384 @@ static void overflow_size_helpers_test(struct kunit *test)
#undef check_one_size_helper
}
+static void overflows_type_test(struct kunit *test)
+{
+ int count = 0;
+ unsigned int var;
+
+#define __TEST_OVERFLOWS_TYPE(func, arg1, arg2, of) do { \
+ bool __of = func(arg1, arg2); \
+ KUNIT_EXPECT_EQ_MSG(test, __of, of, \
+ "expected " #func "(" #arg1 ", " #arg2 " to%s overflow\n",\
+ of ? "" : " not"); \
+ count++; \
+} while (0)
+
+/* Args are: first type, second type, value, overflow expected */
+#define TEST_OVERFLOWS_TYPE(__t1, __t2, v, of) do { \
+ __t1 t1 = (v); \
+ __t2 t2; \
+ __TEST_OVERFLOWS_TYPE(__overflows_type, t1, t2, of); \
+ __TEST_OVERFLOWS_TYPE(__overflows_type, t1, __t2, of); \
+ __TEST_OVERFLOWS_TYPE(__overflows_type_constexpr, t1, t2, of); \
+ __TEST_OVERFLOWS_TYPE(__overflows_type_constexpr, t1, __t2, of);\
+} while (0)
+
+ TEST_OVERFLOWS_TYPE(u8, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u8, u16, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u8, s8, U8_MAX, true);
+ TEST_OVERFLOWS_TYPE(u8, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u8, s8, (u8)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u8, s16, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u8, -1, true);
+ TEST_OVERFLOWS_TYPE(s8, u8, S8_MIN, true);
+ TEST_OVERFLOWS_TYPE(s8, u16, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u16, -1, true);
+ TEST_OVERFLOWS_TYPE(s8, u16, S8_MIN, true);
+ TEST_OVERFLOWS_TYPE(s8, u32, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u32, -1, true);
+ TEST_OVERFLOWS_TYPE(s8, u32, S8_MIN, true);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(s8, u64, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, u64, -1, true);
+ TEST_OVERFLOWS_TYPE(s8, u64, S8_MIN, true);
+#endif
+ TEST_OVERFLOWS_TYPE(s8, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, s8, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(s8, s16, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s8, s16, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(u16, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u16, u8, (u16)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u16, u8, U16_MAX, true);
+ TEST_OVERFLOWS_TYPE(u16, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u16, s8, (u16)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u16, s8, U16_MAX, true);
+ TEST_OVERFLOWS_TYPE(u16, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u16, s16, (u16)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u16, s16, U16_MAX, true);
+ TEST_OVERFLOWS_TYPE(u16, u32, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u16, s32, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u8, (s16)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s16, u8, -1, true);
+ TEST_OVERFLOWS_TYPE(s16, u8, S16_MIN, true);
+ TEST_OVERFLOWS_TYPE(s16, u16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u16, -1, true);
+ TEST_OVERFLOWS_TYPE(s16, u16, S16_MIN, true);
+ TEST_OVERFLOWS_TYPE(s16, u32, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u32, -1, true);
+ TEST_OVERFLOWS_TYPE(s16, u32, S16_MIN, true);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(s16, u64, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, u64, -1, true);
+ TEST_OVERFLOWS_TYPE(s16, u64, S16_MIN, true);
+#endif
+ TEST_OVERFLOWS_TYPE(s16, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, s8, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(s16, s8, (s16)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s16, s8, (s16)S8_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s16, s8, S16_MAX, true);
+ TEST_OVERFLOWS_TYPE(s16, s8, S16_MIN, true);
+ TEST_OVERFLOWS_TYPE(s16, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, s16, S16_MIN, false);
+ TEST_OVERFLOWS_TYPE(s16, s32, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s16, s32, S16_MIN, false);
+ TEST_OVERFLOWS_TYPE(u32, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, u8, (u32)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u32, u8, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s8, (u32)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u32, s8, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, u16, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, u16, U16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u32, u16, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s16, (u32)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u32, s16, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, u32, U32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s32, U32_MAX, true);
+ TEST_OVERFLOWS_TYPE(u32, s32, (u32)S32_MAX + 1, true);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(u32, u64, U32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u32, s64, U32_MAX, false);
+#endif
+ TEST_OVERFLOWS_TYPE(s32, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, u8, (s32)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s32, u16, S32_MAX, true);
+ TEST_OVERFLOWS_TYPE(s32, u8, -1, true);
+ TEST_OVERFLOWS_TYPE(s32, u8, S32_MIN, true);
+ TEST_OVERFLOWS_TYPE(s32, u16, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, u16, (s32)U16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s32, u16, S32_MAX, true);
+ TEST_OVERFLOWS_TYPE(s32, u16, -1, true);
+ TEST_OVERFLOWS_TYPE(s32, u16, S32_MIN, true);
+ TEST_OVERFLOWS_TYPE(s32, u32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, u32, -1, true);
+ TEST_OVERFLOWS_TYPE(s32, u32, S32_MIN, true);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(s32, u64, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, u64, -1, true);
+ TEST_OVERFLOWS_TYPE(s32, u64, S32_MIN, true);
+#endif
+ TEST_OVERFLOWS_TYPE(s32, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, s8, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(s32, s8, (s32)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s32, s8, (s32)S8_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s32, s8, S32_MAX, true);
+ TEST_OVERFLOWS_TYPE(s32, s8, S32_MIN, true);
+ TEST_OVERFLOWS_TYPE(s32, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, s16, S16_MIN, false);
+ TEST_OVERFLOWS_TYPE(s32, s16, (s32)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s32, s16, (s32)S16_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s32, s16, S32_MAX, true);
+ TEST_OVERFLOWS_TYPE(s32, s16, S32_MIN, true);
+ TEST_OVERFLOWS_TYPE(s32, s32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, s32, S32_MIN, false);
+#if BITS_PER_LONG == 64
+ TEST_OVERFLOWS_TYPE(s32, s64, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s32, s64, S32_MIN, false);
+ TEST_OVERFLOWS_TYPE(u64, u8, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, u8, (u64)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, u16, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, u16, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, u16, (u64)U16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, u32, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, u32, U32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, u32, (u64)U32_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, u64, U64_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s8, (u64)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, s8, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s16, (u64)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, s16, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, s32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s32, (u64)S32_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(u64, s32, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, s64, S64_MAX, false);
+ TEST_OVERFLOWS_TYPE(u64, s64, U64_MAX, true);
+ TEST_OVERFLOWS_TYPE(u64, s64, (u64)S64_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, u8, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, u8, S64_MIN, true);
+ TEST_OVERFLOWS_TYPE(s64, u8, -1, true);
+ TEST_OVERFLOWS_TYPE(s64, u8, U8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, u8, (s64)U8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, u16, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, u16, S64_MIN, true);
+ TEST_OVERFLOWS_TYPE(s64, u16, -1, true);
+ TEST_OVERFLOWS_TYPE(s64, u16, U16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, u16, (s64)U16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, u32, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, u32, S64_MIN, true);
+ TEST_OVERFLOWS_TYPE(s64, u32, -1, true);
+ TEST_OVERFLOWS_TYPE(s64, u32, U32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, u32, (s64)U32_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, u64, S64_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, u64, S64_MIN, true);
+ TEST_OVERFLOWS_TYPE(s64, u64, -1, true);
+ TEST_OVERFLOWS_TYPE(s64, s8, S8_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, s8, S8_MIN, false);
+ TEST_OVERFLOWS_TYPE(s64, s8, (s64)S8_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s8, (s64)S8_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s8, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, s16, S16_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, s16, S16_MIN, false);
+ TEST_OVERFLOWS_TYPE(s64, s16, (s64)S16_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s16, (s64)S16_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s16, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, s32, S32_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, s32, S32_MIN, false);
+ TEST_OVERFLOWS_TYPE(s64, s32, (s64)S32_MAX + 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s32, (s64)S32_MIN - 1, true);
+ TEST_OVERFLOWS_TYPE(s64, s32, S64_MAX, true);
+ TEST_OVERFLOWS_TYPE(s64, s64, S64_MAX, false);
+ TEST_OVERFLOWS_TYPE(s64, s64, S64_MIN, false);
+#endif
+
+ /* Check for macro side-effects. */
+ var = INT_MAX - 1;
+ __TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, false);
+ __TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, false);
+ __TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, true);
+ var = INT_MAX - 1;
+ __TEST_OVERFLOWS_TYPE(overflows_type, var++, int, false);
+ __TEST_OVERFLOWS_TYPE(overflows_type, var++, int, false);
+ __TEST_OVERFLOWS_TYPE(overflows_type, var++, int, true);
+
+ kunit_info(test, "%d overflows_type() tests finished\n", count);
+#undef TEST_OVERFLOWS_TYPE
+#undef __TEST_OVERFLOWS_TYPE
+}
+
+static void same_type_test(struct kunit *test)
+{
+ int count = 0;
+ int var;
+
+#define TEST_SAME_TYPE(t1, t2, same) do { \
+ typeof(t1) __t1h = type_max(t1); \
+ typeof(t1) __t1l = type_min(t1); \
+ typeof(t2) __t2h = type_max(t2); \
+ typeof(t2) __t2l = type_min(t2); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(t1, __t1h)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(t1, __t1l)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(__t1h, t1)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(__t1l, t1)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(t2, __t2h)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(t2, __t2l)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(__t2h, t2)); \
+ KUNIT_EXPECT_EQ(test, true, __same_type(__t2l, t2)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t1, t2)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t2, __t1h)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t2, __t1l)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(__t1h, t2)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(__t1l, t2)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t1, __t2h)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(t1, __t2l)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(__t2h, t1)); \
+ KUNIT_EXPECT_EQ(test, same, __same_type(__t2l, t1)); \
+} while (0)
+
+#if BITS_PER_LONG == 64
+# define TEST_SAME_TYPE64(base, t, m) TEST_SAME_TYPE(base, t, m)
+#else
+# define TEST_SAME_TYPE64(base, t, m) do { } while (0)
+#endif
+
+#define TEST_TYPE_SETS(base, mu8, mu16, mu32, ms8, ms16, ms32, mu64, ms64) \
+do { \
+ TEST_SAME_TYPE(base, u8, mu8); \
+ TEST_SAME_TYPE(base, u16, mu16); \
+ TEST_SAME_TYPE(base, u32, mu32); \
+ TEST_SAME_TYPE(base, s8, ms8); \
+ TEST_SAME_TYPE(base, s16, ms16); \
+ TEST_SAME_TYPE(base, s32, ms32); \
+ TEST_SAME_TYPE64(base, u64, mu64); \
+ TEST_SAME_TYPE64(base, s64, ms64); \
+} while (0)
+
+ TEST_TYPE_SETS(u8, true, false, false, false, false, false, false, false);
+ TEST_TYPE_SETS(u16, false, true, false, false, false, false, false, false);
+ TEST_TYPE_SETS(u32, false, false, true, false, false, false, false, false);
+ TEST_TYPE_SETS(s8, false, false, false, true, false, false, false, false);
+ TEST_TYPE_SETS(s16, false, false, false, false, true, false, false, false);
+ TEST_TYPE_SETS(s32, false, false, false, false, false, true, false, false);
+#if BITS_PER_LONG == 64
+ TEST_TYPE_SETS(u64, false, false, false, false, false, false, true, false);
+ TEST_TYPE_SETS(s64, false, false, false, false, false, false, false, true);
+#endif
+
+ /* Check for macro side-effects. */
+ var = 4;
+ KUNIT_EXPECT_EQ(test, var, 4);
+ KUNIT_EXPECT_TRUE(test, __same_type(var++, int));
+ KUNIT_EXPECT_EQ(test, var, 4);
+ KUNIT_EXPECT_TRUE(test, __same_type(int, var++));
+ KUNIT_EXPECT_EQ(test, var, 4);
+ KUNIT_EXPECT_TRUE(test, __same_type(var++, var++));
+ KUNIT_EXPECT_EQ(test, var, 4);
+
+ kunit_info(test, "%d __same_type() tests finished\n", count);
+
+#undef TEST_TYPE_SETS
+#undef TEST_SAME_TYPE64
+#undef TEST_SAME_TYPE
+}
+
+static void castable_to_type_test(struct kunit *test)
+{
+ int count = 0;
+
+#define TEST_CASTABLE_TO_TYPE(arg1, arg2, pass) do { \
+ bool __pass = castable_to_type(arg1, arg2); \
+ KUNIT_EXPECT_EQ_MSG(test, __pass, pass, \
+ "expected castable_to_type(" #arg1 ", " #arg2 ") to%s pass\n",\
+ pass ? "" : " not"); \
+ count++; \
+} while (0)
+
+ TEST_CASTABLE_TO_TYPE(16, u8, true);
+ TEST_CASTABLE_TO_TYPE(16, u16, true);
+ TEST_CASTABLE_TO_TYPE(16, u32, true);
+ TEST_CASTABLE_TO_TYPE(16, s8, true);
+ TEST_CASTABLE_TO_TYPE(16, s16, true);
+ TEST_CASTABLE_TO_TYPE(16, s32, true);
+ TEST_CASTABLE_TO_TYPE(-16, s8, true);
+ TEST_CASTABLE_TO_TYPE(-16, s16, true);
+ TEST_CASTABLE_TO_TYPE(-16, s32, true);
+#if BITS_PER_LONG == 64
+ TEST_CASTABLE_TO_TYPE(16, u64, true);
+ TEST_CASTABLE_TO_TYPE(-16, s64, true);
+#endif
+
+#define TEST_CASTABLE_TO_TYPE_VAR(width) do { \
+ u ## width u ## width ## var = 0; \
+ s ## width s ## width ## var = 0; \
+ \
+ /* Constant expressions that fit types. */ \
+ TEST_CASTABLE_TO_TYPE(type_max(u ## width), u ## width, true); \
+ TEST_CASTABLE_TO_TYPE(type_min(u ## width), u ## width, true); \
+ TEST_CASTABLE_TO_TYPE(type_max(u ## width), u ## width ## var, true); \
+ TEST_CASTABLE_TO_TYPE(type_min(u ## width), u ## width ## var, true); \
+ TEST_CASTABLE_TO_TYPE(type_max(s ## width), s ## width, true); \
+ TEST_CASTABLE_TO_TYPE(type_min(s ## width), s ## width, true); \
+ TEST_CASTABLE_TO_TYPE(type_max(s ## width), s ## width ## var, true); \
+ TEST_CASTABLE_TO_TYPE(type_min(u ## width), s ## width ## var, true); \
+ /* Constant expressions that do not fit types. */ \
+ TEST_CASTABLE_TO_TYPE(type_max(u ## width), s ## width, false); \
+ TEST_CASTABLE_TO_TYPE(type_max(u ## width), s ## width ## var, false); \
+ TEST_CASTABLE_TO_TYPE(type_min(s ## width), u ## width, false); \
+ TEST_CASTABLE_TO_TYPE(type_min(s ## width), u ## width ## var, false); \
+ /* Non-constant expression with mismatched type. */ \
+ TEST_CASTABLE_TO_TYPE(s ## width ## var, u ## width, false); \
+ TEST_CASTABLE_TO_TYPE(u ## width ## var, s ## width, false); \
+} while (0)
+
+#define TEST_CASTABLE_TO_TYPE_RANGE(width) do { \
+ unsigned long big = U ## width ## _MAX; \
+ signed long small = S ## width ## _MIN; \
+ u ## width u ## width ## var = 0; \
+ s ## width s ## width ## var = 0; \
+ \
+ /* Constant expression in range. */ \
+ TEST_CASTABLE_TO_TYPE(U ## width ## _MAX, u ## width, true); \
+ TEST_CASTABLE_TO_TYPE(U ## width ## _MAX, u ## width ## var, true); \
+ TEST_CASTABLE_TO_TYPE(S ## width ## _MIN, s ## width, true); \
+ TEST_CASTABLE_TO_TYPE(S ## width ## _MIN, s ## width ## var, true); \
+ /* Constant expression out of range. */ \
+ TEST_CASTABLE_TO_TYPE((unsigned long)U ## width ## _MAX + 1, u ## width, false); \
+ TEST_CASTABLE_TO_TYPE((unsigned long)U ## width ## _MAX + 1, u ## width ## var, false); \
+ TEST_CASTABLE_TO_TYPE((signed long)S ## width ## _MIN - 1, s ## width, false); \
+ TEST_CASTABLE_TO_TYPE((signed long)S ## width ## _MIN - 1, s ## width ## var, false); \
+ /* Non-constant expression with mismatched type. */ \
+ TEST_CASTABLE_TO_TYPE(big, u ## width, false); \
+ TEST_CASTABLE_TO_TYPE(big, u ## width ## var, false); \
+ TEST_CASTABLE_TO_TYPE(small, s ## width, false); \
+ TEST_CASTABLE_TO_TYPE(small, s ## width ## var, false); \
+} while (0)
+
+ TEST_CASTABLE_TO_TYPE_VAR(8);
+ TEST_CASTABLE_TO_TYPE_VAR(16);
+ TEST_CASTABLE_TO_TYPE_VAR(32);
+#if BITS_PER_LONG == 64
+ TEST_CASTABLE_TO_TYPE_VAR(64);
+#endif
+
+ TEST_CASTABLE_TO_TYPE_RANGE(8);
+ TEST_CASTABLE_TO_TYPE_RANGE(16);
+#if BITS_PER_LONG == 64
+ TEST_CASTABLE_TO_TYPE_RANGE(32);
+#endif
+ kunit_info(test, "%d castable_to_type() tests finished\n", count);
+
+#undef TEST_CASTABLE_TO_TYPE_RANGE
+#undef TEST_CASTABLE_TO_TYPE_VAR
+#undef TEST_CASTABLE_TO_TYPE
+}
+
static struct kunit_case overflow_test_cases[] = {
KUNIT_CASE(u8_u8__u8_overflow_test),
KUNIT_CASE(s8_s8__s8_overflow_test),
@@ -755,6 +1133,9 @@ static struct kunit_case overflow_test_cases[] = {
KUNIT_CASE(shift_nonsense_test),
KUNIT_CASE(overflow_allocation_test),
KUNIT_CASE(overflow_size_helpers_test),
+ KUNIT_CASE(overflows_type_test),
+ KUNIT_CASE(same_type_test),
+ KUNIT_CASE(castable_to_type_test),
{}
};
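A minimal usage sketch of castable_to_type() outside the test suite (the u8 variable below is illustrative, not taken from the patch):

	u8 limit;

	/* 200 fits a u8, so this compile-time assertion holds. */
	BUILD_BUG_ON(!castable_to_type(200, limit));
	/* -1 cannot be represented in a u8, so asserting the opposite also holds. */
	BUILD_BUG_ON(castable_to_type(-1, limit));
	limit = 200;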
diff --git a/lib/packing.c b/lib/packing.c
index 9a72f4bbf0e2..a96169237ae6 100644
--- a/lib/packing.c
+++ b/lib/packing.c
@@ -7,6 +7,7 @@
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>
+#include <linux/bitrev.h>
static int get_le_offset(int offset)
{
@@ -29,19 +30,6 @@ static int get_reverse_lsw32_offset(int offset, size_t len)
return word_index * 4 + offset;
}
-static u64 bit_reverse(u64 val, unsigned int width)
-{
- u64 new_val = 0;
- unsigned int bit;
- unsigned int i;
-
- for (i = 0; i < width; i++) {
- bit = (val & (1 << i)) != 0;
- new_val |= (bit << (width - i - 1));
- }
- return new_val;
-}
-
static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit,
int *box_end_bit, u8 *box_mask)
{
@@ -49,7 +37,7 @@ static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit,
int new_box_start_bit, new_box_end_bit;
*to_write >>= *box_end_bit;
- *to_write = bit_reverse(*to_write, box_bit_width);
+ *to_write = bitrev8(*to_write) >> (8 - box_bit_width);
*to_write <<= *box_end_bit;
new_box_end_bit = box_bit_width - *box_start_bit - 1;
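A quick hand-worked check of the substitution: for box_bit_width = 5 and a value of 0b10110, the removed bit_reverse(0b10110, 5) yields 0b01101, while bitrev8(0b10110) = 0b01101000, which shifted right by (8 - 5) is also 0b01101. The quirk only ever reverses bits within a single byte, so box_bit_width never exceeds 8 and the byte-wide bitrev8() plus shift is equivalent.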
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index e5c5315da274..668f6aa6a75d 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -230,7 +230,8 @@ static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
percpu_ref_noop_confirm_switch;
percpu_ref_get(ref); /* put after confirmation */
- call_rcu(&ref->data->rcu, percpu_ref_switch_to_atomic_rcu);
+ call_rcu_hurry(&ref->data->rcu,
+ percpu_ref_switch_to_atomic_rcu);
}
static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index ed610b75dc32..42f729c8e56c 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -117,11 +117,8 @@ void percpu_counter_sync(struct percpu_counter *fbc)
}
EXPORT_SYMBOL(percpu_counter_sync);
-/*
- * Add up all the per-cpu counts, return the result. This is a more accurate
- * but much slower version of percpu_counter_read_positive()
- */
-s64 __percpu_counter_sum(struct percpu_counter *fbc)
+static s64 __percpu_counter_sum_mask(struct percpu_counter *fbc,
+ const struct cpumask *cpu_mask)
{
s64 ret;
int cpu;
@@ -129,15 +126,35 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
raw_spin_lock_irqsave(&fbc->lock, flags);
ret = fbc->count;
- for_each_online_cpu(cpu) {
+ for_each_cpu(cpu, cpu_mask) {
s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
ret += *pcount;
}
raw_spin_unlock_irqrestore(&fbc->lock, flags);
return ret;
}
+
+/*
+ * Add up all the per-cpu counts, return the result. This is a more accurate
+ * but much slower version of percpu_counter_read_positive()
+ */
+s64 __percpu_counter_sum(struct percpu_counter *fbc)
+{
+ return __percpu_counter_sum_mask(fbc, cpu_online_mask);
+}
EXPORT_SYMBOL(__percpu_counter_sum);
+/*
+ * This is a slower version of percpu_counter_sum() as it traverses all
+ * possible CPUs. Use it only in cases where accurate data is needed in the
+ * presence of CPUs getting offlined.
+ */
+s64 percpu_counter_sum_all(struct percpu_counter *fbc)
+{
+ return __percpu_counter_sum_mask(fbc, cpu_possible_mask);
+}
+EXPORT_SYMBOL(percpu_counter_sum_all);
+
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
struct lock_class_key *key)
{
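A hedged usage sketch of the new helper (the counter name below is illustrative, not from the patch):

	/* Folds in per-CPU deltas from online CPUs only; cheap enough for most readers. */
	s64 approx = percpu_counter_sum(&sbi->free_blocks);

	/* Also walks cpu_possible_mask, for callers that need an accurate value
	 * while CPUs are being offlined; noticeably slower.
	 */
	s64 exact = percpu_counter_sum_all(&sbi->free_blocks);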
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 3c78e1e8b2ad..049ba132f7ef 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1029,7 +1029,7 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
{
struct radix_tree_node *node, *parent;
unsigned long maxindex;
- int offset;
+ int offset = 0;
radix_tree_load_root(root, &node, &maxindex);
if (index > maxindex)
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index 39b74221f4a7..a22a05c9af8a 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -18,12 +18,10 @@
#else
#include <linux/module.h>
#include <linux/gfp.h>
-#if !RAID6_USE_EMPTY_ZERO_PAGE
/* In .bss so it's zeroed */
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
EXPORT_SYMBOL(raid6_empty_zero_page);
#endif
-#endif
struct raid6_calls raid6_call;
EXPORT_SYMBOL_GPL(raid6_call);
diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc
index 9e597e1f91a4..b25dfc9c7759 100644
--- a/lib/raid6/s390vx.uc
+++ b/lib/raid6/s390vx.uc
@@ -13,8 +13,7 @@
#include <linux/raid/pq.h>
#include <asm/fpu/api.h>
-
-asm(".include \"asm/vx-insn.h\"\n");
+#include <asm/vx-insn.h>
#define NSIZE 16
diff --git a/lib/reed_solomon/test_rslib.c b/lib/reed_solomon/test_rslib.c
index 848e7eb5da92..75cb1adac884 100644
--- a/lib/reed_solomon/test_rslib.c
+++ b/lib/reed_solomon/test_rslib.c
@@ -183,7 +183,7 @@ static int get_rcw_we(struct rs_control *rs, struct wspace *ws,
do {
/* Must not choose the same location twice */
- errloc = prandom_u32_max(len);
+ errloc = get_random_u32_below(len);
} while (errlocs[errloc] != 0);
errlocs[errloc] = 1;
@@ -194,12 +194,12 @@ static int get_rcw_we(struct rs_control *rs, struct wspace *ws,
for (i = 0; i < eras; i++) {
do {
/* Must not choose the same location twice */
- errloc = prandom_u32_max(len);
+ errloc = get_random_u32_below(len);
} while (errlocs[errloc] != 0);
derrlocs[i] = errloc;
- if (ewsc && prandom_u32_max(2)) {
+ if (ewsc && get_random_u32_below(2)) {
/* Erasure with the symbol intact */
errlocs[errloc] = 2;
} else {
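The prandom_u32_max() conversions here and in the files below rely on the newer get_random_u32_*() helpers; as I read their contracts:

	get_random_u32_below(n);        /* uniform in [0, n)           */
	get_random_u32_above(n);        /* uniform in (n, U32_MAX]     */
	get_random_u32_inclusive(a, b); /* uniform in [a, b] inclusive */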
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index e12bbfb240b8..6ae2ba8e06a2 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -231,6 +231,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
struct rhash_head *head, *next, *entry;
struct rhash_head __rcu **pprev = NULL;
unsigned int new_hash;
+ unsigned long flags;
if (new_tbl->nest)
goto out;
@@ -253,13 +254,14 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
new_hash = head_hashfn(ht, new_tbl, entry);
- rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING);
+ flags = rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash],
+ SINGLE_DEPTH_NESTING);
head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);
RCU_INIT_POINTER(entry->next, head);
- rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry);
+ rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry, flags);
if (pprev)
rcu_assign_pointer(*pprev, next);
@@ -276,18 +278,19 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
{
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
+ unsigned long flags;
int err;
if (!bkt)
return 0;
- rht_lock(old_tbl, bkt);
+ flags = rht_lock(old_tbl, bkt);
while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
;
if (err == -ENOENT)
err = 0;
- rht_unlock(old_tbl, bkt);
+ rht_unlock(old_tbl, bkt, flags);
return err;
}
@@ -590,6 +593,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
struct bucket_table *new_tbl;
struct bucket_table *tbl;
struct rhash_lock_head __rcu **bkt;
+ unsigned long flags;
unsigned int hash;
void *data;
@@ -607,7 +611,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
data = ERR_PTR(-EAGAIN);
} else {
- rht_lock(tbl, bkt);
+ flags = rht_lock(tbl, bkt);
data = rhashtable_lookup_one(ht, bkt, tbl,
hash, key, obj);
new_tbl = rhashtable_insert_one(ht, bkt, tbl,
@@ -615,7 +619,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
if (PTR_ERR(new_tbl) != -EEXIST)
data = ERR_CAST(new_tbl);
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, flags);
}
} while (!IS_ERR_OR_NULL(new_tbl));
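For context, the revised bucket-locking pattern as I read the new rht_lock()/rht_unlock() signatures, which now save and restore IRQ state through a flags value (sketch only):

	unsigned long flags;

	flags = rht_lock(tbl, bkt);
	/* ... manipulate the bucket chain ... */
	rht_unlock(tbl, bkt, flags);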
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 7280ae8ca88c..1fcede228fa2 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -21,7 +21,7 @@ static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
int i;
for_each_possible_cpu(i)
- *per_cpu_ptr(sb->alloc_hint, i) = prandom_u32_max(depth);
+ *per_cpu_ptr(sb->alloc_hint, i) = get_random_u32_below(depth);
}
return 0;
}
@@ -33,7 +33,7 @@ static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
hint = this_cpu_read(*sb->alloc_hint);
if (unlikely(hint >= depth)) {
- hint = depth ? prandom_u32_max(depth) : 0;
+ hint = depth ? get_random_u32_below(depth) : 0;
this_cpu_write(*sb->alloc_hint, hint);
}
@@ -434,6 +434,8 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
atomic_set(&sbq->wake_index, 0);
atomic_set(&sbq->ws_active, 0);
+ atomic_set(&sbq->completion_cnt, 0);
+ atomic_set(&sbq->wakeup_cnt, 0);
sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
if (!sbq->ws) {
@@ -441,40 +443,21 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
return -ENOMEM;
}
- for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
+ for (i = 0; i < SBQ_WAIT_QUEUES; i++)
init_waitqueue_head(&sbq->ws[i].wait);
- atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
- }
return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
-static inline void __sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
- unsigned int wake_batch)
-{
- int i;
-
- if (sbq->wake_batch != wake_batch) {
- WRITE_ONCE(sbq->wake_batch, wake_batch);
- /*
- * Pairs with the memory barrier in sbitmap_queue_wake_up()
- * to ensure that the batch size is updated before the wait
- * counts.
- */
- smp_mb();
- for (i = 0; i < SBQ_WAIT_QUEUES; i++)
- atomic_set(&sbq->ws[i].wait_cnt, 1);
- }
-}
-
static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
unsigned int depth)
{
unsigned int wake_batch;
wake_batch = sbq_calc_wake_batch(sbq, depth);
- __sbitmap_queue_update_wake_batch(sbq, wake_batch);
+ if (sbq->wake_batch != wake_batch)
+ WRITE_ONCE(sbq->wake_batch, wake_batch);
}
void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
@@ -488,7 +471,8 @@ void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
min_batch, SBQ_WAKE_BATCH);
- __sbitmap_queue_update_wake_batch(sbq, wake_batch);
+
+ WRITE_ONCE(sbq->wake_batch, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);
@@ -576,106 +560,56 @@ void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
-static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
+static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
int i, wake_index;
if (!atomic_read(&sbq->ws_active))
- return NULL;
+ return;
wake_index = atomic_read(&sbq->wake_index);
for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
struct sbq_wait_state *ws = &sbq->ws[wake_index];
- if (waitqueue_active(&ws->wait) && atomic_read(&ws->wait_cnt)) {
- if (wake_index != atomic_read(&sbq->wake_index))
- atomic_set(&sbq->wake_index, wake_index);
- return ws;
- }
-
+ /*
+ * Advance the index before checking the current queue.
+	 * It improves fairness by ensuring the queue doesn't
+ * need to be fully emptied before trying to wake up
+ * from the next one.
+ */
wake_index = sbq_index_inc(wake_index);
+
+ /*
+ * It is sufficient to wake up at least one waiter to
+ * guarantee forward progress.
+ */
+ if (waitqueue_active(&ws->wait) &&
+ wake_up_nr(&ws->wait, nr))
+ break;
}
- return NULL;
+ if (wake_index != atomic_read(&sbq->wake_index))
+ atomic_set(&sbq->wake_index, wake_index);
}
-static bool __sbq_wake_up(struct sbitmap_queue *sbq, int *nr)
+void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
- struct sbq_wait_state *ws;
- unsigned int wake_batch;
- int wait_cnt, cur, sub;
- bool ret;
+ unsigned int wake_batch = READ_ONCE(sbq->wake_batch);
+ unsigned int wakeups;
- if (*nr <= 0)
- return false;
+ if (!atomic_read(&sbq->ws_active))
+ return;
- ws = sbq_wake_ptr(sbq);
- if (!ws)
- return false;
+ atomic_add(nr, &sbq->completion_cnt);
+ wakeups = atomic_read(&sbq->wakeup_cnt);
- cur = atomic_read(&ws->wait_cnt);
do {
- /*
- * For concurrent callers of this, callers should call this
- * function again to wakeup a new batch on a different 'ws'.
- */
- if (cur == 0)
- return true;
- sub = min(*nr, cur);
- wait_cnt = cur - sub;
- } while (!atomic_try_cmpxchg(&ws->wait_cnt, &cur, wait_cnt));
-
- /*
- * If we decremented queue without waiters, retry to avoid lost
- * wakeups.
- */
- if (wait_cnt > 0)
- return !waitqueue_active(&ws->wait);
-
- *nr -= sub;
-
- /*
- * When wait_cnt == 0, we have to be particularly careful as we are
- * responsible to reset wait_cnt regardless whether we've actually
- * woken up anybody. But in case we didn't wakeup anybody, we still
- * need to retry.
- */
- ret = !waitqueue_active(&ws->wait);
- wake_batch = READ_ONCE(sbq->wake_batch);
-
- /*
- * Wake up first in case that concurrent callers decrease wait_cnt
- * while waitqueue is empty.
- */
- wake_up_nr(&ws->wait, wake_batch);
+ if (atomic_read(&sbq->completion_cnt) - wakeups < wake_batch)
+ return;
+ } while (!atomic_try_cmpxchg(&sbq->wakeup_cnt,
+ &wakeups, wakeups + wake_batch));
- /*
- * Pairs with the memory barrier in sbitmap_queue_resize() to
- * ensure that we see the batch size update before the wait
- * count is reset.
- *
- * Also pairs with the implicit barrier between decrementing wait_cnt
- * and checking for waitqueue_active() to make sure waitqueue_active()
- * sees result of the wakeup if atomic_dec_return() has seen the result
- * of atomic_set().
- */
- smp_mb__before_atomic();
-
- /*
- * Increase wake_index before updating wait_cnt, otherwise concurrent
- * callers can see valid wait_cnt in old waitqueue, which can cause
- * invalid wakeup on the old waitqueue.
- */
- sbq_index_atomic_inc(&sbq->wake_index);
- atomic_set(&ws->wait_cnt, wake_batch);
-
- return ret || *nr;
-}
-
-void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
-{
- while (__sbq_wake_up(sbq, &nr))
- ;
+ __sbitmap_queue_wake_up(sbq, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
@@ -792,9 +726,7 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
seq_puts(m, "ws={\n");
for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
struct sbq_wait_state *ws = &sbq->ws[i];
-
- seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
- atomic_read(&ws->wait_cnt),
+ seq_printf(m, "\t{.wait=%s},\n",
waitqueue_active(&ws->wait) ? "active" : "inactive");
}
seq_puts(m, "}\n");
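Condensing the new wake-up scheme as I understand it: completion_cnt counts freed bits and wakeup_cnt counts bits already paid for by wake-ups. Only the caller that wins the atomic_try_cmpxchg() on wakeup_cnt, once the difference reaches wake_batch, issues wake_up_nr(), so concurrent completions produce at most one wake-up per batch instead of juggling per-waitqueue wait_cnt values.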
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index c8c3d675845c..a0ad2a7959b5 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -410,6 +410,15 @@ static struct scatterlist *get_next_sg(struct sg_append_table *table,
return new_sg;
}
+static bool pages_are_mergeable(struct page *a, struct page *b)
+{
+ if (page_to_pfn(a) != page_to_pfn(b) + 1)
+ return false;
+ if (!zone_device_pages_have_same_pgmap(a, b))
+ return false;
+ return true;
+}
+
/**
* sg_alloc_append_table_from_pages - Allocate and initialize an append sg
* table from an array of pages
@@ -447,6 +456,7 @@ int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
unsigned int chunks, cur_page, seg_len, i, prv_len = 0;
unsigned int added_nents = 0;
struct scatterlist *s = sgt_append->prv;
+ struct page *last_pg;
/*
* The algorithm below requires max_segment to be aligned to PAGE_SIZE
@@ -460,21 +470,17 @@ int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
return -EOPNOTSUPP;
if (sgt_append->prv) {
- unsigned long paddr =
- (page_to_pfn(sg_page(sgt_append->prv)) * PAGE_SIZE +
- sgt_append->prv->offset + sgt_append->prv->length) /
- PAGE_SIZE;
-
if (WARN_ON(offset))
return -EINVAL;
/* Merge contiguous pages into the last SG */
prv_len = sgt_append->prv->length;
- while (n_pages && page_to_pfn(pages[0]) == paddr) {
+ last_pg = sg_page(sgt_append->prv);
+ while (n_pages && pages_are_mergeable(last_pg, pages[0])) {
if (sgt_append->prv->length + PAGE_SIZE > max_segment)
break;
sgt_append->prv->length += PAGE_SIZE;
- paddr++;
+ last_pg = pages[0];
pages++;
n_pages--;
}
@@ -488,7 +494,7 @@ int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
for (i = 1; i < n_pages; i++) {
seg_len += PAGE_SIZE;
if (seg_len >= max_segment ||
- page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
+ !pages_are_mergeable(pages[i], pages[i - 1])) {
chunks++;
seg_len = 0;
}
@@ -504,8 +510,7 @@ int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
for (j = cur_page + 1; j < n_pages; j++) {
seg_len += PAGE_SIZE;
if (seg_len >= max_segment ||
- page_to_pfn(pages[j]) !=
- page_to_pfn(pages[j - 1]) + 1)
+ !pages_are_mergeable(pages[j], pages[j - 1]))
break;
}
diff --git a/lib/test_siphash.c b/lib/siphash_kunit.c
index a96788d0141d..a3c697e8be35 100644
--- a/lib/test_siphash.c
+++ b/lib/siphash_kunit.c
@@ -13,6 +13,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <kunit/test.h>
#include <linux/siphash.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -109,114 +110,88 @@ static const u32 test_vectors_hsiphash[64] = {
};
#endif
-static int __init siphash_test_init(void)
+#define chk(hash, vector, fmt...) \
+ KUNIT_EXPECT_EQ_MSG(test, hash, vector, fmt)
+
+static void siphash_test(struct kunit *test)
{
u8 in[64] __aligned(SIPHASH_ALIGNMENT);
u8 in_unaligned[65] __aligned(SIPHASH_ALIGNMENT);
u8 i;
- int ret = 0;
for (i = 0; i < 64; ++i) {
in[i] = i;
in_unaligned[i + 1] = i;
- if (siphash(in, i, &test_key_siphash) !=
- test_vectors_siphash[i]) {
- pr_info("siphash self-test aligned %u: FAIL\n", i + 1);
- ret = -EINVAL;
- }
- if (siphash(in_unaligned + 1, i, &test_key_siphash) !=
- test_vectors_siphash[i]) {
- pr_info("siphash self-test unaligned %u: FAIL\n", i + 1);
- ret = -EINVAL;
- }
- if (hsiphash(in, i, &test_key_hsiphash) !=
- test_vectors_hsiphash[i]) {
- pr_info("hsiphash self-test aligned %u: FAIL\n", i + 1);
- ret = -EINVAL;
- }
- if (hsiphash(in_unaligned + 1, i, &test_key_hsiphash) !=
- test_vectors_hsiphash[i]) {
- pr_info("hsiphash self-test unaligned %u: FAIL\n", i + 1);
- ret = -EINVAL;
- }
- }
- if (siphash_1u64(0x0706050403020100ULL, &test_key_siphash) !=
- test_vectors_siphash[8]) {
- pr_info("siphash self-test 1u64: FAIL\n");
- ret = -EINVAL;
- }
- if (siphash_2u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
- &test_key_siphash) != test_vectors_siphash[16]) {
- pr_info("siphash self-test 2u64: FAIL\n");
- ret = -EINVAL;
+ chk(siphash(in, i, &test_key_siphash),
+ test_vectors_siphash[i],
+ "siphash self-test aligned %u: FAIL", i + 1);
+ chk(siphash(in_unaligned + 1, i, &test_key_siphash),
+ test_vectors_siphash[i],
+ "siphash self-test unaligned %u: FAIL", i + 1);
+ chk(hsiphash(in, i, &test_key_hsiphash),
+ test_vectors_hsiphash[i],
+ "hsiphash self-test aligned %u: FAIL", i + 1);
+ chk(hsiphash(in_unaligned + 1, i, &test_key_hsiphash),
+ test_vectors_hsiphash[i],
+ "hsiphash self-test unaligned %u: FAIL", i + 1);
}
- if (siphash_3u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
- 0x1716151413121110ULL, &test_key_siphash) !=
- test_vectors_siphash[24]) {
- pr_info("siphash self-test 3u64: FAIL\n");
- ret = -EINVAL;
- }
- if (siphash_4u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+ chk(siphash_1u64(0x0706050403020100ULL, &test_key_siphash),
+ test_vectors_siphash[8],
+ "siphash self-test 1u64: FAIL");
+ chk(siphash_2u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+ &test_key_siphash),
+ test_vectors_siphash[16],
+ "siphash self-test 2u64: FAIL");
+ chk(siphash_3u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
+ 0x1716151413121110ULL, &test_key_siphash),
+ test_vectors_siphash[24],
+ "siphash self-test 3u64: FAIL");
+ chk(siphash_4u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
0x1716151413121110ULL, 0x1f1e1d1c1b1a1918ULL,
- &test_key_siphash) != test_vectors_siphash[32]) {
- pr_info("siphash self-test 4u64: FAIL\n");
- ret = -EINVAL;
- }
- if (siphash_1u32(0x03020100U, &test_key_siphash) !=
- test_vectors_siphash[4]) {
- pr_info("siphash self-test 1u32: FAIL\n");
- ret = -EINVAL;
- }
- if (siphash_2u32(0x03020100U, 0x07060504U, &test_key_siphash) !=
- test_vectors_siphash[8]) {
- pr_info("siphash self-test 2u32: FAIL\n");
- ret = -EINVAL;
- }
- if (siphash_3u32(0x03020100U, 0x07060504U,
- 0x0b0a0908U, &test_key_siphash) !=
- test_vectors_siphash[12]) {
- pr_info("siphash self-test 3u32: FAIL\n");
- ret = -EINVAL;
- }
- if (siphash_4u32(0x03020100U, 0x07060504U,
- 0x0b0a0908U, 0x0f0e0d0cU, &test_key_siphash) !=
- test_vectors_siphash[16]) {
- pr_info("siphash self-test 4u32: FAIL\n");
- ret = -EINVAL;
- }
- if (hsiphash_1u32(0x03020100U, &test_key_hsiphash) !=
- test_vectors_hsiphash[4]) {
- pr_info("hsiphash self-test 1u32: FAIL\n");
- ret = -EINVAL;
- }
- if (hsiphash_2u32(0x03020100U, 0x07060504U, &test_key_hsiphash) !=
- test_vectors_hsiphash[8]) {
- pr_info("hsiphash self-test 2u32: FAIL\n");
- ret = -EINVAL;
- }
- if (hsiphash_3u32(0x03020100U, 0x07060504U,
- 0x0b0a0908U, &test_key_hsiphash) !=
- test_vectors_hsiphash[12]) {
- pr_info("hsiphash self-test 3u32: FAIL\n");
- ret = -EINVAL;
- }
- if (hsiphash_4u32(0x03020100U, 0x07060504U,
- 0x0b0a0908U, 0x0f0e0d0cU, &test_key_hsiphash) !=
- test_vectors_hsiphash[16]) {
- pr_info("hsiphash self-test 4u32: FAIL\n");
- ret = -EINVAL;
- }
- if (!ret)
- pr_info("self-tests: pass\n");
- return ret;
+ &test_key_siphash),
+ test_vectors_siphash[32],
+ "siphash self-test 4u64: FAIL");
+ chk(siphash_1u32(0x03020100U, &test_key_siphash),
+ test_vectors_siphash[4],
+ "siphash self-test 1u32: FAIL");
+ chk(siphash_2u32(0x03020100U, 0x07060504U, &test_key_siphash),
+ test_vectors_siphash[8],
+ "siphash self-test 2u32: FAIL");
+ chk(siphash_3u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, &test_key_siphash),
+ test_vectors_siphash[12],
+ "siphash self-test 3u32: FAIL");
+ chk(siphash_4u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, 0x0f0e0d0cU, &test_key_siphash),
+ test_vectors_siphash[16],
+ "siphash self-test 4u32: FAIL");
+ chk(hsiphash_1u32(0x03020100U, &test_key_hsiphash),
+ test_vectors_hsiphash[4],
+ "hsiphash self-test 1u32: FAIL");
+ chk(hsiphash_2u32(0x03020100U, 0x07060504U, &test_key_hsiphash),
+ test_vectors_hsiphash[8],
+ "hsiphash self-test 2u32: FAIL");
+ chk(hsiphash_3u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, &test_key_hsiphash),
+ test_vectors_hsiphash[12],
+ "hsiphash self-test 3u32: FAIL");
+ chk(hsiphash_4u32(0x03020100U, 0x07060504U,
+ 0x0b0a0908U, 0x0f0e0d0cU, &test_key_hsiphash),
+ test_vectors_hsiphash[16],
+ "hsiphash self-test 4u32: FAIL");
}
-static void __exit siphash_test_exit(void)
-{
-}
+static struct kunit_case siphash_test_cases[] = {
+ KUNIT_CASE(siphash_test),
+ {}
+};
+
+static struct kunit_suite siphash_test_suite = {
+ .name = "siphash",
+ .test_cases = siphash_test_cases,
+};
-module_init(siphash_test_init);
-module_exit(siphash_test_exit);
+kunit_test_suite(siphash_test_suite);
MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index 7a0564d7cb7a..d4a3730b08fa 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <kunit/test.h>
+#include <kunit/test-bug.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -9,10 +10,25 @@
static struct kunit_resource resource;
static int slab_errors;
+/*
+ * Wrapper around kmem_cache_create() that drops the 'align' and 'ctor'
+ * parameters and sets the SLAB_SKIP_KFENCE flag, so that objects are not
+ * handed out from the kfence pool, where the operation could be caught by
+ * both our test and the kfence sanity check.
+ */
+static struct kmem_cache *test_kmem_cache_create(const char *name,
+ unsigned int size, slab_flags_t flags)
+{
+ struct kmem_cache *s = kmem_cache_create(name, size, 0,
+ (flags | SLAB_NO_USER_FLAGS), NULL);
+ s->flags |= SLAB_SKIP_KFENCE;
+ return s;
+}
+
static void test_clobber_zone(struct kunit *test)
{
- struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_alloc", 64, 0,
- SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL);
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_alloc", 64,
+ SLAB_RED_ZONE);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kasan_disable_current();
@@ -29,8 +45,8 @@ static void test_clobber_zone(struct kunit *test)
#ifndef CONFIG_KASAN
static void test_next_pointer(struct kunit *test)
{
- struct kmem_cache *s = kmem_cache_create("TestSlub_next_ptr_free", 64, 0,
- SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_next_ptr_free",
+ 64, SLAB_POISON);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
unsigned long tmp;
unsigned long *ptr_addr;
@@ -74,8 +90,8 @@ static void test_next_pointer(struct kunit *test)
static void test_first_word(struct kunit *test)
{
- struct kmem_cache *s = kmem_cache_create("TestSlub_1th_word_free", 64, 0,
- SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_1th_word_free",
+ 64, SLAB_POISON);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kmem_cache_free(s, p);
@@ -89,8 +105,8 @@ static void test_first_word(struct kunit *test)
static void test_clobber_50th_byte(struct kunit *test)
{
- struct kmem_cache *s = kmem_cache_create("TestSlub_50th_word_free", 64, 0,
- SLAB_POISON|SLAB_NO_USER_FLAGS, NULL);
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_50th_word_free",
+ 64, SLAB_POISON);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kmem_cache_free(s, p);
@@ -105,8 +121,8 @@ static void test_clobber_50th_byte(struct kunit *test)
static void test_clobber_redzone_free(struct kunit *test)
{
- struct kmem_cache *s = kmem_cache_create("TestSlub_RZ_free", 64, 0,
- SLAB_RED_ZONE|SLAB_NO_USER_FLAGS, NULL);
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_free", 64,
+ SLAB_RED_ZONE);
u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
kasan_disable_current();
@@ -120,6 +136,27 @@ static void test_clobber_redzone_free(struct kunit *test)
kmem_cache_destroy(s);
}
+static void test_kmalloc_redzone_access(struct kunit *test)
+{
+ struct kmem_cache *s = test_kmem_cache_create("TestSlub_RZ_kmalloc", 32,
+ SLAB_KMALLOC|SLAB_STORE_USER|SLAB_RED_ZONE);
+ u8 *p = kmalloc_trace(s, GFP_KERNEL, 18);
+
+ kasan_disable_current();
+
+ /* Suppress the -Warray-bounds warning */
+ OPTIMIZER_HIDE_VAR(p);
+ p[18] = 0xab;
+ p[19] = 0xab;
+
+ validate_slab_cache(s);
+ KUNIT_EXPECT_EQ(test, 2, slab_errors);
+
+ kasan_enable_current();
+ kmem_cache_free(s, p);
+ kmem_cache_destroy(s);
+}
+
static int test_init(struct kunit *test)
{
slab_errors = 0;
@@ -139,6 +176,7 @@ static struct kunit_case test_cases[] = {
#endif
KUNIT_CASE(test_clobber_redzone_free),
+ KUNIT_CASE(test_kmalloc_redzone_access),
{}
};
diff --git a/lib/string.c b/lib/string.c
index 3371d26a0e39..4fb566ea610f 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -76,11 +76,6 @@ EXPORT_SYMBOL(strcasecmp);
#endif
#ifndef __HAVE_ARCH_STRCPY
-/**
- * strcpy - Copy a %NUL terminated string
- * @dest: Where to copy the string to
- * @src: Where to copy the string from
- */
char *strcpy(char *dest, const char *src)
{
char *tmp = dest;
@@ -93,19 +88,6 @@ EXPORT_SYMBOL(strcpy);
#endif
#ifndef __HAVE_ARCH_STRNCPY
-/**
- * strncpy - Copy a length-limited, C-string
- * @dest: Where to copy the string to
- * @src: Where to copy the string from
- * @count: The maximum number of bytes to copy
- *
- * The result is not %NUL-terminated if the source exceeds
- * @count bytes.
- *
- * In the case where the length of @src is less than that of
- * count, the remainder of @dest will be padded with %NUL.
- *
- */
char *strncpy(char *dest, const char *src, size_t count)
{
char *tmp = dest;
@@ -122,17 +104,6 @@ EXPORT_SYMBOL(strncpy);
#endif
#ifndef __HAVE_ARCH_STRLCPY
-/**
- * strlcpy - Copy a C-string into a sized buffer
- * @dest: Where to copy the string to
- * @src: Where to copy the string from
- * @size: size of destination buffer
- *
- * Compatible with ``*BSD``: the result is always a valid
- * NUL-terminated string that fits in the buffer (unless,
- * of course, the buffer size is zero). It does not pad
- * out the result like strncpy() does.
- */
size_t strlcpy(char *dest, const char *src, size_t size)
{
size_t ret = strlen(src);
@@ -148,30 +119,6 @@ EXPORT_SYMBOL(strlcpy);
#endif
#ifndef __HAVE_ARCH_STRSCPY
-/**
- * strscpy - Copy a C-string into a sized buffer
- * @dest: Where to copy the string to
- * @src: Where to copy the string from
- * @count: Size of destination buffer
- *
- * Copy the string, or as much of it as fits, into the dest buffer. The
- * behavior is undefined if the string buffers overlap. The destination
- * buffer is always NUL terminated, unless it's zero-sized.
- *
- * Preferred to strlcpy() since the API doesn't require reading memory
- * from the src string beyond the specified "count" bytes, and since
- * the return value is easier to error-check than strlcpy()'s.
- * In addition, the implementation is robust to the string changing out
- * from underneath it, unlike the current strlcpy() implementation.
- *
- * Preferred to strncpy() since it always returns a valid string, and
- * doesn't unnecessarily force the tail of the destination buffer to be
- * zeroed. If zeroing is desired please use strscpy_pad().
- *
- * Returns:
- * * The number of characters copied (not including the trailing %NUL)
- * * -E2BIG if count is 0 or @src was truncated.
- */
ssize_t strscpy(char *dest, const char *src, size_t count)
{
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
@@ -266,11 +213,6 @@ char *stpcpy(char *__restrict__ dest, const char *__restrict__ src)
EXPORT_SYMBOL(stpcpy);
#ifndef __HAVE_ARCH_STRCAT
-/**
- * strcat - Append one %NUL-terminated string to another
- * @dest: The string to be appended to
- * @src: The string to append to it
- */
char *strcat(char *dest, const char *src)
{
char *tmp = dest;
@@ -285,15 +227,6 @@ EXPORT_SYMBOL(strcat);
#endif
#ifndef __HAVE_ARCH_STRNCAT
-/**
- * strncat - Append a length-limited, C-string to another
- * @dest: The string to be appended to
- * @src: The string to append to it
- * @count: The maximum numbers of bytes to copy
- *
- * Note that in contrast to strncpy(), strncat() ensures the result is
- * terminated.
- */
char *strncat(char *dest, const char *src, size_t count)
{
char *tmp = dest;
@@ -314,12 +247,6 @@ EXPORT_SYMBOL(strncat);
#endif
#ifndef __HAVE_ARCH_STRLCAT
-/**
- * strlcat - Append a length-limited, C-string to another
- * @dest: The string to be appended to
- * @src: The string to append to it
- * @count: The size of the destination buffer.
- */
size_t strlcat(char *dest, const char *src, size_t count)
{
size_t dsize = strlen(dest);
@@ -484,10 +411,6 @@ EXPORT_SYMBOL(strnchr);
#endif
#ifndef __HAVE_ARCH_STRLEN
-/**
- * strlen - Find the length of a string
- * @s: The string to be sized
- */
size_t strlen(const char *s)
{
const char *sc;
@@ -500,11 +423,6 @@ EXPORT_SYMBOL(strlen);
#endif
#ifndef __HAVE_ARCH_STRNLEN
-/**
- * strnlen - Find the length of a length-limited string
- * @s: The string to be sized
- * @count: The maximum number of bytes to search
- */
size_t strnlen(const char *s, size_t count)
{
const char *sc;
diff --git a/lib/strscpy_kunit.c b/lib/strscpy_kunit.c
new file mode 100644
index 000000000000..a6b6344354ed
--- /dev/null
+++ b/lib/strscpy_kunit.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Kernel module for testing the 'strscpy' family of functions.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <kunit/test.h>
+#include <linux/string.h>
+
+/*
+ * tc() - Run a specific test case.
+ * @src: Source string, argument to strscpy_pad()
+ * @count: Size of destination buffer, argument to strscpy_pad()
+ * @expected: Expected return value from call to strscpy_pad()
+ * @chars: Number of characters from the src string expected to be
+ *	    written to the dst buffer.
+ * @terminator: 1 if there should be a terminating null byte, 0 otherwise.
+ * @pad: Number of pad characters expected (in the tail of dst buffer).
+ * (@pad does not include the null terminator byte.)
+ *
+ * Calls strscpy_pad() and verifies the return value and state of the
+ * destination buffer after the call returns.
+ */
+static void tc(struct kunit *test, char *src, int count, int expected,
+ int chars, int terminator, int pad)
+{
+ int nr_bytes_poison;
+ int max_expected;
+ int max_count;
+ int written;
+ char buf[6];
+ int index, i;
+ const char POISON = 'z';
+
+ KUNIT_ASSERT_TRUE_MSG(test, src != NULL,
+ "null source string not supported");
+
+ memset(buf, POISON, sizeof(buf));
+ /* Future proofing test suite, validate args */
+ max_count = sizeof(buf) - 2; /* Space for null and to verify overflow */
+ max_expected = count - 1; /* Space for the null */
+
+ KUNIT_ASSERT_LE_MSG(test, count, max_count,
+ "count (%d) is too big (%d) ... aborting", count, max_count);
+ KUNIT_EXPECT_LE_MSG(test, expected, max_expected,
+ "expected (%d) is bigger than can possibly be returned (%d)",
+ expected, max_expected);
+
+ written = strscpy_pad(buf, src, count);
+ KUNIT_ASSERT_EQ(test, written, expected);
+
+ if (count && written == -E2BIG) {
+ KUNIT_ASSERT_EQ_MSG(test, 0, strncmp(buf, src, count - 1),
+ "buffer state invalid for -E2BIG");
+ KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
+ "too big string is not null terminated correctly");
+ }
+
+ for (i = 0; i < chars; i++)
+ KUNIT_ASSERT_EQ_MSG(test, buf[i], src[i],
+ "buf[i]==%c != src[i]==%c", buf[i], src[i]);
+
+ if (terminator)
+ KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
+ "string is not null terminated correctly");
+
+ for (i = 0; i < pad; i++) {
+ index = chars + terminator + i;
+ KUNIT_ASSERT_EQ_MSG(test, buf[index], '\0',
+ "padding missing at index: %d", i);
+ }
+
+ nr_bytes_poison = sizeof(buf) - chars - terminator - pad;
+ for (i = 0; i < nr_bytes_poison; i++) {
+ index = sizeof(buf) - 1 - i; /* Check from the end back */
+ KUNIT_ASSERT_EQ_MSG(test, buf[index], POISON,
+ "poison value missing at index: %d", i);
+ }
+}
+
+static void strscpy_test(struct kunit *test)
+{
+ char dest[8];
+
+ /*
+ * tc() uses a destination buffer of size 6 and needs at
+ * least 2 characters spare (one for null and one to check for
+ * overflow). This means we should only call tc() with
+ * strings up to a maximum of 4 characters long and 'count'
+ * should not exceed 4. To test with longer strings increase
+ * the buffer size in tc().
+ */
+
+ /* tc(test, src, count, expected, chars, terminator, pad) */
+ tc(test, "a", 0, -E2BIG, 0, 0, 0);
+ tc(test, "", 0, -E2BIG, 0, 0, 0);
+
+ tc(test, "a", 1, -E2BIG, 0, 1, 0);
+ tc(test, "", 1, 0, 0, 1, 0);
+
+ tc(test, "ab", 2, -E2BIG, 1, 1, 0);
+ tc(test, "a", 2, 1, 1, 1, 0);
+ tc(test, "", 2, 0, 0, 1, 1);
+
+ tc(test, "abc", 3, -E2BIG, 2, 1, 0);
+ tc(test, "ab", 3, 2, 2, 1, 0);
+ tc(test, "a", 3, 1, 1, 1, 1);
+ tc(test, "", 3, 0, 0, 1, 2);
+
+ tc(test, "abcd", 4, -E2BIG, 3, 1, 0);
+ tc(test, "abc", 4, 3, 3, 1, 0);
+ tc(test, "ab", 4, 2, 2, 1, 1);
+ tc(test, "a", 4, 1, 1, 1, 2);
+ tc(test, "", 4, 0, 0, 1, 3);
+
+ /* Compile-time-known source strings. */
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", ARRAY_SIZE(dest)), 0);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", 3), 0);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", 1), 0);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", 0), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", ARRAY_SIZE(dest)), 5);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 3), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 1), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 0), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "This is too long", ARRAY_SIZE(dest)), -E2BIG);
+}
+
+static struct kunit_case strscpy_test_cases[] = {
+ KUNIT_CASE(strscpy_test),
+ {}
+};
+
+static struct kunit_suite strscpy_test_suite = {
+ .name = "strscpy",
+ .test_cases = strscpy_test_cases,
+};
+
+kunit_test_suite(strscpy_test_suite);
+
+MODULE_AUTHOR("Tobin C. Harding <tobin@kernel.org>");
+MODULE_LICENSE("GPL");
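To make the expected buffer states concrete, a hand-worked case matching tc(test, "ab", 4, 2, 2, 1, 1) above (a sketch, not part of the patch):

	char buf[4];
	ssize_t n = strscpy_pad(buf, "ab", sizeof(buf));
	/* n == 2; buf == { 'a', 'b', '\0', '\0' }: two copied characters,
	 * a NUL terminator, and one byte of NUL padding.
	 */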
diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c
index 86fadd3ba08c..41d3447bc3b4 100644
--- a/lib/test-string_helpers.c
+++ b/lib/test-string_helpers.c
@@ -587,7 +587,7 @@ static int __init test_string_helpers_init(void)
for (i = 0; i < UNESCAPE_ALL_MASK + 1; i++)
test_string_unescape("unescape", i, false);
test_string_unescape("unescape inplace",
- prandom_u32_max(UNESCAPE_ANY + 1), true);
+ get_random_u32_below(UNESCAPE_ANY + 1), true);
/* Without dictionary */
for (i = 0; i < ESCAPE_ALL_MASK + 1; i++)
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 5820704165a6..ade9ac672adb 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -14346,7 +14346,6 @@ static struct sk_buff *populate_skb(char *buf, int size)
skb->hash = SKB_HASH;
skb->queue_mapping = SKB_QUEUE_MAP;
skb->vlan_tci = SKB_VLAN_TCI;
- skb->vlan_present = SKB_VLAN_PRESENT;
skb->vlan_proto = htons(ETH_P_IP);
dev_net_set(&dev, &init_net);
skb->dev = &dev;
diff --git a/lib/test_fprobe.c b/lib/test_fprobe.c
index e0381b3ec410..1fb56cf5e5ce 100644
--- a/lib/test_fprobe.c
+++ b/lib/test_fprobe.c
@@ -144,10 +144,7 @@ static unsigned long get_ftrace_location(void *func)
static int fprobe_test_init(struct kunit *test)
{
- do {
- rand1 = get_random_u32();
- } while (rand1 <= div_factor);
-
+ rand1 = get_random_u32_above(div_factor);
target = fprobe_selftest_target;
target2 = fprobe_selftest_target2;
target_ip = get_ftrace_location(target);
diff --git a/lib/test_hexdump.c b/lib/test_hexdump.c
index 0927f44cd478..b916801f23a8 100644
--- a/lib/test_hexdump.c
+++ b/lib/test_hexdump.c
@@ -149,7 +149,7 @@ static void __init test_hexdump(size_t len, int rowsize, int groupsize,
static void __init test_hexdump_set(int rowsize, bool ascii)
{
size_t d = min_t(size_t, sizeof(data_b), rowsize);
- size_t len = prandom_u32_max(d) + 1;
+ size_t len = get_random_u32_inclusive(1, d);
test_hexdump(len, rowsize, 4, ascii);
test_hexdump(len, rowsize, 2, ascii);
@@ -208,11 +208,11 @@ static void __init test_hexdump_overflow(size_t buflen, size_t len,
static void __init test_hexdump_overflow_set(size_t buflen, bool ascii)
{
unsigned int i = 0;
- int rs = (prandom_u32_max(2) + 1) * 16;
+ int rs = get_random_u32_inclusive(1, 2) * 16;
do {
int gs = 1 << i;
- size_t len = prandom_u32_max(rs) + gs;
+ size_t len = get_random_u32_below(rs) + gs;
test_hexdump_overflow(buflen, rounddown(len, gs), rs, gs, ascii);
} while (i++ < 3);
@@ -223,11 +223,11 @@ static int __init test_hexdump_init(void)
unsigned int i;
int rowsize;
- rowsize = (prandom_u32_max(2) + 1) * 16;
+ rowsize = get_random_u32_inclusive(1, 2) * 16;
for (i = 0; i < 16; i++)
test_hexdump_set(rowsize, false);
- rowsize = (prandom_u32_max(2) + 1) * 16;
+ rowsize = get_random_u32_inclusive(1, 2) * 16;
for (i = 0; i < 16; i++)
test_hexdump_set(rowsize, true);
diff --git a/lib/test_kprobes.c b/lib/test_kprobes.c
index eeb1d728d974..1c95e5719802 100644
--- a/lib/test_kprobes.c
+++ b/lib/test_kprobes.c
@@ -339,10 +339,7 @@ static int kprobes_test_init(struct kunit *test)
stacktrace_target = kprobe_stacktrace_target;
internal_target = kprobe_stacktrace_internal_target;
stacktrace_driver = kprobe_stacktrace_driver;
-
- do {
- rand1 = get_random_u32();
- } while (rand1 <= div_factor);
+ rand1 = get_random_u32_above(div_factor);
return 0;
}
diff --git a/lib/test_linear_ranges.c b/lib/test_linear_ranges.c
index 676e0b8abcdd..c18f9c0f1f25 100644
--- a/lib/test_linear_ranges.c
+++ b/lib/test_linear_ranges.c
@@ -107,17 +107,8 @@ static const unsigned int range2_vals[] = { RANGE2_MIN, RANGE2_MIN +
#define SMALLEST_VAL RANGE1_MIN
static struct linear_range testr[] = {
- {
- .min = RANGE1_MIN,
- .min_sel = RANGE1_MIN_SEL,
- .max_sel = RANGE1_MAX_SEL,
- .step = RANGE1_STEP,
- }, {
- .min = RANGE2_MIN,
- .min_sel = RANGE2_MIN_SEL,
- .max_sel = RANGE2_MAX_SEL,
- .step = RANGE2_STEP
- },
+ LINEAR_RANGE(RANGE1_MIN, RANGE1_MIN_SEL, RANGE1_MAX_SEL, RANGE1_STEP),
+ LINEAR_RANGE(RANGE2_MIN, RANGE2_MIN_SEL, RANGE2_MAX_SEL, RANGE2_STEP),
};
static void range_test_get_value(struct kunit *test)
diff --git a/lib/test_list_sort.c b/lib/test_list_sort.c
index 19ff229b9c3a..cc5f335f29b5 100644
--- a/lib/test_list_sort.c
+++ b/lib/test_list_sort.c
@@ -71,7 +71,7 @@ static void list_sort_test(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, el);
/* force some equivalencies */
- el->value = prandom_u32_max(TEST_LIST_LEN / 3);
+ el->value = get_random_u32_below(TEST_LIST_LEN / 3);
el->serial = i;
el->poison1 = TEST_POISON1;
el->poison2 = TEST_POISON2;
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 4bd15a593fbd..d34dc636b81c 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -126,7 +126,7 @@ __test(const char *expect, int elen, const char *fmt, ...)
* be able to print it as expected.
*/
failed_tests += do_test(BUF_SIZE, expect, elen, fmt, ap);
- rand = 1 + prandom_u32_max(elen+1);
+ rand = get_random_u32_inclusive(1, elen + 1);
/* Since elen < BUF_SIZE, we have 1 <= rand <= BUF_SIZE. */
failed_tests += do_test(rand, expect, elen, fmt, ap);
failed_tests += do_test(0, expect, elen, fmt, ap);
@@ -179,18 +179,6 @@ test_number(void)
* behaviour.
*/
test("00|0|0|0|0", "%.2d|%.1d|%.0d|%.*d|%1.0d", 0, 0, 0, 0, 0, 0);
-#ifndef __CHAR_UNSIGNED__
- {
- /*
- * Passing a 'char' to a %02x specifier doesn't do
- * what was presumably the intention when char is
- * signed and the value is negative. One must either &
- * with 0xff or cast to u8.
- */
- char val = -16;
- test("0xfffffff0|0xf0|0xf0", "%#02x|%#02x|%#02x", val, val & 0xff, (u8)val);
- }
-#endif
}
static void __init
@@ -704,31 +692,29 @@ flags(void)
static void __init fwnode_pointer(void)
{
- const struct software_node softnodes[] = {
- { .name = "first", },
- { .name = "second", .parent = &softnodes[0], },
- { .name = "third", .parent = &softnodes[1], },
- { NULL /* Guardian */ }
- };
- const char * const full_name = "first/second/third";
+ const struct software_node first = { .name = "first" };
+ const struct software_node second = { .name = "second", .parent = &first };
+ const struct software_node third = { .name = "third", .parent = &second };
+ const struct software_node *group[] = { &first, &second, &third, NULL };
const char * const full_name_second = "first/second";
+ const char * const full_name_third = "first/second/third";
const char * const second_name = "second";
const char * const third_name = "third";
int rval;
- rval = software_node_register_nodes(softnodes);
+ rval = software_node_register_node_group(group);
if (rval) {
pr_warn("cannot register softnodes; rval %d\n", rval);
return;
}
- test(full_name_second, "%pfw", software_node_fwnode(&softnodes[1]));
- test(full_name, "%pfw", software_node_fwnode(&softnodes[2]));
- test(full_name, "%pfwf", software_node_fwnode(&softnodes[2]));
- test(second_name, "%pfwP", software_node_fwnode(&softnodes[1]));
- test(third_name, "%pfwP", software_node_fwnode(&softnodes[2]));
+ test(full_name_second, "%pfw", software_node_fwnode(&second));
+ test(full_name_third, "%pfw", software_node_fwnode(&third));
+ test(full_name_third, "%pfwf", software_node_fwnode(&third));
+ test(second_name, "%pfwP", software_node_fwnode(&second));
+ test(third_name, "%pfwP", software_node_fwnode(&third));
- software_node_unregister_nodes(softnodes);
+ software_node_unregister_node_group(group);
}
static void __init fourcc_pointer(void)
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index f2ba5787055a..c20f6cb4bf55 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -368,8 +368,8 @@ static int __init test_rhltable(unsigned int entries)
pr_info("test %d random rhlist add/delete operations\n", entries);
for (j = 0; j < entries; j++) {
- u32 i = prandom_u32_max(entries);
- u32 prand = prandom_u32_max(4);
+ u32 i = get_random_u32_below(entries);
+ u32 prand = get_random_u32_below(4);
cond_resched();
@@ -396,7 +396,7 @@ static int __init test_rhltable(unsigned int entries)
}
if (prand & 2) {
- i = prandom_u32_max(entries);
+ i = get_random_u32_below(entries);
if (test_bit(i, obj_in_table)) {
err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params);
WARN(err, "cannot remove element at slot %d", i);
@@ -434,7 +434,7 @@ out_free:
static int __init test_rhashtable_max(struct test_obj *array,
unsigned int entries)
{
- unsigned int i, insert_retries = 0;
+ unsigned int i;
int err;
test_rht_params.max_size = roundup_pow_of_two(entries / 8);
@@ -447,9 +447,7 @@ static int __init test_rhashtable_max(struct test_obj *array,
obj->value.id = i * 2;
err = insert_retry(&ht, obj, test_rht_params);
- if (err > 0)
- insert_retries += err;
- else if (err)
+ if (err < 0)
return err;
}
diff --git a/lib/test_strscpy.c b/lib/test_strscpy.c
deleted file mode 100644
index a827f94601f5..000000000000
--- a/lib/test_strscpy.c
+++ /dev/null
@@ -1,150 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/string.h>
-
-#include "../tools/testing/selftests/kselftest_module.h"
-
-/*
- * Kernel module for testing 'strscpy' family of functions.
- */
-
-KSTM_MODULE_GLOBALS();
-
-/*
- * tc() - Run a specific test case.
- * @src: Source string, argument to strscpy_pad()
- * @count: Size of destination buffer, argument to strscpy_pad()
- * @expected: Expected return value from call to strscpy_pad()
- * @terminator: 1 if there should be a terminating null byte 0 otherwise.
- * @chars: Number of characters from the src string expected to be
- * written to the dst buffer.
- * @pad: Number of pad characters expected (in the tail of dst buffer).
- * (@pad does not include the null terminator byte.)
- *
- * Calls strscpy_pad() and verifies the return value and state of the
- * destination buffer after the call returns.
- */
-static int __init tc(char *src, int count, int expected,
- int chars, int terminator, int pad)
-{
- int nr_bytes_poison;
- int max_expected;
- int max_count;
- int written;
- char buf[6];
- int index, i;
- const char POISON = 'z';
-
- total_tests++;
-
- if (!src) {
- pr_err("null source string not supported\n");
- return -1;
- }
-
- memset(buf, POISON, sizeof(buf));
- /* Future proofing test suite, validate args */
- max_count = sizeof(buf) - 2; /* Space for null and to verify overflow */
- max_expected = count - 1; /* Space for the null */
- if (count > max_count) {
- pr_err("count (%d) is too big (%d) ... aborting", count, max_count);
- return -1;
- }
- if (expected > max_expected) {
- pr_warn("expected (%d) is bigger than can possibly be returned (%d)",
- expected, max_expected);
- }
-
- written = strscpy_pad(buf, src, count);
- if ((written) != (expected)) {
- pr_err("%d != %d (written, expected)\n", written, expected);
- goto fail;
- }
-
- if (count && written == -E2BIG) {
- if (strncmp(buf, src, count - 1) != 0) {
- pr_err("buffer state invalid for -E2BIG\n");
- goto fail;
- }
- if (buf[count - 1] != '\0') {
- pr_err("too big string is not null terminated correctly\n");
- goto fail;
- }
- }
-
- for (i = 0; i < chars; i++) {
- if (buf[i] != src[i]) {
- pr_err("buf[i]==%c != src[i]==%c\n", buf[i], src[i]);
- goto fail;
- }
- }
-
- if (terminator) {
- if (buf[count - 1] != '\0') {
- pr_err("string is not null terminated correctly\n");
- goto fail;
- }
- }
-
- for (i = 0; i < pad; i++) {
- index = chars + terminator + i;
- if (buf[index] != '\0') {
- pr_err("padding missing at index: %d\n", i);
- goto fail;
- }
- }
-
- nr_bytes_poison = sizeof(buf) - chars - terminator - pad;
- for (i = 0; i < nr_bytes_poison; i++) {
- index = sizeof(buf) - 1 - i; /* Check from the end back */
- if (buf[index] != POISON) {
- pr_err("poison value missing at index: %d\n", i);
- goto fail;
- }
- }
-
- return 0;
-fail:
- failed_tests++;
- return -1;
-}
-
-static void __init selftest(void)
-{
- /*
- * tc() uses a destination buffer of size 6 and needs at
- * least 2 characters spare (one for null and one to check for
- * overflow). This means we should only call tc() with
- * strings up to a maximum of 4 characters long and 'count'
- * should not exceed 4. To test with longer strings increase
- * the buffer size in tc().
- */
-
- /* tc(src, count, expected, chars, terminator, pad) */
- KSTM_CHECK_ZERO(tc("a", 0, -E2BIG, 0, 0, 0));
- KSTM_CHECK_ZERO(tc("", 0, -E2BIG, 0, 0, 0));
-
- KSTM_CHECK_ZERO(tc("a", 1, -E2BIG, 0, 1, 0));
- KSTM_CHECK_ZERO(tc("", 1, 0, 0, 1, 0));
-
- KSTM_CHECK_ZERO(tc("ab", 2, -E2BIG, 1, 1, 0));
- KSTM_CHECK_ZERO(tc("a", 2, 1, 1, 1, 0));
- KSTM_CHECK_ZERO(tc("", 2, 0, 0, 1, 1));
-
- KSTM_CHECK_ZERO(tc("abc", 3, -E2BIG, 2, 1, 0));
- KSTM_CHECK_ZERO(tc("ab", 3, 2, 2, 1, 0));
- KSTM_CHECK_ZERO(tc("a", 3, 1, 1, 1, 1));
- KSTM_CHECK_ZERO(tc("", 3, 0, 0, 1, 2));
-
- KSTM_CHECK_ZERO(tc("abcd", 4, -E2BIG, 3, 1, 0));
- KSTM_CHECK_ZERO(tc("abc", 4, 3, 3, 1, 0));
- KSTM_CHECK_ZERO(tc("ab", 4, 2, 2, 1, 1));
- KSTM_CHECK_ZERO(tc("a", 4, 1, 1, 1, 2));
- KSTM_CHECK_ZERO(tc("", 4, 0, 0, 1, 3));
-}
-
-KSTM_MODULE_LOADERS(test_strscpy);
-MODULE_AUTHOR("Tobin C. Harding <tobin@kernel.org>");
-MODULE_LICENSE("GPL");
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index cf7780572f5b..f90d2c27675b 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -151,7 +151,7 @@ static int random_size_alloc_test(void)
int i;
for (i = 0; i < test_loop_count; i++) {
- n = prandom_u32_max(100) + 1;
+ n = get_random_u32_inclusive(1, 100);
p = vmalloc(n * PAGE_SIZE);
if (!p)
@@ -291,12 +291,12 @@ pcpu_alloc_test(void)
return -1;
for (i = 0; i < 35000; i++) {
- size = prandom_u32_max(PAGE_SIZE / 4) + 1;
+ size = get_random_u32_inclusive(1, PAGE_SIZE / 4);
/*
* Maximum PAGE_SIZE
*/
- align = 1 << (prandom_u32_max(11) + 1);
+ align = 1 << get_random_u32_inclusive(1, 11);
pcpu[i] = __alloc_percpu(size, align);
if (!pcpu[i])
@@ -391,7 +391,7 @@ static void shuffle_array(int *arr, int n)
for (i = n - 1; i > 0; i--) {
/* Cut the range. */
- j = prandom_u32_max(i);
+ j = get_random_u32_below(i);
/* Swap indexes. */
swap(arr[i], arr[j]);
diff --git a/lib/ubsan.c b/lib/ubsan.c
index 36bd75e33426..60c7099857a0 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -154,8 +154,7 @@ static void ubsan_epilogue(void)
current->in_ubsan--;
- if (panic_on_warn)
- panic("panic_on_warn set ...\n");
+ check_panic_on_warn("UBSAN");
}
void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs)
diff --git a/lib/vdso/Makefile b/lib/vdso/Makefile
index c415a685d61b..e814061d6aa0 100644
--- a/lib/vdso/Makefile
+++ b/lib/vdso/Makefile
@@ -17,6 +17,6 @@ $(error ARCH_REL_TYPE_ABS is not set)
endif
quiet_cmd_vdso_check = VDSOCHK $@
- cmd_vdso_check = if $(OBJDUMP) -R $@ | egrep -h "$(ARCH_REL_TYPE_ABS)"; \
+ cmd_vdso_check = if $(OBJDUMP) -R $@ | grep -E -h "$(ARCH_REL_TYPE_ABS)"; \
then (echo >&2 "$@: dynamic relocations are not supported"; \
rm -f $@; /bin/false); fi
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 24f37bab8bc1..be71a03c936a 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -41,6 +41,7 @@
#include <linux/siphash.h>
#include <linux/compiler.h>
#include <linux/property.h>
+#include <linux/notifier.h>
#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>
#endif
@@ -752,26 +753,21 @@ early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
static bool filled_random_ptr_key __read_mostly;
static siphash_key_t ptr_key __read_mostly;
-static void fill_ptr_key_workfn(struct work_struct *work);
-static DECLARE_DELAYED_WORK(fill_ptr_key_work, fill_ptr_key_workfn);
-static void fill_ptr_key_workfn(struct work_struct *work)
+static int fill_ptr_key(struct notifier_block *nb, unsigned long action, void *data)
{
- if (!rng_is_initialized()) {
- queue_delayed_work(system_unbound_wq, &fill_ptr_key_work, HZ * 2);
- return;
- }
-
get_random_bytes(&ptr_key, sizeof(ptr_key));
/* Pairs with smp_rmb() before reading ptr_key. */
smp_wmb();
WRITE_ONCE(filled_random_ptr_key, true);
+ return NOTIFY_DONE;
}
static int __init vsprintf_init_hashval(void)
{
- fill_ptr_key_workfn(NULL);
+ static struct notifier_block fill_ptr_key_nb = { .notifier_call = fill_ptr_key };
+ execute_with_initialized_rng(&fill_ptr_key_nb);
return 0;
}
subsys_initcall(vsprintf_init_hashval)
@@ -866,7 +862,7 @@ char *restricted_pointer(char *buf, char *end, const void *ptr,
* kptr_restrict==1 cannot be used in IRQ context
* because its test for CAP_SYSLOG would be meaningless.
*/
- if (in_irq() || in_serving_softirq() || in_nmi()) {
+ if (in_hardirq() || in_serving_softirq() || in_nmi()) {
if (spec.field_width == -1)
spec.field_width = 2 * sizeof(ptr);
return error_string(buf, end, "pK-error", spec);