Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                    |   10
-rw-r--r--  lib/Kconfig.debug              |   47
-rw-r--r--  lib/Kconfig.kcsan              |   42
-rw-r--r--  lib/bitmap.c                   |  121
-rw-r--r--  lib/crypto/Kconfig             |    3
-rw-r--r--  lib/crypto/Makefile            |    3
-rw-r--r--  lib/crypto/blake2s.c           |    8
-rw-r--r--  lib/crypto/chacha20poly1305.c  |    8
-rw-r--r--  lib/crypto/curve25519.c        |    8
-rw-r--r--  lib/crypto/sm4.c               |  176
-rw-r--r--  lib/debugobjects.c             |    7
-rw-r--r--  lib/devmem_is_allowed.c        |    2
-rw-r--r--  lib/kunit/test.c               |  109
-rw-r--r--  lib/linear_ranges.c            |   31
-rw-r--r--  lib/mpi/mpiutil.c              |    2
-rw-r--r--  lib/nmi_backtrace.c            |   13
-rw-r--r--  lib/once.c                     |   11
-rw-r--r--  lib/scatterlist.c              |  160
-rw-r--r--  lib/sg_pool.c                  |    3
-rw-r--r--  lib/string.c                   |   16
-rw-r--r--  lib/string_helpers.c           |    4
-rw-r--r--  lib/test-string_helpers.c      |   14
-rw-r--r--  lib/test_bitmap.c              |  150
-rw-r--r--  lib/test_bpf.c                 | 2332
-rw-r--r--  lib/test_kasan.c               |   82
-rw-r--r--  lib/test_kasan_module.c        |   20
-rw-r--r--  lib/test_lockup.c              |    8
-rw-r--r--  lib/test_scanf.c               |    4
-rw-r--r--  lib/test_stackinit.c           |  253
-rw-r--r--  lib/test_vmalloc.c             |    5
-rw-r--r--  lib/ubsan.c                    |    3
31 files changed, 3351 insertions(+), 304 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 5c9c0687f76d..5e7165e6a346 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -50,12 +50,18 @@ config HAVE_ARCH_BITREVERSE
This option enables the use of hardware bit-reversal instructions on
architectures which support such operations.
-config GENERIC_STRNCPY_FROM_USER
+config ARCH_HAS_STRNCPY_FROM_USER
bool
-config GENERIC_STRNLEN_USER
+config ARCH_HAS_STRNLEN_USER
bool
+config GENERIC_STRNCPY_FROM_USER
+ def_bool !ARCH_HAS_STRNCPY_FROM_USER
+
+config GENERIC_STRNLEN_USER
+ def_bool !ARCH_HAS_STRNLEN_USER
+
config GENERIC_NET_UTILS
bool
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7d3d29203a72..0f61b1ec385d 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -346,7 +346,7 @@ config FRAME_WARN
int "Warn for stack frames larger than"
range 0 8192
default 2048 if GCC_PLUGIN_LATENT_ENTROPY
- default 1280 if (!64BIT && PARISC)
+ default 1536 if (!64BIT && PARISC)
default 1024 if (!64BIT && !PARISC)
default 2048 if 64BIT
help
@@ -1237,7 +1237,7 @@ config PROVE_LOCKING
depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
select LOCKDEP
select DEBUG_SPINLOCK
- select DEBUG_MUTEXES
+ select DEBUG_MUTEXES if !PREEMPT_RT
select DEBUG_RT_MUTEXES if RT_MUTEXES
select DEBUG_RWSEMS
select DEBUG_WW_MUTEX_SLOWPATH
@@ -1301,7 +1301,7 @@ config LOCK_STAT
depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
select LOCKDEP
select DEBUG_SPINLOCK
- select DEBUG_MUTEXES
+ select DEBUG_MUTEXES if !PREEMPT_RT
select DEBUG_RT_MUTEXES if RT_MUTEXES
select DEBUG_LOCK_ALLOC
default n
@@ -1337,7 +1337,7 @@ config DEBUG_SPINLOCK
config DEBUG_MUTEXES
bool "Mutex debugging: basic checks"
- depends on DEBUG_KERNEL
+ depends on DEBUG_KERNEL && !PREEMPT_RT
help
This feature allows mutex semantics violations to be detected and
reported.
@@ -1347,7 +1347,8 @@ config DEBUG_WW_MUTEX_SLOWPATH
depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
select DEBUG_LOCK_ALLOC
select DEBUG_SPINLOCK
- select DEBUG_MUTEXES
+ select DEBUG_MUTEXES if !PREEMPT_RT
+ select DEBUG_RT_MUTEXES if PREEMPT_RT
help
This feature enables slowpath testing for w/w mutex users by
injecting additional -EDEADLK wound/backoff cases. Together with
@@ -1370,7 +1371,7 @@ config DEBUG_LOCK_ALLOC
bool "Lock debugging: detect incorrect freeing of live locks"
depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
select DEBUG_SPINLOCK
- select DEBUG_MUTEXES
+ select DEBUG_MUTEXES if !PREEMPT_RT
select DEBUG_RT_MUTEXES if RT_MUTEXES
select LOCKDEP
help
@@ -1681,33 +1682,6 @@ config DEBUG_WQ_FORCE_RR_CPU
feature by default. When enabled, memory and cache locality will
be impacted.
-config DEBUG_BLOCK_EXT_DEVT
- bool "Force extended block device numbers and spread them"
- depends on DEBUG_KERNEL
- depends on BLOCK
- default n
- help
- BIG FAT WARNING: ENABLING THIS OPTION MIGHT BREAK BOOTING ON
- SOME DISTRIBUTIONS. DO NOT ENABLE THIS UNLESS YOU KNOW WHAT
- YOU ARE DOING. Distros, please enable this and fix whatever
- is broken.
-
- Conventionally, block device numbers are allocated from
- predetermined contiguous area. However, extended block area
- may introduce non-contiguous block device numbers. This
- option forces most block device numbers to be allocated from
- the extended space and spreads them to discover kernel or
- userland code paths which assume predetermined contiguous
- device number allocation.
-
- Note that turning on this debug option shuffles all the
- device numbers for all IDE and SCSI devices including libata
- ones, so root partition specified using device number
- directly (via rdev or root=MAJ:MIN) won't work anymore.
- Textual device names (root=/dev/sdXn) will continue to work.
-
- Say N if you are unsure.
-
config CPU_HOTPLUG_STATE_CONTROL
bool "Enable CPU hotplug state control"
depends on DEBUG_KERNEL
@@ -1973,6 +1947,13 @@ config FAIL_MMC_REQUEST
and to test how the mmc host driver handles retries from
the block device.
+config FAIL_SUNRPC
+ bool "Fault-injection capability for SunRPC"
+ depends on FAULT_INJECTION_DEBUG_FS && SUNRPC_DEBUG
+ help
+ Provide fault-injection capability for SunRPC and
+ its consumers.
+
config FAULT_INJECTION_STACKTRACE_FILTER
bool "stacktrace filter for fault-injection capabilities"
depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
diff --git a/lib/Kconfig.kcsan b/lib/Kconfig.kcsan
index 0440f373248e..e0a93ffdef30 100644
--- a/lib/Kconfig.kcsan
+++ b/lib/Kconfig.kcsan
@@ -40,10 +40,14 @@ menuconfig KCSAN
if KCSAN
-# Compiler capabilities that should not fail the test if they are unavailable.
config CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE
def_bool (CC_IS_CLANG && $(cc-option,-fsanitize=thread -mllvm -tsan-compound-read-before-write=1)) || \
(CC_IS_GCC && $(cc-option,-fsanitize=thread --param tsan-compound-read-before-write=1))
+ help
+ The compiler instruments plain compound read-write operations
+ differently (++, --, +=, -=, |=, &=, etc.), which allows KCSAN to
+ distinguish them from other plain accesses. This is currently
+ supported by Clang 12 or later.
config KCSAN_VERBOSE
bool "Show verbose reports with more information about system state"
@@ -58,9 +62,6 @@ config KCSAN_VERBOSE
generated from any one of them, system stability may suffer due to
deadlocks or recursion. If in doubt, say N.
-config KCSAN_DEBUG
- bool "Debugging of KCSAN internals"
-
config KCSAN_SELFTEST
bool "Perform short selftests on boot"
default y
@@ -149,7 +150,8 @@ config KCSAN_SKIP_WATCH_RANDOMIZE
KCSAN_WATCH_SKIP.
config KCSAN_INTERRUPT_WATCHER
- bool "Interruptible watchers"
+ bool "Interruptible watchers" if !KCSAN_STRICT
+ default KCSAN_STRICT
help
If enabled, a task that set up a watchpoint may be interrupted while
delayed. This option will allow KCSAN to detect races between
@@ -169,13 +171,9 @@ config KCSAN_REPORT_ONCE_IN_MS
reporting to avoid flooding the console with reports. Setting this
to 0 disables rate limiting.
-# The main purpose of the below options is to control reported data races (e.g.
-# in fuzzer configs), and are not expected to be switched frequently by other
-# users. We could turn some of them into boot parameters, but given they should
-# not be switched normally, let's keep them here to simplify configuration.
-#
-# The defaults below are chosen to be very conservative, and may miss certain
-# bugs.
+# The main purpose of the options below is to control reported data races; they
+# are not expected to be switched frequently by non-testers or at runtime.
+# The defaults are chosen to be conservative, and can miss certain bugs.
config KCSAN_REPORT_RACE_UNKNOWN_ORIGIN
bool "Report races of unknown origin"
@@ -186,9 +184,17 @@ config KCSAN_REPORT_RACE_UNKNOWN_ORIGIN
reported if it was only possible to infer a race due to a data value
change while an access is being delayed on a watchpoint.
+config KCSAN_STRICT
+ bool "Strict data-race checking"
+ help
+ KCSAN will report data races with the strictest possible rules, which
+ closely aligns with the rules defined by the Linux-kernel memory
+ consistency model (LKMM).
+
config KCSAN_REPORT_VALUE_CHANGE_ONLY
bool "Only report races where watcher observed a data value change"
default y
+ depends on !KCSAN_STRICT
help
If enabled and a conflicting write is observed via a watchpoint, but
the data value of the memory location was observed to remain
@@ -197,6 +203,7 @@ config KCSAN_REPORT_VALUE_CHANGE_ONLY
config KCSAN_ASSUME_PLAIN_WRITES_ATOMIC
bool "Assume that plain aligned writes up to word size are atomic"
default y
+ depends on !KCSAN_STRICT
help
Assume that plain aligned writes up to word size are atomic by
default, and also not subject to other unsafe compiler optimizations
@@ -209,6 +216,7 @@ config KCSAN_ASSUME_PLAIN_WRITES_ATOMIC
config KCSAN_IGNORE_ATOMICS
bool "Do not instrument marked atomic accesses"
+ depends on !KCSAN_STRICT
help
Never instrument marked atomic accesses. This option can be used for
additional filtering. Conflicting marked atomic reads and plain
@@ -224,4 +232,14 @@ config KCSAN_IGNORE_ATOMICS
due to two conflicting plain writes will be reported (aligned and
unaligned, if CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n).
+config KCSAN_PERMISSIVE
+ bool "Enable all additional permissive rules"
+ depends on KCSAN_REPORT_VALUE_CHANGE_ONLY
+ help
+ Enable additional permissive rules to ignore certain classes of data
+ races (also see kernel/kcsan/permissive.h). None of the permissive
+ rules imply that such data races are generally safe, but can be used
+ to further reduce reported data races due to data-racy patterns
+ common across the kernel.
+
endif # KCSAN
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 9401d39e4722..663dd81967d4 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -487,6 +487,127 @@ int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
}
EXPORT_SYMBOL(bitmap_print_to_pagebuf);
+/**
+ * bitmap_print_to_buf - convert bitmap to list or hex format ASCII string
+ * @list: indicates whether the bitmap should be printed as a list
+ * true: print in decimal list format
+ * false: print in hexadecimal bitmask format
+ */
+static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count)
+{
+ const char *fmt = list ? "%*pbl\n" : "%*pb\n";
+ ssize_t size;
+ void *data;
+
+ data = kasprintf(GFP_KERNEL, fmt, nmaskbits, maskp);
+ if (!data)
+ return -ENOMEM;
+
+ size = memory_read_from_buffer(buf, count, &off, data, strlen(data) + 1);
+ kfree(data);
+
+ return size;
+}
+
+/**
+ * bitmap_print_bitmask_to_buf - convert bitmap to hex bitmask format ASCII string
+ *
+ * bitmap_print_to_pagebuf() is used indirectly via its cpumap wrapper
+ * cpumap_print_to_pagebuf() or directly by drivers to export hexadecimal
+ * bitmasks and decimal lists to userspace via the sysfs ABI.
+ * Drivers might be using a normal attribute for this kind of ABI. A
+ * normal attribute typically has a show entry as below:
+ * static ssize_t example_attribute_show(struct device *dev,
+ * struct device_attribute *attr, char *buf)
+ * {
+ * ...
+ * return bitmap_print_to_pagebuf(true, buf, &mask, nr_trig_max);
+ * }
+ * The show entry of an attribute has no offset and count parameters, which
+ * means the file is limited to one page only.
+ * The bitmap_print_to_pagebuf() API works well for this kind of normal
+ * attribute, which has a buf parameter but no offset and count:
+ * bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
+ * int nmaskbits)
+ * {
+ * }
+ * The problem is that once we have a large bitmap, the bitmask or list can
+ * be more than one page. Especially for a list, it could be as complex as
+ * 0,3,5,7,9,... and we have no simple way to know its exact size.
+ * It turns out bin_attribute is a way to break this limit. bin_attribute
+ * has show entry as below:
+ * static ssize_t
+ * example_bin_attribute_show(struct file *filp, struct kobject *kobj,
+ * struct bin_attribute *attr, char *buf,
+ * loff_t offset, size_t count)
+ * {
+ * ...
+ * }
+ * With the new offset and count parameters, the sysfs ABI is able to
+ * support file sizes of more than one page. For example, offset could
+ * be >= 4096.
+ * bitmap_print_bitmask_to_buf() and bitmap_print_list_to_buf(), with their
+ * cpumap wrappers cpumap_print_bitmask_to_buf() and cpumap_print_list_to_buf(),
+ * make those drivers able to support large bitmasks and lists after they
+ * move to bin_attribute. As a result, we have to pass the corresponding
+ * parameters such as off and count from the bin_attribute show entry to this API.
+ *
+ * @buf: buffer into which string is placed
+ * @maskp: pointer to bitmap to convert
+ * @nmaskbits: size of bitmap, in bits
+ * @off: offset in the string from which we are copying; we copy to @buf
+ * @count: the maximum number of bytes to print
+ *
+ * The role of cpumap_print_bitmask_to_buf() and cpumap_print_list_to_buf()
+ * is similar to that of cpumap_print_to_pagebuf(); the difference is that
+ * bitmap_print_to_pagebuf() mainly serves sysfs attributes with the assumption
+ * that the destination buffer is exactly one page and won't be more than one
+ * page. cpumap_print_bitmask_to_buf() and cpumap_print_list_to_buf(), on the
+ * other hand, mainly serve bin_attribute, which is not limited to exactly one
+ * page, so they can break the size limit of the converted decimal list and
+ * hexadecimal bitmask.
+ *
+ * WARNING!
+ *
+ * This function is not a replacement for sprintf() or bitmap_print_to_pagebuf().
+ * It is intended to work around the sysfs limitations discussed above and
+ * should be used carefully in the general case for the following reasons:
+ * - Time complexity is O(nbits^2/count), compared to O(nbits) for snprintf().
+ * - Memory complexity is O(nbits), compared to O(1) for snprintf().
+ * - @off and @count are NOT offset and number of bits to print.
+ * - If printing part of the bitmap as a list, the resulting string is not a
+ *   correct list representation of the bitmap. In particular, some bits within
+ *   or outside the related interval may be erroneously set or unset. The format
+ *   of the string may be broken, so a bitmap_parselist-like parser may fail to parse it.
+ * - If printing the whole bitmap as a list by parts, the user must order the
+ *   calls of the function such that the offset is incremented linearly.
+ * - If printing the whole bitmap as a list by parts, the user must keep the
+ *   bitmap unchanged between the very first and very last call. Otherwise the
+ *   concatenated result may be incorrect, and the format may be broken.
+ *
+ * Returns the number of characters actually printed to @buf
+ */
+int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count)
+{
+ return bitmap_print_to_buf(false, buf, maskp, nmaskbits, off, count);
+}
+EXPORT_SYMBOL(bitmap_print_bitmask_to_buf);
+
+/**
+ * bitmap_print_list_to_buf - convert bitmap to decimal list format ASCII string
+ *
+ * Everything is the same as in bitmap_print_bitmask_to_buf() above except
+ * the print format.
+ */
+int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count)
+{
+ return bitmap_print_to_buf(true, buf, maskp, nmaskbits, off, count);
+}
+EXPORT_SYMBOL(bitmap_print_list_to_buf);
+
/*
* Region 9-38:4/10 describes the following bitmap structure:
* 0 9 12 18 38 N
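
The long comment above is easier to follow with a concrete caller. The sketch below shows a hypothetical driver-side bin_attribute ->read() callback built on the new helper; the example_mask array, its 4096-bit size and the attribute name are invented, and the declaration of bitmap_print_bitmask_to_buf() is assumed to be added to <linux/bitmap.h> elsewhere in this series:

#include <linux/bitmap.h>
#include <linux/sysfs.h>

/* Hypothetical driver bitmap exported through a bin_attribute. */
static unsigned long example_mask[BITS_TO_LONGS(4096)];

static ssize_t example_mask_read(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	/* The helper handles the off/count window into the full string. */
	return bitmap_print_bitmask_to_buf(buf, example_mask, 4096, off, count);
}
static BIN_ATTR_RO(example_mask, 0);	/* size 0: length not known up front */

Because the bin_attribute show path is called repeatedly with increasing offsets, the O(nbits^2/count) cost noted in the WARNING applies per file read, which is why the helper is reserved for ABIs that can genuinely exceed one page.
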
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 14c032de276e..545ccbddf6a1 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -128,3 +128,6 @@ config CRYPTO_LIB_CHACHA20POLY1305
config CRYPTO_LIB_SHA256
tristate
+
+config CRYPTO_LIB_SM4
+ tristate
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index 3a435629d9ce..73205ed269ba 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -38,6 +38,9 @@ libpoly1305-y += poly1305.o
obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o
libsha256-y := sha256.o
+obj-$(CONFIG_CRYPTO_LIB_SM4) += libsm4.o
+libsm4-y := sm4.o
+
ifneq ($(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS),y)
libblake2s-y += blake2s-selftest.o
libchacha20poly1305-y += chacha20poly1305-selftest.o
diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c
index c64ac8bfb6a9..4055aa593ec4 100644
--- a/lib/crypto/blake2s.c
+++ b/lib/crypto/blake2s.c
@@ -73,7 +73,7 @@ void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen,
}
EXPORT_SYMBOL(blake2s256_hmac);
-static int __init mod_init(void)
+static int __init blake2s_mod_init(void)
{
if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
WARN_ON(!blake2s_selftest()))
@@ -81,12 +81,12 @@ static int __init mod_init(void)
return 0;
}
-static void __exit mod_exit(void)
+static void __exit blake2s_mod_exit(void)
{
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(blake2s_mod_init);
+module_exit(blake2s_mod_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("BLAKE2s hash function");
MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c
index c2fcdb98cc02..fa6a9440fc95 100644
--- a/lib/crypto/chacha20poly1305.c
+++ b/lib/crypto/chacha20poly1305.c
@@ -354,7 +354,7 @@ bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len
}
EXPORT_SYMBOL(chacha20poly1305_decrypt_sg_inplace);
-static int __init mod_init(void)
+static int __init chacha20poly1305_init(void)
{
if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
WARN_ON(!chacha20poly1305_selftest()))
@@ -362,12 +362,12 @@ static int __init mod_init(void)
return 0;
}
-static void __exit mod_exit(void)
+static void __exit chacha20poly1305_exit(void)
{
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(chacha20poly1305_init);
+module_exit(chacha20poly1305_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("ChaCha20Poly1305 AEAD construction");
MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
diff --git a/lib/crypto/curve25519.c b/lib/crypto/curve25519.c
index fb29739e8c29..064b352c6907 100644
--- a/lib/crypto/curve25519.c
+++ b/lib/crypto/curve25519.c
@@ -13,7 +13,7 @@
#include <linux/module.h>
#include <linux/init.h>
-static int __init mod_init(void)
+static int __init curve25519_init(void)
{
if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) &&
WARN_ON(!curve25519_selftest()))
@@ -21,12 +21,12 @@ static int __init mod_init(void)
return 0;
}
-static void __exit mod_exit(void)
+static void __exit curve25519_exit(void)
{
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(curve25519_init);
+module_exit(curve25519_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Curve25519 scalar multiplication");
diff --git a/lib/crypto/sm4.c b/lib/crypto/sm4.c
new file mode 100644
index 000000000000..633b59fed9db
--- /dev/null
+++ b/lib/crypto/sm4.c
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SM4, as specified in
+ * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
+ *
+ * Copyright (C) 2018 ARM Limited or its affiliates.
+ * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
+ */
+
+#include <linux/module.h>
+#include <asm/unaligned.h>
+#include <crypto/sm4.h>
+
+static const u32 fk[4] = {
+ 0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc
+};
+
+static const u32 __cacheline_aligned ck[32] = {
+ 0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269,
+ 0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9,
+ 0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
+ 0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9,
+ 0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229,
+ 0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
+ 0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209,
+ 0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279
+};
+
+static const u8 __cacheline_aligned sbox[256] = {
+ 0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7,
+ 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05,
+ 0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3,
+ 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99,
+ 0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a,
+ 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62,
+ 0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95,
+ 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6,
+ 0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba,
+ 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8,
+ 0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b,
+ 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35,
+ 0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2,
+ 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87,
+ 0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52,
+ 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e,
+ 0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5,
+ 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1,
+ 0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55,
+ 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3,
+ 0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60,
+ 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f,
+ 0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f,
+ 0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51,
+ 0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f,
+ 0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8,
+ 0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd,
+ 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0,
+ 0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e,
+ 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84,
+ 0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20,
+ 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48
+};
+
+static inline u32 sm4_t_non_lin_sub(u32 x)
+{
+ u32 out;
+
+ out = (u32)sbox[x & 0xff];
+ out |= (u32)sbox[(x >> 8) & 0xff] << 8;
+ out |= (u32)sbox[(x >> 16) & 0xff] << 16;
+ out |= (u32)sbox[(x >> 24) & 0xff] << 24;
+
+ return out;
+}
+
+static inline u32 sm4_key_lin_sub(u32 x)
+{
+ return x ^ rol32(x, 13) ^ rol32(x, 23);
+}
+
+static inline u32 sm4_enc_lin_sub(u32 x)
+{
+ return x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24);
+}
+
+static inline u32 sm4_key_sub(u32 x)
+{
+ return sm4_key_lin_sub(sm4_t_non_lin_sub(x));
+}
+
+static inline u32 sm4_enc_sub(u32 x)
+{
+ return sm4_enc_lin_sub(sm4_t_non_lin_sub(x));
+}
+
+static inline u32 sm4_round(u32 x0, u32 x1, u32 x2, u32 x3, u32 rk)
+{
+ return x0 ^ sm4_enc_sub(x1 ^ x2 ^ x3 ^ rk);
+}
+
+
+/**
+ * sm4_expandkey - Expands the SM4 key as described in GB/T 32907-2016
+ * @ctx: The location where the computed key will be stored.
+ * @in_key: The supplied key.
+ * @key_len: The length of the supplied key.
+ *
+ * Returns 0 on success. The function fails only if an invalid key size (or
+ * pointer) is supplied.
+ */
+int sm4_expandkey(struct sm4_ctx *ctx, const u8 *in_key,
+ unsigned int key_len)
+{
+ u32 rk[4];
+ const u32 *key = (u32 *)in_key;
+ int i;
+
+ if (key_len != SM4_KEY_SIZE)
+ return -EINVAL;
+
+ rk[0] = get_unaligned_be32(&key[0]) ^ fk[0];
+ rk[1] = get_unaligned_be32(&key[1]) ^ fk[1];
+ rk[2] = get_unaligned_be32(&key[2]) ^ fk[2];
+ rk[3] = get_unaligned_be32(&key[3]) ^ fk[3];
+
+ for (i = 0; i < 32; i += 4) {
+ rk[0] ^= sm4_key_sub(rk[1] ^ rk[2] ^ rk[3] ^ ck[i + 0]);
+ rk[1] ^= sm4_key_sub(rk[2] ^ rk[3] ^ rk[0] ^ ck[i + 1]);
+ rk[2] ^= sm4_key_sub(rk[3] ^ rk[0] ^ rk[1] ^ ck[i + 2]);
+ rk[3] ^= sm4_key_sub(rk[0] ^ rk[1] ^ rk[2] ^ ck[i + 3]);
+
+ ctx->rkey_enc[i + 0] = rk[0];
+ ctx->rkey_enc[i + 1] = rk[1];
+ ctx->rkey_enc[i + 2] = rk[2];
+ ctx->rkey_enc[i + 3] = rk[3];
+ ctx->rkey_dec[31 - 0 - i] = rk[0];
+ ctx->rkey_dec[31 - 1 - i] = rk[1];
+ ctx->rkey_dec[31 - 2 - i] = rk[2];
+ ctx->rkey_dec[31 - 3 - i] = rk[3];
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sm4_expandkey);
+
+/**
+ * sm4_crypt_block - Encrypt or decrypt a single SM4 block
+ * @rk: The rkey_enc for encrypt or rkey_dec for decrypt
+ * @out: Buffer to store output data
+ * @in: Buffer containing the input data
+ */
+void sm4_crypt_block(const u32 *rk, u8 *out, const u8 *in)
+{
+ u32 x[4], i;
+
+ x[0] = get_unaligned_be32(in + 0 * 4);
+ x[1] = get_unaligned_be32(in + 1 * 4);
+ x[2] = get_unaligned_be32(in + 2 * 4);
+ x[3] = get_unaligned_be32(in + 3 * 4);
+
+ for (i = 0; i < 32; i += 4) {
+ x[0] = sm4_round(x[0], x[1], x[2], x[3], rk[i + 0]);
+ x[1] = sm4_round(x[1], x[2], x[3], x[0], rk[i + 1]);
+ x[2] = sm4_round(x[2], x[3], x[0], x[1], rk[i + 2]);
+ x[3] = sm4_round(x[3], x[0], x[1], x[2], rk[i + 3]);
+ }
+
+ put_unaligned_be32(x[3 - 0], out + 0 * 4);
+ put_unaligned_be32(x[3 - 1], out + 1 * 4);
+ put_unaligned_be32(x[3 - 2], out + 2 * 4);
+ put_unaligned_be32(x[3 - 3], out + 3 * 4);
+}
+EXPORT_SYMBOL_GPL(sm4_crypt_block);
+
+MODULE_DESCRIPTION("Generic SM4 library");
+MODULE_LICENSE("GPL v2");
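
As a rough usage sketch (not part of this patch), a caller that selects CRYPTO_LIB_SM4 could round-trip one 16-byte block through the new library like this; the wrapper name is hypothetical:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <crypto/sm4.h>

/* Sketch: encrypt one block, decrypt it again, and compare. */
static int sm4_roundtrip(const u8 key[SM4_KEY_SIZE], const u8 in[SM4_BLOCK_SIZE])
{
	struct sm4_ctx ctx;
	u8 ct[SM4_BLOCK_SIZE], pt[SM4_BLOCK_SIZE];

	if (sm4_expandkey(&ctx, key, SM4_KEY_SIZE))
		return -EINVAL;

	sm4_crypt_block(ctx.rkey_enc, ct, in);	/* encrypt */
	sm4_crypt_block(ctx.rkey_dec, pt, ct);	/* decrypt */

	return memcmp(pt, in, SM4_BLOCK_SIZE) ? -EIO : 0;
}
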
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 9e14ae02306b..6946f8e204e3 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -557,7 +557,12 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack
struct debug_obj *obj;
unsigned long flags;
- fill_pool();
+ /*
+ * On RT enabled kernels the pool refill must happen in preemptible
+ * context:
+ */
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
+ fill_pool();
db = get_bucket((unsigned long) addr);
diff --git a/lib/devmem_is_allowed.c b/lib/devmem_is_allowed.c
index c0d67c541849..60be9e24bd57 100644
--- a/lib/devmem_is_allowed.c
+++ b/lib/devmem_is_allowed.c
@@ -19,7 +19,7 @@
*/
int devmem_is_allowed(unsigned long pfn)
{
- if (iomem_is_exclusive(pfn << PAGE_SHIFT))
+ if (iomem_is_exclusive(PFN_PHYS(pfn)))
return 0;
if (!page_is_ram(pfn))
return 1;
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index d79ecb86ea57..f246b847024e 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -10,6 +10,7 @@
#include <kunit/test-bug.h>
#include <linux/kernel.h>
#include <linux/kref.h>
+#include <linux/moduleparam.h>
#include <linux/sched/debug.h>
#include <linux/sched.h>
@@ -52,6 +53,51 @@ EXPORT_SYMBOL_GPL(__kunit_fail_current_test);
#endif
/*
+ * KUnit statistic mode:
+ * 0 - disabled
+ * 1 - only when there is more than one subtest
+ * 2 - enabled
+ */
+static int kunit_stats_enabled = 1;
+module_param_named(stats_enabled, kunit_stats_enabled, int, 0644);
+MODULE_PARM_DESC(stats_enabled,
+ "Print test stats: never (0), only for multiple subtests (1), or always (2)");
+
+struct kunit_result_stats {
+ unsigned long passed;
+ unsigned long skipped;
+ unsigned long failed;
+ unsigned long total;
+};
+
+static bool kunit_should_print_stats(struct kunit_result_stats stats)
+{
+ if (kunit_stats_enabled == 0)
+ return false;
+
+ if (kunit_stats_enabled == 2)
+ return true;
+
+ return (stats.total > 1);
+}
+
+static void kunit_print_test_stats(struct kunit *test,
+ struct kunit_result_stats stats)
+{
+ if (!kunit_should_print_stats(stats))
+ return;
+
+ kunit_log(KERN_INFO, test,
+ KUNIT_SUBTEST_INDENT
+ "# %s: pass:%lu fail:%lu skip:%lu total:%lu",
+ test->name,
+ stats.passed,
+ stats.failed,
+ stats.skipped,
+ stats.total);
+}
+
+/*
* Append formatted message to log, size of which is limited to
* KUNIT_LOG_SIZE bytes (including null terminating byte).
*/
@@ -393,15 +439,69 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite,
test_case->status = KUNIT_SUCCESS;
}
+static void kunit_print_suite_stats(struct kunit_suite *suite,
+ struct kunit_result_stats suite_stats,
+ struct kunit_result_stats param_stats)
+{
+ if (kunit_should_print_stats(suite_stats)) {
+ kunit_log(KERN_INFO, suite,
+ "# %s: pass:%lu fail:%lu skip:%lu total:%lu",
+ suite->name,
+ suite_stats.passed,
+ suite_stats.failed,
+ suite_stats.skipped,
+ suite_stats.total);
+ }
+
+ if (kunit_should_print_stats(param_stats)) {
+ kunit_log(KERN_INFO, suite,
+ "# Totals: pass:%lu fail:%lu skip:%lu total:%lu",
+ param_stats.passed,
+ param_stats.failed,
+ param_stats.skipped,
+ param_stats.total);
+ }
+}
+
+static void kunit_update_stats(struct kunit_result_stats *stats,
+ enum kunit_status status)
+{
+ switch (status) {
+ case KUNIT_SUCCESS:
+ stats->passed++;
+ break;
+ case KUNIT_SKIPPED:
+ stats->skipped++;
+ break;
+ case KUNIT_FAILURE:
+ stats->failed++;
+ break;
+ }
+
+ stats->total++;
+}
+
+static void kunit_accumulate_stats(struct kunit_result_stats *total,
+ struct kunit_result_stats add)
+{
+ total->passed += add.passed;
+ total->skipped += add.skipped;
+ total->failed += add.failed;
+ total->total += add.total;
+}
+
int kunit_run_tests(struct kunit_suite *suite)
{
char param_desc[KUNIT_PARAM_DESC_SIZE];
struct kunit_case *test_case;
+ struct kunit_result_stats suite_stats = { 0 };
+ struct kunit_result_stats total_stats = { 0 };
kunit_print_subtest_start(suite);
kunit_suite_for_each_test_case(suite, test_case) {
struct kunit test = { .param_value = NULL, .param_index = 0 };
+ struct kunit_result_stats param_stats = { 0 };
test_case->status = KUNIT_SKIPPED;
if (test_case->generate_params) {
@@ -431,14 +531,23 @@ int kunit_run_tests(struct kunit_suite *suite)
test.param_value = test_case->generate_params(test.param_value, param_desc);
test.param_index++;
}
+
+ kunit_update_stats(&param_stats, test.status);
+
} while (test.param_value);
+ kunit_print_test_stats(&test, param_stats);
+
kunit_print_ok_not_ok(&test, true, test_case->status,
kunit_test_case_num(suite, test_case),
test_case->name,
test.status_comment);
+
+ kunit_update_stats(&suite_stats, test_case->status);
+ kunit_accumulate_stats(&total_stats, param_stats);
}
+ kunit_print_suite_stats(suite, suite_stats, total_stats);
kunit_print_subtest_end(suite);
return 0;
diff --git a/lib/linear_ranges.c b/lib/linear_ranges.c
index ced5c15d3f04..a1a7dfa881de 100644
--- a/lib/linear_ranges.c
+++ b/lib/linear_ranges.c
@@ -241,5 +241,36 @@ int linear_range_get_selector_high(const struct linear_range *r,
}
EXPORT_SYMBOL_GPL(linear_range_get_selector_high);
+/**
+ * linear_range_get_selector_within - return linear range selector for value
+ * @r: pointer to linear range where selector is looked from
+ * @val: value for which the selector is searched
+ * @selector: address where found selector value is updated
+ *
+ * Return the selector for which the range value is the closest match for
+ * the given input value. A value matches if it is equal to or lower than
+ * the given value. If the given value is higher than the maximum range
+ * value, return the maximum selector.
+ */
+void linear_range_get_selector_within(const struct linear_range *r,
+ unsigned int val, unsigned int *selector)
+{
+ if (r->min > val) {
+ *selector = r->min_sel;
+ return;
+ }
+
+ if (linear_range_get_max_value(r) < val) {
+ *selector = r->max_sel;
+ return;
+ }
+
+ if (r->step == 0)
+ *selector = r->min_sel;
+ else
+ *selector = (val - r->min) / r->step + r->min_sel;
+}
+EXPORT_SYMBOL_GPL(linear_range_get_selector_within);
+
MODULE_DESCRIPTION("linear-ranges helper");
MODULE_LICENSE("GPL");
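
As an illustration of the selector math in linear_range_get_selector_within() (a sketch with invented range values, not from this patch):

#include <linux/linear_range.h>

/* Hypothetical range: value 1000 at selector 0, step 50, up to selector 10 (1500). */
static const struct linear_range example_range = {
	.min = 1000,
	.min_sel = 0,
	.max_sel = 10,
	.step = 50,
};

static unsigned int example_pick_selector(unsigned int val)
{
	unsigned int sel;

	/* val = 1120 yields selector 2 (range value 1100, closest not above val). */
	linear_range_get_selector_within(&example_range, val, &sel);
	return sel;
}
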
diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
index 9a75ca3f7edf..bc81419f400c 100644
--- a/lib/mpi/mpiutil.c
+++ b/lib/mpi/mpiutil.c
@@ -148,7 +148,7 @@ int mpi_resize(MPI a, unsigned nlimbs)
return 0; /* no need to do it */
if (a->d) {
- p = kmalloc_array(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
+ p = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
if (!p)
return -ENOMEM;
memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 8abe1870dba4..f9e89001b52e 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -75,12 +75,6 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
touch_softlockup_watchdog();
}
- /*
- * Force flush any remote buffers that might be stuck in IRQ context
- * and therefore could not run their irq_work.
- */
- printk_safe_flush();
-
clear_bit_unlock(0, &backtrace_flag);
put_cpu();
}
@@ -92,8 +86,14 @@ module_param(backtrace_idle, bool, 0644);
bool nmi_cpu_backtrace(struct pt_regs *regs)
{
int cpu = smp_processor_id();
+ unsigned long flags;
if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
+ /*
+ * Allow nested NMI backtraces while serializing
+ * against other CPUs.
+ */
+ printk_cpu_lock_irqsave(flags);
if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) {
pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
cpu, (void *)instruction_pointer(regs));
@@ -104,6 +104,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
else
dump_stack();
}
+ printk_cpu_unlock_irqrestore(flags);
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
return true;
}
diff --git a/lib/once.c b/lib/once.c
index 8b7d6235217e..59149bf3bfb4 100644
--- a/lib/once.c
+++ b/lib/once.c
@@ -3,10 +3,12 @@
#include <linux/spinlock.h>
#include <linux/once.h>
#include <linux/random.h>
+#include <linux/module.h>
struct once_work {
struct work_struct work;
struct static_key_true *key;
+ struct module *module;
};
static void once_deferred(struct work_struct *w)
@@ -16,10 +18,11 @@ static void once_deferred(struct work_struct *w)
work = container_of(w, struct once_work, work);
BUG_ON(!static_key_enabled(work->key));
static_branch_disable(work->key);
+ module_put(work->module);
kfree(work);
}
-static void once_disable_jump(struct static_key_true *key)
+static void once_disable_jump(struct static_key_true *key, struct module *mod)
{
struct once_work *w;
@@ -29,6 +32,8 @@ static void once_disable_jump(struct static_key_true *key)
INIT_WORK(&w->work, once_deferred);
w->key = key;
+ w->module = mod;
+ __module_get(mod);
schedule_work(&w->work);
}
@@ -53,11 +58,11 @@ bool __do_once_start(bool *done, unsigned long *flags)
EXPORT_SYMBOL(__do_once_start);
void __do_once_done(bool *done, struct static_key_true *once_key,
- unsigned long *flags)
+ unsigned long *flags, struct module *mod)
__releases(once_lock)
{
*done = true;
spin_unlock_irqrestore(&once_lock, *flags);
- once_disable_jump(once_key);
+ once_disable_jump(once_key, mod);
}
EXPORT_SYMBOL(__do_once_done);
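
For context, the deferred work that disables the static branch can run after a DO_ONCE() caller living in a module has started to unload, so taking a module reference closes that race. A hedged sketch of a typical module-side caller follows, assuming the DO_ONCE()/get_random_once() macros in <linux/once.h> are updated elsewhere in this series to pass THIS_MODULE through to __do_once_done():

#include <linux/once.h>
#include <linux/random.h>
#include <linux/types.h>

static u32 example_seed;	/* hypothetical per-module secret */

static u32 example_obfuscate(u32 val)
{
	/*
	 * Seeded exactly once across all callers; with this patch the deferred
	 * static-branch disable also pins the calling module until it finishes.
	 */
	get_random_once(&example_seed, sizeof(example_seed));
	return val ^ example_seed;
}
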
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 27efa6178153..abb3432ed744 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -182,6 +182,7 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
* @nents_first_chunk: Number of entries int the (preallocated) first
* scatterlist chunk, 0 means no such preallocated first chunk
* @free_fn: Free function
+ * @num_ents: Number of entries in the table
*
* Description:
* Free an sg table previously allocated and setup with
@@ -190,7 +191,8 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
*
**/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
- unsigned int nents_first_chunk, sg_free_fn *free_fn)
+ unsigned int nents_first_chunk, sg_free_fn *free_fn,
+ unsigned int num_ents)
{
struct scatterlist *sgl, *next;
unsigned curr_max_ents = nents_first_chunk ?: max_ents;
@@ -199,8 +201,8 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
return;
sgl = table->sgl;
- while (table->orig_nents) {
- unsigned int alloc_size = table->orig_nents;
+ while (num_ents) {
+ unsigned int alloc_size = num_ents;
unsigned int sg_size;
/*
@@ -218,7 +220,7 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
next = NULL;
}
- table->orig_nents -= sg_size;
+ num_ents -= sg_size;
if (nents_first_chunk)
nents_first_chunk = 0;
else
@@ -232,13 +234,27 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
EXPORT_SYMBOL(__sg_free_table);
/**
+ * sg_free_append_table - Free a previously allocated append sg table.
+ * @table: The mapped sg append table header
+ *
+ **/
+void sg_free_append_table(struct sg_append_table *table)
+{
+ __sg_free_table(&table->sgt, SG_MAX_SINGLE_ALLOC, false, sg_kfree,
+ table->total_nents);
+}
+EXPORT_SYMBOL(sg_free_append_table);
+
+
+/**
* sg_free_table - Free a previously allocated sg table
* @table: The mapped sg table header
*
**/
void sg_free_table(struct sg_table *table)
{
- __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
+ __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree,
+ table->orig_nents);
}
EXPORT_SYMBOL(sg_free_table);
@@ -359,13 +375,12 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
NULL, 0, gfp_mask, sg_kmalloc);
if (unlikely(ret))
- __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
-
+ sg_free_table(table);
return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
-static struct scatterlist *get_next_sg(struct sg_table *table,
+static struct scatterlist *get_next_sg(struct sg_append_table *table,
struct scatterlist *cur,
unsigned long needed_sges,
gfp_t gfp_mask)
@@ -386,54 +401,52 @@ static struct scatterlist *get_next_sg(struct sg_table *table,
return ERR_PTR(-ENOMEM);
sg_init_table(new_sg, alloc_size);
if (cur) {
+ table->total_nents += alloc_size - 1;
__sg_chain(next_sg, new_sg);
- table->orig_nents += alloc_size - 1;
} else {
- table->sgl = new_sg;
- table->orig_nents = alloc_size;
- table->nents = 0;
+ table->sgt.sgl = new_sg;
+ table->total_nents = alloc_size;
}
return new_sg;
}
/**
- * __sg_alloc_table_from_pages - Allocate and initialize an sg table from
- * an array of pages
- * @sgt: The sg table header to use
- * @pages: Pointer to an array of page pointers
- * @n_pages: Number of pages in the pages array
+ * sg_alloc_append_table_from_pages - Allocate and initialize an append sg
+ * table from an array of pages
+ * @sgt_append: The sg append table to use
+ * @pages: Pointer to an array of page pointers
+ * @n_pages: Number of pages in the pages array
* @offset: Offset from start of the first page to the start of a buffer
* @size: Number of valid bytes in the buffer (after offset)
* @max_segment: Maximum size of a scatterlist element in bytes
- * @prv: Last populated sge in sgt
+ * @left_pages: Pages left for the caller to append after this call
* @gfp_mask: GFP allocation mask
*
* Description:
- * If @prv is NULL, allocate and initialize an sg table from a list of pages,
- * else reuse the scatterlist passed in at @prv.
- * Contiguous ranges of the pages are squashed into a single scatterlist
- * entry up to the maximum size specified in @max_segment. A user may
- * provide an offset at a start and a size of valid data in a buffer
- * specified by the page array.
+ * In the first call it allocates and initializes an sg table from a list of
+ * pages; in subsequent calls it reuses the scatterlist from sgt_append.
+ * Contiguous ranges of the pages are squashed into a single scatterlist entry
+ * up to the maximum size specified in @max_segment. A user may provide an
+ * offset at a start and a size of valid data in a buffer specified by the
+ * page array. The returned sg table is released by sg_free_append_table().
*
* Returns:
- * Last SGE in sgt on success, PTR_ERR on otherwise.
- * The allocation in @sgt must be released by sg_free_table.
+ * 0 on success, negative error on failure
*
* Notes:
* If this function returns non-0 (eg failure), the caller must call
- * sg_free_table() to cleanup any leftover allocations.
+ * sg_free_append_table() to clean up any leftover allocations.
+ *
+ * In the first call, @sgt_append must be initialized.
*/
-struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt,
+int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
struct page **pages, unsigned int n_pages, unsigned int offset,
unsigned long size, unsigned int max_segment,
- struct scatterlist *prv, unsigned int left_pages,
- gfp_t gfp_mask)
+ unsigned int left_pages, gfp_t gfp_mask)
{
unsigned int chunks, cur_page, seg_len, i, prv_len = 0;
unsigned int added_nents = 0;
- struct scatterlist *s = prv;
+ struct scatterlist *s = sgt_append->prv;
/*
* The algorithm below requires max_segment to be aligned to PAGE_SIZE
@@ -441,25 +454,26 @@ struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt,
*/
max_segment = ALIGN_DOWN(max_segment, PAGE_SIZE);
if (WARN_ON(max_segment < PAGE_SIZE))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
- if (IS_ENABLED(CONFIG_ARCH_NO_SG_CHAIN) && prv)
- return ERR_PTR(-EOPNOTSUPP);
+ if (IS_ENABLED(CONFIG_ARCH_NO_SG_CHAIN) && sgt_append->prv)
+ return -EOPNOTSUPP;
- if (prv) {
- unsigned long paddr = (page_to_pfn(sg_page(prv)) * PAGE_SIZE +
- prv->offset + prv->length) /
- PAGE_SIZE;
+ if (sgt_append->prv) {
+ unsigned long paddr =
+ (page_to_pfn(sg_page(sgt_append->prv)) * PAGE_SIZE +
+ sgt_append->prv->offset + sgt_append->prv->length) /
+ PAGE_SIZE;
if (WARN_ON(offset))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
/* Merge contiguous pages into the last SG */
- prv_len = prv->length;
+ prv_len = sgt_append->prv->length;
while (n_pages && page_to_pfn(pages[0]) == paddr) {
- if (prv->length + PAGE_SIZE > max_segment)
+ if (sgt_append->prv->length + PAGE_SIZE > max_segment)
break;
- prv->length += PAGE_SIZE;
+ sgt_append->prv->length += PAGE_SIZE;
paddr++;
pages++;
n_pages--;
@@ -496,15 +510,16 @@ struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt,
}
/* Pass how many chunks might be left */
- s = get_next_sg(sgt, s, chunks - i + left_pages, gfp_mask);
+ s = get_next_sg(sgt_append, s, chunks - i + left_pages,
+ gfp_mask);
if (IS_ERR(s)) {
/*
* Adjust entry length to be as before function was
* called.
*/
- if (prv)
- prv->length = prv_len;
- return s;
+ if (sgt_append->prv)
+ sgt_append->prv->length = prv_len;
+ return PTR_ERR(s);
}
chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
sg_set_page(s, pages[cur_page],
@@ -514,42 +529,58 @@ struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt,
offset = 0;
cur_page = j;
}
- sgt->nents += added_nents;
+ sgt_append->sgt.nents += added_nents;
+ sgt_append->sgt.orig_nents = sgt_append->sgt.nents;
+ sgt_append->prv = s;
out:
if (!left_pages)
sg_mark_end(s);
- return s;
+ return 0;
}
-EXPORT_SYMBOL(__sg_alloc_table_from_pages);
+EXPORT_SYMBOL(sg_alloc_append_table_from_pages);
/**
- * sg_alloc_table_from_pages - Allocate and initialize an sg table from
- * an array of pages
+ * sg_alloc_table_from_pages_segment - Allocate and initialize an sg table from
+ * an array of pages and given maximum
+ * segment.
* @sgt: The sg table header to use
* @pages: Pointer to an array of page pointers
* @n_pages: Number of pages in the pages array
* @offset: Offset from start of the first page to the start of a buffer
* @size: Number of valid bytes in the buffer (after offset)
+ * @max_segment: Maximum size of a scatterlist element in bytes
* @gfp_mask: GFP allocation mask
*
* Description:
* Allocate and initialize an sg table from a list of pages. Contiguous
- * ranges of the pages are squashed into a single scatterlist node. A user
- * may provide an offset at a start and a size of valid data in a buffer
- * specified by the page array. The returned sg table is released by
- * sg_free_table.
+ * ranges of the pages are squashed into a single scatterlist node up to the
+ * maximum size specified in @max_segment. A user may provide an offset at a
+ * start and a size of valid data in a buffer specified by the page array.
*
- * Returns:
+ * The returned sg table is released by sg_free_table.
+ *
+ * Returns:
* 0 on success, negative error on failure
*/
-int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
- unsigned int n_pages, unsigned int offset,
- unsigned long size, gfp_t gfp_mask)
+int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages,
+ unsigned int n_pages, unsigned int offset,
+ unsigned long size, unsigned int max_segment,
+ gfp_t gfp_mask)
{
- return PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, n_pages,
- offset, size, UINT_MAX, NULL, 0, gfp_mask));
+ struct sg_append_table append = {};
+ int err;
+
+ err = sg_alloc_append_table_from_pages(&append, pages, n_pages, offset,
+ size, max_segment, 0, gfp_mask);
+ if (err) {
+ sg_free_append_table(&append);
+ return err;
+ }
+ memcpy(sgt, &append.sgt, sizeof(*sgt));
+ WARN_ON(append.total_nents != sgt->orig_nents);
+ return 0;
}
-EXPORT_SYMBOL(sg_alloc_table_from_pages);
+EXPORT_SYMBOL(sg_alloc_table_from_pages_segment);
#ifdef CONFIG_SGL_ALLOC
@@ -887,9 +918,8 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
miter->__offset += miter->consumed;
miter->__remaining -= miter->consumed;
- if ((miter->__flags & SG_MITER_TO_SG) &&
- !PageSlab(miter->page))
- flush_kernel_dcache_page(miter->page);
+ if (miter->__flags & SG_MITER_TO_SG)
+ flush_dcache_page(miter->page);
if (miter->__flags & SG_MITER_ATOMIC) {
WARN_ON_ONCE(preemptible());
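
To make the new append-table flow concrete, here is a minimal sketch (not part of this patch, names hypothetical) that appends two pinned page arrays and then releases the table:

#include <linux/mm.h>
#include <linux/scatterlist.h>

static int example_build_sgt(struct page **first, unsigned int n_first,
			     struct page **second, unsigned int n_second)
{
	struct sg_append_table append = {};
	int ret;

	/* First call allocates the table; left_pages > 0 keeps it appendable. */
	ret = sg_alloc_append_table_from_pages(&append, first, n_first, 0,
					       (unsigned long)n_first << PAGE_SHIFT,
					       UINT_MAX, n_second, GFP_KERNEL);
	if (ret)
		goto out;

	/* Second call appends; left_pages == 0 terminates the scatterlist. */
	ret = sg_alloc_append_table_from_pages(&append, second, n_second, 0,
					       (unsigned long)n_second << PAGE_SHIFT,
					       UINT_MAX, 0, GFP_KERNEL);
	if (ret)
		goto out;

	/* ... map and use append.sgt here ... */
out:
	sg_free_append_table(&append);
	return ret;
}

Callers that only need the old single-shot behaviour with a segment-size cap can use the new sg_alloc_table_from_pages_segment() wrapper instead, which hides the append table entirely.
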
diff --git a/lib/sg_pool.c b/lib/sg_pool.c
index db29e5c1f790..a0b1a52cd6f7 100644
--- a/lib/sg_pool.c
+++ b/lib/sg_pool.c
@@ -90,7 +90,8 @@ void sg_free_table_chained(struct sg_table *table,
if (nents_first_chunk == 1)
nents_first_chunk = 0;
- __sg_free_table(table, SG_CHUNK_SIZE, nents_first_chunk, sg_pool_free);
+ __sg_free_table(table, SG_CHUNK_SIZE, nents_first_chunk, sg_pool_free,
+ table->orig_nents);
}
EXPORT_SYMBOL_GPL(sg_free_table_chained);
diff --git a/lib/string.c b/lib/string.c
index 77bd0b1d3296..b2de45a581f4 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -29,6 +29,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
+#include <asm/unaligned.h>
#include <asm/byteorder.h>
#include <asm/word-at-a-time.h>
#include <asm/page.h>
@@ -935,6 +936,21 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
const unsigned char *su1, *su2;
int res = 0;
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (count >= sizeof(unsigned long)) {
+ const unsigned long *u1 = cs;
+ const unsigned long *u2 = ct;
+ do {
+ if (get_unaligned(u1) != get_unaligned(u2))
+ break;
+ u1++;
+ u2++;
+ count -= sizeof(unsigned long);
+ } while (count >= sizeof(unsigned long));
+ cs = u1;
+ ct = u2;
+ }
+#endif
for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
if ((res = *su1 - *su2) != 0)
break;
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 5a35c7e16e96..3806a52ce697 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -361,6 +361,9 @@ static bool escape_special(unsigned char c, char **dst, char *end)
case '\e':
to = 'e';
break;
+ case '"':
+ to = '"';
+ break;
default:
return false;
}
@@ -474,6 +477,7 @@ static bool escape_hex(unsigned char c, char **dst, char *end)
* '\t' - horizontal tab
* '\v' - vertical tab
* %ESCAPE_SPECIAL:
+ * '\"' - double quote
* '\\' - backslash
* '\a' - alert (BEL)
* '\e' - escape
diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c
index 2185d71704f0..437d8e6b7cb1 100644
--- a/lib/test-string_helpers.c
+++ b/lib/test-string_helpers.c
@@ -140,13 +140,13 @@ static const struct test_string_2 escape0[] __initconst = {{
},{
.in = "\\h\\\"\a\e\\",
.s1 = {{
- .out = "\\\\h\\\\\"\\a\\e\\\\",
+ .out = "\\\\h\\\\\\\"\\a\\e\\\\",
.flags = ESCAPE_SPECIAL,
},{
- .out = "\\\\\\150\\\\\\042\\a\\e\\\\",
+ .out = "\\\\\\150\\\\\\\"\\a\\e\\\\",
.flags = ESCAPE_SPECIAL | ESCAPE_OCTAL,
},{
- .out = "\\\\\\x68\\\\\\x22\\a\\e\\\\",
+ .out = "\\\\\\x68\\\\\\\"\\a\\e\\\\",
.flags = ESCAPE_SPECIAL | ESCAPE_HEX,
},{
/* terminator */
@@ -157,10 +157,10 @@ static const struct test_string_2 escape0[] __initconst = {{
.out = "\eb \\C\007\"\x90\\r]",
.flags = ESCAPE_SPACE,
},{
- .out = "\\eb \\\\C\\a\"\x90\r]",
+ .out = "\\eb \\\\C\\a\\\"\x90\r]",
.flags = ESCAPE_SPECIAL,
},{
- .out = "\\eb \\\\C\\a\"\x90\\r]",
+ .out = "\\eb \\\\C\\a\\\"\x90\\r]",
.flags = ESCAPE_SPACE | ESCAPE_SPECIAL,
},{
.out = "\\033\\142\\040\\134\\103\\007\\042\\220\\015\\135",
@@ -169,10 +169,10 @@ static const struct test_string_2 escape0[] __initconst = {{
.out = "\\033\\142\\040\\134\\103\\007\\042\\220\\r\\135",
.flags = ESCAPE_SPACE | ESCAPE_OCTAL,
},{
- .out = "\\e\\142\\040\\\\\\103\\a\\042\\220\\015\\135",
+ .out = "\\e\\142\\040\\\\\\103\\a\\\"\\220\\015\\135",
.flags = ESCAPE_SPECIAL | ESCAPE_OCTAL,
},{
- .out = "\\e\\142\\040\\\\\\103\\a\\042\\220\\r\\135",
+ .out = "\\e\\142\\040\\\\\\103\\a\\\"\\220\\r\\135",
.flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_OCTAL,
},{
.out = "\eb \\C\007\"\x90\r]",
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 4ea73f5aed41..d33fa5a61b95 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -19,6 +19,7 @@
KSTM_MODULE_GLOBALS();
static char pbl_buffer[PAGE_SIZE] __initdata;
+static char print_buf[PAGE_SIZE * 2] __initdata;
static const unsigned long exp1[] __initconst = {
BITMAP_FROM_U64(1),
@@ -156,6 +157,20 @@ static bool __init __check_eq_clump8(const char *srcfile, unsigned int line,
return true;
}
+static bool __init
+__check_eq_str(const char *srcfile, unsigned int line,
+ const char *exp_str, const char *str,
+ unsigned int len)
+{
+ bool eq;
+
+ eq = strncmp(exp_str, str, len) == 0;
+ if (!eq)
+ pr_err("[%s:%u] expected %s, got %s\n", srcfile, line, exp_str, str);
+
+ return eq;
+}
+
#define __expect_eq(suffix, ...) \
({ \
int result = 0; \
@@ -173,6 +188,7 @@ static bool __init __check_eq_clump8(const char *srcfile, unsigned int line,
#define expect_eq_pbl(...) __expect_eq(pbl, ##__VA_ARGS__)
#define expect_eq_u32_array(...) __expect_eq(u32_array, ##__VA_ARGS__)
#define expect_eq_clump8(...) __expect_eq(clump8, ##__VA_ARGS__)
+#define expect_eq_str(...) __expect_eq(str, ##__VA_ARGS__)
static void __init test_zero_clear(void)
{
@@ -660,6 +676,139 @@ static void __init test_bitmap_cut(void)
}
}
+struct test_bitmap_print {
+ const unsigned long *bitmap;
+ unsigned long nbits;
+ const char *mask;
+ const char *list;
+};
+
+static const unsigned long small_bitmap[] __initconst = {
+ BITMAP_FROM_U64(0x3333333311111111ULL),
+};
+
+static const char small_mask[] __initconst = "33333333,11111111\n";
+static const char small_list[] __initconst = "0,4,8,12,16,20,24,28,32-33,36-37,40-41,44-45,48-49,52-53,56-57,60-61\n";
+
+static const unsigned long large_bitmap[] __initconst = {
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+ BITMAP_FROM_U64(0x3333333311111111ULL), BITMAP_FROM_U64(0x3333333311111111ULL),
+};
+
+static const char large_mask[] __initconst = "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111,"
+ "33333333,11111111,33333333,11111111\n";
+
+static const char large_list[] __initconst = /* more than 4KB */
+ "0,4,8,12,16,20,24,28,32-33,36-37,40-41,44-45,48-49,52-53,56-57,60-61,64,68,72,76,80,84,88,92,96-97,100-101,104-1"
+ "05,108-109,112-113,116-117,120-121,124-125,128,132,136,140,144,148,152,156,160-161,164-165,168-169,172-173,176-1"
+ "77,180-181,184-185,188-189,192,196,200,204,208,212,216,220,224-225,228-229,232-233,236-237,240-241,244-245,248-2"
+ "49,252-253,256,260,264,268,272,276,280,284,288-289,292-293,296-297,300-301,304-305,308-309,312-313,316-317,320,3"
+ "24,328,332,336,340,344,348,352-353,356-357,360-361,364-365,368-369,372-373,376-377,380-381,384,388,392,396,400,4"
+ "04,408,412,416-417,420-421,424-425,428-429,432-433,436-437,440-441,444-445,448,452,456,460,464,468,472,476,480-4"
+ "81,484-485,488-489,492-493,496-497,500-501,504-505,508-509,512,516,520,524,528,532,536,540,544-545,548-549,552-5"
+ "53,556-557,560-561,564-565,568-569,572-573,576,580,584,588,592,596,600,604,608-609,612-613,616-617,620-621,624-6"
+ "25,628-629,632-633,636-637,640,644,648,652,656,660,664,668,672-673,676-677,680-681,684-685,688-689,692-693,696-6"
+ "97,700-701,704,708,712,716,720,724,728,732,736-737,740-741,744-745,748-749,752-753,756-757,760-761,764-765,768,7"
+ "72,776,780,784,788,792,796,800-801,804-805,808-809,812-813,816-817,820-821,824-825,828-829,832,836,840,844,848,8"
+ "52,856,860,864-865,868-869,872-873,876-877,880-881,884-885,888-889,892-893,896,900,904,908,912,916,920,924,928-9"
+ "29,932-933,936-937,940-941,944-945,948-949,952-953,956-957,960,964,968,972,976,980,984,988,992-993,996-997,1000-"
+ "1001,1004-1005,1008-1009,1012-1013,1016-1017,1020-1021,1024,1028,1032,1036,1040,1044,1048,1052,1056-1057,1060-10"
+ "61,1064-1065,1068-1069,1072-1073,1076-1077,1080-1081,1084-1085,1088,1092,1096,1100,1104,1108,1112,1116,1120-1121"
+ ",1124-1125,1128-1129,1132-1133,1136-1137,1140-1141,1144-1145,1148-1149,1152,1156,1160,1164,1168,1172,1176,1180,1"
+ "184-1185,1188-1189,1192-1193,1196-1197,1200-1201,1204-1205,1208-1209,1212-1213,1216,1220,1224,1228,1232,1236,124"
+ "0,1244,1248-1249,1252-1253,1256-1257,1260-1261,1264-1265,1268-1269,1272-1273,1276-1277,1280,1284,1288,1292,1296,"
+ "1300,1304,1308,1312-1313,1316-1317,1320-1321,1324-1325,1328-1329,1332-1333,1336-1337,1340-1341,1344,1348,1352,13"
+ "56,1360,1364,1368,1372,1376-1377,1380-1381,1384-1385,1388-1389,1392-1393,1396-1397,1400-1401,1404-1405,1408,1412"
+ ",1416,1420,1424,1428,1432,1436,1440-1441,1444-1445,1448-1449,1452-1453,1456-1457,1460-1461,1464-1465,1468-1469,1"
+ "472,1476,1480,1484,1488,1492,1496,1500,1504-1505,1508-1509,1512-1513,1516-1517,1520-1521,1524-1525,1528-1529,153"
+ "2-1533,1536,1540,1544,1548,1552,1556,1560,1564,1568-1569,1572-1573,1576-1577,1580-1581,1584-1585,1588-1589,1592-"
+ "1593,1596-1597,1600,1604,1608,1612,1616,1620,1624,1628,1632-1633,1636-1637,1640-1641,1644-1645,1648-1649,1652-16"
+ "53,1656-1657,1660-1661,1664,1668,1672,1676,1680,1684,1688,1692,1696-1697,1700-1701,1704-1705,1708-1709,1712-1713"
+ ",1716-1717,1720-1721,1724-1725,1728,1732,1736,1740,1744,1748,1752,1756,1760-1761,1764-1765,1768-1769,1772-1773,1"
+ "776-1777,1780-1781,1784-1785,1788-1789,1792,1796,1800,1804,1808,1812,1816,1820,1824-1825,1828-1829,1832-1833,183"
+ "6-1837,1840-1841,1844-1845,1848-1849,1852-1853,1856,1860,1864,1868,1872,1876,1880,1884,1888-1889,1892-1893,1896-"
+ "1897,1900-1901,1904-1905,1908-1909,1912-1913,1916-1917,1920,1924,1928,1932,1936,1940,1944,1948,1952-1953,1956-19"
+ "57,1960-1961,1964-1965,1968-1969,1972-1973,1976-1977,1980-1981,1984,1988,1992,1996,2000,2004,2008,2012,2016-2017"
+ ",2020-2021,2024-2025,2028-2029,2032-2033,2036-2037,2040-2041,2044-2045,2048,2052,2056,2060,2064,2068,2072,2076,2"
+ "080-2081,2084-2085,2088-2089,2092-2093,2096-2097,2100-2101,2104-2105,2108-2109,2112,2116,2120,2124,2128,2132,213"
+ "6,2140,2144-2145,2148-2149,2152-2153,2156-2157,2160-2161,2164-2165,2168-2169,2172-2173,2176,2180,2184,2188,2192,"
+ "2196,2200,2204,2208-2209,2212-2213,2216-2217,2220-2221,2224-2225,2228-2229,2232-2233,2236-2237,2240,2244,2248,22"
+ "52,2256,2260,2264,2268,2272-2273,2276-2277,2280-2281,2284-2285,2288-2289,2292-2293,2296-2297,2300-2301,2304,2308"
+ ",2312,2316,2320,2324,2328,2332,2336-2337,2340-2341,2344-2345,2348-2349,2352-2353,2356-2357,2360-2361,2364-2365,2"
+ "368,2372,2376,2380,2384,2388,2392,2396,2400-2401,2404-2405,2408-2409,2412-2413,2416-2417,2420-2421,2424-2425,242"
+ "8-2429,2432,2436,2440,2444,2448,2452,2456,2460,2464-2465,2468-2469,2472-2473,2476-2477,2480-2481,2484-2485,2488-"
+ "2489,2492-2493,2496,2500,2504,2508,2512,2516,2520,2524,2528-2529,2532-2533,2536-2537,2540-2541,2544-2545,2548-25"
+ "49,2552-2553,2556-2557\n";
+
+static const struct test_bitmap_print test_print[] __initconst = {
+ { small_bitmap, sizeof(small_bitmap) * BITS_PER_BYTE, small_mask, small_list },
+ { large_bitmap, sizeof(large_bitmap) * BITS_PER_BYTE, large_mask, large_list },
+};
+
+static void __init test_bitmap_print_buf(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(test_print); i++) {
+ const struct test_bitmap_print *t = &test_print[i];
+ int n;
+
+ n = bitmap_print_bitmask_to_buf(print_buf, t->bitmap, t->nbits,
+ 0, 2 * PAGE_SIZE);
+ expect_eq_uint(strlen(t->mask) + 1, n);
+ expect_eq_str(t->mask, print_buf, n);
+
+ n = bitmap_print_list_to_buf(print_buf, t->bitmap, t->nbits,
+ 0, 2 * PAGE_SIZE);
+ expect_eq_uint(strlen(t->list) + 1, n);
+ expect_eq_str(t->list, print_buf, n);
+
+		/* test with a non-zero offset */
+ if (strlen(t->list) > PAGE_SIZE) {
+ n = bitmap_print_list_to_buf(print_buf, t->bitmap, t->nbits,
+ PAGE_SIZE, PAGE_SIZE);
+ expect_eq_uint(strlen(t->list) + 1 - PAGE_SIZE, n);
+ expect_eq_str(t->list + PAGE_SIZE, print_buf, n);
+ }
+ }
+}
+
static void __init selftest(void)
{
test_zero_clear();
@@ -672,6 +821,7 @@ static void __init selftest(void)
test_mem_optimisations();
test_for_each_set_clump8();
test_bitmap_cut();
+ test_bitmap_print_buf();
}
KSTM_MODULE_LOADERS(test_bitmap);
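The new test_bitmap_print_buf() selftest above feeds precomputed mask and list strings to bitmap_print_bitmask_to_buf() and bitmap_print_list_to_buf(), checking both a full print from offset 0 and a chunked print from a non-zero offset. As a minimal sketch of the intended caller (not part of this patch; the attribute and the example_mask/example_nbits symbols are hypothetical), a sysfs bin_attribute read handler could simply forward its offset and count:

static ssize_t example_mask_read(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	/* Copy one chunk of the formatted bitmask, starting at 'off' */
	return bitmap_print_bitmask_to_buf(buf, example_mask, example_nbits,
					   off, count);
}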
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index d500320778c7..830a18ecffc8 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -461,6 +461,41 @@ static int bpf_fill_stxdw(struct bpf_test *self)
return __bpf_fill_stxdw(self, BPF_DW);
}
+static int bpf_fill_long_jmp(struct bpf_test *self)
+{
+ unsigned int len = BPF_MAXINSNS;
+ struct bpf_insn *insn;
+ int i;
+
+ insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+ if (!insn)
+ return -ENOMEM;
+
+ insn[0] = BPF_ALU64_IMM(BPF_MOV, R0, 1);
+ insn[1] = BPF_JMP_IMM(BPF_JEQ, R0, 1, len - 2 - 1);
+
+ /*
+ * Fill with a complex 64-bit operation that expands to a lot of
+ * instructions on 32-bit JITs. The large jump offset can then
+ * overflow the conditional branch field size, triggering a branch
+ * conversion mechanism in some JITs.
+ *
+ * Note: BPF_MAXINSNS of ALU64 MUL is enough to trigger such branch
+ * conversion on the 32-bit MIPS JIT. For other JITs, the instruction
+ * count and/or operation may need to be modified to trigger the
+ * branch conversion.
+ */
+ for (i = 2; i < len - 1; i++)
+ insn[i] = BPF_ALU64_IMM(BPF_MUL, R0, (i << 16) + i);
+
+ insn[len - 1] = BPF_EXIT_INSN();
+
+ self->u.ptr.insns = insn;
+ self->u.ptr.len = len;
+
+ return 0;
+}
+
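For a rough sense of why the comment above holds: if a 32-bit JIT expands each ALU64 MUL into on the order of ten native instructions (an assumption, the real factor varies by JIT), the initial conditional branch must skip roughly 4000 * 10 = 40000 native instructions. That is well beyond the +/-32768-instruction (about +/-128 KB) reach of a MIPS 16-bit branch offset, so the JIT has to convert the branch, typically by inverting the condition and branching around an unconditional long jump.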
static struct bpf_test tests[] = {
{
"TAX",
@@ -1917,6 +1952,163 @@ static struct bpf_test tests[] = {
{ { 0, -1 } }
},
{
+ /*
+ * Register (non-)clobbering test, in the case where a 32-bit
+ * JIT implements complex ALU64 operations via function calls.
+ * If so, the function call must be invisible in the eBPF
+ * registers. The JIT must then save and restore relevant
+ * registers during the call. The following tests check that
+ * the eBPF registers retain their values after such a call.
+ */
+ "INT: Register clobbering, R1 updated",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_ALU32_IMM(BPF_MOV, R1, 123456789),
+ BPF_ALU32_IMM(BPF_MOV, R2, 2),
+ BPF_ALU32_IMM(BPF_MOV, R3, 3),
+ BPF_ALU32_IMM(BPF_MOV, R4, 4),
+ BPF_ALU32_IMM(BPF_MOV, R5, 5),
+ BPF_ALU32_IMM(BPF_MOV, R6, 6),
+ BPF_ALU32_IMM(BPF_MOV, R7, 7),
+ BPF_ALU32_IMM(BPF_MOV, R8, 8),
+ BPF_ALU32_IMM(BPF_MOV, R9, 9),
+ BPF_ALU64_IMM(BPF_DIV, R1, 123456789),
+ BPF_JMP_IMM(BPF_JNE, R0, 0, 10),
+ BPF_JMP_IMM(BPF_JNE, R1, 1, 9),
+ BPF_JMP_IMM(BPF_JNE, R2, 2, 8),
+ BPF_JMP_IMM(BPF_JNE, R3, 3, 7),
+ BPF_JMP_IMM(BPF_JNE, R4, 4, 6),
+ BPF_JMP_IMM(BPF_JNE, R5, 5, 5),
+ BPF_JMP_IMM(BPF_JNE, R6, 6, 4),
+ BPF_JMP_IMM(BPF_JNE, R7, 7, 3),
+ BPF_JMP_IMM(BPF_JNE, R8, 8, 2),
+ BPF_JMP_IMM(BPF_JNE, R9, 9, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ "INT: Register clobbering, R2 updated",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_ALU32_IMM(BPF_MOV, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R2, 2 * 123456789),
+ BPF_ALU32_IMM(BPF_MOV, R3, 3),
+ BPF_ALU32_IMM(BPF_MOV, R4, 4),
+ BPF_ALU32_IMM(BPF_MOV, R5, 5),
+ BPF_ALU32_IMM(BPF_MOV, R6, 6),
+ BPF_ALU32_IMM(BPF_MOV, R7, 7),
+ BPF_ALU32_IMM(BPF_MOV, R8, 8),
+ BPF_ALU32_IMM(BPF_MOV, R9, 9),
+ BPF_ALU64_IMM(BPF_DIV, R2, 123456789),
+ BPF_JMP_IMM(BPF_JNE, R0, 0, 10),
+ BPF_JMP_IMM(BPF_JNE, R1, 1, 9),
+ BPF_JMP_IMM(BPF_JNE, R2, 2, 8),
+ BPF_JMP_IMM(BPF_JNE, R3, 3, 7),
+ BPF_JMP_IMM(BPF_JNE, R4, 4, 6),
+ BPF_JMP_IMM(BPF_JNE, R5, 5, 5),
+ BPF_JMP_IMM(BPF_JNE, R6, 6, 4),
+ BPF_JMP_IMM(BPF_JNE, R7, 7, 3),
+ BPF_JMP_IMM(BPF_JNE, R8, 8, 2),
+ BPF_JMP_IMM(BPF_JNE, R9, 9, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ /*
+ * Test 32-bit JITs that implement complex ALU64 operations as
+ * function calls R0 = f(R1, R2), and must re-arrange operands.
+ */
+#define NUMER 0xfedcba9876543210ULL
+#define DENOM 0x0123456789abcdefULL
+ "ALU64_DIV X: Operand register permutations",
+ .u.insns_int = {
+ /* R0 / R2 */
+ BPF_LD_IMM64(R0, NUMER),
+ BPF_LD_IMM64(R2, DENOM),
+ BPF_ALU64_REG(BPF_DIV, R0, R2),
+ BPF_JMP_IMM(BPF_JEQ, R0, NUMER / DENOM, 1),
+ BPF_EXIT_INSN(),
+ /* R1 / R0 */
+ BPF_LD_IMM64(R1, NUMER),
+ BPF_LD_IMM64(R0, DENOM),
+ BPF_ALU64_REG(BPF_DIV, R1, R0),
+ BPF_JMP_IMM(BPF_JEQ, R1, NUMER / DENOM, 1),
+ BPF_EXIT_INSN(),
+ /* R0 / R1 */
+ BPF_LD_IMM64(R0, NUMER),
+ BPF_LD_IMM64(R1, DENOM),
+ BPF_ALU64_REG(BPF_DIV, R0, R1),
+ BPF_JMP_IMM(BPF_JEQ, R0, NUMER / DENOM, 1),
+ BPF_EXIT_INSN(),
+ /* R2 / R0 */
+ BPF_LD_IMM64(R2, NUMER),
+ BPF_LD_IMM64(R0, DENOM),
+ BPF_ALU64_REG(BPF_DIV, R2, R0),
+ BPF_JMP_IMM(BPF_JEQ, R2, NUMER / DENOM, 1),
+ BPF_EXIT_INSN(),
+ /* R2 / R1 */
+ BPF_LD_IMM64(R2, NUMER),
+ BPF_LD_IMM64(R1, DENOM),
+ BPF_ALU64_REG(BPF_DIV, R2, R1),
+ BPF_JMP_IMM(BPF_JEQ, R2, NUMER / DENOM, 1),
+ BPF_EXIT_INSN(),
+ /* R1 / R2 */
+ BPF_LD_IMM64(R1, NUMER),
+ BPF_LD_IMM64(R2, DENOM),
+ BPF_ALU64_REG(BPF_DIV, R1, R2),
+ BPF_JMP_IMM(BPF_JEQ, R1, NUMER / DENOM, 1),
+ BPF_EXIT_INSN(),
+ /* R1 / R1 */
+ BPF_LD_IMM64(R1, NUMER),
+ BPF_ALU64_REG(BPF_DIV, R1, R1),
+ BPF_JMP_IMM(BPF_JEQ, R1, 1, 1),
+ BPF_EXIT_INSN(),
+ /* R2 / R2 */
+ BPF_LD_IMM64(R2, DENOM),
+ BPF_ALU64_REG(BPF_DIV, R2, R2),
+ BPF_JMP_IMM(BPF_JEQ, R2, 1, 1),
+ BPF_EXIT_INSN(),
+ /* R3 / R4 */
+ BPF_LD_IMM64(R3, NUMER),
+ BPF_LD_IMM64(R4, DENOM),
+ BPF_ALU64_REG(BPF_DIV, R3, R4),
+ BPF_JMP_IMM(BPF_JEQ, R3, NUMER / DENOM, 1),
+ BPF_EXIT_INSN(),
+ /* Successful return */
+ BPF_LD_IMM64(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+#undef NUMER
+#undef DENOM
+ },
+#ifdef CONFIG_32BIT
+ {
+ "INT: 32-bit context pointer word order and zero-extension",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_JMP32_IMM(BPF_JEQ, R1, 0, 3),
+ BPF_ALU64_IMM(BPF_RSH, R1, 32),
+ BPF_JMP32_IMM(BPF_JNE, R1, 0, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+#endif
+ {
"check: missing ret",
.u.insns = {
BPF_STMT(BPF_LD | BPF_IMM, 1),
@@ -2361,6 +2553,48 @@ static struct bpf_test tests[] = {
{ { 0, 0x1 } },
},
{
+ "ALU_MOV_K: small negative",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ {
+ "ALU_MOV_K: small negative zero extension",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU_MOV_K: large negative",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123456789),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123456789 } }
+ },
+ {
+ "ALU_MOV_K: large negative zero extension",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123456789),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
"ALU64_MOV_K: dst = 2",
.u.insns_int = {
BPF_ALU64_IMM(BPF_MOV, R0, 2),
@@ -2412,6 +2646,48 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0x1 } },
},
+ {
+ "ALU64_MOV_K: small negative",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, -123),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ {
+ "ALU64_MOV_K: small negative sign extension",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, -123),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } }
+ },
+ {
+ "ALU64_MOV_K: large negative",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, -123456789),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123456789 } }
+ },
+ {
+ "ALU64_MOV_K: large negative sign extension",
+ .u.insns_int = {
+ BPF_ALU64_IMM(BPF_MOV, R0, -123456789),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffff } }
+ },
/* BPF_ALU | BPF_ADD | BPF_X */
{
"ALU_ADD_X: 1 + 2 = 3",
@@ -2967,6 +3243,31 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 2147483647 } },
},
+ {
+ "ALU64_MUL_X: 64x64 multiply, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0fedcba987654321LL),
+ BPF_LD_IMM64(R1, 0x123456789abcdef0LL),
+ BPF_ALU64_REG(BPF_MUL, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xe5618cf0 } }
+ },
+ {
+ "ALU64_MUL_X: 64x64 multiply, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0fedcba987654321LL),
+ BPF_LD_IMM64(R1, 0x123456789abcdef0LL),
+ BPF_ALU64_REG(BPF_MUL, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x2236d88f } }
+ },
/* BPF_ALU | BPF_MUL | BPF_K */
{
"ALU_MUL_K: 2 * 3 = 6",
@@ -3077,6 +3378,29 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0x1 } },
},
+ {
+ "ALU64_MUL_K: 64x32 multiply, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_MUL, R0, 0x12345678),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xe242d208 } }
+ },
+ {
+ "ALU64_MUL_K: 64x32 multiply, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_MUL, R0, 0x12345678),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xc28f5c28 } }
+ },
/* BPF_ALU | BPF_DIV | BPF_X */
{
"ALU_DIV_X: 6 / 2 = 3",
@@ -3431,6 +3755,44 @@ static struct bpf_test tests[] = {
{ { 0, 0xffffffff } },
},
{
+ "ALU_AND_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
+ BPF_ALU32_IMM(BPF_AND, R0, 15),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 4 } }
+ },
+ {
+ "ALU_AND_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xf1f2f3f4),
+ BPF_ALU32_IMM(BPF_AND, R0, 0xafbfcfdf),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xa1b2c3d4 } }
+ },
+ {
+ "ALU_AND_K: Zero extension",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0x0000000080a0c0e0LL),
+ BPF_ALU32_IMM(BPF_AND, R0, 0xf0f0f0f0),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
"ALU64_AND_K: 3 & 2 = 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 3),
@@ -3453,7 +3815,7 @@ static struct bpf_test tests[] = {
{ { 0, 0xffffffff } },
},
{
- "ALU64_AND_K: 0x0000ffffffff0000 & 0x0 = 0x0000ffff00000000",
+ "ALU64_AND_K: 0x0000ffffffff0000 & 0x0 = 0x0000000000000000",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
BPF_LD_IMM64(R3, 0x0000000000000000LL),
@@ -3469,7 +3831,7 @@ static struct bpf_test tests[] = {
{ { 0, 0x1 } },
},
{
- "ALU64_AND_K: 0x0000ffffffff0000 & -1 = 0x0000ffffffffffff",
+ "ALU64_AND_K: 0x0000ffffffff0000 & -1 = 0x0000ffffffff0000",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
@@ -3500,6 +3862,38 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0x1 } },
},
+ {
+ "ALU64_AND_K: Sign extension 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0x00000000090b0d0fLL),
+ BPF_ALU64_IMM(BPF_AND, R0, 0x0f0f0f0f),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ "ALU64_AND_K: Sign extension 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0x0123456780a0c0e0LL),
+ BPF_ALU64_IMM(BPF_AND, R0, 0xf0f0f0f0),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
/* BPF_ALU | BPF_OR | BPF_X */
{
"ALU_OR_X: 1 | 2 = 3",
@@ -3573,6 +3967,44 @@ static struct bpf_test tests[] = {
{ { 0, 0xffffffff } },
},
{
+ "ALU_OR_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
+ BPF_ALU32_IMM(BPF_OR, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x01020305 } }
+ },
+ {
+ "ALU_OR_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
+ BPF_ALU32_IMM(BPF_OR, R0, 0xa0b0c0d0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xa1b2c3d4 } }
+ },
+ {
+ "ALU_OR_K: Zero extension",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0x00000000f9fbfdffLL),
+ BPF_ALU32_IMM(BPF_OR, R0, 0xf0f0f0f0),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
"ALU64_OR_K: 1 | 2 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
@@ -3595,7 +4027,7 @@ static struct bpf_test tests[] = {
{ { 0, 0xffffffff } },
},
{
- "ALU64_OR_K: 0x0000ffffffff0000 | 0x0 = 0x0000ffff00000000",
+ "ALU64_OR_K: 0x0000ffffffff0000 | 0x0 = 0x0000ffffffff0000",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0000ffffffff0000LL),
BPF_LD_IMM64(R3, 0x0000ffffffff0000LL),
@@ -3642,6 +4074,38 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0x1 } },
},
+ {
+ "ALU64_OR_K: Sign extension 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0x012345678fafcfefLL),
+ BPF_ALU64_IMM(BPF_OR, R0, 0x0f0f0f0f),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ "ALU64_OR_K: Sign extension 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0xfffffffff9fbfdffLL),
+ BPF_ALU64_IMM(BPF_OR, R0, 0xf0f0f0f0),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
/* BPF_ALU | BPF_XOR | BPF_X */
{
"ALU_XOR_X: 5 ^ 6 = 3",
@@ -3715,6 +4179,44 @@ static struct bpf_test tests[] = {
{ { 0, 0xfffffffe } },
},
{
+ "ALU_XOR_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x01020304),
+ BPF_ALU32_IMM(BPF_XOR, R0, 15),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x0102030b } }
+ },
+ {
+ "ALU_XOR_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xf1f2f3f4),
+ BPF_ALU32_IMM(BPF_XOR, R0, 0xafbfcfdf),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x5e4d3c2b } }
+ },
+ {
+ "ALU_XOR_K: Zero extension",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0x00000000795b3d1fLL),
+ BPF_ALU32_IMM(BPF_XOR, R0, 0xf0f0f0f0),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
"ALU64_XOR_K: 5 ^ 6 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 5),
@@ -3726,7 +4228,7 @@ static struct bpf_test tests[] = {
{ { 0, 3 } },
},
{
- "ALU64_XOR_K: 1 & 0xffffffff = 0xfffffffe",
+ "ALU64_XOR_K: 1 ^ 0xffffffff = 0xfffffffe",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
BPF_ALU64_IMM(BPF_XOR, R0, 0xffffffff),
@@ -3784,6 +4286,38 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0x1 } },
},
+ {
+ "ALU64_XOR_K: Sign extension 1",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0x0123456786a4c2e0LL),
+ BPF_ALU64_IMM(BPF_XOR, R0, 0x0f0f0f0f),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ "ALU64_XOR_K: Sign extension 2",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_LD_IMM64(R1, 0xfedcba98795b3d1fLL),
+ BPF_ALU64_IMM(BPF_XOR, R0, 0xf0f0f0f0),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
/* BPF_ALU | BPF_LSH | BPF_X */
{
"ALU_LSH_X: 1 << 1 = 2",
@@ -3810,6 +4344,18 @@ static struct bpf_test tests[] = {
{ { 0, 0x80000000 } },
},
{
+ "ALU_LSH_X: 0x12345678 << 12 = 0x45678000",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+ BPF_ALU32_IMM(BPF_MOV, R1, 12),
+ BPF_ALU32_REG(BPF_LSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x45678000 } }
+ },
+ {
"ALU64_LSH_X: 1 << 1 = 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
@@ -3833,6 +4379,106 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0x80000000 } },
},
+ {
+ "ALU64_LSH_X: Shift < 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 12),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xbcdef000 } }
+ },
+ {
+ "ALU64_LSH_X: Shift < 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 12),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x3456789a } }
+ },
+ {
+ "ALU64_LSH_X: Shift > 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 36),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_LSH_X: Shift > 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 36),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x9abcdef0 } }
+ },
+ {
+ "ALU64_LSH_X: Shift == 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 32),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_LSH_X: Shift == 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 32),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
+ {
+ "ALU64_LSH_X: Zero shift, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
+ {
+ "ALU64_LSH_X: Zero shift, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0),
+ BPF_ALU64_REG(BPF_LSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x01234567 } }
+ },
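For reference, 0x0123456789abcdef << 12 = 0x3456789abcdef000, so the expected low and high words in the two "Shift < 32" cases above are 0xbcdef000 and 0x3456789a; the remaining cases follow the same arithmetic with shift counts of 36, 32 and 0.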
/* BPF_ALU | BPF_LSH | BPF_K */
{
"ALU_LSH_K: 1 << 1 = 2",
@@ -3857,6 +4503,28 @@ static struct bpf_test tests[] = {
{ { 0, 0x80000000 } },
},
{
+ "ALU_LSH_K: 0x12345678 << 12 = 0x45678000",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+ BPF_ALU32_IMM(BPF_LSH, R0, 12),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x45678000 } }
+ },
+ {
+ "ALU_LSH_K: 0x12345678 << 0 = 0x12345678",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+ BPF_ALU32_IMM(BPF_LSH, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x12345678 } }
+ },
+ {
"ALU64_LSH_K: 1 << 1 = 2",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
@@ -3878,6 +4546,86 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0x80000000 } },
},
+ {
+ "ALU64_LSH_K: Shift < 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_LSH, R0, 12),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xbcdef000 } }
+ },
+ {
+ "ALU64_LSH_K: Shift < 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_LSH, R0, 12),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x3456789a } }
+ },
+ {
+ "ALU64_LSH_K: Shift > 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_LSH, R0, 36),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_LSH_K: Shift > 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_LSH, R0, 36),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x9abcdef0 } }
+ },
+ {
+ "ALU64_LSH_K: Shift == 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_LSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_LSH_K: Shift == 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_LSH, R0, 32),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
+ {
+ "ALU64_LSH_K: Zero shift",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_LSH, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
/* BPF_ALU | BPF_RSH | BPF_X */
{
"ALU_RSH_X: 2 >> 1 = 1",
@@ -3904,6 +4652,18 @@ static struct bpf_test tests[] = {
{ { 0, 1 } },
},
{
+ "ALU_RSH_X: 0x12345678 >> 20 = 0x123",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+ BPF_ALU32_IMM(BPF_MOV, R1, 20),
+ BPF_ALU32_REG(BPF_RSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x123 } }
+ },
+ {
"ALU64_RSH_X: 2 >> 1 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 2),
@@ -3927,6 +4687,106 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ {
+ "ALU64_RSH_X: Shift < 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 12),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x56789abc } }
+ },
+ {
+ "ALU64_RSH_X: Shift < 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 12),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x00081234 } }
+ },
+ {
+ "ALU64_RSH_X: Shift > 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 36),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x08123456 } }
+ },
+ {
+ "ALU64_RSH_X: Shift > 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 36),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_RSH_X: Shift == 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 32),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x81234567 } }
+ },
+ {
+ "ALU64_RSH_X: Shift == 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 32),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_RSH_X: Zero shift, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
+ {
+ "ALU64_RSH_X: Zero shift, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0),
+ BPF_ALU64_REG(BPF_RSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x81234567 } }
+ },
/* BPF_ALU | BPF_RSH | BPF_K */
{
"ALU_RSH_K: 2 >> 1 = 1",
@@ -3951,6 +4811,28 @@ static struct bpf_test tests[] = {
{ { 0, 1 } },
},
{
+ "ALU_RSH_K: 0x12345678 >> 20 = 0x123",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+ BPF_ALU32_IMM(BPF_RSH, R0, 20),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x123 } }
+ },
+ {
+ "ALU_RSH_K: 0x12345678 >> 0 = 0x12345678",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x12345678),
+ BPF_ALU32_IMM(BPF_RSH, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x12345678 } }
+ },
+ {
"ALU64_RSH_K: 2 >> 1 = 1",
.u.insns_int = {
BPF_LD_IMM64(R0, 2),
@@ -3972,9 +4854,101 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ {
+ "ALU64_RSH_K: Shift < 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_RSH, R0, 12),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x56789abc } }
+ },
+ {
+ "ALU64_RSH_K: Shift < 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_RSH, R0, 12),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x00081234 } }
+ },
+ {
+ "ALU64_RSH_K: Shift > 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_RSH, R0, 36),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x08123456 } }
+ },
+ {
+ "ALU64_RSH_K: Shift > 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_RSH, R0, 36),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_RSH_K: Shift == 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x81234567 } }
+ },
+ {
+ "ALU64_RSH_K: Shift == 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0 } }
+ },
+ {
+ "ALU64_RSH_K: Zero shift",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_RSH, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
/* BPF_ALU | BPF_ARSH | BPF_X */
{
- "ALU_ARSH_X: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
+ "ALU32_ARSH_X: -1234 >> 7 = -10",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -1234),
+ BPF_ALU32_IMM(BPF_MOV, R1, 7),
+ BPF_ALU32_REG(BPF_ARSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -10 } }
+ },
+ {
+ "ALU64_ARSH_X: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xff00ff0000000000LL),
BPF_ALU32_IMM(BPF_MOV, R1, 40),
@@ -3985,9 +4959,131 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0xffff00ff } },
},
+ {
+ "ALU64_ARSH_X: Shift < 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 12),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x56789abc } }
+ },
+ {
+ "ALU64_ARSH_X: Shift < 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 12),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfff81234 } }
+ },
+ {
+ "ALU64_ARSH_X: Shift > 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 36),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xf8123456 } }
+ },
+ {
+ "ALU64_ARSH_X: Shift > 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 36),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } }
+ },
+ {
+ "ALU64_ARSH_X: Shift == 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 32),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x81234567 } }
+ },
+ {
+ "ALU64_ARSH_X: Shift == 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 32),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } }
+ },
+ {
+ "ALU64_ARSH_X: Zero shift, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
+ {
+ "ALU64_ARSH_X: Zero shift, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0),
+ BPF_ALU64_REG(BPF_ARSH, R0, R1),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x81234567 } }
+ },
/* BPF_ALU | BPF_ARSH | BPF_K */
{
- "ALU_ARSH_K: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
+ "ALU32_ARSH_K: -1234 >> 7 = -10",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -1234),
+ BPF_ALU32_IMM(BPF_ARSH, R0, 7),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -10 } }
+ },
+ {
+ "ALU32_ARSH_K: -1234 >> 0 = -1234",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -1234),
+ BPF_ALU32_IMM(BPF_ARSH, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1234 } }
+ },
+ {
+ "ALU64_ARSH_K: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xff00ff0000000000LL),
BPF_ALU64_IMM(BPF_ARSH, R0, 40),
@@ -3997,6 +5093,86 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0xffff00ff } },
},
+ {
+ "ALU64_ARSH_K: Shift < 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+			BPF_ALU64_IMM(BPF_ARSH, R0, 12),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x56789abc } }
+ },
+ {
+ "ALU64_ARSH_K: Shift < 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_ARSH, R0, 12),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfff81234 } }
+ },
+ {
+ "ALU64_ARSH_K: Shift > 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_ARSH, R0, 36),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xf8123456 } }
+ },
+ {
+ "ALU64_ARSH_K: Shift > 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0xf123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_ARSH, R0, 36),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } }
+ },
+ {
+ "ALU64_ARSH_K: Shift == 32, low word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_ARSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x81234567 } }
+ },
+ {
+ "ALU64_ARSH_K: Shift == 32, high word",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_ARSH, R0, 32),
+ BPF_ALU64_IMM(BPF_RSH, R0, 32),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -1 } }
+ },
+ {
+ "ALU64_ARSH_K: Zero shift",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0x8123456789abcdefLL),
+ BPF_ALU64_IMM(BPF_ARSH, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x89abcdef } }
+ },
/* BPF_ALU | BPF_NEG */
{
"ALU_NEG: -(3) = -3",
@@ -4286,8 +5462,8 @@ static struct bpf_test tests[] = {
.u.insns_int = {
BPF_LD_IMM64(R0, 0),
BPF_LD_IMM64(R1, 0xffffffffffffffffLL),
- BPF_STX_MEM(BPF_W, R10, R1, -40),
- BPF_LDX_MEM(BPF_W, R0, R10, -40),
+ BPF_STX_MEM(BPF_DW, R10, R1, -40),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
@@ -4295,80 +5471,346 @@ static struct bpf_test tests[] = {
{ { 0, 0xffffffff } },
.stack_depth = 40,
},
+ {
+ "STX_MEM_DW: Store double word: first word in memory",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0),
+ BPF_LD_IMM64(R1, 0x0123456789abcdefLL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -40),
+ BPF_LDX_MEM(BPF_W, R0, R10, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+#ifdef __BIG_ENDIAN
+ { { 0, 0x01234567 } },
+#else
+ { { 0, 0x89abcdef } },
+#endif
+ .stack_depth = 40,
+ },
+ {
+ "STX_MEM_DW: Store double word: second word in memory",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 0),
+ BPF_LD_IMM64(R1, 0x0123456789abcdefLL),
+ BPF_STX_MEM(BPF_DW, R10, R1, -40),
+ BPF_LDX_MEM(BPF_W, R0, R10, -36),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+#ifdef __BIG_ENDIAN
+ { { 0, 0x89abcdef } },
+#else
+ { { 0, 0x01234567 } },
+#endif
+ .stack_depth = 40,
+ },
/* BPF_STX | BPF_ATOMIC | BPF_W/DW */
{
- "STX_XADD_W: Test: 0x12 + 0x10 = 0x22",
+ "STX_XADD_W: X + 1 + 1 + 1 + ...",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 4134 } },
+ .fill_helper = bpf_fill_stxw,
+ },
+ {
+ "STX_XADD_DW: X + 1 + 1 + 1 + ...",
+ { },
+ INTERNAL,
+ { },
+ { { 0, 4134 } },
+ .fill_helper = bpf_fill_stxdw,
+ },
+ /*
+ * Exhaustive tests of atomic operation variants.
+ * Individual tests are expanded from template macros for all
+ * combinations of ALU operation, word size and fetching.
+ */
+#define BPF_ATOMIC_OP_TEST1(width, op, logic, old, update, result) \
+{ \
+ "BPF_ATOMIC | " #width ", " #op ": Test: " \
+ #old " " #logic " " #update " = " #result, \
+ .u.insns_int = { \
+ BPF_ALU32_IMM(BPF_MOV, R5, update), \
+ BPF_ST_MEM(width, R10, -40, old), \
+ BPF_ATOMIC_OP(width, op, R10, R5, -40), \
+ BPF_LDX_MEM(width, R0, R10, -40), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, result } }, \
+ .stack_depth = 40, \
+}
+#define BPF_ATOMIC_OP_TEST2(width, op, logic, old, update, result) \
+{ \
+ "BPF_ATOMIC | " #width ", " #op ": Test side effects, r10: " \
+ #old " " #logic " " #update " = " #result, \
+ .u.insns_int = { \
+ BPF_ALU64_REG(BPF_MOV, R1, R10), \
+ BPF_ALU32_IMM(BPF_MOV, R0, update), \
+ BPF_ST_MEM(BPF_W, R10, -40, old), \
+ BPF_ATOMIC_OP(width, op, R10, R0, -40), \
+ BPF_ALU64_REG(BPF_MOV, R0, R10), \
+ BPF_ALU64_REG(BPF_SUB, R0, R1), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 0 } }, \
+ .stack_depth = 40, \
+}
+#define BPF_ATOMIC_OP_TEST3(width, op, logic, old, update, result) \
+{ \
+ "BPF_ATOMIC | " #width ", " #op ": Test side effects, r0: " \
+ #old " " #logic " " #update " = " #result, \
+ .u.insns_int = { \
+ BPF_ALU64_REG(BPF_MOV, R0, R10), \
+ BPF_ALU32_IMM(BPF_MOV, R1, update), \
+ BPF_ST_MEM(width, R10, -40, old), \
+ BPF_ATOMIC_OP(width, op, R10, R1, -40), \
+ BPF_ALU64_REG(BPF_SUB, R0, R10), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, 0 } }, \
+ .stack_depth = 40, \
+}
+#define BPF_ATOMIC_OP_TEST4(width, op, logic, old, update, result) \
+{ \
+ "BPF_ATOMIC | " #width ", " #op ": Test fetch: " \
+ #old " " #logic " " #update " = " #result, \
+ .u.insns_int = { \
+ BPF_ALU32_IMM(BPF_MOV, R3, update), \
+ BPF_ST_MEM(width, R10, -40, old), \
+ BPF_ATOMIC_OP(width, op, R10, R3, -40), \
+ BPF_ALU64_REG(BPF_MOV, R0, R3), \
+ BPF_EXIT_INSN(), \
+ }, \
+ INTERNAL, \
+ { }, \
+ { { 0, (op) & BPF_FETCH ? old : update } }, \
+ .stack_depth = 40, \
+}
+ /* BPF_ATOMIC | BPF_W: BPF_ADD */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ /* BPF_ATOMIC | BPF_W: BPF_ADD | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ /* BPF_ATOMIC | BPF_DW: BPF_ADD */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_ADD, +, 0x12, 0xab, 0xbd),
+ /* BPF_ATOMIC | BPF_DW: BPF_ADD | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_ADD | BPF_FETCH, +, 0x12, 0xab, 0xbd),
+ /* BPF_ATOMIC | BPF_W: BPF_AND */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_AND, &, 0x12, 0xab, 0x02),
+ /* BPF_ATOMIC | BPF_W: BPF_AND | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ /* BPF_ATOMIC | BPF_DW: BPF_AND */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_AND, &, 0x12, 0xab, 0x02),
+ /* BPF_ATOMIC | BPF_DW: BPF_AND | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_AND | BPF_FETCH, &, 0x12, 0xab, 0x02),
+ /* BPF_ATOMIC | BPF_W: BPF_OR */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_OR, |, 0x12, 0xab, 0xbb),
+ /* BPF_ATOMIC | BPF_W: BPF_OR | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ /* BPF_ATOMIC | BPF_DW: BPF_OR */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_OR, |, 0x12, 0xab, 0xbb),
+ /* BPF_ATOMIC | BPF_DW: BPF_OR | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_OR | BPF_FETCH, |, 0x12, 0xab, 0xbb),
+ /* BPF_ATOMIC | BPF_W: BPF_XOR */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ /* BPF_ATOMIC | BPF_W: BPF_XOR | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ /* BPF_ATOMIC | BPF_DW: BPF_XOR */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XOR, ^, 0x12, 0xab, 0xb9),
+ /* BPF_ATOMIC | BPF_DW: BPF_XOR | BPF_FETCH */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XOR | BPF_FETCH, ^, 0x12, 0xab, 0xb9),
+ /* BPF_ATOMIC | BPF_W: BPF_XCHG */
+ BPF_ATOMIC_OP_TEST1(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+ BPF_ATOMIC_OP_TEST2(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+ BPF_ATOMIC_OP_TEST3(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+ BPF_ATOMIC_OP_TEST4(BPF_W, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+ /* BPF_ATOMIC | BPF_DW: BPF_XCHG */
+ BPF_ATOMIC_OP_TEST1(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+ BPF_ATOMIC_OP_TEST2(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+ BPF_ATOMIC_OP_TEST3(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+ BPF_ATOMIC_OP_TEST4(BPF_DW, BPF_XCHG, xchg, 0x12, 0xab, 0xab),
+#undef BPF_ATOMIC_OP_TEST1
+#undef BPF_ATOMIC_OP_TEST2
+#undef BPF_ATOMIC_OP_TEST3
+#undef BPF_ATOMIC_OP_TEST4
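To make the template structure concrete, the first instantiation above, BPF_ATOMIC_OP_TEST1(BPF_W, BPF_ADD, +, 0x12, 0xab, 0xbd), expands mechanically to the test case below (shown only as an illustration, not as an extra entry in the patch):

	{
		"BPF_ATOMIC | BPF_W, BPF_ADD: Test: 0x12 + 0xab = 0xbd",
		.u.insns_int = {
			BPF_ALU32_IMM(BPF_MOV, R5, 0xab),
			BPF_ST_MEM(BPF_W, R10, -40, 0x12),
			BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R5, -40),
			BPF_LDX_MEM(BPF_W, R0, R10, -40),
			BPF_EXIT_INSN(),
		},
		INTERNAL,
		{ },
		{ { 0, 0xbd } },
		.stack_depth = 40,
	},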
+ /* BPF_ATOMIC | BPF_W, BPF_CMPXCHG */
+ {
+ "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test successful return",
+ .u.insns_int = {
+ BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x01234567 } },
+ .stack_depth = 40,
+ },
+ {
+ "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test successful store",
.u.insns_int = {
- BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
- BPF_ST_MEM(BPF_W, R10, -40, 0x10),
- BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40),
+ BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
BPF_LDX_MEM(BPF_W, R0, R10, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
- { { 0, 0x22 } },
+ { { 0, 0x89abcdef } },
.stack_depth = 40,
},
{
- "STX_XADD_W: Test side-effects, r10: 0x12 + 0x10 = 0x22",
+ "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test failure return",
.u.insns_int = {
- BPF_ALU64_REG(BPF_MOV, R1, R10),
- BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
- BPF_ST_MEM(BPF_W, R10, -40, 0x10),
- BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40),
- BPF_ALU64_REG(BPF_MOV, R0, R10),
- BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x76543210),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
- { { 0, 0 } },
+ { { 0, 0x01234567 } },
.stack_depth = 40,
},
{
- "STX_XADD_W: Test side-effects, r0: 0x12 + 0x10 = 0x22",
+ "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test failure store",
.u.insns_int = {
- BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
- BPF_ST_MEM(BPF_W, R10, -40, 0x10),
- BPF_ATOMIC_OP(BPF_W, BPF_ADD, R10, R0, -40),
+ BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x76543210),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
+ BPF_LDX_MEM(BPF_W, R0, R10, -40),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
- { { 0, 0x12 } },
+ { { 0, 0x01234567 } },
.stack_depth = 40,
},
{
- "STX_XADD_W: X + 1 + 1 + 1 + ...",
+ "BPF_ATOMIC | BPF_W, BPF_CMPXCHG: Test side effects",
+ .u.insns_int = {
+ BPF_ST_MEM(BPF_W, R10, -40, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x01234567),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0x89abcdef),
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
+ BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, R10, R3, -40),
+ BPF_ALU32_REG(BPF_MOV, R0, R3),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
{ },
+ { { 0, 0x89abcdef } },
+ .stack_depth = 40,
+ },
+ /* BPF_ATOMIC | BPF_DW, BPF_CMPXCHG */
+ {
+ "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test successful return",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
+ BPF_LD_IMM64(R2, 0xfecdba9876543210ULL),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_STX_MEM(BPF_DW, R10, R1, -40),
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
+ BPF_EXIT_INSN(),
+ },
INTERNAL,
{ },
- { { 0, 4134 } },
- .fill_helper = bpf_fill_stxw,
+ { { 0, 0 } },
+ .stack_depth = 40,
},
{
- "STX_XADD_DW: Test: 0x12 + 0x10 = 0x22",
+ "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test successful store",
.u.insns_int = {
- BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
- BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
- BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40),
+ BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
+ BPF_LD_IMM64(R2, 0xfecdba9876543210ULL),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_STX_MEM(BPF_DW, R10, R0, -40),
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
BPF_LDX_MEM(BPF_DW, R0, R10, -40),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_REG(BPF_SUB, R0, R2),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
- { { 0, 0x22 } },
+ { { 0, 0 } },
.stack_depth = 40,
},
{
- "STX_XADD_DW: Test side-effects, r10: 0x12 + 0x10 = 0x22",
+ "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test failure return",
.u.insns_int = {
- BPF_ALU64_REG(BPF_MOV, R1, R10),
- BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
- BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
- BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40),
- BPF_ALU64_REG(BPF_MOV, R0, R10),
+ BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
+ BPF_LD_IMM64(R2, 0xfecdba9876543210ULL),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_ALU64_IMM(BPF_ADD, R0, 1),
+ BPF_STX_MEM(BPF_DW, R10, R1, -40),
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 1),
BPF_ALU64_REG(BPF_SUB, R0, R1),
BPF_EXIT_INSN(),
},
@@ -4378,25 +5820,552 @@ static struct bpf_test tests[] = {
.stack_depth = 40,
},
{
- "STX_XADD_DW: Test side-effects, r0: 0x12 + 0x10 = 0x22",
+ "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test failure store",
.u.insns_int = {
- BPF_ALU32_IMM(BPF_MOV, R0, 0x12),
- BPF_ST_MEM(BPF_DW, R10, -40, 0x10),
- BPF_ATOMIC_OP(BPF_DW, BPF_ADD, R10, R0, -40),
+ BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
+ BPF_LD_IMM64(R2, 0xfecdba9876543210ULL),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_ALU64_IMM(BPF_ADD, R0, 1),
+ BPF_STX_MEM(BPF_DW, R10, R1, -40),
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
+ BPF_LDX_MEM(BPF_DW, R0, R10, -40),
+ BPF_JMP_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU64_REG(BPF_SUB, R0, R1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
- { { 0, 0x12 } },
+ { { 0, 0 } },
.stack_depth = 40,
},
{
- "STX_XADD_DW: X + 1 + 1 + 1 + ...",
+ "BPF_ATOMIC | BPF_DW, BPF_CMPXCHG: Test side effects",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x0123456789abcdefULL),
+ BPF_LD_IMM64(R2, 0xfecdba9876543210ULL),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_STX_MEM(BPF_DW, R10, R1, -40),
+ BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, R10, R2, -40),
+ BPF_LD_IMM64(R0, 0xfecdba9876543210ULL),
+ BPF_JMP_REG(BPF_JNE, R0, R2, 1),
+ BPF_ALU64_REG(BPF_SUB, R0, R2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
{ },
+ { { 0, 0 } },
+ .stack_depth = 40,
+ },
+ /* BPF_JMP32 | BPF_JEQ | BPF_K */
+ {
+ "JMP32_JEQ_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 123),
+ BPF_JMP32_IMM(BPF_JEQ, R0, 321, 1),
+ BPF_JMP32_IMM(BPF_JEQ, R0, 123, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
INTERNAL,
{ },
- { { 0, 4134 } },
- .fill_helper = bpf_fill_stxdw,
+ { { 0, 123 } }
+ },
+ {
+ "JMP32_JEQ_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 12345678),
+ BPF_JMP32_IMM(BPF_JEQ, R0, 12345678 & 0xffff, 1),
+ BPF_JMP32_IMM(BPF_JEQ, R0, 12345678, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 12345678 } }
+ },
+ {
+ "JMP32_JEQ_K: negative immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_JMP32_IMM(BPF_JEQ, R0, 123, 1),
+ BPF_JMP32_IMM(BPF_JEQ, R0, -123, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ /* BPF_JMP32 | BPF_JEQ | BPF_X */
+ {
+ "JMP32_JEQ_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1234),
+ BPF_ALU32_IMM(BPF_MOV, R1, 4321),
+ BPF_JMP32_REG(BPF_JEQ, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 1234),
+ BPF_JMP32_REG(BPF_JEQ, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1234 } }
+ },
+ /* BPF_JMP32 | BPF_JNE | BPF_K */
+ {
+ "JMP32_JNE_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 123),
+ BPF_JMP32_IMM(BPF_JNE, R0, 123, 1),
+ BPF_JMP32_IMM(BPF_JNE, R0, 321, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 123 } }
+ },
+ {
+ "JMP32_JNE_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 12345678),
+ BPF_JMP32_IMM(BPF_JNE, R0, 12345678, 1),
+ BPF_JMP32_IMM(BPF_JNE, R0, 12345678 & 0xffff, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 12345678 } }
+ },
+ {
+ "JMP32_JNE_K: negative immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_JMP32_IMM(BPF_JNE, R0, -123, 1),
+ BPF_JMP32_IMM(BPF_JNE, R0, 123, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ /* BPF_JMP32 | BPF_JNE | BPF_X */
+ {
+ "JMP32_JNE_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1234),
+ BPF_ALU32_IMM(BPF_MOV, R1, 1234),
+ BPF_JMP32_REG(BPF_JNE, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 4321),
+ BPF_JMP32_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1234 } }
+ },
+ /* BPF_JMP32 | BPF_JSET | BPF_K */
+ {
+ "JMP32_JSET_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP32_IMM(BPF_JSET, R0, 2, 1),
+ BPF_JMP32_IMM(BPF_JSET, R0, 3, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } }
+ },
+ {
+ "JMP32_JSET_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0x40000000),
+ BPF_JMP32_IMM(BPF_JSET, R0, 0x3fffffff, 1),
+ BPF_JMP32_IMM(BPF_JSET, R0, 0x60000000, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x40000000 } }
+ },
+ {
+ "JMP32_JSET_K: negative immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_JMP32_IMM(BPF_JSET, R0, -1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ /* BPF_JMP32 | BPF_JSET | BPF_X */
+ {
+ "JMP32_JSET_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 8),
+ BPF_ALU32_IMM(BPF_MOV, R1, 7),
+ BPF_JMP32_REG(BPF_JSET, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 8 | 2),
+ BPF_JMP32_REG(BPF_JNE, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 8 } }
+ },
+ /* BPF_JMP32 | BPF_JGT | BPF_K */
+ {
+ "JMP32_JGT_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 123),
+ BPF_JMP32_IMM(BPF_JGT, R0, 123, 1),
+ BPF_JMP32_IMM(BPF_JGT, R0, 122, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 123 } }
+ },
+ {
+ "JMP32_JGT_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_JMP32_IMM(BPF_JGT, R0, 0xffffffff, 1),
+ BPF_JMP32_IMM(BPF_JGT, R0, 0xfffffffd, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JGT | BPF_X */
+ {
+ "JMP32_JGT_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+ BPF_JMP32_REG(BPF_JGT, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd),
+ BPF_JMP32_REG(BPF_JGT, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JGE | BPF_K */
+ {
+ "JMP32_JGE_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 123),
+ BPF_JMP32_IMM(BPF_JGE, R0, 124, 1),
+ BPF_JMP32_IMM(BPF_JGE, R0, 123, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 123 } }
+ },
+ {
+ "JMP32_JGE_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_JMP32_IMM(BPF_JGE, R0, 0xffffffff, 1),
+ BPF_JMP32_IMM(BPF_JGE, R0, 0xfffffffe, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JGE | BPF_X */
+ {
+ "JMP32_JGE_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+ BPF_JMP32_REG(BPF_JGE, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffe),
+ BPF_JMP32_REG(BPF_JGE, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JLT | BPF_K */
+ {
+ "JMP32_JLT_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 123),
+ BPF_JMP32_IMM(BPF_JLT, R0, 123, 1),
+ BPF_JMP32_IMM(BPF_JLT, R0, 124, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 123 } }
+ },
+ {
+ "JMP32_JLT_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_JMP32_IMM(BPF_JLT, R0, 0xfffffffd, 1),
+ BPF_JMP32_IMM(BPF_JLT, R0, 0xffffffff, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JLT | BPF_X */
+ {
+ "JMP32_JLT_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd),
+ BPF_JMP32_REG(BPF_JLT, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff),
+ BPF_JMP32_REG(BPF_JLT, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JLE | BPF_K */
+ {
+ "JMP32_JLE_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 123),
+ BPF_JMP32_IMM(BPF_JLE, R0, 122, 1),
+ BPF_JMP32_IMM(BPF_JLE, R0, 123, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 123 } }
+ },
+ {
+ "JMP32_JLE_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_JMP32_IMM(BPF_JLE, R0, 0xfffffffd, 1),
+ BPF_JMP32_IMM(BPF_JLE, R0, 0xfffffffe, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JLE | BPF_X */
+ {
+ "JMP32_JLE_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0xfffffffe),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffd),
+ BPF_JMP32_REG(BPF_JLE, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfffffffe),
+ BPF_JMP32_REG(BPF_JLE, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xfffffffe } }
+ },
+ /* BPF_JMP32 | BPF_JSGT | BPF_K */
+ {
+ "JMP32_JSGT_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_JMP32_IMM(BPF_JSGT, R0, -123, 1),
+ BPF_JMP32_IMM(BPF_JSGT, R0, -124, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ {
+ "JMP32_JSGT_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_JMP32_IMM(BPF_JSGT, R0, -12345678, 1),
+ BPF_JMP32_IMM(BPF_JSGT, R0, -12345679, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
+ },
+ /* BPF_JMP32 | BPF_JSGT | BPF_X */
+ {
+ "JMP32_JSGT_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
+ BPF_JMP32_REG(BPF_JSGT, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345679),
+ BPF_JMP32_REG(BPF_JSGT, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
+ },
+ /* BPF_JMP32 | BPF_JSGE | BPF_K */
+ {
+ "JMP32_JSGE_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_JMP32_IMM(BPF_JSGE, R0, -122, 1),
+ BPF_JMP32_IMM(BPF_JSGE, R0, -123, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ {
+ "JMP32_JSGE_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_JMP32_IMM(BPF_JSGE, R0, -12345677, 1),
+ BPF_JMP32_IMM(BPF_JSGE, R0, -12345678, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
+ },
+ /* BPF_JMP32 | BPF_JSGE | BPF_X */
+ {
+ "JMP32_JSGE_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345677),
+ BPF_JMP32_REG(BPF_JSGE, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
+ BPF_JMP32_REG(BPF_JSGE, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
+ },
+ /* BPF_JMP32 | BPF_JSLT | BPF_K */
+ {
+ "JMP32_JSLT_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_JMP32_IMM(BPF_JSLT, R0, -123, 1),
+ BPF_JMP32_IMM(BPF_JSLT, R0, -122, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ {
+ "JMP32_JSLT_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_JMP32_IMM(BPF_JSLT, R0, -12345678, 1),
+ BPF_JMP32_IMM(BPF_JSLT, R0, -12345677, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
+ },
+ /* BPF_JMP32 | BPF_JSLT | BPF_X */
+ {
+ "JMP32_JSLT_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
+ BPF_JMP32_REG(BPF_JSLT, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345677),
+ BPF_JMP32_REG(BPF_JSLT, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
+ },
+ /* BPF_JMP32 | BPF_JSLE | BPF_K */
+ {
+ "JMP32_JSLE_K: Small immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -123),
+ BPF_JMP32_IMM(BPF_JSLE, R0, -124, 1),
+ BPF_JMP32_IMM(BPF_JSLE, R0, -123, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -123 } }
+ },
+ {
+ "JMP32_JSLE_K: Large immediate",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_JMP32_IMM(BPF_JSLE, R0, -12345679, 1),
+ BPF_JMP32_IMM(BPF_JSLE, R0, -12345678, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
+ },
+	/* BPF_JMP32 | BPF_JSLE | BPF_X */
+ {
+ "JMP32_JSLE_X",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, -12345678),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345679),
+ BPF_JMP32_REG(BPF_JSLE, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R1, -12345678),
+ BPF_JMP32_REG(BPF_JSLE, R0, R1, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, -12345678 } }
},
/* BPF_JMP | BPF_EXIT */
{
@@ -5223,6 +7192,14 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ { /* Mainly checking JIT here. */
+ "BPF_MAXINSNS: Very long conditional jump",
+ { },
+ INTERNAL | FLAG_NO_DATA,
+ { },
+ { { 0, 1 } },
+ .fill_helper = bpf_fill_long_jmp,
+ },
{
"JMP_JA: Jump, gap, jump, ...",
{ },
@@ -6639,7 +8616,7 @@ static int __run_one(const struct bpf_prog *fp, const void *data,
start = ktime_get_ns();
for (i = 0; i < runs; i++)
- ret = BPF_PROG_RUN(fp, data);
+ ret = bpf_prog_run(fp, data);
finish = ktime_get_ns();
migrate_enable();
@@ -6659,7 +8636,14 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
u64 duration;
u32 ret;
- if (test->test[i].data_size == 0 &&
+ /*
+ * NOTE: Several sub-tests may be present, in which case
+ * a zero {data_size, result} tuple indicates the end of
+ * the sub-test array. The first test is always run,
+ * even if both data_size and result happen to be zero.
+ */
+ if (i > 0 &&
+ test->test[i].data_size == 0 &&
test->test[i].result == 0)
break;
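Illustration (hypothetical values, not part of the patch) of the termination rule in the NOTE above: a multi-subtest entry ends with a zero tuple, while a lone zero-size tuple at index 0 is still executed.

	.test = { { 4, 0xcafe }, { 8, 0xbeef }, { 0, 0 } }	/* two sub-tests, then terminator */
	.test = { { 0, -123 } }				/* index 0: always run, never a terminator */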
@@ -7005,8 +8989,248 @@ static __init int test_bpf(void)
return err_cnt ? -EINVAL : 0;
}
+struct tail_call_test {
+ const char *descr;
+ struct bpf_insn insns[MAX_INSNS];
+ int result;
+ int stack_depth;
+};
+
+/*
+ * Magic marker used in test snippets for tail calls below.
+ * BPF_LD/MOV to R2 and R3 with this immediate value is replaced
+ * with the proper values by the test runner.
+ */
+#define TAIL_CALL_MARKER 0x7a11ca11
+
+/* Special offset to indicate a NULL call target */
+#define TAIL_CALL_NULL 0x7fff
+
+/* Special offset to indicate an out-of-range index */
+#define TAIL_CALL_INVALID 0x7ffe
+
+#define TAIL_CALL(offset) \
+ BPF_LD_IMM64(R2, TAIL_CALL_MARKER), \
+ BPF_RAW_INSN(BPF_ALU | BPF_MOV | BPF_K, R3, 0, \
+ offset, TAIL_CALL_MARKER), \
+ BPF_JMP_IMM(BPF_TAIL_CALL, 0, 0, 0)
+
+/*
+ * Tail call tests. Each test case may call any other test in the table,
+ * including itself, specified as a relative index offset from the calling
+ * test. The index TAIL_CALL_NULL can be used to specify a NULL target
+ * function to test the JIT error path. Similarly, the index TAIL_CALL_INVALID
+ * results in a target index that is out of range.
+ */
+static struct tail_call_test tail_call_tests[] = {
+ {
+ "Tail call leaf",
+ .insns = {
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ BPF_ALU64_IMM(BPF_ADD, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = 1,
+ },
+ {
+ "Tail call 2",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, R1, 2),
+ TAIL_CALL(-1),
+ BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .result = 3,
+ },
+ {
+ "Tail call 3",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, R1, 3),
+ TAIL_CALL(-1),
+ BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .result = 6,
+ },
+ {
+ "Tail call 4",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, R1, 4),
+ TAIL_CALL(-1),
+ BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ BPF_EXIT_INSN(),
+ },
+ .result = 10,
+ },
+ {
+ "Tail call error path, max count reached",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, R1, 1),
+ BPF_ALU64_REG(BPF_MOV, R0, R1),
+ TAIL_CALL(0),
+ BPF_EXIT_INSN(),
+ },
+ .result = MAX_TAIL_CALL_CNT + 1,
+ },
+ {
+ "Tail call error path, NULL target",
+ .insns = {
+ BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ TAIL_CALL(TAIL_CALL_NULL),
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = 1,
+ },
+ {
+ "Tail call error path, index out of range",
+ .insns = {
+ BPF_ALU64_IMM(BPF_MOV, R0, -1),
+ TAIL_CALL(TAIL_CALL_INVALID),
+ BPF_ALU64_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .result = 1,
+ },
+};
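Worked example (not part of the patch): test_tail_calls() below runs each program with a NULL context, so R1 starts at 0 and the chained entries above compute their .result values as follows.

	/* "Tail call 4" -> (offset -1) "Tail call 3" -> "Tail call 2" -> "Tail call leaf"
	 *
	 *   R1 = 0 + 4 + 3 + 2 = 9	each entry adds its constant to R1
	 *   R0 = R1 + 1 = 10		the leaf returns R1 + 1
	 *
	 * which matches .result = 10; "Tail call 3" and "Tail call 2" give 6 and 3
	 * the same way.
	 */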
+
+static void __init destroy_tail_call_tests(struct bpf_array *progs)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++)
+ if (progs->ptrs[i])
+ bpf_prog_free(progs->ptrs[i]);
+ kfree(progs);
+}
+
+static __init int prepare_tail_call_tests(struct bpf_array **pprogs)
+{
+ int ntests = ARRAY_SIZE(tail_call_tests);
+ struct bpf_array *progs;
+ int which, err;
+
+	/* Allocate the table of programs to be used for tail calls */
+ progs = kzalloc(sizeof(*progs) + (ntests + 1) * sizeof(progs->ptrs[0]),
+ GFP_KERNEL);
+ if (!progs)
+ goto out_nomem;
+
+ /* Create all eBPF programs and populate the table */
+ for (which = 0; which < ntests; which++) {
+ struct tail_call_test *test = &tail_call_tests[which];
+ struct bpf_prog *fp;
+ int len, i;
+
+ /* Compute the number of program instructions */
+ for (len = 0; len < MAX_INSNS; len++) {
+ struct bpf_insn *insn = &test->insns[len];
+
+ if (len < MAX_INSNS - 1 &&
+ insn->code == (BPF_LD | BPF_DW | BPF_IMM))
+ len++;
+ if (insn->code == 0)
+ break;
+ }
+
+ /* Allocate and initialize the program */
+ fp = bpf_prog_alloc(bpf_prog_size(len), 0);
+ if (!fp)
+ goto out_nomem;
+
+ fp->len = len;
+ fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
+ fp->aux->stack_depth = test->stack_depth;
+ memcpy(fp->insnsi, test->insns, len * sizeof(struct bpf_insn));
+
+ /* Relocate runtime tail call offsets and addresses */
+ for (i = 0; i < len; i++) {
+ struct bpf_insn *insn = &fp->insnsi[i];
+
+ if (insn->imm != TAIL_CALL_MARKER)
+ continue;
+
+ switch (insn->code) {
+ case BPF_LD | BPF_DW | BPF_IMM:
+ insn[0].imm = (u32)(long)progs;
+ insn[1].imm = ((u64)(long)progs) >> 32;
+ break;
+
+ case BPF_ALU | BPF_MOV | BPF_K:
+ if (insn->off == TAIL_CALL_NULL)
+ insn->imm = ntests;
+ else if (insn->off == TAIL_CALL_INVALID)
+ insn->imm = ntests + 1;
+ else
+ insn->imm = which + insn->off;
+ insn->off = 0;
+ }
+ }
+
+ fp = bpf_prog_select_runtime(fp, &err);
+ if (err)
+ goto out_err;
+
+ progs->ptrs[which] = fp;
+ }
+
+ /* The last entry contains a NULL program pointer */
+ progs->map.max_entries = ntests + 1;
+ *pprogs = progs;
+ return 0;
+
+out_nomem:
+ err = -ENOMEM;
+
+out_err:
+ if (progs)
+ destroy_tail_call_tests(progs);
+ return err;
+}
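Sketch of why the loops above treat BPF_LD_IMM64 specially: the 64-bit load occupies two struct bpf_insn slots, so the length scan skips an extra slot and the relocation fills both imm fields.

	/* BPF_LD_IMM64(R2, addr) expands to two instruction slots:
	 *   insn[0]: code = BPF_LD | BPF_DW | BPF_IMM, imm = lower 32 bits of addr
	 *   insn[1]: code = 0 (continuation slot),     imm = upper 32 bits of addr
	 */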
+
+static __init int test_tail_calls(struct bpf_array *progs)
+{
+ int i, err_cnt = 0, pass_cnt = 0;
+ int jit_cnt = 0, run_cnt = 0;
+
+ for (i = 0; i < ARRAY_SIZE(tail_call_tests); i++) {
+ struct tail_call_test *test = &tail_call_tests[i];
+ struct bpf_prog *fp = progs->ptrs[i];
+ u64 duration;
+ int ret;
+
+ cond_resched();
+
+ pr_info("#%d %s ", i, test->descr);
+ if (!fp) {
+ err_cnt++;
+ continue;
+ }
+ pr_cont("jited:%u ", fp->jited);
+
+ run_cnt++;
+ if (fp->jited)
+ jit_cnt++;
+
+ ret = __run_one(fp, NULL, MAX_TESTRUNS, &duration);
+ if (ret == test->result) {
+ pr_cont("%lld PASS", duration);
+ pass_cnt++;
+ } else {
+ pr_cont("ret %d != %d FAIL", ret, test->result);
+ err_cnt++;
+ }
+ }
+
+ pr_info("%s: Summary: %d PASSED, %d FAILED, [%d/%d JIT'ed]\n",
+ __func__, pass_cnt, err_cnt, jit_cnt, run_cnt);
+
+ return err_cnt ? -EINVAL : 0;
+}
+
static int __init test_bpf_init(void)
{
+ struct bpf_array *progs = NULL;
int ret;
ret = prepare_bpf_tests();
@@ -7018,6 +9242,14 @@ static int __init test_bpf_init(void)
if (ret)
return ret;
+ ret = prepare_tail_call_tests(&progs);
+ if (ret)
+ return ret;
+ ret = test_tail_calls(progs);
+ destroy_tail_call_tests(progs);
+ if (ret)
+ return ret;
+
return test_skb_segment();
}
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 8f7b0b2f6e11..8835e0784578 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -53,7 +53,6 @@ static int kasan_test_init(struct kunit *test)
}
multishot = kasan_save_enable_multi_shot();
- kasan_set_tagging_report_once(false);
fail_data.report_found = false;
kunit_add_named_resource(test, NULL, NULL, &resource,
"kasan_data", &fail_data);
@@ -62,7 +61,6 @@ static int kasan_test_init(struct kunit *test)
static void kasan_test_exit(struct kunit *test)
{
- kasan_set_tagging_report_once(true);
kasan_restore_multi_shot(multishot);
KUNIT_EXPECT_FALSE(test, fail_data.report_found);
}
@@ -122,12 +120,28 @@ static void kasan_test_exit(struct kunit *test)
static void kmalloc_oob_right(struct kunit *test)
{
char *ptr;
- size_t size = 123;
+ size_t size = 128 - KASAN_GRANULE_SIZE - 5;
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 'x');
+ /*
+ * An unaligned access past the requested kmalloc size.
+	 * Only generic KASAN can precisely detect such accesses.
+ */
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');
+
+ /*
+ * An aligned access into the first out-of-bounds granule that falls
+ * within the aligned kmalloc object.
+ */
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');
+
+ /* Out-of-bounds access past the aligned kmalloc object. */
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
+ ptr[size + KASAN_GRANULE_SIZE + 5]);
+
kfree(ptr);
}
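Worked example for the three accesses above, assuming generic KASAN with KASAN_GRANULE_SIZE == 8 (kmalloc() rounds the request up to a 128-byte object):

	size = 128 - 8 - 5 = 115
	ptr[115]	/* size:         past the request, same granule as valid data */
	ptr[120]	/* size + 5:     first granule-aligned OOB byte, still inside the 128-byte object */
	ptr[128]	/* size + 8 + 5: past the kmalloc object entirely */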
@@ -151,7 +165,7 @@ static void kmalloc_node_oob_right(struct kunit *test)
ptr = kmalloc_node(size, GFP_KERNEL, 0);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
kfree(ptr);
}
@@ -187,7 +201,7 @@ static void kmalloc_pagealloc_uaf(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
kfree(ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}
static void kmalloc_pagealloc_invalid_free(struct kunit *test)
@@ -221,7 +235,7 @@ static void pagealloc_oob_right(struct kunit *test)
ptr = page_address(pages);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
+ KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
free_pages((unsigned long)ptr, order);
}
@@ -236,7 +250,7 @@ static void pagealloc_uaf(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
free_pages((unsigned long)ptr, order);
- KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}
static void kmalloc_large_oob_right(struct kunit *test)
@@ -412,64 +426,70 @@ static void kmalloc_uaf_16(struct kunit *test)
kfree(ptr1);
}
+/*
+ * Note: in the memset tests below, the written range touches both valid and
+ * invalid memory. This makes sure that the instrumentation does not only check
+ * the starting address but the whole range.
+ */
+
static void kmalloc_oob_memset_2(struct kunit *test)
{
char *ptr;
- size_t size = 8;
+ size_t size = 128 - KASAN_GRANULE_SIZE;
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 7 + OOB_TAG_OFF, 0, 2));
+ KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, 2));
kfree(ptr);
}
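Similarly, a worked example for the memset tests (again assuming KASAN_GRANULE_SIZE == 8) showing how the written range spans valid and invalid memory, as the comment above describes:

	size = 128 - 8 = 120
	memset(ptr + size - 1, 0, 2)	/* writes ptr[119] (valid) and ptr[120] (out of bounds) */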
static void kmalloc_oob_memset_4(struct kunit *test)
{
char *ptr;
- size_t size = 8;
+ size_t size = 128 - KASAN_GRANULE_SIZE;
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 5 + OOB_TAG_OFF, 0, 4));
+ KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, 4));
kfree(ptr);
}
-
static void kmalloc_oob_memset_8(struct kunit *test)
{
char *ptr;
- size_t size = 8;
+ size_t size = 128 - KASAN_GRANULE_SIZE;
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 8));
+ KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, 8));
kfree(ptr);
}
static void kmalloc_oob_memset_16(struct kunit *test)
{
char *ptr;
- size_t size = 16;
+ size_t size = 128 - KASAN_GRANULE_SIZE;
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 16));
+ KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, 16));
kfree(ptr);
}
static void kmalloc_oob_in_memset(struct kunit *test)
{
char *ptr;
- size_t size = 666;
+ size_t size = 128 - KASAN_GRANULE_SIZE;
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size + 5 + OOB_TAG_OFF));
+ KUNIT_EXPECT_KASAN_FAIL(test,
+ memset(ptr, 0, size + KASAN_GRANULE_SIZE));
kfree(ptr);
}
@@ -479,11 +499,17 @@ static void kmalloc_memmove_invalid_size(struct kunit *test)
size_t size = 64;
volatile size_t invalid_size = -2;
+ /*
+ * Hardware tag-based mode doesn't check memmove for negative size.
+ * As a result, this test introduces a side-effect memory corruption,
+ * which can result in a crash.
+ */
+ KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);
+
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
memset((char *)ptr, 0, 64);
-
KUNIT_EXPECT_KASAN_FAIL(test,
memmove((char *)ptr, (char *)ptr + 4, invalid_size));
kfree(ptr);
@@ -498,7 +524,7 @@ static void kmalloc_uaf(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
kfree(ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, *(ptr + 8) = 'x');
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
}
static void kmalloc_uaf_memset(struct kunit *test)
@@ -506,6 +532,12 @@ static void kmalloc_uaf_memset(struct kunit *test)
char *ptr;
size_t size = 33;
+ /*
+ * Only generic KASAN uses quarantine, which is required to avoid a
+ * kernel memory corruption this test causes.
+ */
+ KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
+
ptr = kmalloc(size, GFP_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
@@ -537,7 +569,7 @@ again:
goto again;
}
- KUNIT_EXPECT_KASAN_FAIL(test, ptr1[40] = 'x');
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);
kfree(ptr2);
@@ -684,7 +716,7 @@ static void ksize_unpoisons_memory(struct kunit *test)
ptr[size] = 'x';
/* This one must. */
- KUNIT_EXPECT_KASAN_FAIL(test, ptr[real_size] = 'y');
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size]);
kfree(ptr);
}
@@ -703,8 +735,8 @@ static void ksize_uaf(struct kunit *test)
kfree(ptr);
KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
- KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *ptr);
- KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *(ptr + size));
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
+ KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
}
static void kasan_stack_oob(struct kunit *test)
diff --git a/lib/test_kasan_module.c b/lib/test_kasan_module.c
index f1017f345d6c..7ebf433edef3 100644
--- a/lib/test_kasan_module.c
+++ b/lib/test_kasan_module.c
@@ -15,13 +15,11 @@
#include "../mm/kasan/kasan.h"
-#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
-
static noinline void __init copy_user_test(void)
{
char *kmem;
char __user *usermem;
- size_t size = 10;
+ size_t size = 128 - KASAN_GRANULE_SIZE;
int __maybe_unused unused;
kmem = kmalloc(size, GFP_KERNEL);
@@ -38,25 +36,25 @@ static noinline void __init copy_user_test(void)
}
pr_info("out-of-bounds in copy_from_user()\n");
- unused = copy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
+ unused = copy_from_user(kmem, usermem, size + 1);
pr_info("out-of-bounds in copy_to_user()\n");
- unused = copy_to_user(usermem, kmem, size + 1 + OOB_TAG_OFF);
+ unused = copy_to_user(usermem, kmem, size + 1);
pr_info("out-of-bounds in __copy_from_user()\n");
- unused = __copy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
+ unused = __copy_from_user(kmem, usermem, size + 1);
pr_info("out-of-bounds in __copy_to_user()\n");
- unused = __copy_to_user(usermem, kmem, size + 1 + OOB_TAG_OFF);
+ unused = __copy_to_user(usermem, kmem, size + 1);
pr_info("out-of-bounds in __copy_from_user_inatomic()\n");
- unused = __copy_from_user_inatomic(kmem, usermem, size + 1 + OOB_TAG_OFF);
+ unused = __copy_from_user_inatomic(kmem, usermem, size + 1);
pr_info("out-of-bounds in __copy_to_user_inatomic()\n");
- unused = __copy_to_user_inatomic(usermem, kmem, size + 1 + OOB_TAG_OFF);
+ unused = __copy_to_user_inatomic(usermem, kmem, size + 1);
pr_info("out-of-bounds in strncpy_from_user()\n");
- unused = strncpy_from_user(kmem, usermem, size + 1 + OOB_TAG_OFF);
+ unused = strncpy_from_user(kmem, usermem, size + 1);
vm_munmap((unsigned long)usermem, PAGE_SIZE);
kfree(kmem);
@@ -73,7 +71,7 @@ static noinline void __init kasan_rcu_reclaim(struct rcu_head *rp)
struct kasan_rcu_info, rcu);
kfree(fp);
- fp->i = 1;
+ ((volatile struct kasan_rcu_info *)fp)->i;
}
static noinline void __init kasan_rcu_uaf(void)
diff --git a/lib/test_lockup.c b/lib/test_lockup.c
index 864554e76973..906b598740a7 100644
--- a/lib/test_lockup.c
+++ b/lib/test_lockup.c
@@ -485,13 +485,13 @@ static int __init test_lockup_init(void)
offsetof(spinlock_t, lock.wait_lock.magic),
SPINLOCK_MAGIC) ||
test_magic(lock_rwlock_ptr,
- offsetof(rwlock_t, rtmutex.wait_lock.magic),
+ offsetof(rwlock_t, rwbase.rtmutex.wait_lock.magic),
SPINLOCK_MAGIC) ||
test_magic(lock_mutex_ptr,
- offsetof(struct mutex, lock.wait_lock.magic),
+ offsetof(struct mutex, rtmutex.wait_lock.magic),
SPINLOCK_MAGIC) ||
test_magic(lock_rwsem_ptr,
- offsetof(struct rw_semaphore, rtmutex.wait_lock.magic),
+ offsetof(struct rw_semaphore, rwbase.rtmutex.wait_lock.magic),
SPINLOCK_MAGIC))
return -EINVAL;
#else
@@ -502,7 +502,7 @@ static int __init test_lockup_init(void)
offsetof(rwlock_t, magic),
RWLOCK_MAGIC) ||
test_magic(lock_mutex_ptr,
- offsetof(struct mutex, wait_lock.rlock.magic),
+ offsetof(struct mutex, wait_lock.magic),
SPINLOCK_MAGIC) ||
test_magic(lock_rwsem_ptr,
offsetof(struct rw_semaphore, wait_lock.magic),
diff --git a/lib/test_scanf.c b/lib/test_scanf.c
index 84fe09eaf55e..abae88848972 100644
--- a/lib/test_scanf.c
+++ b/lib/test_scanf.c
@@ -271,7 +271,7 @@ static u32 __init next_test_random(u32 max_bits)
{
u32 n_bits = hweight32(prandom_u32_state(&rnd_state)) % (max_bits + 1);
- return prandom_u32_state(&rnd_state) & (UINT_MAX >> (32 - n_bits));
+ return prandom_u32_state(&rnd_state) & GENMASK(n_bits, 0);
}
static unsigned long long __init next_test_random_ull(void)
@@ -280,7 +280,7 @@ static unsigned long long __init next_test_random_ull(void)
u32 n_bits = (hweight32(rand1) * 3) % 64;
u64 val = (u64)prandom_u32_state(&rnd_state) * rand1;
- return val & (ULLONG_MAX >> (64 - n_bits));
+ return val & GENMASK_ULL(n_bits, 0);
}
#define random_for_type(T) \
diff --git a/lib/test_stackinit.c b/lib/test_stackinit.c
index f93b1e145ada..a3c74e6a21ff 100644
--- a/lib/test_stackinit.c
+++ b/lib/test_stackinit.c
@@ -1,8 +1,13 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Test cases for compiler-based stack variable zeroing via future
- * compiler flags or CONFIG_GCC_PLUGIN_STRUCTLEAK*.
+ * Test cases for compiler-based stack variable zeroing via
+ * -ftrivial-auto-var-init={zero,pattern} or CONFIG_GCC_PLUGIN_STRUCTLEAK*.
+ *
+ * External build example:
+ * clang -O2 -Wall -ftrivial-auto-var-init=pattern \
+ * -o test_stackinit test_stackinit.c
*/
+#ifdef __KERNEL__
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
@@ -10,6 +15,63 @@
#include <linux/module.h>
#include <linux/string.h>
+#else
+
+/* Userspace headers. */
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <sys/types.h>
+
+/* Linux kernel-ism stubs for stand-alone userspace build. */
+#define KBUILD_MODNAME "stackinit"
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warn(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) fprintf(stdout, pr_fmt(fmt), ##__VA_ARGS__)
+#define __init /**/
+#define __exit /**/
+#define __user /**/
+#define noinline __attribute__((__noinline__))
+#define __aligned(x) __attribute__((__aligned__(x)))
+#ifdef __clang__
+# define __compiletime_error(message) /**/
+#else
+# define __compiletime_error(message) __attribute__((__error__(message)))
+#endif
+#define __compiletime_assert(condition, msg, prefix, suffix) \
+ do { \
+ extern void prefix ## suffix(void) __compiletime_error(msg); \
+ if (!(condition)) \
+ prefix ## suffix(); \
+ } while (0)
+#define _compiletime_assert(condition, msg, prefix, suffix) \
+ __compiletime_assert(condition, msg, prefix, suffix)
+#define compiletime_assert(condition, msg) \
+ _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
+#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg)
+#define BUILD_BUG_ON(condition) \
+ BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition)
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+#define module_init(func) static int (*do_init)(void) = func
+#define module_exit(func) static void (*do_exit)(void) = func
+#define MODULE_LICENSE(str) int main(void) { \
+ int rc; \
+ /* License: str */ \
+ rc = do_init(); \
+ if (rc == 0) \
+ do_exit(); \
+ return rc; \
+ }
+
+#endif /* __KERNEL__ */
+
/* Exfiltration buffer. */
#define MAX_VAR_SIZE 128
static u8 check_buf[MAX_VAR_SIZE];
@@ -33,6 +95,10 @@ static bool range_contains(char *haystack_start, size_t haystack_size,
return false;
}
+/* Whether the test is expected to fail. */
+#define WANT_SUCCESS 0
+#define XFAIL 1
+
#define DO_NOTHING_TYPE_SCALAR(var_type) var_type
#define DO_NOTHING_TYPE_STRING(var_type) void
#define DO_NOTHING_TYPE_STRUCT(var_type) void
@@ -58,34 +124,73 @@ static bool range_contains(char *haystack_start, size_t haystack_size,
#define INIT_CLONE_STRING [FILL_SIZE_STRING]
#define INIT_CLONE_STRUCT /**/
-#define INIT_SCALAR_none /**/
-#define INIT_SCALAR_zero = 0
+#define ZERO_CLONE_SCALAR(zero) memset(&(zero), 0x00, sizeof(zero))
+#define ZERO_CLONE_STRING(zero) memset(&(zero), 0x00, sizeof(zero))
+/*
+ * For the struct, intentionally poison padding to see if it gets
+ * copied out in direct assignments.
+ */
+#define ZERO_CLONE_STRUCT(zero) \
+ do { \
+ memset(&(zero), 0xFF, sizeof(zero)); \
+ zero.one = 0; \
+ zero.two = 0; \
+ zero.three = 0; \
+ zero.four = 0; \
+ } while (0)
+
+#define INIT_SCALAR_none(var_type) /**/
+#define INIT_SCALAR_zero(var_type) = 0
-#define INIT_STRING_none [FILL_SIZE_STRING] /**/
-#define INIT_STRING_zero [FILL_SIZE_STRING] = { }
+#define INIT_STRING_none(var_type) [FILL_SIZE_STRING] /**/
+#define INIT_STRING_zero(var_type) [FILL_SIZE_STRING] = { }
-#define INIT_STRUCT_none /**/
-#define INIT_STRUCT_zero = { }
-#define INIT_STRUCT_static_partial = { .two = 0, }
-#define INIT_STRUCT_static_all = { .one = arg->one, \
- .two = arg->two, \
- .three = arg->three, \
- .four = arg->four, \
+#define INIT_STRUCT_none(var_type) /**/
+#define INIT_STRUCT_zero(var_type) = { }
+
+
+#define __static_partial { .two = 0, }
+#define __static_all { .one = 0, \
+ .two = 0, \
+ .three = 0, \
+ .four = 0, \
}
-#define INIT_STRUCT_dynamic_partial = { .two = arg->two, }
-#define INIT_STRUCT_dynamic_all = { .one = arg->one, \
- .two = arg->two, \
- .three = arg->three, \
- .four = arg->four, \
+#define __dynamic_partial { .two = arg->two, }
+#define __dynamic_all { .one = arg->one, \
+ .two = arg->two, \
+ .three = arg->three, \
+ .four = arg->four, \
}
-#define INIT_STRUCT_runtime_partial ; \
- var.two = 0
-#define INIT_STRUCT_runtime_all ; \
- var.one = 0; \
+#define __runtime_partial var.two = 0
+#define __runtime_all var.one = 0; \
var.two = 0; \
var.three = 0; \
- memset(&var.four, 0, \
- sizeof(var.four))
+ var.four = 0
+
+#define INIT_STRUCT_static_partial(var_type) \
+ = __static_partial
+#define INIT_STRUCT_static_all(var_type) \
+ = __static_all
+#define INIT_STRUCT_dynamic_partial(var_type) \
+ = __dynamic_partial
+#define INIT_STRUCT_dynamic_all(var_type) \
+ = __dynamic_all
+#define INIT_STRUCT_runtime_partial(var_type) \
+ ; __runtime_partial
+#define INIT_STRUCT_runtime_all(var_type) \
+ ; __runtime_all
+
+#define INIT_STRUCT_assigned_static_partial(var_type) \
+ ; var = (var_type)__static_partial
+#define INIT_STRUCT_assigned_static_all(var_type) \
+ ; var = (var_type)__static_all
+#define INIT_STRUCT_assigned_dynamic_partial(var_type) \
+ ; var = (var_type)__dynamic_partial
+#define INIT_STRUCT_assigned_dynamic_all(var_type) \
+ ; var = (var_type)__dynamic_all
+
+#define INIT_STRUCT_assigned_copy(var_type) \
+ ; var = *(arg)
/*
* @name: unique string name for the test
@@ -106,7 +211,7 @@ static noinline __init int test_ ## name (void) \
BUILD_BUG_ON(sizeof(zero) > MAX_VAR_SIZE); \
\
/* Fill clone type with zero for per-field init. */ \
- memset(&zero, 0x00, sizeof(zero)); \
+ ZERO_CLONE_ ## which(zero); \
/* Clear entire check buffer for 0xFF overlap test. */ \
memset(check_buf, 0x00, sizeof(check_buf)); \
/* Fill stack with 0xFF. */ \
@@ -149,7 +254,7 @@ static noinline __init int test_ ## name (void) \
return (xfail) ? 0 : 1; \
} \
}
-#define DEFINE_TEST(name, var_type, which, init_level) \
+#define DEFINE_TEST(name, var_type, which, init_level, xfail) \
/* no-op to force compiler into ignoring "uninitialized" vars */\
static noinline __init DO_NOTHING_TYPE_ ## which(var_type) \
do_nothing_ ## name(var_type *ptr) \
@@ -165,7 +270,8 @@ static noinline __init int leaf_ ## name(unsigned long sp, \
var_type *arg) \
{ \
char buf[VAR_BUFFER]; \
- var_type var INIT_ ## which ## _ ## init_level; \
+ var_type var \
+ INIT_ ## which ## _ ## init_level(var_type); \
\
target_start = &var; \
target_size = sizeof(var); \
@@ -191,7 +297,7 @@ static noinline __init int leaf_ ## name(unsigned long sp, \
\
return (int)buf[0] | (int)buf[sizeof(buf) - 1]; \
} \
-DEFINE_TEST_DRIVER(name, var_type, which, 0)
+DEFINE_TEST_DRIVER(name, var_type, which, xfail)
/* Structure with no padding. */
struct test_packed {
@@ -210,18 +316,13 @@ struct test_small_hole {
unsigned long four;
};
-/* Try to trigger unhandled padding in a structure. */
-struct test_aligned {
- u32 internal1;
- u64 internal2;
-} __aligned(64);
-
+/* Trigger unhandled padding in a structure. */
struct test_big_hole {
u8 one;
u8 two;
u8 three;
/* 61 byte padding hole here. */
- struct test_aligned four;
+ u8 four __aligned(64);
} __aligned(64);
struct test_trailing_hole {
@@ -240,42 +341,50 @@ struct test_user {
unsigned long four;
};
-#define DEFINE_SCALAR_TEST(name, init) \
- DEFINE_TEST(name ## _ ## init, name, SCALAR, init)
+#define DEFINE_SCALAR_TEST(name, init, xfail) \
+ DEFINE_TEST(name ## _ ## init, name, SCALAR, \
+ init, xfail)
-#define DEFINE_SCALAR_TESTS(init) \
- DEFINE_SCALAR_TEST(u8, init); \
- DEFINE_SCALAR_TEST(u16, init); \
- DEFINE_SCALAR_TEST(u32, init); \
- DEFINE_SCALAR_TEST(u64, init); \
- DEFINE_TEST(char_array_ ## init, unsigned char, STRING, init)
+#define DEFINE_SCALAR_TESTS(init, xfail) \
+ DEFINE_SCALAR_TEST(u8, init, xfail); \
+ DEFINE_SCALAR_TEST(u16, init, xfail); \
+ DEFINE_SCALAR_TEST(u32, init, xfail); \
+ DEFINE_SCALAR_TEST(u64, init, xfail); \
+ DEFINE_TEST(char_array_ ## init, unsigned char, \
+ STRING, init, xfail)
-#define DEFINE_STRUCT_TEST(name, init) \
+#define DEFINE_STRUCT_TEST(name, init, xfail) \
DEFINE_TEST(name ## _ ## init, \
- struct test_ ## name, STRUCT, init)
+ struct test_ ## name, STRUCT, init, \
+ xfail)
+
+#define DEFINE_STRUCT_TESTS(init, xfail) \
+ DEFINE_STRUCT_TEST(small_hole, init, xfail); \
+ DEFINE_STRUCT_TEST(big_hole, init, xfail); \
+ DEFINE_STRUCT_TEST(trailing_hole, init, xfail); \
+ DEFINE_STRUCT_TEST(packed, init, xfail)
-#define DEFINE_STRUCT_TESTS(init) \
- DEFINE_STRUCT_TEST(small_hole, init); \
- DEFINE_STRUCT_TEST(big_hole, init); \
- DEFINE_STRUCT_TEST(trailing_hole, init); \
- DEFINE_STRUCT_TEST(packed, init)
+#define DEFINE_STRUCT_INITIALIZER_TESTS(base) \
+ DEFINE_STRUCT_TESTS(base ## _ ## partial, \
+ WANT_SUCCESS); \
+ DEFINE_STRUCT_TESTS(base ## _ ## all, \
+ WANT_SUCCESS)
/* These should be fully initialized all the time! */
-DEFINE_SCALAR_TESTS(zero);
-DEFINE_STRUCT_TESTS(zero);
-/* Static initialization: padding may be left uninitialized. */
-DEFINE_STRUCT_TESTS(static_partial);
-DEFINE_STRUCT_TESTS(static_all);
-/* Dynamic initialization: padding may be left uninitialized. */
-DEFINE_STRUCT_TESTS(dynamic_partial);
-DEFINE_STRUCT_TESTS(dynamic_all);
-/* Runtime initialization: padding may be left uninitialized. */
-DEFINE_STRUCT_TESTS(runtime_partial);
-DEFINE_STRUCT_TESTS(runtime_all);
+DEFINE_SCALAR_TESTS(zero, WANT_SUCCESS);
+DEFINE_STRUCT_TESTS(zero, WANT_SUCCESS);
+/* Struct initializers: padding may be left uninitialized. */
+DEFINE_STRUCT_INITIALIZER_TESTS(static);
+DEFINE_STRUCT_INITIALIZER_TESTS(dynamic);
+DEFINE_STRUCT_INITIALIZER_TESTS(runtime);
+DEFINE_STRUCT_INITIALIZER_TESTS(assigned_static);
+DEFINE_STRUCT_INITIALIZER_TESTS(assigned_dynamic);
+DEFINE_STRUCT_TESTS(assigned_copy, XFAIL);
/* No initialization without compiler instrumentation. */
-DEFINE_SCALAR_TESTS(none);
-DEFINE_STRUCT_TESTS(none);
-DEFINE_TEST(user, struct test_user, STRUCT, none);
+DEFINE_SCALAR_TESTS(none, WANT_SUCCESS);
+DEFINE_STRUCT_TESTS(none, WANT_SUCCESS);
+/* Initialization of members with __user attribute. */
+DEFINE_TEST(user, struct test_user, STRUCT, none, WANT_SUCCESS);
/*
* Check two uses through a variable declaration outside either path,
@@ -285,6 +394,10 @@ DEFINE_TEST(user, struct test_user, STRUCT, none);
static int noinline __leaf_switch_none(int path, bool fill)
{
switch (path) {
+ /*
+ * This is intentionally unreachable. To silence the
+ * warning, build with -Wno-switch-unreachable
+ */
uint64_t var;
case 1:
@@ -334,8 +447,8 @@ static noinline __init int leaf_switch_2_none(unsigned long sp, bool fill,
* non-code areas (i.e. in a switch statement before the first "case").
* https://bugs.llvm.org/show_bug.cgi?id=44916
*/
-DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, 1);
-DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, 1);
+DEFINE_TEST_DRIVER(switch_1_none, uint64_t, SCALAR, XFAIL);
+DEFINE_TEST_DRIVER(switch_2_none, uint64_t, SCALAR, XFAIL);
static int __init test_stackinit_init(void)
{
@@ -361,12 +474,18 @@ static int __init test_stackinit_init(void)
test_structs(zero);
/* Padding here appears to be accidentally always initialized? */
test_structs(dynamic_partial);
+ test_structs(assigned_dynamic_partial);
/* Padding initialization depends on compiler behaviors. */
test_structs(static_partial);
test_structs(static_all);
test_structs(dynamic_all);
test_structs(runtime_partial);
test_structs(runtime_all);
+ test_structs(assigned_static_partial);
+ test_structs(assigned_static_all);
+ test_structs(assigned_dynamic_all);
+ /* Everything fails this since it effectively performs a memcpy(). */
+ test_structs(assigned_copy);
/* STRUCTLEAK_BYREF_ALL should cover everything from here down. */
test_scalars(none);
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index 01e9543de566..e14993bc84d2 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -35,6 +35,9 @@ __param(int, test_repeat_count, 1,
__param(int, test_loop_count, 1000000,
"Set test loop counter");
+__param(int, nr_pages, 0,
+ "Set number of pages for fix_size_alloc_test(default: 1)");
+
__param(int, run_test_mask, INT_MAX,
"Set tests specified in the mask.\n\n"
"\t\tid: 1, name: fix_size_alloc_test\n"
@@ -262,7 +265,7 @@ static int fix_size_alloc_test(void)
int i;
for (i = 0; i < test_loop_count; i++) {
- ptr = vmalloc(3 * PAGE_SIZE);
+		ptr = vmalloc((nr_pages > 0 ? nr_pages : 1) * PAGE_SIZE);
if (!ptr)
return -1;
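Usage sketch for the new parameter (hypothetical values): nr_pages only affects fix_size_alloc_test(), which is id 1 in run_test_mask, so a run exercising it could be started with:

	# modprobe test_vmalloc nr_pages=32 run_test_mask=1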
diff --git a/lib/ubsan.c b/lib/ubsan.c
index 26229973049d..bdc380ff5d5c 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
+#include <kunit/test-bug.h>
#include "ubsan.h"
@@ -141,6 +142,8 @@ static void ubsan_prologue(struct source_location *loc, const char *reason)
"========================================\n");
pr_err("UBSAN: %s in %s:%d:%d\n", reason, loc->file_name,
loc->line & LINE_MASK, loc->column & COLUMN_MASK);
+
+ kunit_fail_current_test("%s in %s", reason, loc->file_name);
}
static void ubsan_epilogue(void)