author     Ingo Molnar <mingo@kernel.org>	2019-12-25 10:41:37 +0100
committer  Ingo Molnar <mingo@kernel.org>	2019-12-25 10:41:37 +0100
commit     1e5f8a308551b9816588e12bb795aeadebe37c4a (patch)
tree       bd71fc796fed24a3b7cc99df4a1d1bdaecc2b387 /lib
parent     a5e37de90e67ac1072a9a44bd0cec9f5e98ded08 (diff)
parent     46cf053efec6a3a5f343fead837777efe8252a46 (diff)
download   linux-1e5f8a308551b9816588e12bb795aeadebe37c4a.tar.bz2
Merge tag 'v5.5-rc3' into sched/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'lib')
 lib/Kconfig          |   2
 lib/Kconfig.debug    | 455
 lib/Kconfig.kgdb     |   8
 lib/Makefile         |   2
 lib/bitmap.c         |  12
 lib/find_bit.c       |  14
 lib/genalloc.c       |   5
 lib/iov_iter.c       |   3
 lib/logic_pio.c      |  14
 lib/math/rational.c  |  63
 lib/raid6/unroll.awk |   2
 lib/sbitmap.c        |   2
 lib/test_bitmap.c    | 202
 lib/test_meminit.c   |  20
 lib/ubsan.c          |  64
 15 files changed, 532 insertions(+), 336 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 6d7c5877c9f1..6e790dc55c5b 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -572,7 +572,7 @@ config OID_REGISTRY
 	  Enable fast lookup object identifier registry.
 
 config UCS2_STRING
-	tristate
+	tristate
 
 #
 # generic vdso
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d03e2b1570ca..6859f523517b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -128,8 +128,8 @@ config DYNAMIC_DEBUG
 	  lineno : line number of the debug statement
 	  module : module that contains the debug statement
 	  function : function that contains the debug statement
-	  flags : '=p' means the line is turned 'on' for printing
-	  format : the format used for the debug statement
+	  flags : '=p' means the line is turned 'on' for printing
+	  format : the format used for the debug statement
 
 	  From a live system:
 
@@ -173,6 +173,15 @@ config SYMBOLIC_ERRNAME
 	  of the number 28. It makes the kernel image slightly larger
 	  (about 3KB), but can make the kernel logs easier to read.
 
+config DEBUG_BUGVERBOSE
+	bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
+	depends on BUG && (GENERIC_BUG || HAVE_DEBUG_BUGVERBOSE)
+	default y
+	help
+	  Say Y here to make BUG() panics output the file name and line number
+	  of the BUG call as well as the EIP and oops trace. This aids
+	  debugging but costs about 70-100K of memory.
+
 endmenu # "printk and dmesg options"
 
 menu "Compile-time checks and compiler options"
@@ -181,7 +190,7 @@ config DEBUG_INFO
 	bool "Compile the kernel with debug info"
 	depends on DEBUG_KERNEL && !COMPILE_TEST
 	help
-	  If you say Y here the resulting kernel image will include
+	  If you say Y here the resulting kernel image will include
 	  debugging info resulting in a larger kernel image.
 	  This adds debug symbols to the kernel and modules (gcc -g), and
 	  is needed if you intend to use kernel crashdump or binary object
@@ -278,25 +287,13 @@ config STRIP_ASM_SYMS
 	  get_wchan() and suchlike.
 
 config READABLE_ASM
-	bool "Generate readable assembler code"
-	depends on DEBUG_KERNEL
-	help
-	  Disable some compiler optimizations that tend to generate human unreadable
-	  assembler output. This may make the kernel slightly slower, but it helps
-	  to keep kernel developers who have to stare a lot at assembler listings
-	  sane.
-
-config DEBUG_FS
-	bool "Debug Filesystem"
+	bool "Generate readable assembler code"
+	depends on DEBUG_KERNEL
 	help
-	  debugfs is a virtual file system that kernel developers use to put
-	  debugging files into. Enable this option to be able to read and
-	  write to these files.
-
-	  For detailed documentation on the debugfs API, see
-	  Documentation/filesystems/.
-
-	  If unsure, say N.
+	  Disable some compiler optimizations that tend to generate human unreadable
+	  assembler output. This may make the kernel slightly slower, but it helps
+	  to keep kernel developers who have to stare a lot at assembler listings
+	  sane.
 
 config HEADERS_INSTALL
 	bool "Install uapi headers to usr/include"
@@ -399,6 +396,8 @@ config DEBUG_FORCE_WEAK_PER_CPU
 
 endmenu # "Compiler options"
 
+menu "Generic Kernel Debugging Instruments"
+
 config MAGIC_SYSRQ
 	bool "Magic SysRq key"
 	depends on !UML
@@ -432,6 +431,24 @@ config MAGIC_SYSRQ_SERIAL
 	  This option allows you to decide whether you want to enable the
 	  magic SysRq key.
 
+config DEBUG_FS
+	bool "Debug Filesystem"
+	help
+	  debugfs is a virtual file system that kernel developers use to put
+	  debugging files into. Enable this option to be able to read and
+	  write to these files.
+
+	  For detailed documentation on the debugfs API, see
+	  Documentation/filesystems/.
+
+	  If unsure, say N.
+
+source "lib/Kconfig.kgdb"
+
+source "lib/Kconfig.ubsan"
+
+endmenu
+
 config DEBUG_KERNEL
 	bool "Kernel debugging"
 	help
@@ -506,11 +523,11 @@ config DEBUG_OBJECTS_PERCPU_COUNTER
 
 config DEBUG_OBJECTS_ENABLE_DEFAULT
 	int "debug_objects bootup default value (0-1)"
-	range 0 1
-	default "1"
-	depends on DEBUG_OBJECTS
-	help
-	  Debug objects boot parameter default value
+	range 0 1
+	default "1"
+	depends on DEBUG_OBJECTS
+	help
+	  Debug objects boot parameter default value
 
 config DEBUG_SLAB
 	bool "Debug slab memory allocations"
@@ -624,12 +641,24 @@ config DEBUG_STACK_USAGE
 
 	  This option will slow down process creation somewhat.
 
+config SCHED_STACK_END_CHECK
+	bool "Detect stack corruption on calls to schedule()"
+	depends on DEBUG_KERNEL
+	default n
+	help
+	  This option checks for a stack overrun on calls to schedule().
+	  If the stack end location is found to be over written always panic as
+	  the content of the corrupted region can no longer be trusted.
+	  This is to ensure no erroneous behaviour occurs which could result in
+	  data corruption or a sporadic crash at a later stage once the region
+	  is examined. The runtime overhead introduced is minimal.
+
 config DEBUG_VM
 	bool "Debug VM"
 	depends on DEBUG_KERNEL
 	help
 	  Enable this to turn on extended checks in the virtual-memory system
-	  that may impact performance.
+	  that may impact performance.
 
 	  If unsure, say N.
 
@@ -756,53 +785,6 @@ source "lib/Kconfig.kasan"
 
 endmenu # "Memory Debugging"
 
-config ARCH_HAS_KCOV
-	bool
-	help
-	  An architecture should select this when it can successfully
-	  build and run with CONFIG_KCOV. This typically requires
-	  disabling instrumentation for some early boot code.
-
-config CC_HAS_SANCOV_TRACE_PC
-	def_bool $(cc-option,-fsanitize-coverage=trace-pc)
-
-config KCOV
-	bool "Code coverage for fuzzing"
-	depends on ARCH_HAS_KCOV
-	depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
-	select DEBUG_FS
-	select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
-	help
-	  KCOV exposes kernel code coverage information in a form suitable
-	  for coverage-guided fuzzing (randomized testing).
-
-	  If RANDOMIZE_BASE is enabled, PC values will not be stable across
-	  different machines and across reboots. If you need stable PC values,
-	  disable RANDOMIZE_BASE.
-
-	  For more details, see Documentation/dev-tools/kcov.rst.
-
-config KCOV_ENABLE_COMPARISONS
-	bool "Enable comparison operands collection by KCOV"
-	depends on KCOV
-	depends on $(cc-option,-fsanitize-coverage=trace-cmp)
-	help
-	  KCOV also exposes operands of every comparison in the instrumented
-	  code along with operand sizes and PCs of the comparison instructions.
-	  These operands can be used by fuzzing engines to improve the quality
-	  of fuzzing coverage.
-
-config KCOV_INSTRUMENT_ALL
-	bool "Instrument all code by default"
-	depends on KCOV
-	default y
-	help
-	  If you are doing generic system call fuzzing (like e.g. syzkaller),
-	  then you will want to instrument the whole kernel and you should
-	  say y here. If you are doing more targeted fuzzing (like e.g.
-	  filesystem fuzzing with AFL) then you will want to enable coverage
-	  for more specific subsets of files, and should say n here.
-
 config DEBUG_SHIRQ
 	bool "Debug shared IRQ handlers"
 	depends on DEBUG_KERNEL
@@ -812,7 +794,35 @@ config DEBUG_SHIRQ
 	  Drivers ought to be able to handle interrupts coming in at those
 	  points; some don't and need to be caught.
-menu "Debug Lockups and Hangs" +menu "Debug Oops, Lockups and Hangs" + +config PANIC_ON_OOPS + bool "Panic on Oops" + help + Say Y here to enable the kernel to panic when it oopses. This + has the same effect as setting oops=panic on the kernel command + line. + + This feature is useful to ensure that the kernel does not do + anything erroneous after an oops which could result in data + corruption or other issues. + + Say N if unsure. + +config PANIC_ON_OOPS_VALUE + int + range 0 1 + default 0 if !PANIC_ON_OOPS + default 1 if PANIC_ON_OOPS + +config PANIC_TIMEOUT + int "panic timeout" + default 0 + help + Set the timeout value (in seconds) until a reboot occurs when the + the kernel panics. If n = 0, then we wait forever. A timeout + value n > 0 will wait n seconds before rebooting, while a timeout + value n < 0 will reboot immediately. config LOCKUP_DETECTOR bool @@ -970,33 +980,7 @@ config WQ_WATCHDOG endmenu # "Debug lockups and hangs" -config PANIC_ON_OOPS - bool "Panic on Oops" - help - Say Y here to enable the kernel to panic when it oopses. This - has the same effect as setting oops=panic on the kernel command - line. - - This feature is useful to ensure that the kernel does not do - anything erroneous after an oops which could result in data - corruption or other issues. - - Say N if unsure. - -config PANIC_ON_OOPS_VALUE - int - range 0 1 - default 0 if !PANIC_ON_OOPS - default 1 if PANIC_ON_OOPS - -config PANIC_TIMEOUT - int "panic timeout" - default 0 - help - Set the timeout value (in seconds) until a reboot occurs when the - the kernel panics. If n = 0, then we wait forever. A timeout - value n > 0 will wait n seconds before rebooting, while a timeout - value n < 0 will reboot immediately. +menu "Scheduler Debugging" config SCHED_DEBUG bool "Collect scheduler debugging info" @@ -1024,17 +1008,7 @@ config SCHEDSTATS application, you can say N to avoid the very slight overhead this adds. -config SCHED_STACK_END_CHECK - bool "Detect stack corruption on calls to schedule()" - depends on DEBUG_KERNEL - default n - help - This option checks for a stack overrun on calls to schedule(). - If the stack end location is found to be over written always panic as - the content of the corrupted region can no longer be trusted. - This is to ensure no erroneous behaviour occurs which could result in - data corruption or a sporadic crash at a later stage once the region - is examined. The runtime overhead introduced is minimal. +endmenu config DEBUG_TIMEKEEPING bool "Enable extra timekeeping sanity checking" @@ -1338,14 +1312,7 @@ config DEBUG_KOBJECT_RELEASE config HAVE_DEBUG_BUGVERBOSE bool -config DEBUG_BUGVERBOSE - bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT - depends on BUG && (GENERIC_BUG || HAVE_DEBUG_BUGVERBOSE) - default y - help - Say Y here to make BUG() panics output the file name and line number - of the BUG call as well as the EIP and oops trace. This aids - debugging but costs about 70-100K of memory. +menu "Debug kernel data structures" config DEBUG_LIST bool "Debug linked list manipulation" @@ -1386,6 +1353,18 @@ config DEBUG_NOTIFIERS This is a relatively cheap check but if you care about maximum performance, say N. +config BUG_ON_DATA_CORRUPTION + bool "Trigger a BUG when data corruption is detected" + select DEBUG_LIST + help + Select this option if the kernel should BUG when it encounters + data corruption in kernel memory structures when they get checked + for validity. + + If unsure, say N. 
+
+endmenu
+
 config DEBUG_CREDENTIALS
 	bool "Debug credential management"
 	depends on DEBUG_KERNEL
@@ -1419,7 +1398,7 @@ config DEBUG_WQ_FORCE_RR_CPU
 	  be impacted.
 
 config DEBUG_BLOCK_EXT_DEVT
-	bool "Force extended block device numbers and spread them"
+	bool "Force extended block device numbers and spread them"
 	depends on DEBUG_KERNEL
 	depends on BLOCK
 	default n
@@ -1458,6 +1437,103 @@ config CPU_HOTPLUG_STATE_CONTROL
 
 	  Say N if your are unsure.
 
+config LATENCYTOP
+	bool "Latency measuring infrastructure"
+	depends on DEBUG_KERNEL
+	depends on STACKTRACE_SUPPORT
+	depends on PROC_FS
+	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
+	select KALLSYMS
+	select KALLSYMS_ALL
+	select STACKTRACE
+	select SCHEDSTATS
+	select SCHED_DEBUG
+	help
+	  Enable this option if you want to use the LatencyTOP tool
+	  to find out which userspace is blocking on what kernel operations.
+
+source "kernel/trace/Kconfig"
+
+config PROVIDE_OHCI1394_DMA_INIT
+	bool "Remote debugging over FireWire early on boot"
+	depends on PCI && X86
+	help
+	  If you want to debug problems which hang or crash the kernel early
+	  on boot and the crashing machine has a FireWire port, you can use
+	  this feature to remotely access the memory of the crashed machine
+	  over FireWire. This employs remote DMA as part of the OHCI1394
+	  specification which is now the standard for FireWire controllers.
+
+	  With remote DMA, you can monitor the printk buffer remotely using
+	  firescope and access all memory below 4GB using fireproxy from gdb.
+	  Even controlling a kernel debugger is possible using remote DMA.
+
+	  Usage:
+
+	  If ohci1394_dma=early is used as boot parameter, it will initialize
+	  all OHCI1394 controllers which are found in the PCI config space.
+
+	  As all changes to the FireWire bus such as enabling and disabling
+	  devices cause a bus reset and thereby disable remote DMA for all
+	  devices, be sure to have the cable plugged and FireWire enabled on
+	  the debugging host before booting the debug target for debugging.
+
+	  This code (~1k) is freed after boot. By then, the firewire stack
+	  in charge of the OHCI-1394 controllers should be used instead.
+
+	  See Documentation/debugging-via-ohci1394.txt for more information.
+
+source "samples/Kconfig"
+
+config ARCH_HAS_DEVMEM_IS_ALLOWED
+	bool
+
+config STRICT_DEVMEM
+	bool "Filter access to /dev/mem"
+	depends on MMU && DEVMEM
+	depends on ARCH_HAS_DEVMEM_IS_ALLOWED
+	default y if PPC || X86 || ARM64
+	help
+	  If this option is disabled, you allow userspace (root) access to all
+	  of memory, including kernel and userspace memory. Accidental
+	  access to this is obviously disastrous, but specific access can
+	  be used by people debugging the kernel. Note that with PAT support
+	  enabled, even in this case there are restrictions on /dev/mem
+	  use due to the cache aliasing requirements.
+
+	  If this option is switched on, and IO_STRICT_DEVMEM=n, the /dev/mem
+	  file only allows userspace access to PCI space and the BIOS code and
+	  data regions. This is sufficient for dosemu and X and all common
+	  users of /dev/mem.
+
+	  If in doubt, say Y.
+
+config IO_STRICT_DEVMEM
+	bool "Filter I/O access to /dev/mem"
+	depends on STRICT_DEVMEM
+	help
+	  If this option is disabled, you allow userspace (root) access to all
+	  io-memory regardless of whether a driver is actively using that
+	  range. Accidental access to this is obviously disastrous, but
+	  specific access can be used by people debugging kernel drivers.
+
+	  If this option is switched on, the /dev/mem file only allows
+	  userspace access to *idle* io-memory ranges (see /proc/iomem) This
+	  may break traditional users of /dev/mem (dosemu, legacy X, etc...)
+	  if the driver using a given range cannot be disabled.
+
+	  If in doubt, say Y.
+
+menu "$(SRCARCH) Debugging"
+
+source "arch/$(SRCARCH)/Kconfig.debug"
+
+endmenu
+
+menu "Kernel Testing and Coverage"
+
+source "lib/kunit/Kconfig"
+
 config NOTIFIER_ERROR_INJECTION
 	tristate "Notifier error injection"
 	depends on DEBUG_KERNEL
@@ -1616,53 +1692,53 @@ config FAULT_INJECTION_STACKTRACE_FILTER
 	help
 	  Provide stacktrace filter for fault-injection capabilities
 
-config LATENCYTOP
-	bool "Latency measuring infrastructure"
-	depends on DEBUG_KERNEL
-	depends on STACKTRACE_SUPPORT
-	depends on PROC_FS
-	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
-	select KALLSYMS
-	select KALLSYMS_ALL
-	select STACKTRACE
-	select SCHEDSTATS
-	select SCHED_DEBUG
-	help
-	  Enable this option if you want to use the LatencyTOP tool
-	  to find out which userspace is blocking on what kernel operations.
-
-source "kernel/trace/Kconfig"
-
-config PROVIDE_OHCI1394_DMA_INIT
-	bool "Remote debugging over FireWire early on boot"
-	depends on PCI && X86
+config ARCH_HAS_KCOV
+	bool
 	help
-	  If you want to debug problems which hang or crash the kernel early
-	  on boot and the crashing machine has a FireWire port, you can use
-	  this feature to remotely access the memory of the crashed machine
-	  over FireWire. This employs remote DMA as part of the OHCI1394
-	  specification which is now the standard for FireWire controllers.
+	  An architecture should select this when it can successfully
+	  build and run with CONFIG_KCOV. This typically requires
+	  disabling instrumentation for some early boot code.
 
-	  With remote DMA, you can monitor the printk buffer remotely using
-	  firescope and access all memory below 4GB using fireproxy from gdb.
-	  Even controlling a kernel debugger is possible using remote DMA.
+config CC_HAS_SANCOV_TRACE_PC
+	def_bool $(cc-option,-fsanitize-coverage=trace-pc)
 
-	  Usage:
-	  If ohci1394_dma=early is used as boot parameter, it will initialize
-	  all OHCI1394 controllers which are found in the PCI config space.
+config KCOV
+	bool "Code coverage for fuzzing"
+	depends on ARCH_HAS_KCOV
+	depends on CC_HAS_SANCOV_TRACE_PC || GCC_PLUGINS
+	select DEBUG_FS
+	select GCC_PLUGIN_SANCOV if !CC_HAS_SANCOV_TRACE_PC
+	help
+	  KCOV exposes kernel code coverage information in a form suitable
+	  for coverage-guided fuzzing (randomized testing).
 
-	  As all changes to the FireWire bus such as enabling and disabling
-	  devices cause a bus reset and thereby disable remote DMA for all
-	  devices, be sure to have the cable plugged and FireWire enabled on
-	  the debugging host before booting the debug target for debugging.
+	  If RANDOMIZE_BASE is enabled, PC values will not be stable across
+	  different machines and across reboots. If you need stable PC values,
+	  disable RANDOMIZE_BASE.
 
-	  This code (~1k) is freed after boot. By then, the firewire stack
-	  in charge of the OHCI-1394 controllers should be used instead.
+	  For more details, see Documentation/dev-tools/kcov.rst.
 
-	  See Documentation/debugging-via-ohci1394.txt for more information.
+config KCOV_ENABLE_COMPARISONS
+	bool "Enable comparison operands collection by KCOV"
+	depends on KCOV
+	depends on $(cc-option,-fsanitize-coverage=trace-cmp)
+	help
+	  KCOV also exposes operands of every comparison in the instrumented
+	  code along with operand sizes and PCs of the comparison instructions.
+	  These operands can be used by fuzzing engines to improve the quality
+	  of fuzzing coverage.
 
-source "lib/kunit/Kconfig"
+config KCOV_INSTRUMENT_ALL
+	bool "Instrument all code by default"
+	depends on KCOV
+	default y
+	help
+	  If you are doing generic system call fuzzing (like e.g. syzkaller),
+	  then you will want to instrument the whole kernel and you should
+	  say y here. If you are doing more targeted fuzzing (like e.g.
+	  filesystem fuzzing with AFL) then you will want to enable coverage
+	  for more specific subsets of files, and should say n here.
 
 menuconfig RUNTIME_TESTING_MENU
 	bool "Runtime Testing"
@@ -2099,62 +2175,7 @@ config MEMTEST
 		memtest=17, mean do 17 test patterns.
 	  If you are unsure how to answer this question, answer N.
 
-config BUG_ON_DATA_CORRUPTION
-	bool "Trigger a BUG when data corruption is detected"
-	select DEBUG_LIST
-	help
-	  Select this option if the kernel should BUG when it encounters
-	  data corruption in kernel memory structures when they get checked
-	  for validity.
-
-	  If unsure, say N.
-
-source "samples/Kconfig"
-
-source "lib/Kconfig.kgdb"
-
-source "lib/Kconfig.ubsan"
-
-config ARCH_HAS_DEVMEM_IS_ALLOWED
-	bool
-config STRICT_DEVMEM
-	bool "Filter access to /dev/mem"
-	depends on MMU && DEVMEM
-	depends on ARCH_HAS_DEVMEM_IS_ALLOWED
-	default y if PPC || X86 || ARM64
-	---help---
-	  If this option is disabled, you allow userspace (root) access to all
-	  of memory, including kernel and userspace memory. Accidental
-	  access to this is obviously disastrous, but specific access can
-	  be used by people debugging the kernel. Note that with PAT support
-	  enabled, even in this case there are restrictions on /dev/mem
-	  use due to the cache aliasing requirements.
-
-	  If this option is switched on, and IO_STRICT_DEVMEM=n, the /dev/mem
-	  file only allows userspace access to PCI space and the BIOS code and
-	  data regions. This is sufficient for dosemu and X and all common
-	  users of /dev/mem.
-
-	  If in doubt, say Y.
-
-config IO_STRICT_DEVMEM
-	bool "Filter I/O access to /dev/mem"
-	depends on STRICT_DEVMEM
-	---help---
-	  If this option is disabled, you allow userspace (root) access to all
-	  io-memory regardless of whether a driver is actively using that
-	  range. Accidental access to this is obviously disastrous, but
-	  specific access can be used by people debugging kernel drivers.
-
-	  If this option is switched on, the /dev/mem file only allows
-	  userspace access to *idle* io-memory ranges (see /proc/iomem) This
-	  may break traditional users of /dev/mem (dosemu, legacy X, etc...)
-	  if the driver using a given range cannot be disabled.
-
-	  If in doubt, say Y.
-
-source "arch/$(SRCARCH)/Kconfig.debug"
 
 config HYPERV_TESTING
 	bool "Microsoft Hyper-V driver testing"
@@ -2163,4 +2184,6 @@ config HYPERV_TESTING
 	help
 	  Select this option to enable Hyper-V vmbus testing.
 
+endmenu # "Kernel Testing and Coverage" + endmenu # Kernel hacking diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb index bbe397df04a3..933680b59e2d 100644 --- a/lib/Kconfig.kgdb +++ b/lib/Kconfig.kgdb @@ -64,9 +64,9 @@ config KGDB_LOW_LEVEL_TRAP depends on X86 || MIPS default n help - This will add an extra call back to kgdb for the breakpoint - exception handler which will allow kgdb to step through a - notify handler. + This will add an extra call back to kgdb for the breakpoint + exception handler which will allow kgdb to step through a + notify handler. config KGDB_KDB bool "KGDB_KDB: include kdb frontend for kgdb" @@ -96,7 +96,7 @@ config KDB_DEFAULT_ENABLE The config option merely sets the default at boot time. Both issuing 'echo X > /sys/module/kdb/parameters/cmd_enable' or - setting with kdb.cmd_enable=X kernel command line option will + setting with kdb.cmd_enable=X kernel command line option will override the default settings. config KDB_KEYBOARD diff --git a/lib/Makefile b/lib/Makefile index c2f0e2a4e4e8..93217d44237f 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -109,7 +109,7 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o -obj-y += logic_pio.o +lib-y += logic_pio.o obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o diff --git a/lib/bitmap.c b/lib/bitmap.c index f9e834841e94..4250519d7d1c 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -222,6 +222,18 @@ int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, } EXPORT_SYMBOL(__bitmap_andnot); +void __bitmap_replace(unsigned long *dst, + const unsigned long *old, const unsigned long *new, + const unsigned long *mask, unsigned int nbits) +{ + unsigned int k; + unsigned int nr = BITS_TO_LONGS(nbits); + + for (k = 0; k < nr; k++) + dst[k] = (old[k] & ~mask[k]) | (new[k] & mask[k]); +} +EXPORT_SYMBOL(__bitmap_replace); + int __bitmap_intersects(const unsigned long *bitmap1, const unsigned long *bitmap2, unsigned int bits) { diff --git a/lib/find_bit.c b/lib/find_bit.c index 5c51eb45178a..e35a76b291e6 100644 --- a/lib/find_bit.c +++ b/lib/find_bit.c @@ -214,3 +214,17 @@ EXPORT_SYMBOL(find_next_bit_le); #endif #endif /* __BIG_ENDIAN */ + +unsigned long find_next_clump8(unsigned long *clump, const unsigned long *addr, + unsigned long size, unsigned long offset) +{ + offset = find_next_bit(addr, size, offset); + if (offset == size) + return size; + + offset = round_down(offset, 8); + *clump = bitmap_get_value8(addr, offset); + + return offset; +} +EXPORT_SYMBOL(find_next_clump8); diff --git a/lib/genalloc.c b/lib/genalloc.c index 24d20ca7e91b..7f1244b5294a 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -540,7 +540,7 @@ void gen_pool_for_each_chunk(struct gen_pool *pool, EXPORT_SYMBOL(gen_pool_for_each_chunk); /** - * addr_in_gen_pool - checks if an address falls within the range of a pool + * gen_pool_has_addr - checks if an address falls within the range of a pool * @pool: the generic memory pool * @start: start address * @size: size of the region @@ -548,7 +548,7 @@ EXPORT_SYMBOL(gen_pool_for_each_chunk); * Check if the range of addresses falls within the specified pool. Returns * true if the entire range is contained in the pool and false otherwise. 
 */
-bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
+bool gen_pool_has_addr(struct gen_pool *pool, unsigned long start,
 			size_t size)
 {
 	bool found = false;
@@ -567,6 +567,7 @@ bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
 	rcu_read_unlock();
 	return found;
 }
+EXPORT_SYMBOL(gen_pool_has_addr);
 
 /**
 * gen_pool_avail - get available free space of the pool
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index fb29c02c6a3c..51595bf3af85 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1222,11 +1222,12 @@ EXPORT_SYMBOL(iov_iter_discard);
 
 unsigned long iov_iter_alignment(const struct iov_iter *i)
 {
-	unsigned int p_mask = i->pipe->ring_size - 1;
 	unsigned long res = 0;
 	size_t size = i->count;
 
 	if (unlikely(iov_iter_is_pipe(i))) {
+		unsigned int p_mask = i->pipe->ring_size - 1;
+
 		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
 			return size | i->iov_offset;
 		return size;
diff --git a/lib/logic_pio.c b/lib/logic_pio.c
index 905027574e5d..f511a99bb389 100644
--- a/lib/logic_pio.c
+++ b/lib/logic_pio.c
@@ -3,6 +3,7 @@
 * Copyright (C) 2017 HiSilicon Limited, All Rights Reserved.
 * Author: Gabriele Paoloni <gabriele.paoloni@huawei.com>
 * Author: Zhichang Yuan <yuanzhichang@hisilicon.com>
+ * Author: John Garry <john.garry@huawei.com>
 */
 
 #define pr_fmt(fmt) "LOGIC PIO: " fmt
@@ -39,7 +40,8 @@ int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
 	resource_size_t iio_sz = MMIO_UPPER_LIMIT;
 	int ret = 0;
 
-	if (!new_range || !new_range->fwnode || !new_range->size)
+	if (!new_range || !new_range->fwnode || !new_range->size ||
+	    (new_range->flags == LOGIC_PIO_INDIRECT && !new_range->ops))
 		return -EINVAL;
 
 	start = new_range->hw_start;
@@ -237,7 +239,7 @@ type logic_in##bw(unsigned long addr) \
 	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
 		struct logic_pio_hwaddr *entry = find_io_range(addr); \
 									\
-		if (entry && entry->ops) \
+		if (entry) \
 			ret = entry->ops->in(entry->hostdata, \
 					addr, sizeof(type)); \
 		else \
@@ -253,7 +255,7 @@ void logic_out##bw(type value, unsigned long addr) \
 	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
 		struct logic_pio_hwaddr *entry = find_io_range(addr); \
 									\
-		if (entry && entry->ops) \
+		if (entry) \
 			entry->ops->out(entry->hostdata, \
 					addr, value, sizeof(type)); \
 		else \
@@ -261,7 +263,7 @@ void logic_out##bw(type value, unsigned long addr) \
 	} \
 } \
 									\
-void logic_ins##bw(unsigned long addr, void *buffer, \
+void logic_ins##bw(unsigned long addr, void *buffer, \
 		   unsigned int count) \
 { \
 	if (addr < MMIO_UPPER_LIMIT) { \
@@ -269,7 +271,7 @@ void logic_ins##bw(unsigned long addr, void *buffer, \
 	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
 		struct logic_pio_hwaddr *entry = find_io_range(addr); \
 									\
-		if (entry && entry->ops) \
+		if (entry) \
 			entry->ops->ins(entry->hostdata, \
 					addr, buffer, sizeof(type), count); \
 		else \
@@ -286,7 +288,7 @@ void logic_outs##bw(unsigned long addr, const void *buffer, \
 	} else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
 		struct logic_pio_hwaddr *entry = find_io_range(addr); \
 									\
-		if (entry && entry->ops) \
+		if (entry) \
 			entry->ops->outs(entry->hostdata, \
 					addr, buffer, sizeof(type), count); \
 		else \
diff --git a/lib/math/rational.c b/lib/math/rational.c
index ba7443677c90..31fb27db2deb 100644
--- a/lib/math/rational.c
+++ b/lib/math/rational.c
@@ -3,6 +3,7 @@
 * rational fractions
 *
 * Copyright (C) 2009 emlix GmbH, Oskar Schirmer <oskar@scara.com>
+ * Copyright (C) 2019 Trent Piepho <tpiepho@gmail.com>
 *
 * helper functions when coping with rational numbers
 */
@@ -10,6 +11,7 @@
 #include <linux/rational.h>
 #include <linux/compiler.h>
 #include <linux/export.h>
+#include <linux/kernel.h>
 
 /*
 * calculate best rational approximation for a given fraction
@@ -33,30 +35,65 @@ void rational_best_approximation(
 	unsigned long max_numerator, unsigned long max_denominator,
 	unsigned long *best_numerator, unsigned long *best_denominator)
 {
-	unsigned long n, d, n0, d0, n1, d1;
+	/* n/d is the starting rational, which is continually
+	 * decreased each iteration using the Euclidean algorithm.
+	 *
+	 * dp is the value of d from the prior iteration.
+	 *
+	 * n2/d2, n1/d1, and n0/d0 are our successively more accurate
+	 * approximations of the rational.  They are, respectively,
+	 * the current, previous, and two prior iterations of it.
+	 *
+	 * a is current term of the continued fraction.
+	 */
+	unsigned long n, d, n0, d0, n1, d1, n2, d2;
+
 	n = given_numerator;
 	d = given_denominator;
 	n0 = d1 = 0;
 	n1 = d0 = 1;
+
 	for (;;) {
-		unsigned long t, a;
-		if ((n1 > max_numerator) || (d1 > max_denominator)) {
-			n1 = n0;
-			d1 = d0;
-			break;
-		}
+		unsigned long dp, a;
+
 		if (d == 0)
 			break;
-		t = d;
+
+		/* Find next term in continued fraction, 'a', via
+		 * Euclidean algorithm.
+		 */
+		dp = d;
 		a = n / d;
 		d = n % d;
-		n = t;
-		t = n0 + a * n1;
+		n = dp;
+
+		/* Calculate the current rational approximation (aka
+		 * convergent), n2/d2, using the term just found and
+		 * the two prior approximations.
+		 */
+		n2 = n0 + a * n1;
+		d2 = d0 + a * d1;
+
+		/* If the current convergent exceeds the maxes, then
+		 * return either the previous convergent or the
+		 * largest semi-convergent, the final term of which is
+		 * found below as 't'.
+		 */
+		if ((n2 > max_numerator) || (d2 > max_denominator)) {
+			unsigned long t = min((max_numerator - n0) / n1,
+					      (max_denominator - d0) / d1);
+
+			/* This tests if the semi-convergent is closer
+			 * than the previous convergent.
+			 */
+			if (2u * t > a || (2u * t == a && d0 * dp > d1 * d)) {
+				n1 = n0 + t * n1;
+				d1 = d0 + t * d1;
+			}
+			break;
+		}
 		n0 = n1;
-		n1 = t;
-		t = d0 + a * d1;
+		n1 = n2;
 		d0 = d1;
-		d1 = t;
+		d1 = d2;
 	}
 	*best_numerator = n1;
 	*best_denominator = d1;
diff --git a/lib/raid6/unroll.awk b/lib/raid6/unroll.awk
index c6aa03631df8..0809805a7e23 100644
--- a/lib/raid6/unroll.awk
+++ b/lib/raid6/unroll.awk
@@ -13,7 +13,7 @@ BEGIN {
 	for (i = 0; i < rep; ++i) {
 		tmp = $0
 		gsub(/\$\$/, i, tmp)
-		gsub(/\$\#/, n, tmp)
+		gsub(/\$#/, n, tmp)
 		gsub(/\$\*/, "$", tmp)
 		print tmp
 	}
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 33feec8989f1..af88d1346dd7 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -650,8 +650,8 @@ void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
 	if (!sbq_wait->sbq) {
 		sbq_wait->sbq = sbq;
 		atomic_inc(&sbq->ws_active);
+		add_wait_queue(&ws->wait, &sbq_wait->wait);
 	}
-	add_wait_queue(&ws->wait, &sbq_wait->wait);
 }
 EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);
 
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 51a98f7ee79e..e14a15ac250b 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Test cases for printf facility.
+ * Test cases for bitmap API.
 */
 
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -21,6 +21,39 @@ static unsigned failed_tests __initdata;
 
 static char pbl_buffer[PAGE_SIZE] __initdata;
 
+static const unsigned long exp1[] __initconst = {
+	BITMAP_FROM_U64(1),
+	BITMAP_FROM_U64(2),
+	BITMAP_FROM_U64(0x0000ffff),
+	BITMAP_FROM_U64(0xffff0000),
+	BITMAP_FROM_U64(0x55555555),
+	BITMAP_FROM_U64(0xaaaaaaaa),
+	BITMAP_FROM_U64(0x11111111),
+	BITMAP_FROM_U64(0x22222222),
+	BITMAP_FROM_U64(0xffffffff),
+	BITMAP_FROM_U64(0xfffffffe),
+	BITMAP_FROM_U64(0x3333333311111111ULL),
+	BITMAP_FROM_U64(0xffffffff77777777ULL),
+	BITMAP_FROM_U64(0),
+};
+
+static const unsigned long exp2[] __initconst = {
+	BITMAP_FROM_U64(0x3333333311111111ULL),
+	BITMAP_FROM_U64(0xffffffff77777777ULL),
+};
+
+/* Fibonacci sequence */
+static const unsigned long exp2_to_exp3_mask[] __initconst = {
+	BITMAP_FROM_U64(0x008000020020212eULL),
+};
+/* exp3_0_1 = (exp2[0] & ~exp2_to_exp3_mask) | (exp2[1] & exp2_to_exp3_mask) */
+static const unsigned long exp3_0_1[] __initconst = {
+	BITMAP_FROM_U64(0x33b3333311313137ULL),
+};
+/* exp3_1_0 = (exp2[1] & ~exp2_to_exp3_mask) | (exp2[0] & exp2_to_exp3_mask) */
+static const unsigned long exp3_1_0[] __initconst = {
+	BITMAP_FROM_U64(0xff7fffff77575751ULL),
+};
 
 static bool __init
 __check_eq_uint(const char *srcfile, unsigned int line,
@@ -92,6 +125,36 @@ __check_eq_u32_array(const char *srcfile, unsigned int line,
 	return true;
 }
 
+static bool __init __check_eq_clump8(const char *srcfile, unsigned int line,
+				    const unsigned int offset,
+				    const unsigned int size,
+				    const unsigned char *const clump_exp,
+				    const unsigned long *const clump)
+{
+	unsigned long exp;
+
+	if (offset >= size) {
+		pr_warn("[%s:%u] bit offset for clump out-of-bounds: expected less than %u, got %u\n",
+			srcfile, line, size, offset);
+		return false;
+	}
+
+	exp = clump_exp[offset / 8];
+	if (!exp) {
+		pr_warn("[%s:%u] bit offset for zero clump: expected nonzero clump, got bit offset %u with clump value 0",
+			srcfile, line, offset);
+		return false;
+	}
+
+	if (*clump != exp) {
+		pr_warn("[%s:%u] expected clump value of 0x%lX, got clump value of 0x%lX",
+			srcfile, line, exp, *clump);
+		return false;
+	}
+
+	return true;
+}
+
 #define __expect_eq(suffix, ...) \
 ({ \
 	int result = 0; \
@@ -108,6 +171,7 @@ __check_eq_u32_array(const char *srcfile, unsigned int line,
 #define expect_eq_bitmap(...)		__expect_eq(bitmap, ##__VA_ARGS__)
 #define expect_eq_pbl(...)		__expect_eq(pbl, ##__VA_ARGS__)
 #define expect_eq_u32_array(...)	__expect_eq(u32_array, ##__VA_ARGS__)
+#define expect_eq_clump8(...)		__expect_eq(clump8, ##__VA_ARGS__)
 
 static void __init test_zero_clear(void)
 {
@@ -206,6 +270,30 @@ static void __init test_copy(void)
 	expect_eq_pbl("0-108,128-1023", bmap2, 1024);
 }
 
+#define EXP2_IN_BITS	(sizeof(exp2) * 8)
+
+static void __init test_replace(void)
+{
+	unsigned int nbits = 64;
+	DECLARE_BITMAP(bmap, 1024);
+
+	bitmap_zero(bmap, 1024);
+	bitmap_replace(bmap, &exp2[0], &exp2[1], exp2_to_exp3_mask, nbits);
+	expect_eq_bitmap(bmap, exp3_0_1, nbits);
+
+	bitmap_zero(bmap, 1024);
+	bitmap_replace(bmap, &exp2[1], &exp2[0], exp2_to_exp3_mask, nbits);
+	expect_eq_bitmap(bmap, exp3_1_0, nbits);
+
+	bitmap_fill(bmap, 1024);
+	bitmap_replace(bmap, &exp2[0], &exp2[1], exp2_to_exp3_mask, nbits);
+	expect_eq_bitmap(bmap, exp3_0_1, nbits);
+
+	bitmap_fill(bmap, 1024);
+	bitmap_replace(bmap, &exp2[1], &exp2[0], exp2_to_exp3_mask, nbits);
+	expect_eq_bitmap(bmap, exp3_1_0, nbits);
+}
+
 #define PARSE_TIME 0x1
 
 struct test_bitmap_parselist{
@@ -216,53 +304,32 @@ struct test_bitmap_parselist{
 	const int flags;
 };
 
-static const unsigned long exp[] __initconst = {
-	BITMAP_FROM_U64(1),
-	BITMAP_FROM_U64(2),
-	BITMAP_FROM_U64(0x0000ffff),
-	BITMAP_FROM_U64(0xffff0000),
-	BITMAP_FROM_U64(0x55555555),
-	BITMAP_FROM_U64(0xaaaaaaaa),
-	BITMAP_FROM_U64(0x11111111),
-	BITMAP_FROM_U64(0x22222222),
-	BITMAP_FROM_U64(0xffffffff),
-	BITMAP_FROM_U64(0xfffffffe),
-	BITMAP_FROM_U64(0x3333333311111111ULL),
-	BITMAP_FROM_U64(0xffffffff77777777ULL),
-	BITMAP_FROM_U64(0),
-};
-
-static const unsigned long exp2[] __initconst = {
-	BITMAP_FROM_U64(0x3333333311111111ULL),
-	BITMAP_FROM_U64(0xffffffff77777777ULL)
-};
-
 static const struct test_bitmap_parselist parselist_tests[] __initconst = {
#define step (sizeof(u64) / sizeof(unsigned long))
 
-	{0, "0",			&exp[0], 8, 0},
-	{0, "1",			&exp[1 * step], 8, 0},
-	{0, "0-15",			&exp[2 * step], 32, 0},
-	{0, "16-31",			&exp[3 * step], 32, 0},
-	{0, "0-31:1/2",			&exp[4 * step], 32, 0},
-	{0, "1-31:1/2",			&exp[5 * step], 32, 0},
-	{0, "0-31:1/4",			&exp[6 * step], 32, 0},
-	{0, "1-31:1/4",			&exp[7 * step], 32, 0},
-	{0, "0-31:4/4",			&exp[8 * step], 32, 0},
-	{0, "1-31:4/4",			&exp[9 * step], 32, 0},
-	{0, "0-31:1/4,32-63:2/4",	&exp[10 * step], 64, 0},
-	{0, "0-31:3/4,32-63:4/4",	&exp[11 * step], 64, 0},
-	{0, " ,, 0-31:3/4 ,, 32-63:4/4 ,, ",	&exp[11 * step], 64, 0},
+	{0, "0",			&exp1[0], 8, 0},
+	{0, "1",			&exp1[1 * step], 8, 0},
+	{0, "0-15",			&exp1[2 * step], 32, 0},
+	{0, "16-31",			&exp1[3 * step], 32, 0},
+	{0, "0-31:1/2",			&exp1[4 * step], 32, 0},
+	{0, "1-31:1/2",			&exp1[5 * step], 32, 0},
+	{0, "0-31:1/4",			&exp1[6 * step], 32, 0},
+	{0, "1-31:1/4",			&exp1[7 * step], 32, 0},
+	{0, "0-31:4/4",			&exp1[8 * step], 32, 0},
+	{0, "1-31:4/4",			&exp1[9 * step], 32, 0},
+	{0, "0-31:1/4,32-63:2/4",	&exp1[10 * step], 64, 0},
+	{0, "0-31:3/4,32-63:4/4",	&exp1[11 * step], 64, 0},
+	{0, " ,, 0-31:3/4 ,, 32-63:4/4 ,, ",	&exp1[11 * step], 64, 0},
 
 	{0, "0-31:1/4,32-63:2/4,64-95:3/4,96-127:4/4",	exp2, 128, 0},
 
 	{0, "0-2047:128/256",	NULL, 2048, PARSE_TIME},
 
-	{0, "",				&exp[12 * step], 8, 0},
-	{0, "\n",			&exp[12 * step], 8, 0},
-	{0, ",, ,, , , ,",		&exp[12 * step], 8, 0},
-	{0, " , ,, , , ",		&exp[12 * step], 8, 0},
-	{0, " , ,, , , \n",		&exp[12 * step], 8, 0},
+	{0, "",				&exp1[12 * step], 8, 0},
+	{0, "\n",			&exp1[12 * step], 8, 0},
+	{0, ",, ,, , , ,",		&exp1[12 * step], 8, 0},
+	{0, " , ,, , , ",		&exp1[12 * step], 8, 0},
+	{0, " , ,, , , \n",		&exp1[12 * step], 8, 0},
 
 	{-EINVAL, "-1",	NULL, 8, 0},
 	{-EINVAL, "-0",	NULL, 8, 0},
@@ -280,6 +347,8 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = {
{-EINVAL, "a-31:10/1", NULL, 8, 0}, {-EINVAL, "0-31:a/1", NULL, 8, 0}, {-EINVAL, "0-\n", NULL, 8, 0}, + +#undef step }; static void __init __test_bitmap_parselist(int is_user) @@ -299,7 +368,7 @@ static void __init __test_bitmap_parselist(int is_user) set_fs(KERNEL_DS); time = ktime_get(); - err = bitmap_parselist_user(ptest.in, len, + err = bitmap_parselist_user((__force const char __user *)ptest.in, len, bmap, ptest.nbits); time = ktime_get() - time; set_fs(orig_fs); @@ -326,6 +395,8 @@ static void __init __test_bitmap_parselist(int is_user) if (ptest.flags & PARSE_TIME) pr_err("parselist%s: %d: input is '%s' OK, Time: %llu\n", mode, i, ptest.in, time); + +#undef ptest } } @@ -339,20 +410,20 @@ static void __init test_bitmap_parselist_user(void) __test_bitmap_parselist(1); } -#define EXP_BYTES (sizeof(exp) * 8) +#define EXP1_IN_BITS (sizeof(exp1) * 8) static void __init test_bitmap_arr32(void) { unsigned int nbits, next_bit; - u32 arr[sizeof(exp) / 4]; - DECLARE_BITMAP(bmap2, EXP_BYTES); + u32 arr[EXP1_IN_BITS / 32]; + DECLARE_BITMAP(bmap2, EXP1_IN_BITS); memset(arr, 0xa5, sizeof(arr)); - for (nbits = 0; nbits < EXP_BYTES; ++nbits) { - bitmap_to_arr32(arr, exp, nbits); + for (nbits = 0; nbits < EXP1_IN_BITS; ++nbits) { + bitmap_to_arr32(arr, exp1, nbits); bitmap_from_arr32(bmap2, arr, nbits); - expect_eq_bitmap(bmap2, exp, nbits); + expect_eq_bitmap(bmap2, exp1, nbits); next_bit = find_next_bit(bmap2, round_up(nbits, BITS_PER_LONG), nbits); @@ -361,7 +432,7 @@ static void __init test_bitmap_arr32(void) " tail is not safely cleared: %d\n", nbits, next_bit); - if (nbits < EXP_BYTES - 32) + if (nbits < EXP1_IN_BITS - 32) expect_eq_uint(arr[DIV_ROUND_UP(nbits, 32)], 0xa5a5a5a5); } @@ -404,15 +475,50 @@ static void noinline __init test_mem_optimisations(void) } } +static const unsigned char clump_exp[] __initconst = { + 0x01, /* 1 bit set */ + 0x02, /* non-edge 1 bit set */ + 0x00, /* zero bits set */ + 0x38, /* 3 bits set across 4-bit boundary */ + 0x38, /* Repeated clump */ + 0x0F, /* 4 bits set */ + 0xFF, /* all bits set */ + 0x05, /* non-adjacent 2 bits set */ +}; + +static void __init test_for_each_set_clump8(void) +{ +#define CLUMP_EXP_NUMBITS 64 + DECLARE_BITMAP(bits, CLUMP_EXP_NUMBITS); + unsigned int start; + unsigned long clump; + + /* set bitmap to test case */ + bitmap_zero(bits, CLUMP_EXP_NUMBITS); + bitmap_set(bits, 0, 1); /* 0x01 */ + bitmap_set(bits, 9, 1); /* 0x02 */ + bitmap_set(bits, 27, 3); /* 0x28 */ + bitmap_set(bits, 35, 3); /* 0x28 */ + bitmap_set(bits, 40, 4); /* 0x0F */ + bitmap_set(bits, 48, 8); /* 0xFF */ + bitmap_set(bits, 56, 1); /* 0x05 - part 1 */ + bitmap_set(bits, 58, 1); /* 0x05 - part 2 */ + + for_each_set_clump8(start, clump, bits, CLUMP_EXP_NUMBITS) + expect_eq_clump8(start, CLUMP_EXP_NUMBITS, clump_exp, &clump); +} + static void __init selftest(void) { test_zero_clear(); test_fill_set(); test_copy(); + test_replace(); test_bitmap_arr32(); test_bitmap_parselist(); test_bitmap_parselist_user(); test_mem_optimisations(); + test_for_each_set_clump8(); } KSTM_MODULE_LOADERS(test_bitmap); diff --git a/lib/test_meminit.c b/lib/test_meminit.c index 9742e5cb853a..e4f706a404b3 100644 --- a/lib/test_meminit.c +++ b/lib/test_meminit.c @@ -183,6 +183,9 @@ static bool __init check_buf(void *buf, int size, bool want_ctor, return fail; } +#define BULK_SIZE 100 +static void *bulk_array[BULK_SIZE]; + /* * Test kmem_cache with given parameters: * want_ctor - use a constructor; @@ -203,9 +206,24 @@ static int __init do_kmem_cache_size(size_t size, bool want_ctor, want_rcu ? 
SLAB_TYPESAFE_BY_RCU : 0, want_ctor ? test_ctor : NULL); for (iter = 0; iter < 10; iter++) { + /* Do a test of bulk allocations */ + if (!want_rcu && !want_ctor) { + int ret; + + ret = kmem_cache_alloc_bulk(c, alloc_mask, BULK_SIZE, bulk_array); + if (!ret) { + fail = true; + } else { + int i; + for (i = 0; i < ret; i++) + fail |= check_buf(bulk_array[i], size, want_ctor, want_rcu, want_zero); + kmem_cache_free_bulk(c, ret, bulk_array); + } + } + buf = kmem_cache_alloc(c, alloc_mask); /* Check that buf is zeroed, if it must be. */ - fail = check_buf(buf, size, want_ctor, want_rcu, want_zero); + fail |= check_buf(buf, size, want_ctor, want_rcu, want_zero); fill_with_garbage_skip(buf, size, want_ctor ? CTOR_BYTES : 0); if (!want_rcu) { diff --git a/lib/ubsan.c b/lib/ubsan.c index fc552d524ef7..7b9b58aee72c 100644 --- a/lib/ubsan.c +++ b/lib/ubsan.c @@ -140,25 +140,21 @@ static void val_to_string(char *str, size_t size, struct type_descriptor *type, } } -static DEFINE_SPINLOCK(report_lock); - -static void ubsan_prologue(struct source_location *location, - unsigned long *flags) +static void ubsan_prologue(struct source_location *location) { current->in_ubsan++; - spin_lock_irqsave(&report_lock, *flags); pr_err("========================================" "========================================\n"); print_source_location("UBSAN: Undefined behaviour in", location); } -static void ubsan_epilogue(unsigned long *flags) +static void ubsan_epilogue(void) { dump_stack(); pr_err("========================================" "========================================\n"); - spin_unlock_irqrestore(&report_lock, *flags); + current->in_ubsan--; } @@ -167,14 +163,13 @@ static void handle_overflow(struct overflow_data *data, void *lhs, { struct type_descriptor *type = data->type; - unsigned long flags; char lhs_val_str[VALUE_LENGTH]; char rhs_val_str[VALUE_LENGTH]; if (suppress_report(&data->location)) return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); val_to_string(lhs_val_str, sizeof(lhs_val_str), type, lhs); val_to_string(rhs_val_str, sizeof(rhs_val_str), type, rhs); @@ -186,7 +181,7 @@ static void handle_overflow(struct overflow_data *data, void *lhs, rhs_val_str, type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(); } void __ubsan_handle_add_overflow(struct overflow_data *data, @@ -214,20 +209,19 @@ EXPORT_SYMBOL(__ubsan_handle_mul_overflow); void __ubsan_handle_negate_overflow(struct overflow_data *data, void *old_val) { - unsigned long flags; char old_val_str[VALUE_LENGTH]; if (suppress_report(&data->location)) return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); val_to_string(old_val_str, sizeof(old_val_str), data->type, old_val); pr_err("negation of %s cannot be represented in type %s:\n", old_val_str, data->type->type_name); - ubsan_epilogue(&flags); + ubsan_epilogue(); } EXPORT_SYMBOL(__ubsan_handle_negate_overflow); @@ -235,13 +229,12 @@ EXPORT_SYMBOL(__ubsan_handle_negate_overflow); void __ubsan_handle_divrem_overflow(struct overflow_data *data, void *lhs, void *rhs) { - unsigned long flags; char rhs_val_str[VALUE_LENGTH]; if (suppress_report(&data->location)) return; - ubsan_prologue(&data->location, &flags); + ubsan_prologue(&data->location); val_to_string(rhs_val_str, sizeof(rhs_val_str), data->type, rhs); @@ -251,58 +244,52 @@ void __ubsan_handle_divrem_overflow(struct overflow_data *data, else pr_err("division by zero\n"); - ubsan_epilogue(&flags); + ubsan_epilogue(); } EXPORT_SYMBOL(__ubsan_handle_divrem_overflow); 
 
 static void handle_null_ptr_deref(struct type_mismatch_data_common *data)
 {
-	unsigned long flags;
-
 	if (suppress_report(data->location))
 		return;
 
-	ubsan_prologue(data->location, &flags);
+	ubsan_prologue(data->location);
 
 	pr_err("%s null pointer of type %s\n",
 		type_check_kinds[data->type_check_kind],
 		data->type->type_name);
 
-	ubsan_epilogue(&flags);
+	ubsan_epilogue();
 }
 
 static void handle_misaligned_access(struct type_mismatch_data_common *data,
 				unsigned long ptr)
 {
-	unsigned long flags;
-
 	if (suppress_report(data->location))
 		return;
 
-	ubsan_prologue(data->location, &flags);
+	ubsan_prologue(data->location);
 
 	pr_err("%s misaligned address %p for type %s\n",
 		type_check_kinds[data->type_check_kind],
 		(void *)ptr, data->type->type_name);
 	pr_err("which requires %ld byte alignment\n", data->alignment);
 
-	ubsan_epilogue(&flags);
+	ubsan_epilogue();
 }
 
 static void handle_object_size_mismatch(struct type_mismatch_data_common *data,
 					unsigned long ptr)
 {
-	unsigned long flags;
-
 	if (suppress_report(data->location))
 		return;
 
-	ubsan_prologue(data->location, &flags);
+	ubsan_prologue(data->location);
 	pr_err("%s address %p with insufficient space\n",
 		type_check_kinds[data->type_check_kind],
 		(void *) ptr);
 	pr_err("for an object of type %s\n", data->type->type_name);
 
-	ubsan_epilogue(&flags);
+	ubsan_epilogue();
 }
 
 static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data,
@@ -351,25 +338,23 @@ EXPORT_SYMBOL(__ubsan_handle_type_mismatch_v1);
 
 void __ubsan_handle_out_of_bounds(struct out_of_bounds_data *data, void *index)
 {
-	unsigned long flags;
 	char index_str[VALUE_LENGTH];
 
 	if (suppress_report(&data->location))
 		return;
 
-	ubsan_prologue(&data->location, &flags);
+	ubsan_prologue(&data->location);
 
 	val_to_string(index_str, sizeof(index_str), data->index_type, index);
 
 	pr_err("index %s is out of range for type %s\n", index_str,
 		data->array_type->type_name);
 
-	ubsan_epilogue(&flags);
+	ubsan_epilogue();
 }
 EXPORT_SYMBOL(__ubsan_handle_out_of_bounds);
 
 void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
 					void *lhs, void *rhs)
 {
-	unsigned long flags;
 	struct type_descriptor *rhs_type = data->rhs_type;
 	struct type_descriptor *lhs_type = data->lhs_type;
 	char rhs_str[VALUE_LENGTH];
@@ -379,7 +364,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
 	if (suppress_report(&data->location))
 		goto out;
 
-	ubsan_prologue(&data->location, &flags);
+	ubsan_prologue(&data->location);
 
 	val_to_string(rhs_str, sizeof(rhs_str), rhs_type, rhs);
 	val_to_string(lhs_str, sizeof(lhs_str), lhs_type, lhs);
@@ -402,7 +387,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
 		lhs_str, rhs_str,
 		lhs_type->type_name);
 
-	ubsan_epilogue(&flags);
+	ubsan_epilogue();
 out:
 	user_access_restore(ua_flags);
 }
@@ -411,11 +396,9 @@ EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
 
 void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
 {
-	unsigned long flags;
-
-	ubsan_prologue(&data->location, &flags);
+	ubsan_prologue(&data->location);
 	pr_err("calling __builtin_unreachable()\n");
-	ubsan_epilogue(&flags);
+	ubsan_epilogue();
 	panic("can't return from __builtin_unreachable()");
 }
 EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
@@ -423,19 +406,18 @@ EXPORT_SYMBOL(__ubsan_handle_builtin_unreachable);
 void __ubsan_handle_load_invalid_value(struct invalid_value_data *data,
 				void *val)
 {
-	unsigned long flags;
 	char val_str[VALUE_LENGTH];
 
 	if (suppress_report(&data->location))
 		return;
 
-	ubsan_prologue(&data->location, &flags);
+	ubsan_prologue(&data->location);
 
 	val_to_string(val_str, sizeof(val_str), data->type, val);
 
 	pr_err("load of value %s is not a valid value for type %s\n",
 		val_str, data->type->type_name);
 
-	ubsan_epilogue(&flags);
+	ubsan_epilogue();
 }
 EXPORT_SYMBOL(__ubsan_handle_load_invalid_value);
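
Notes on selected hunks follow. These are illustrative sketches, not part of the commit or of the upstream tree; anything not visible in the hunks above is an assumption and is marked as such.

The new bitmap helper __bitmap_replace() (and the bitmap_replace() wrapper exercised by lib/test_bitmap.c above) writes into dst the bits of new selected by mask and the bits of old everywhere else. A minimal sketch using the same argument order as the test code:

#include <linux/bitmap.h>

#define EXAMPLE_NBITS 16

static void bitmap_replace_example(void)
{
	DECLARE_BITMAP(old, EXAMPLE_NBITS);
	DECLARE_BITMAP(new, EXAMPLE_NBITS);
	DECLARE_BITMAP(mask, EXAMPLE_NBITS);
	DECLARE_BITMAP(dst, EXAMPLE_NBITS);

	bitmap_zero(old, EXAMPLE_NBITS);	/* old: all bits clear */
	bitmap_fill(new, EXAMPLE_NBITS);	/* new: all bits set */
	bitmap_zero(mask, EXAMPLE_NBITS);
	bitmap_set(mask, 0, 4);			/* only bits 0-3 may change */

	/* dst = (old & ~mask) | (new & mask): bits 0-3 set, 4-15 clear */
	bitmap_replace(dst, old, new, mask, EXAMPLE_NBITS);
}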
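find_next_clump8() in lib/find_bit.c returns the offset, rounded down to a multiple of 8, of the next 8-bit group containing a set bit, and stores that group's value through *clump. A sketch of the iteration pattern used by test_for_each_set_clump8() above; for_each_set_clump8() is assumed to be the companion macro this series adds in the headers:

#include <linux/bitmap.h>
#include <linux/printk.h>

static void clump8_walk_example(void)
{
	DECLARE_BITMAP(bits, 64);
	unsigned int start;
	unsigned long clump;

	bitmap_zero(bits, 64);
	bitmap_set(bits, 9, 1);		/* group starting at bit 8 -> 0x02 */
	bitmap_set(bits, 48, 8);	/* group starting at bit 48 -> 0xff */

	/* Visits only the 8-bit groups that contain at least one set bit. */
	for_each_set_clump8(start, clump, bits, 64)
		pr_info("clump at bit %u has value 0x%02lx\n", start, clump);
}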
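lib/genalloc.c renames addr_in_gen_pool() to gen_pool_has_addr() and exports it, so modules can ask whether an address range belongs to a pool. A hedged usage sketch; the free-if-owned policy shown here is illustrative, not something the hunk prescribes:

#include <linux/genalloc.h>

/* Release a buffer only if it actually came from this pool. */
static void release_if_pooled(struct gen_pool *pool, unsigned long addr,
			      size_t len)
{
	if (gen_pool_has_addr(pool, addr, len))
		gen_pool_free(pool, addr, len);
}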
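lib/logic_pio.c now rejects LOGIC_PIO_INDIRECT registrations that lack accessor ops, which is what allows the port macros to drop their per-access "entry->ops" NULL checks. A sketch of a registration that satisfies the new check; only the fields visible in the hunk are set, struct logic_pio_host_ops is assumed to be the type behind new_range->ops, and my_host_ops stands in for a host controller's real accessor table:

#include <linux/logic_pio.h>
#include <linux/errno.h>

/* Assumed to be provided elsewhere by the host controller driver. */
extern struct logic_pio_host_ops my_host_ops;

static int register_indirect_pio(struct fwnode_handle *fwnode, void *hostdata)
{
	static struct logic_pio_hwaddr range;

	range.fwnode   = fwnode;
	range.size     = 0x400;		/* illustrative I/O window size */
	range.flags    = LOGIC_PIO_INDIRECT;
	range.hostdata = hostdata;
	range.ops      = &my_host_ops;	/* NULL here now fails with -EINVAL */

	return logic_pio_register_range(&range);
}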
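The rewritten lib/math/rational.c keeps the Euclidean continued-fraction structure but, when the next convergent would exceed the caller's limits, also considers the best semi-convergent instead of always falling back to the previous convergent. A small worked example using the function's existing signature; the expected result follows from stepping through the loop above (convergents 3/1, 22/7, 355/113; the last exceeds the caps, so 22/7 is returned):

#include <linux/rational.h>

static void rational_example(void)
{
	unsigned long num, den;

	/* Approximate 355/113 with numerator and denominator capped at 100. */
	rational_best_approximation(355, 113, 100, 100, &num, &den);

	/* num == 22, den == 7 */
}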
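lib/test_meminit.c now also exercises kmem_cache_alloc_bulk() and accumulates check_buf() results by OR-ing into fail rather than overwriting it. A sketch of the allocate-check-free pattern with the same call shapes as in the hunk (the GFP flag choice is an assumption):

#include <linux/slab.h>

#define NOBJS 16

/* Returns 0 on success, -ENOMEM if the bulk allocation failed entirely. */
static int bulk_roundtrip(struct kmem_cache *cache)
{
	void *objs[NOBJS];
	int got, i;

	/* May return fewer objects than requested; 0 means total failure. */
	got = kmem_cache_alloc_bulk(cache, GFP_KERNEL, NOBJS, objs);
	if (!got)
		return -ENOMEM;

	for (i = 0; i < got; i++) {
		/* ... use objs[i] ... */
	}

	kmem_cache_free_bulk(cache, got, objs);
	return 0;
}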