Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                       11
-rw-r--r--  lib/Kconfig.debug                189
-rw-r--r--  lib/Kconfig.kgdb                   2
-rw-r--r--  lib/Makefile                       4
-rw-r--r--  lib/cmdline.c                      6
-rw-r--r--  lib/cpumask.c                     32
-rw-r--r--  lib/crc4.c                        46
-rw-r--r--  lib/dma-noop.c                    21
-rw-r--r--  lib/dma-virt.c                    12
-rw-r--r--  lib/flex_proportions.c             6
-rw-r--r--  lib/iov_iter.c                    22
-rw-r--r--  lib/kobject_uevent.c             167
-rw-r--r--  lib/libcrc32c.c                    6
-rw-r--r--  lib/locking-selftest-rtmutex.h    11
-rw-r--r--  lib/locking-selftest.c           133
-rw-r--r--  lib/nlattr.c                      11
-rw-r--r--  lib/nmi_backtrace.c                3
-rw-r--r--  lib/percpu_counter.c               4
-rw-r--r--  lib/refcount.c                     3
-rw-r--r--  lib/scatterlist.c                 35
-rw-r--r--  lib/smp_processor_id.c             2
-rw-r--r--  lib/strnlen_user.c                34
-rw-r--r--  lib/test_bpf.c                    97
-rw-r--r--  lib/test_uuid.c                   36
-rw-r--r--  lib/uuid.c                        33
-rw-r--r--  lib/vsprintf.c                     4
26 files changed, 584 insertions, 346 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 0c8b78a9ae2e..6762529ad9e4 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -158,6 +158,14 @@ config CRC32_BIT
 
 endchoice
 
+config CRC4
+	tristate "CRC4 functions"
+	help
+	  This option is provided for the case where no in-kernel-tree
+	  modules require CRC4 functions, but a module built outside
+	  the kernel tree does. Such modules that use library CRC4
+	  functions require M here.
+
 config CRC7
 	tristate "CRC7 functions"
 	help
@@ -548,6 +556,9 @@ config ARCH_HAS_SG_CHAIN
 config ARCH_HAS_PMEM_API
 	bool
 
+config ARCH_HAS_UACCESS_FLUSHCACHE
+	bool
+
 config ARCH_HAS_MMIO_FLUSH
 	bool
 
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e4587ebe52c7..ca9460f049b8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -286,7 +286,7 @@ config DEBUG_FS
 	  write to these files.
 
 	  For detailed documentation on the debugfs API, see
-	  Documentation/DocBook/filesystems.
+	  Documentation/filesystems/.
 
 	  If unsure, say N.
 
@@ -1052,6 +1052,7 @@ config DEBUG_LOCK_ALLOC
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
+	select DEBUG_RT_MUTEXES if RT_MUTEXES
 	select LOCKDEP
 	help
 	  This feature will check whether any held lock (spinlock, rwlock,
@@ -1067,6 +1068,7 @@ config PROVE_LOCKING
 	select LOCKDEP
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
+	select DEBUG_RT_MUTEXES if RT_MUTEXES
 	select DEBUG_LOCK_ALLOC
 	select TRACE_IRQFLAGS
 	default n
@@ -1121,6 +1123,7 @@ config LOCK_STAT
 	select LOCKDEP
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
+	select DEBUG_RT_MUTEXES if RT_MUTEXES
 	select DEBUG_LOCK_ALLOC
 	default n
 	help
@@ -1301,189 +1304,7 @@ config DEBUG_CREDENTIALS
 
 	  If unsure, say N.
 
-menu "RCU Debugging"
-
-config PROVE_RCU
-	def_bool PROVE_LOCKING
-
-config PROVE_RCU_REPEATEDLY
-	bool "RCU debugging: don't disable PROVE_RCU on first splat"
-	depends on PROVE_RCU
-	default n
-	help
-	  By itself, PROVE_RCU will disable checking upon issuing the
-	  first warning (or "splat").  This feature prevents such
-	  disabling, allowing multiple RCU-lockdep warnings to be printed
-	  on a single reboot.
-
-	  Say Y to allow multiple RCU-lockdep warnings per boot.
-
-	  Say N if you are unsure.
-
-config SPARSE_RCU_POINTER
-	bool "RCU debugging: sparse-based checks for pointer usage"
-	default n
-	help
-	  This feature enables the __rcu sparse annotation for
-	  RCU-protected pointers.  This annotation will cause sparse
-	  to flag any non-RCU used of annotated pointers.  This can be
-	  helpful when debugging RCU usage.  Please note that this feature
-	  is not intended to enforce code cleanliness; it is instead merely
-	  a debugging aid.
-
-	  Say Y to make sparse flag questionable use of RCU-protected pointers
-
-	  Say N if you are unsure.
-
-config TORTURE_TEST
-	tristate
-	default n
-
-config RCU_PERF_TEST
-	tristate "performance tests for RCU"
-	depends on DEBUG_KERNEL
-	select TORTURE_TEST
-	select SRCU
-	select TASKS_RCU
-	default n
-	help
-	  This option provides a kernel module that runs performance
-	  tests on the RCU infrastructure.  The kernel module may be built
-	  after the fact on the running kernel to be tested, if desired.
-
-	  Say Y here if you want RCU performance tests to be built into
-	  the kernel.
-	  Say M if you want the RCU performance tests to build as a module.
-	  Say N if you are unsure.
-
-config RCU_TORTURE_TEST
-	tristate "torture tests for RCU"
-	depends on DEBUG_KERNEL
-	select TORTURE_TEST
-	select SRCU
-	select TASKS_RCU
-	default n
-	help
-	  This option provides a kernel module that runs torture tests
-	  on the RCU infrastructure.  The kernel module may be built
-	  after the fact on the running kernel to be tested, if desired.
-
-	  Say Y here if you want RCU torture tests to be built into
-	  the kernel.
-	  Say M if you want the RCU torture tests to build as a module.
-	  Say N if you are unsure.
-
-config RCU_TORTURE_TEST_SLOW_PREINIT
-	bool "Slow down RCU grace-period pre-initialization to expose races"
-	depends on RCU_TORTURE_TEST
-	help
-	  This option delays grace-period pre-initialization (the
-	  propagation of CPU-hotplug changes up the rcu_node combining
-	  tree) for a few jiffies between initializing each pair of
-	  consecutive rcu_node structures.  This helps to expose races
-	  involving grace-period pre-initialization, in other words, it
-	  makes your kernel less stable.  It can also greatly increase
-	  grace-period latency, especially on systems with large numbers
-	  of CPUs.  This is useful when torture-testing RCU, but in
-	  almost no other circumstance.
-
-	  Say Y here if you want your system to crash and hang more often.
-	  Say N if you want a sane system.
-
-config RCU_TORTURE_TEST_SLOW_PREINIT_DELAY
-	int "How much to slow down RCU grace-period pre-initialization"
-	range 0 5
-	default 3
-	depends on RCU_TORTURE_TEST_SLOW_PREINIT
-	help
-	  This option specifies the number of jiffies to wait between
-	  each rcu_node structure pre-initialization step.
-
-config RCU_TORTURE_TEST_SLOW_INIT
-	bool "Slow down RCU grace-period initialization to expose races"
-	depends on RCU_TORTURE_TEST
-	help
-	  This option delays grace-period initialization for a few
-	  jiffies between initializing each pair of consecutive
-	  rcu_node structures.  This helps to expose races involving
-	  grace-period initialization, in other words, it makes your
-	  kernel less stable.  It can also greatly increase grace-period
-	  latency, especially on systems with large numbers of CPUs.
-	  This is useful when torture-testing RCU, but in almost no
-	  other circumstance.
-
-	  Say Y here if you want your system to crash and hang more often.
-	  Say N if you want a sane system.
-
-config RCU_TORTURE_TEST_SLOW_INIT_DELAY
-	int "How much to slow down RCU grace-period initialization"
-	range 0 5
-	default 3
-	depends on RCU_TORTURE_TEST_SLOW_INIT
-	help
-	  This option specifies the number of jiffies to wait between
-	  each rcu_node structure initialization.
-
-config RCU_TORTURE_TEST_SLOW_CLEANUP
-	bool "Slow down RCU grace-period cleanup to expose races"
-	depends on RCU_TORTURE_TEST
-	help
-	  This option delays grace-period cleanup for a few jiffies
-	  between cleaning up each pair of consecutive rcu_node
-	  structures.  This helps to expose races involving grace-period
-	  cleanup, in other words, it makes your kernel less stable.
-	  It can also greatly increase grace-period latency, especially
-	  on systems with large numbers of CPUs.  This is useful when
-	  torture-testing RCU, but in almost no other circumstance.
-
-	  Say Y here if you want your system to crash and hang more often.
-	  Say N if you want a sane system.
-
-config RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY
-	int "How much to slow down RCU grace-period cleanup"
-	range 0 5
-	default 3
-	depends on RCU_TORTURE_TEST_SLOW_CLEANUP
-	help
-	  This option specifies the number of jiffies to wait between
-	  each rcu_node structure cleanup operation.
-
-config RCU_CPU_STALL_TIMEOUT
-	int "RCU CPU stall timeout in seconds"
-	depends on RCU_STALL_COMMON
-	range 3 300
-	default 21
-	help
-	  If a given RCU grace period extends more than the specified
-	  number of seconds, a CPU stall warning is printed.  If the
-	  RCU grace period persists, additional CPU stall warnings are
-	  printed at more widely spaced intervals.
-
-config RCU_TRACE
-	bool "Enable tracing for RCU"
-	depends on DEBUG_KERNEL
-	default y if TREE_RCU
-	select TRACE_CLOCK
-	help
-	  This option provides tracing in RCU which presents stats
-	  in debugfs for debugging RCU implementation.  It also enables
-	  additional tracepoints for ftrace-style event tracing.
-
-	  Say Y here if you want to enable RCU tracing
-	  Say N if you are unsure.
-
-config RCU_EQS_DEBUG
-	bool "Provide debugging asserts for adding NO_HZ support to an arch"
-	depends on DEBUG_KERNEL
-	help
-	  This option provides consistency checks in RCU's handling of
-	  NO_HZ.  These checks have proven quite helpful in detecting
-	  bugs in arch-specific NO_HZ code.
-
-	  Say N here if you need ultimate kernel/user switch latencies
-	  Say Y if you are unsure
-
-endmenu # "RCU Debugging"
+source "kernel/rcu/Kconfig.debug"
 
 config DEBUG_WQ_FORCE_RR_CPU
 	bool "Force round-robin CPU selection for unbound work items"
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 533f912638ed..ab4ff0eea776 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -13,7 +13,7 @@ menuconfig KGDB
 	  CONFIG_FRAME_POINTER to aid in producing more reliable stack
 	  backtraces in the external debugger.  Documentation of
 	  kernel debugger is available at http://kgdb.sourceforge.net
-	  as well as in DocBook form in Documentation/DocBook/.  If
+	  as well as in Documentation/dev-tools/kgdb.rst.  If
 	  unsure, say N.
 
 if KGDB
diff --git a/lib/Makefile b/lib/Makefile
index 0166fbc0fa81..7fb6ab799b8e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -25,9 +25,6 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 earlycpio.o seq_buf.o siphash.o \
 	 nmi_backtrace.o nodemask.o win_minmax.o
 
-CFLAGS_radix-tree.o += -DCONFIG_SPARSE_RCU_POINTER
-CFLAGS_idr.o += -DCONFIG_SPARSE_RCU_POINTER
-
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 lib-$(CONFIG_DMA_NOOP_OPS) += dma-noop.o
@@ -99,6 +96,7 @@ obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
 obj-$(CONFIG_CRC_ITU_T)	+= crc-itu-t.o
 obj-$(CONFIG_CRC32)	+= crc32.o
 obj-$(CONFIG_CRC32_SELFTEST)	+= crc32test.o
+obj-$(CONFIG_CRC4)	+= crc4.o
 obj-$(CONFIG_CRC7)	+= crc7.o
 obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
 obj-$(CONFIG_CRC8)	+= crc8.o
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 3c6432df7e63..4c0888c4a68d 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -23,14 +23,14 @@
  *	the values[M, M+1, ..., N] into the ints array in get_options.
  */
 
-static int get_range(char **str, int *pint)
+static int get_range(char **str, int *pint, int n)
 {
 	int x, inc_counter, upper_range;
 
 	(*str)++;
 	upper_range = simple_strtol((*str), NULL, 0);
 	inc_counter = upper_range - *pint;
-	for (x = *pint; x < upper_range; x++)
+	for (x = *pint; n && x < upper_range; x++, n--)
 		*pint++ = x;
 	return inc_counter;
 }
@@ -97,7 +97,7 @@ char *get_options(const char *str, int nints, int *ints)
 			break;
 		if (res == 3) {
 			int range_nums;
-			range_nums = get_range((char **)&str, ints + i);
+			range_nums = get_range((char **)&str, ints + i, nints - i);
 			if (range_nums < 0)
 				break;
 			/*
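A quick illustration of the get_range() bounds fix above — a minimal, hypothetical get_options() caller (the parameter name, array size, and option string are invented). With the new n argument, a range such as "1-100" is truncated to the array's remaining capacity instead of overrunning it:

	/* Hypothetical boot-parameter parser, e.g. "irq_list=1-3,8". */
	static int irqs[4];	/* slot 0 receives the number of values parsed */

	static int __init irq_list_setup(char *str)
	{
		get_options(str, ARRAY_SIZE(irqs), irqs);	/* stores at most 3 values */
		return 1;
	}
	__setup("irq_list=", irq_list_setup);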
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 81dedaab36cc..4731a0895760 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -43,6 +43,38 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
 }
 EXPORT_SYMBOL(cpumask_any_but);
 
+/**
+ * cpumask_next_wrap - helper to implement for_each_cpu_wrap
+ * @n: the cpu prior to the place to search
+ * @mask: the cpumask pointer
+ * @start: the start point of the iteration
+ * @wrap: assume @n crossing @start terminates the iteration
+ *
+ * Returns >= nr_cpu_ids on completion
+ *
+ * Note: the @wrap argument is required for the start condition when
+ * we cannot assume @start is set in @mask.
+ */
+int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+{
+	int next;
+
+again:
+	next = cpumask_next(n, mask);
+
+	if (wrap && n < start && next >= start) {
+		return nr_cpumask_bits;
+
+	} else if (next >= nr_cpumask_bits) {
+		wrap = true;
+		n = -1;
+		goto again;
+	}
+
+	return next;
+}
+EXPORT_SYMBOL(cpumask_next_wrap);
+
 /* These are not inline because of header tangles. */
 #ifdef CONFIG_CPUMASK_OFFSTACK
 /**
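For reference, the helper above is what backs the for_each_cpu_wrap() iterator; a minimal sketch of its use (the starting CPU is an arbitrary choice):

	/* Visit every CPU in the mask exactly once, beginning at @start
	 * and wrapping around, e.g. to spread work away from the local CPU. */
	int cpu, start = raw_smp_processor_id();

	for_each_cpu_wrap(cpu, cpu_online_mask, start)
		pr_info("considering cpu %d\n", cpu);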
diff --git a/lib/crc4.c b/lib/crc4.c
new file mode 100644
index 000000000000..cf6db46661be
--- /dev/null
+++ b/lib/crc4.c
@@ -0,0 +1,46 @@
+/*
+ * crc4.c - simple crc-4 calculations.
+ *
+ * This source code is licensed under the GNU General Public License, Version
+ * 2. See the file COPYING for more details.
+ */
+
+#include <linux/crc4.h>
+#include <linux/module.h>
+
+static const uint8_t crc4_tab[] = {
+	0x0, 0x7, 0xe, 0x9, 0xb, 0xc, 0x5, 0x2,
+	0x1, 0x6, 0xf, 0x8, 0xa, 0xd, 0x4, 0x3,
+};
+
+/**
+ * crc4 - calculate the 4-bit crc of a value.
+ * @crc: starting crc4
+ * @x: value to checksum
+ * @bits: number of bits in @x to checksum
+ *
+ * Returns the crc4 value of @x, using polynomial 0b10111.
+ *
+ * The @x value is treated as left-aligned, and bits above @bits are ignored
+ * in the crc calculations.
+ */
+uint8_t crc4(uint8_t c, uint64_t x, int bits)
+{
+	int i;
+
+	/* mask off anything above the top bit */
+	x &= (1ull << bits) - 1;
+
+	/* Align to 4-bits */
+	bits = (bits + 3) & ~0x3;
+
+	/* Calculate crc4 over four-bit nibbles, starting at the MSbit */
+	for (i = bits - 4; i >= 0; i -= 4)
+		c = crc4_tab[c ^ ((x >> i) & 0xf)];
+
+	return c;
+}
+EXPORT_SYMBOL_GPL(crc4);
+
+MODULE_DESCRIPTION("CRC4 calculations");
+MODULE_LICENSE("GPL");
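A minimal sketch of calling the new helper (the value and width are invented; 0 is the conventional starting crc):

	#include <linux/crc4.h>

	/* Checksum the low 19 bits of an arbitrary value. */
	uint8_t c = crc4(0, 0x5a5a5, 19);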
diff --git a/lib/dma-noop.c b/lib/dma-noop.c
index de26c8b68f34..acc4190e2731 100644
--- a/lib/dma-noop.c
+++ b/lib/dma-noop.c
@@ -7,6 +7,7 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/scatterlist.h>
+#include <linux/pfn.h>
 
 static void *dma_noop_alloc(struct device *dev, size_t size,
 			    dma_addr_t *dma_handle, gfp_t gfp,
@@ -16,7 +17,8 @@ static void *dma_noop_alloc(struct device *dev, size_t size,
 
 	ret = (void *)__get_free_pages(gfp, get_order(size));
 	if (ret)
-		*dma_handle = virt_to_phys(ret);
+		*dma_handle = virt_to_phys(ret) - PFN_PHYS(dev->dma_pfn_offset);
+
 	return ret;
 }
 
@@ -32,7 +34,7 @@ static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page,
 				      enum dma_data_direction dir,
 				      unsigned long attrs)
 {
-	return page_to_phys(page) + offset;
+	return page_to_phys(page) + offset - PFN_PHYS(dev->dma_pfn_offset);
 }
 
 static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
@@ -43,34 +45,23 @@ static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 	struct scatterlist *sg;
 
 	for_each_sg(sgl, sg, nents, i) {
+		dma_addr_t offset = PFN_PHYS(dev->dma_pfn_offset);
 		void *va;
 
 		BUG_ON(!sg_page(sg));
 		va = sg_virt(sg);
-		sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va);
+		sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va) - offset;
 		sg_dma_len(sg) = sg->length;
 	}
 
 	return nents;
 }
 
-static int dma_noop_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return 0;
-}
-
-static int dma_noop_supported(struct device *dev, u64 mask)
-{
-	return 1;
-}
-
 const struct dma_map_ops dma_noop_ops = {
 	.alloc			= dma_noop_alloc,
 	.free			= dma_noop_free,
 	.map_page		= dma_noop_map_page,
 	.map_sg			= dma_noop_map_sg,
-	.mapping_error		= dma_noop_mapping_error,
-	.dma_supported		= dma_noop_supported,
 };
 
 EXPORT_SYMBOL(dma_noop_ops);
diff --git a/lib/dma-virt.c b/lib/dma-virt.c
index dcd4df1f7174..5c4f11329721 100644
--- a/lib/dma-virt.c
+++ b/lib/dma-virt.c
@@ -51,22 +51,10 @@ static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
 	return nents;
 }
 
-static int dma_virt_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return false;
-}
-
-static int dma_virt_supported(struct device *dev, u64 mask)
-{
-	return true;
-}
-
 const struct dma_map_ops dma_virt_ops = {
 	.alloc			= dma_virt_alloc,
 	.free			= dma_virt_free,
 	.map_page		= dma_virt_map_page,
 	.map_sg			= dma_virt_map_sg,
-	.mapping_error		= dma_virt_mapping_error,
-	.dma_supported		= dma_virt_supported,
 };
 EXPORT_SYMBOL(dma_virt_ops);
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index a71cf1bdd4c9..2cc1f94e03a1 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -207,7 +207,7 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
 		if (val < (nr_cpu_ids * PROP_BATCH))
 			val = percpu_counter_sum(&pl->events);
 
-		__percpu_counter_add(&pl->events,
+		percpu_counter_add_batch(&pl->events,
 			-val + (val >> (period-pl->period)), PROP_BATCH);
 	} else
 		percpu_counter_set(&pl->events, 0);
@@ -219,7 +219,7 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
 void __fprop_inc_percpu(struct fprop_global *p,
 		struct fprop_local_percpu *pl)
 {
 	fprop_reflect_period_percpu(p, pl);
-	__percpu_counter_add(&pl->events, 1, PROP_BATCH);
+	percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
 	percpu_counter_add(&p->events, 1);
 }
 
@@ -267,6 +267,6 @@ void __fprop_inc_percpu_max(struct fprop_global *p,
 			return;
 	} else
 		fprop_reflect_period_percpu(p, pl);
-	__percpu_counter_add(&pl->events, 1, PROP_BATCH);
+	percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
 	percpu_counter_add(&p->events, 1);
 }
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f835964c9485..c9a69064462f 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -615,6 +615,28 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 }
 EXPORT_SYMBOL(copy_from_iter_nocache);
 
+#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
+{
+	char *to = addr;
+	if (unlikely(i->type & ITER_PIPE)) {
+		WARN_ON(1);
+		return 0;
+	}
+	iterate_and_advance(i, bytes, v,
+		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
+					 v.iov_base, v.iov_len),
+		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
+				 v.bv_offset, v.bv_len),
+		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
+			v.iov_len)
+	)
+
+	return bytes;
+}
+EXPORT_SYMBOL_GPL(copy_from_iter_flushcache);
+#endif
+
 bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
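A sketch of the intended use of copy_from_iter_flushcache(), assuming a persistent-memory style write path (the wrapper and its names are invented): the copy itself pushes the written bytes toward the persistence domain, so no separate cache flush is needed afterwards.

	#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
	/* Hypothetical: copy incoming data into a pmem mapping durably. */
	static size_t pmem_copy_from_iter(void *pmem_addr, size_t bytes,
					  struct iov_iter *i)
	{
		return copy_from_iter_flushcache(pmem_addr, bytes, i);
	}
	#endif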
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 9a2b811966eb..719c155fce20 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -23,6 +23,8 @@
 #include <linux/socket.h>
 #include <linux/skbuff.h>
 #include <linux/netlink.h>
+#include <linux/uuid.h>
+#include <linux/ctype.h>
 #include <net/sock.h>
 #include <net/net_namespace.h>
 
@@ -52,19 +54,13 @@ static const char *kobject_actions[] = {
 	[KOBJ_OFFLINE] =	"offline",
 };
 
-/**
- * kobject_action_type - translate action string to numeric type
- *
- * @buf: buffer containing the action string, newline is ignored
- * @count: length of buffer
- * @type: pointer to the location to store the action type
- *
- * Returns 0 if the action string was recognized.
- */
-int kobject_action_type(const char *buf, size_t count,
-			enum kobject_action *type)
+static int kobject_action_type(const char *buf, size_t count,
+			       enum kobject_action *type,
+			       const char **args)
 {
 	enum kobject_action action;
+	size_t count_first;
+	const char *args_start;
 	int ret = -EINVAL;
 
 	if (count && (buf[count-1] == '\n' || buf[count-1] == '\0'))
@@ -73,11 +69,20 @@ int kobject_action_type(const char *buf, size_t count,
 	if (!count)
 		goto out;
 
+	args_start = strnchr(buf, count, ' ');
+	if (args_start) {
+		count_first = args_start - buf;
+		args_start = args_start + 1;
+	} else
+		count_first = count;
+
 	for (action = 0; action < ARRAY_SIZE(kobject_actions); action++) {
-		if (strncmp(kobject_actions[action], buf, count) != 0)
+		if (strncmp(kobject_actions[action], buf, count_first) != 0)
 			continue;
-		if (kobject_actions[action][count] != '\0')
+		if (kobject_actions[action][count_first] != '\0')
 			continue;
+		if (args)
+			*args = args_start;
 		*type = action;
 		ret = 0;
 		break;
@@ -86,6 +91,142 @@ out:
 	return ret;
 }
 
+static const char *action_arg_word_end(const char *buf, const char *buf_end,
+				       char delim)
+{
+	const char *next = buf;
+
+	while (next <= buf_end && *next != delim)
+		if (!isalnum(*next++))
+			return NULL;
+
+	if (next == buf)
+		return NULL;
+
+	return next;
+}
+
+static int kobject_action_args(const char *buf, size_t count,
+			       struct kobj_uevent_env **ret_env)
+{
+	struct kobj_uevent_env *env = NULL;
+	const char *next, *buf_end, *key;
+	int key_len;
+	int r = -EINVAL;
+
+	if (count && (buf[count - 1] == '\n' || buf[count - 1] == '\0'))
+		count--;
+
+	if (!count)
+		return -EINVAL;
+
+	env = kzalloc(sizeof(*env), GFP_KERNEL);
+	if (!env)
+		return -ENOMEM;
+
+	/* first arg is UUID */
+	if (count < UUID_STRING_LEN || !uuid_is_valid(buf) ||
+	    add_uevent_var(env, "SYNTH_UUID=%.*s", UUID_STRING_LEN, buf))
+		goto out;
+
+	/*
+	 * the rest are custom environment variables in KEY=VALUE
+	 * format with ' ' delimiter between each KEY=VALUE pair
+	 */
+	next = buf + UUID_STRING_LEN;
+	buf_end = buf + count - 1;
+
+	while (next <= buf_end) {
+		if (*next != ' ')
+			goto out;
+
+		/* skip the ' ', key must follow */
+		key = ++next;
+		if (key > buf_end)
+			goto out;
+
+		buf = next;
+		next = action_arg_word_end(buf, buf_end, '=');
+		if (!next || next > buf_end || *next != '=')
+			goto out;
+		key_len = next - buf;
+
+		/* skip the '=', value must follow */
+		if (++next > buf_end)
+			goto out;
+
+		buf = next;
+		next = action_arg_word_end(buf, buf_end, ' ');
+		if (!next)
+			goto out;
+
+		if (add_uevent_var(env, "SYNTH_ARG_%.*s=%.*s",
+				   key_len, key, (int) (next - buf), buf))
+			goto out;
+	}
+
+	r = 0;
+out:
+	if (r)
+		kfree(env);
+	else
+		*ret_env = env;
+	return r;
+}
+
+/**
+ * kobject_synth_uevent - send synthetic uevent with arguments
+ *
+ * @kobj: struct kobject for which synthetic uevent is to be generated
+ * @buf: buffer containing action type and action args, newline is ignored
+ * @count: length of buffer
+ *
+ * Returns 0 if kobject_synthetic_uevent() is completed with success or the
+ * corresponding error when it fails.
+ */
+int kobject_synth_uevent(struct kobject *kobj, const char *buf, size_t count)
+{
+	char *no_uuid_envp[] = { "SYNTH_UUID=0", NULL };
+	enum kobject_action action;
+	const char *action_args;
+	struct kobj_uevent_env *env;
+	const char *msg = NULL, *devpath;
+	int r;
+
+	r = kobject_action_type(buf, count, &action, &action_args);
+	if (r) {
+		msg = "unknown uevent action string\n";
+		goto out;
+	}
+
+	if (!action_args) {
+		r = kobject_uevent_env(kobj, action, no_uuid_envp);
+		goto out;
+	}
+
+	r = kobject_action_args(action_args,
+				count - (action_args - buf), &env);
+	if (r == -EINVAL) {
+		msg = "incorrect uevent action arguments\n";
+		goto out;
+	}
+
+	if (r)
+		goto out;
+
+	r = kobject_uevent_env(kobj, action, env->envp);
+	kfree(env);
+out:
+	if (r) {
+		devpath = kobject_get_path(kobj, GFP_KERNEL);
+		printk(KERN_WARNING "synth uevent: %s: %s",
+		       devpath ?: "unknown device",
+		       msg ?: "failed to send uevent");
+		kfree(devpath);
+	}
+	return r;
+}
+
 #ifdef CONFIG_NET
 static int kobj_bcast_filter(struct sock *dsk, struct sk_buff *skb, void *data)
 {
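The parser above accepts writes of the form "<action> <uuid> [KEY=value]..." to a kobject's uevent attribute. A hypothetical userspace trigger (the sysfs path is invented; the UUID string is borrowed from the test data later in this diff) — the resulting event would carry SYNTH_UUID plus a SYNTH_ARG_SUBSYS=test variable; note keys and values must be alphanumeric per action_arg_word_end():

	int fd = open("/sys/class/block/sda/uevent", O_WRONLY);

	if (fd >= 0) {
		const char buf[] =
			"add c33f4995-3701-450e-9fbf-206a2e98e576 SUBSYS=test";
		write(fd, buf, sizeof(buf) - 1);
		close(fd);
	}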
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 74a54b7f2562..9f79547d1b97 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -43,7 +43,7 @@ static struct crypto_shash *tfm;
 u32 crc32c(u32 crc, const void *address, unsigned int length)
 {
 	SHASH_DESC_ON_STACK(shash, tfm);
-	u32 *ctx = (u32 *)shash_desc_ctx(shash);
+	u32 ret, *ctx = (u32 *)shash_desc_ctx(shash);
 	int err;
 
 	shash->tfm = tfm;
@@ -53,7 +53,9 @@ u32 crc32c(u32 crc, const void *address, unsigned int length)
 	err = crypto_shash_update(shash, address, length);
 	BUG_ON(err);
 
-	return *ctx;
+	ret = *ctx;
+	barrier_data(ctx);
+	return ret;
 }
 
 EXPORT_SYMBOL(crc32c);
diff --git a/lib/locking-selftest-rtmutex.h b/lib/locking-selftest-rtmutex.h
new file mode 100644
index 000000000000..e3cb83989d16
--- /dev/null
+++ b/lib/locking-selftest-rtmutex.h
@@ -0,0 +1,11 @@
+#undef LOCK
+#define LOCK		RTL
+
+#undef UNLOCK
+#define UNLOCK		RTU
+
+#undef RLOCK
+#undef WLOCK
+
+#undef INIT
+#define INIT		RTI
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index f3a217ea0388..6f2b135dc5e8 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -21,6 +21,7 @@
 #include <linux/interrupt.h>
 #include <linux/debug_locks.h>
 #include <linux/irqflags.h>
+#include <linux/rtmutex.h>
 
 /*
  * Change this to 1 if you want to see the failure printouts:
@@ -46,6 +47,7 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
 #define LOCKTYPE_MUTEX	0x4
 #define LOCKTYPE_RWSEM	0x8
 #define LOCKTYPE_WW	0x10
+#define LOCKTYPE_RTMUTEX 0x20
 
 static struct ww_acquire_ctx t, t2;
 static struct ww_mutex o, o2, o3;
@@ -74,6 +76,15 @@ static DECLARE_RWSEM(rwsem_B);
 static DECLARE_RWSEM(rwsem_C);
 static DECLARE_RWSEM(rwsem_D);
 
+#ifdef CONFIG_RT_MUTEXES
+
+static DEFINE_RT_MUTEX(rtmutex_A);
+static DEFINE_RT_MUTEX(rtmutex_B);
+static DEFINE_RT_MUTEX(rtmutex_C);
+static DEFINE_RT_MUTEX(rtmutex_D);
+
+#endif
+
 /*
  * Locks that we initialize dynamically as well so that
  * e.g. X1 and X2 becomes two instances of the same class,
@@ -108,6 +119,17 @@ static DECLARE_RWSEM(rwsem_Y2);
 static DECLARE_RWSEM(rwsem_Z1);
 static DECLARE_RWSEM(rwsem_Z2);
 
+#ifdef CONFIG_RT_MUTEXES
+
+static DEFINE_RT_MUTEX(rtmutex_X1);
+static DEFINE_RT_MUTEX(rtmutex_X2);
+static DEFINE_RT_MUTEX(rtmutex_Y1);
+static DEFINE_RT_MUTEX(rtmutex_Y2);
+static DEFINE_RT_MUTEX(rtmutex_Z1);
+static DEFINE_RT_MUTEX(rtmutex_Z2);
+
+#endif
+
 /*
  * non-inlined runtime initializers, to let separate locks share
  * the same lock-class:
@@ -129,6 +151,17 @@ INIT_CLASS_FUNC(Z)
 
 static void init_shared_classes(void)
 {
+#ifdef CONFIG_RT_MUTEXES
+	static struct lock_class_key rt_X, rt_Y, rt_Z;
+
+	__rt_mutex_init(&rtmutex_X1, __func__, &rt_X);
+	__rt_mutex_init(&rtmutex_X2, __func__, &rt_X);
+	__rt_mutex_init(&rtmutex_Y1, __func__, &rt_Y);
+	__rt_mutex_init(&rtmutex_Y2, __func__, &rt_Y);
+	__rt_mutex_init(&rtmutex_Z1, __func__, &rt_Z);
+	__rt_mutex_init(&rtmutex_Z2, __func__, &rt_Z);
+#endif
+
 	init_class_X(&lock_X1, &rwlock_X1, &mutex_X1, &rwsem_X1);
 	init_class_X(&lock_X2, &rwlock_X2, &mutex_X2, &rwsem_X2);
 
@@ -193,6 +226,10 @@ static void init_shared_classes(void)
 #define MU(x)			mutex_unlock(&mutex_##x)
 #define MI(x)			mutex_init(&mutex_##x)
 
+#define RTL(x)			rt_mutex_lock(&rtmutex_##x)
+#define RTU(x)			rt_mutex_unlock(&rtmutex_##x)
+#define RTI(x)			rt_mutex_init(&rtmutex_##x)
+
 #define WSL(x)			down_write(&rwsem_##x)
 #define WSU(x)			up_write(&rwsem_##x)
 
@@ -264,6 +301,11 @@ GENERATE_TESTCASE(AA_wsem)
 #include "locking-selftest-rsem.h"
 GENERATE_TESTCASE(AA_rsem)
 
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(AA_rtmutex);
+#endif
+
 #undef E
 
 /*
@@ -345,6 +387,11 @@ GENERATE_TESTCASE(ABBA_wsem)
 #include "locking-selftest-rsem.h"
 GENERATE_TESTCASE(ABBA_rsem)
 
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABBA_rtmutex);
+#endif
+
 #undef E
 
 /*
@@ -373,6 +420,11 @@ GENERATE_TESTCASE(ABBCCA_wsem)
 #include "locking-selftest-rsem.h"
 GENERATE_TESTCASE(ABBCCA_rsem)
 
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABBCCA_rtmutex);
+#endif
+
 #undef E
 
 /*
@@ -401,6 +453,11 @@ GENERATE_TESTCASE(ABCABC_wsem)
 #include "locking-selftest-rsem.h"
 GENERATE_TESTCASE(ABCABC_rsem)
 
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABCABC_rtmutex);
+#endif
+
 #undef E
 
 /*
@@ -430,6 +487,11 @@ GENERATE_TESTCASE(ABBCCDDA_wsem)
 #include "locking-selftest-rsem.h"
 GENERATE_TESTCASE(ABBCCDDA_rsem)
 
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABBCCDDA_rtmutex);
+#endif
+
 #undef E
 
 /*
@@ -458,6 +520,11 @@ GENERATE_TESTCASE(ABCDBDDA_wsem)
 #include "locking-selftest-rsem.h"
 GENERATE_TESTCASE(ABCDBDDA_rsem)
 
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABCDBDDA_rtmutex);
+#endif
+
 #undef E
 
 /*
@@ -486,6 +553,11 @@ GENERATE_TESTCASE(ABCDBCDA_wsem)
 #include "locking-selftest-rsem.h"
 GENERATE_TESTCASE(ABCDBCDA_rsem)
 
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(ABCDBCDA_rtmutex);
+#endif
+
 #undef E
 
 /*
@@ -513,33 +585,10 @@ GENERATE_TESTCASE(double_unlock_wsem)
 #include "locking-selftest-rsem.h"
 GENERATE_TESTCASE(double_unlock_rsem)
 
-#undef E
-
-/*
- * Bad unlock ordering:
- */
-#define E()					\
-						\
-	LOCK(A);				\
-	LOCK(B);				\
-	UNLOCK(A); /* fail */			\
-	UNLOCK(B);
-
-/*
- * 6 testcases:
- */
-#include "locking-selftest-spin.h"
-GENERATE_TESTCASE(bad_unlock_order_spin)
-#include "locking-selftest-wlock.h"
-GENERATE_TESTCASE(bad_unlock_order_wlock)
-#include "locking-selftest-rlock.h"
-GENERATE_TESTCASE(bad_unlock_order_rlock)
-#include "locking-selftest-mutex.h"
-GENERATE_TESTCASE(bad_unlock_order_mutex)
-#include "locking-selftest-wsem.h"
-GENERATE_TESTCASE(bad_unlock_order_wsem)
-#include "locking-selftest-rsem.h"
-GENERATE_TESTCASE(bad_unlock_order_rsem)
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(double_unlock_rtmutex);
+#endif
 
 #undef E
 
@@ -567,6 +616,11 @@ GENERATE_TESTCASE(init_held_wsem)
 #include "locking-selftest-rsem.h"
 GENERATE_TESTCASE(init_held_rsem)
 
+#ifdef CONFIG_RT_MUTEXES
+#include "locking-selftest-rtmutex.h"
+GENERATE_TESTCASE(init_held_rtmutex);
+#endif
+
 #undef E
 
 /*
@@ -916,6 +970,9 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
 # define I_MUTEX(x)	lockdep_reset_lock(&mutex_##x.dep_map)
 # define I_RWSEM(x)	lockdep_reset_lock(&rwsem_##x.dep_map)
 # define I_WW(x)	lockdep_reset_lock(&x.dep_map)
+#ifdef CONFIG_RT_MUTEXES
+# define I_RTMUTEX(x)	lockdep_reset_lock(&rtmutex_##x.dep_map)
+#endif
 #else
 # define I_SPINLOCK(x)
 # define I_RWLOCK(x)
@@ -924,12 +981,23 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
 # define I_WW(x)
 #endif
 
+#ifndef I_RTMUTEX
+# define I_RTMUTEX(x)
+#endif
+
+#ifdef CONFIG_RT_MUTEXES
+#define I2_RTMUTEX(x)	rt_mutex_init(&rtmutex_##x)
+#else
+#define I2_RTMUTEX(x)
+#endif
+
 #define I1(x)					\
 	do {					\
 		I_SPINLOCK(x);			\
 		I_RWLOCK(x);			\
 		I_MUTEX(x);			\
 		I_RWSEM(x);			\
+		I_RTMUTEX(x);			\
 	} while (0)
 
 #define I2(x)					\
@@ -938,6 +1006,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft)
 		rwlock_init(&rwlock_##x);	\
 		mutex_init(&mutex_##x);		\
 		init_rwsem(&rwsem_##x);		\
+		I2_RTMUTEX(x);			\
 	} while (0)
 
 static void reset_locks(void)
@@ -1013,6 +1082,12 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
 	reset_locks();
 }
 
+#ifdef CONFIG_RT_MUTEXES
+#define dotest_rt(fn, e, m)	dotest((fn), (e), (m))
+#else
+#define dotest_rt(fn, e, m)
+#endif
+
 static inline void print_testname(const char *testname)
 {
 	printk("%33s:", testname);
@@ -1050,6 +1125,7 @@ static inline void print_testname(const char *testname)
 	dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX);		\
 	dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM);		\
 	dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM);		\
+	dotest_rt(name##_rtmutex, FAILURE, LOCKTYPE_RTMUTEX);	\
 	pr_cont("\n");
 
 #define DO_TESTCASE_6_SUCCESS(desc, name)			\
@@ -1060,6 +1136,7 @@ static inline void print_testname(const char *testname)
 	dotest(name##_mutex, SUCCESS, LOCKTYPE_MUTEX);		\
 	dotest(name##_wsem, SUCCESS, LOCKTYPE_RWSEM);		\
 	dotest(name##_rsem, SUCCESS, LOCKTYPE_RWSEM);		\
+	dotest_rt(name##_rtmutex, SUCCESS, LOCKTYPE_RTMUTEX);	\
 	pr_cont("\n");
 
 /*
@@ -1073,6 +1150,7 @@ static inline void print_testname(const char *testname)
 	dotest(name##_mutex, FAILURE, LOCKTYPE_MUTEX);		\
 	dotest(name##_wsem, FAILURE, LOCKTYPE_RWSEM);		\
 	dotest(name##_rsem, FAILURE, LOCKTYPE_RWSEM);		\
+	dotest_rt(name##_rtmutex, FAILURE, LOCKTYPE_RTMUTEX);	\
 	pr_cont("\n");
 
 #define DO_TESTCASE_2I(desc, name, nr)				\
@@ -1825,7 +1903,6 @@ void locking_selftest(void)
 	DO_TESTCASE_6R("A-B-C-D-B-C-D-A deadlock", ABCDBCDA);
 	DO_TESTCASE_6("double unlock", double_unlock);
 	DO_TESTCASE_6("initialize held", init_held);
-	DO_TESTCASE_6_SUCCESS("bad unlock order", bad_unlock_order);
 
 	printk("  --------------------------------------------------------------------------\n");
 	print_testname("recursive read-lock");
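For orientation, the RTL()/RTU()/RTI() macros used by the generated cases above expand to the plain rt_mutex API; a minimal usage sketch (the lock name is invented):

	static DEFINE_RT_MUTEX(example_lock);

	static void example(void)
	{
		rt_mutex_lock(&example_lock);
		/* critical section; waiters boost the owner's priority */
		rt_mutex_unlock(&example_lock);
	}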
diff --git a/lib/nlattr.c b/lib/nlattr.c
index a7e0b16078df..fb52435be42d 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -352,7 +352,7 @@ struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
 {
 	struct nlattr *nla;
 
-	nla = (struct nlattr *) skb_put(skb, nla_total_size(attrlen));
+	nla = skb_put(skb, nla_total_size(attrlen));
 	nla->nla_type = attrtype;
 	nla->nla_len = nla_attr_size(attrlen);
 
@@ -398,12 +398,7 @@ EXPORT_SYMBOL(__nla_reserve_64bit);
  */
 void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
 {
-	void *start;
-
-	start = skb_put(skb, NLA_ALIGN(attrlen));
-	memset(start, 0, NLA_ALIGN(attrlen));
-
-	return start;
+	return skb_put_zero(skb, NLA_ALIGN(attrlen));
 }
 EXPORT_SYMBOL(__nla_reserve_nohdr);
 
@@ -617,7 +612,7 @@ int nla_append(struct sk_buff *skb, int attrlen, const void *data)
 	if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
 		return -EMSGSIZE;
 
-	memcpy(skb_put(skb, attrlen), data, attrlen);
+	skb_put_data(skb, data, attrlen);
 	return 0;
 }
 EXPORT_SYMBOL(nla_append);
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 4e8a30d1c22f..0bc0a3535a8a 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -86,9 +86,11 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
 
 bool nmi_cpu_backtrace(struct pt_regs *regs)
 {
+	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 	int cpu = smp_processor_id();
 
 	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
+		arch_spin_lock(&lock);
 		if (regs && cpu_in_idle(instruction_pointer(regs))) {
 			pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n",
 				cpu, instruction_pointer(regs));
@@ -99,6 +101,7 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
 			else
 				dump_stack();
 		}
+		arch_spin_unlock(&lock);
 		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
 		return true;
 	}
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 9c21000df0b5..8ee7e5ec21be 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -72,7 +72,7 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 }
 EXPORT_SYMBOL(percpu_counter_set);
 
-void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
+void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
 	s64 count;
 
@@ -89,7 +89,7 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 	}
 	preempt_enable();
 }
-EXPORT_SYMBOL(__percpu_counter_add);
+EXPORT_SYMBOL(percpu_counter_add_batch);
 
 /*
  * Add up all the per-cpu counts, return the result.  This is a more accurate
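The rename is mechanical — callers change spelling only. A hypothetical before/after (the counter and batch size are invented):

	struct percpu_counter events;

	percpu_counter_init(&events, 0, GFP_KERNEL);
	/* was: __percpu_counter_add(&events, 1, 32); */
	percpu_counter_add_batch(&events, 1, 32);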
diff --git a/lib/refcount.c b/lib/refcount.c
index 9f906783987e..5d0582a9480c 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -37,6 +37,8 @@
 #include <linux/refcount.h>
 #include <linux/bug.h>
 
+#ifdef CONFIG_REFCOUNT_FULL
+
 /**
  * refcount_add_not_zero - add a value to a refcount unless it is 0
  * @i: the value to add to the refcount
@@ -225,6 +227,7 @@ void refcount_dec(refcount_t *r)
 	WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
 }
 EXPORT_SYMBOL(refcount_dec);
+#endif /* CONFIG_REFCOUNT_FULL */
 
 /**
  * refcount_dec_if_one - decrement a refcount if it is 1
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index c6cf82242d65..be7b4dd6b68d 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -751,3 +751,38 @@ size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
 	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
 }
 EXPORT_SYMBOL(sg_pcopy_to_buffer);
+
+/**
+ * sg_zero_buffer - Zero-out a part of a SG list
+ * @sgl:	 The SG list
+ * @nents:	 Number of SG entries
+ * @buflen:	 The number of bytes to zero out
+ * @skip:	 Number of bytes to skip before zeroing
+ *
+ * Returns the number of bytes zeroed.
+ **/
+size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
+		       size_t buflen, off_t skip)
+{
+	unsigned int offset = 0;
+	struct sg_mapping_iter miter;
+	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
+
+	sg_miter_start(&miter, sgl, nents, sg_flags);
+
+	if (!sg_miter_skip(&miter, skip))
+		return false;
+
+	while (offset < buflen && sg_miter_next(&miter)) {
+		unsigned int len;
+
+		len = min(miter.length, buflen - offset);
+		memset(miter.addr, 0, len);
+
+		offset += len;
+	}
+
+	sg_miter_stop(&miter);
+	return offset;
+}
+EXPORT_SYMBOL(sg_zero_buffer);
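A minimal, hypothetical use of the new sg_zero_buffer() (table setup elided): zero 512 bytes of a populated SG table, starting 64 bytes in.

	struct sg_table table;	/* assumed already allocated and populated */
	size_t zeroed;

	zeroed = sg_zero_buffer(table.sgl, table.nents, 512, 64);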
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 690d75b132fa..2fb007be0212 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -28,7 +28,7 @@ notrace static unsigned int check_preemption_disabled(const char *what1,
 	/*
 	 * It is valid to assume CPU-locality during early bootup:
 	 */
-	if (system_state != SYSTEM_RUNNING)
+	if (system_state < SYSTEM_SCHEDULING)
 		goto out;
 
 	/*
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 8e105ed4df12..a5f567747ced 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -121,37 +121,3 @@ long strnlen_user(const char __user *str, long count)
 	return 0;
 }
 EXPORT_SYMBOL(strnlen_user);
-
-/**
- * strlen_user: - Get the size of a user string INCLUDING final NUL.
- * @str: The string to measure.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
- */
-long strlen_user(const char __user *str)
-{
-	unsigned long max_addr, src_addr;
-
-	max_addr = user_addr_max();
-	src_addr = (unsigned long)str;
-	if (likely(src_addr < max_addr)) {
-		unsigned long max = max_addr - src_addr;
-		long retval;
-
-		user_access_begin();
-		retval = do_strnlen_user(str, ~0ul, max);
-		user_access_end();
-		return retval;
-	}
-	return 0;
-}
-EXPORT_SYMBOL(strlen_user);
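Former strlen_user() callers move to the bounded variant; passing ~0ul reproduces the removed function's "no explicit limit" behaviour (str is a stand-in user pointer):

	long len = strnlen_user(str, ~0ul);	/* includes the NUL; 0 on fault */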
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 889bc31785be..d9d5a410955c 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -84,6 +84,7 @@ struct bpf_test {
 	} test[MAX_SUBTESTS];
 	int (*fill_helper)(struct bpf_test *self);
 	__u8 frag_data[MAX_DATA];
+	int stack_depth; /* for eBPF only, since tests don't call verifier */
 };
 
 /* Large test cases need separate allocation and fill handler. */
@@ -434,6 +435,30 @@ loop:
 	return 0;
 }
 
+static int bpf_fill_jump_around_ld_abs(struct bpf_test *self)
+{
+	unsigned int len = BPF_MAXINSNS;
+	struct bpf_insn *insn;
+	int i = 0;
+
+	insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return -ENOMEM;
+
+	insn[i++] = BPF_MOV64_REG(R6, R1);
+	insn[i++] = BPF_LD_ABS(BPF_B, 0);
+	insn[i] = BPF_JMP_IMM(BPF_JEQ, R0, 10, len - i - 2);
+	i++;
+	while (i < len - 1)
+		insn[i++] = BPF_LD_ABS(BPF_B, 1);
+	insn[i] = BPF_EXIT_INSN();
+
+	self->u.ptr.insns = insn;
+	self->u.ptr.len = len;
+
+	return 0;
+}
+
 static int __bpf_fill_stxdw(struct bpf_test *self, int size)
 {
 	unsigned int len = BPF_MAXINSNS;
@@ -455,6 +480,7 @@ static int __bpf_fill_stxdw(struct bpf_test *self, int size)
 
 	self->u.ptr.insns = insn;
 	self->u.ptr.len = len;
+	self->stack_depth = 40;
 
 	return 0;
 }
@@ -2317,7 +2343,8 @@ static struct bpf_test tests[] = {
 		{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0x06, 0, 0,
 		  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 		  0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6},
-		{ { 38, 256 } }
+		{ { 38, 256 } },
+		.stack_depth = 64,
 	},
 	/* BPF_ALU | BPF_MOV | BPF_X */
 	{
@@ -4169,6 +4196,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xff } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_B: Store/Load byte: max positive",
@@ -4181,6 +4209,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x7f } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_MEM_B: Store/Load byte: max negative",
@@ -4194,6 +4223,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xff } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_H: Store/Load half word: max negative",
@@ -4206,6 +4236,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xffff } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_H: Store/Load half word: max positive",
@@ -4218,6 +4249,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x7fff } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_MEM_H: Store/Load half word: max negative",
@@ -4231,6 +4263,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xffff } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_W: Store/Load word: max negative",
@@ -4243,6 +4276,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xffffffff } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_W: Store/Load word: max positive",
@@ -4255,6 +4289,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x7fffffff } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_MEM_W: Store/Load word: max negative",
@@ -4268,6 +4303,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xffffffff } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_DW: Store/Load double word: max negative",
@@ -4280,6 +4316,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xffffffff } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_DW: Store/Load double word: max negative 2",
@@ -4297,6 +4334,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x1 } },
+		.stack_depth = 40,
 	},
 	{
 		"ST_MEM_DW: Store/Load double word: max positive",
@@ -4309,6 +4347,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x7fffffff } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_MEM_DW: Store/Load double word: max negative",
@@ -4322,6 +4361,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0xffffffff } },
+		.stack_depth = 40,
 	},
 	/* BPF_STX | BPF_XADD | BPF_W/DW */
 	{
@@ -4336,6 +4376,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x22 } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_XADD_W: Test side-effects, r10: 0x12 + 0x10 = 0x22",
@@ -4351,6 +4392,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0 } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_XADD_W: Test side-effects, r0: 0x12 + 0x10 = 0x22",
@@ -4363,6 +4405,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x12 } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_XADD_W: X + 1 + 1 + 1 + ...",
@@ -4384,6 +4427,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x22 } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_XADD_DW: Test side-effects, r10: 0x12 + 0x10 = 0x22",
@@ -4399,6 +4443,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0 } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_XADD_DW: Test side-effects, r0: 0x12 + 0x10 = 0x22",
@@ -4411,6 +4456,7 @@ static struct bpf_test tests[] = {
 		INTERNAL,
 		{ },
 		{ { 0, 0x12 } },
+		.stack_depth = 40,
 	},
 	{
 		"STX_XADD_DW: X + 1 + 1 + 1 + ...",
@@ -4504,6 +4550,44 @@ static struct bpf_test tests[] = {
 		{ },
 		{ { 0, 1 } },
 	},
+	{
+		"JMP_JSGE_K: Signed jump: value walk 1",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -3),
+			BPF_JMP_IMM(BPF_JSGE, R1, 0, 6),
+			BPF_ALU64_IMM(BPF_ADD, R1, 1),
+			BPF_JMP_IMM(BPF_JSGE, R1, 0, 4),
+			BPF_ALU64_IMM(BPF_ADD, R1, 1),
+			BPF_JMP_IMM(BPF_JSGE, R1, 0, 2),
+			BPF_ALU64_IMM(BPF_ADD, R1, 1),
+			BPF_JMP_IMM(BPF_JSGE, R1, 0, 1),
+			BPF_EXIT_INSN(),		/* bad exit */
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),	/* good exit */
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
+	{
+		"JMP_JSGE_K: Signed jump: value walk 2",
+		.u.insns_int = {
+			BPF_ALU32_IMM(BPF_MOV, R0, 0),
+			BPF_LD_IMM64(R1, -3),
+			BPF_JMP_IMM(BPF_JSGE, R1, 0, 4),
+			BPF_ALU64_IMM(BPF_ADD, R1, 2),
+			BPF_JMP_IMM(BPF_JSGE, R1, 0, 2),
+			BPF_ALU64_IMM(BPF_ADD, R1, 2),
+			BPF_JMP_IMM(BPF_JSGE, R1, 0, 1),
+			BPF_EXIT_INSN(),		/* bad exit */
+			BPF_ALU32_IMM(BPF_MOV, R0, 1),	/* good exit */
+			BPF_EXIT_INSN(),
+		},
+		INTERNAL,
+		{ },
+		{ { 0, 1 } },
+	},
 	/* BPF_JMP | BPF_JGT | BPF_K */
 	{
 		"JMP_JGT_K: if (3 > 2) return 1",
@@ -4984,6 +5068,14 @@ static struct bpf_test tests[] = {
 		{ { ETH_HLEN, 0xbef } },
 		.fill_helper = bpf_fill_ld_abs_vlan_push_pop,
 	},
+	{
+		"BPF_MAXINSNS: jump around ld_abs",
+		{ },
+		INTERNAL,
+		{ 10, 11 },
+		{ { 2, 10 } },
+		.fill_helper = bpf_fill_jump_around_ld_abs,
+	},
 	/*
 	 * LD_IND / LD_ABS on fragmented SKBs
 	 */
@@ -5625,7 +5717,7 @@ static struct sk_buff *populate_skb(char *buf, int size)
 	if (!skb)
 		return NULL;
 
-	memcpy(__skb_put(skb, size), buf, size);
+	__skb_put_data(skb, buf, size);
 
 	/* Initialize a fake skb with test pattern. */
 	skb_reset_mac_header(skb);
@@ -5771,6 +5863,7 @@ static struct bpf_prog *generate_filter(int which, int *err)
 		/* Type doesn't really matter here as long as it's not unspec. */
 		fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
 		memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
+		fp->aux->stack_depth = tests[which].stack_depth;
 
 		/* We cannot error here as we don't need type compatibility
 		 * checks.
diff --git a/lib/test_uuid.c b/lib/test_uuid.c
index 547d3127a3cf..478c049630b5 100644
--- a/lib/test_uuid.c
+++ b/lib/test_uuid.c
@@ -11,25 +11,25 @@
 
 struct test_uuid_data {
 	const char *uuid;
-	uuid_le le;
-	uuid_be be;
+	guid_t le;
+	uuid_t be;
 };
 
 static const struct test_uuid_data test_uuid_test_data[] = {
 	{
 		.uuid = "c33f4995-3701-450e-9fbf-206a2e98e576",
-		.le = UUID_LE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
-		.be = UUID_BE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+		.le = GUID_INIT(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+		.be = UUID_INIT(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
 	},
 	{
 		.uuid = "64b4371c-77c1-48f9-8221-29f054fc023b",
-		.le = UUID_LE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
-		.be = UUID_BE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+		.le = GUID_INIT(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+		.be = UUID_INIT(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
 	},
 	{
 		.uuid = "0cb4ddff-a545-4401-9d06-688af53e7f84",
-		.le = UUID_LE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
-		.be = UUID_BE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+		.le = GUID_INIT(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+		.be = UUID_INIT(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
 	},
 };
 
@@ -61,28 +61,28 @@ static void __init test_uuid_failed(const char *prefix, bool wrong, bool be,
 
 static void __init test_uuid_test(const struct test_uuid_data *data)
 {
-	uuid_le le;
-	uuid_be be;
+	guid_t le;
+	uuid_t be;
 	char buf[48];
 
 	/* LE */
 	total_tests++;
-	if (uuid_le_to_bin(data->uuid, &le))
+	if (guid_parse(data->uuid, &le))
 		test_uuid_failed("conversion", false, false, data->uuid, NULL);
 
 	total_tests++;
-	if (uuid_le_cmp(data->le, le)) {
+	if (!guid_equal(&data->le, &le)) {
 		sprintf(buf, "%pUl", &le);
 		test_uuid_failed("cmp", false, false, data->uuid, buf);
 	}
 
 	/* BE */
 	total_tests++;
-	if (uuid_be_to_bin(data->uuid, &be))
+	if (uuid_parse(data->uuid, &be))
 		test_uuid_failed("conversion", false, true, data->uuid, NULL);
 
 	total_tests++;
-	if (uuid_be_cmp(data->be, be)) {
+	if (uuid_equal(&data->be, &be)) {
 		sprintf(buf, "%pUb", &be);
 		test_uuid_failed("cmp", false, true, data->uuid, buf);
 	}
@@ -90,17 +90,17 @@ static void __init test_uuid_test(const struct test_uuid_data *data)
 
 static void __init test_uuid_wrong(const char *data)
 {
-	uuid_le le;
-	uuid_be be;
+	guid_t le;
+	uuid_t be;
 
 	/* LE */
 	total_tests++;
-	if (!uuid_le_to_bin(data, &le))
+	if (!guid_parse(data, &le))
 		test_uuid_failed("negative", true, false, data, NULL);
 
 	/* BE */
 	total_tests++;
-	if (!uuid_be_to_bin(data, &be))
+	if (!uuid_parse(data, &be))
 		test_uuid_failed("negative", true, true, data, NULL);
 }
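In short, the renamed API exercised by the test above and implemented in lib/uuid.c below (the UUID string is reused from the test data):

	uuid_t u;	/* big-endian byte order, was uuid_be */
	guid_t g;	/* little-endian byte order, was uuid_le */

	if (!uuid_parse("c33f4995-3701-450e-9fbf-206a2e98e576", &u))
		pr_info("uuid: %pUb\n", &u);
	if (!guid_parse("c33f4995-3701-450e-9fbf-206a2e98e576", &g))
		pr_info("guid: %pUl\n", &g);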
diff --git a/lib/uuid.c b/lib/uuid.c
index 37687af77ff8..680b9fb9ba09 100644
--- a/lib/uuid.c
+++ b/lib/uuid.c
@@ -21,10 +21,13 @@
 #include <linux/uuid.h>
 #include <linux/random.h>
 
-const u8 uuid_le_index[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15};
-EXPORT_SYMBOL(uuid_le_index);
-const u8 uuid_be_index[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
-EXPORT_SYMBOL(uuid_be_index);
+const guid_t guid_null;
+EXPORT_SYMBOL(guid_null);
+const uuid_t uuid_null;
+EXPORT_SYMBOL(uuid_null);
+
+const u8 guid_index[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15};
+const u8 uuid_index[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
 
 /***************************************************************
  * Random UUID interface
@@ -53,21 +56,21 @@ static void __uuid_gen_common(__u8 b[16])
 	b[8] = (b[8] & 0x3F) | 0x80;
 }
 
-void uuid_le_gen(uuid_le *lu)
+void guid_gen(guid_t *lu)
 {
 	__uuid_gen_common(lu->b);
 	/* version 4 : random generation */
 	lu->b[7] = (lu->b[7] & 0x0F) | 0x40;
 }
-EXPORT_SYMBOL_GPL(uuid_le_gen);
+EXPORT_SYMBOL_GPL(guid_gen);
 
-void uuid_be_gen(uuid_be *bu)
+void uuid_gen(uuid_t *bu)
 {
 	__uuid_gen_common(bu->b);
 	/* version 4 : random generation */
 	bu->b[6] = (bu->b[6] & 0x0F) | 0x40;
 }
-EXPORT_SYMBOL_GPL(uuid_be_gen);
+EXPORT_SYMBOL_GPL(uuid_gen);
 
 /**
  * uuid_is_valid - checks if UUID string valid
@@ -97,7 +100,7 @@ bool uuid_is_valid(const char *uuid)
 }
 EXPORT_SYMBOL(uuid_is_valid);
 
-static int __uuid_to_bin(const char *uuid, __u8 b[16], const u8 ei[16])
+static int __uuid_parse(const char *uuid, __u8 b[16], const u8 ei[16])
 {
 	static const u8 si[16] = {0,2,4,6,9,11,14,16,19,21,24,26,28,30,32,34};
 	unsigned int i;
@@ -115,14 +118,14 @@ static int __uuid_to_bin(const char *uuid, __u8 b[16], const u8 ei[16])
 	return 0;
 }
 
-int uuid_le_to_bin(const char *uuid, uuid_le *u)
+int guid_parse(const char *uuid, guid_t *u)
 {
-	return __uuid_to_bin(uuid, u->b, uuid_le_index);
+	return __uuid_parse(uuid, u->b, guid_index);
 }
-EXPORT_SYMBOL(uuid_le_to_bin);
+EXPORT_SYMBOL(guid_parse);
 
-int uuid_be_to_bin(const char *uuid, uuid_be *u)
+int uuid_parse(const char *uuid, uuid_t *u)
 {
-	return __uuid_to_bin(uuid, u->b, uuid_be_index);
+	return __uuid_parse(uuid, u->b, uuid_index);
 }
-EXPORT_SYMBOL(uuid_be_to_bin);
+EXPORT_SYMBOL(uuid_parse);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index ff8f1269f301..86c3385b9eb3 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1309,14 +1309,14 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
 	char uuid[UUID_STRING_LEN + 1];
 	char *p = uuid;
 	int i;
-	const u8 *index = uuid_be_index;
+	const u8 *index = uuid_index;
 	bool uc = false;
 
 	switch (*(++fmt)) {
 	case 'L':
 		uc = true;		/* fall-through */
 	case 'l':
-		index = uuid_le_index;
+		index = guid_index;
 		break;
 	case 'B':
 		uc = true;