Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug      |  33
-rw-r--r--  lib/Makefile           |   2
-rw-r--r--  lib/asn1_decoder.c     |  19
-rw-r--r--  lib/assoc_array.c      |   4
-rw-r--r--  lib/iov_iter.c         |  19
-rw-r--r--  lib/lz4/lz4defs.h      |  25
-rw-r--r--  lib/mpi/mpicoder.c     | 122
-rw-r--r--  lib/nlattr.c           | 103
-rw-r--r--  lib/proportions.c      | 407
-rw-r--r--  lib/rhashtable.c       |   6
-rw-r--r--  lib/stackdepot.c       |  10
-rw-r--r--  lib/test_bpf.c         | 234
-rw-r--r--  lib/test_rhashtable.c  |   2
13 files changed, 466 insertions(+), 520 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1e9a607534ca..f4b797a690ba 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1289,6 +1289,39 @@ config TORTURE_TEST
tristate
default n
+config RCU_PERF_TEST
+ tristate "performance tests for RCU"
+ depends on DEBUG_KERNEL
+ select TORTURE_TEST
+ select SRCU
+ select TASKS_RCU
+ default n
+ help
+ This option provides a kernel module that runs performance
+ tests on the RCU infrastructure. The kernel module may be built
+ after the fact on the running kernel to be tested, if desired.
+
+ Say Y here if you want RCU performance tests to be built into
+ the kernel.
+ Say M if you want the RCU performance tests to build as a module.
+ Say N if you are unsure.
+
+config RCU_PERF_TEST_RUNNABLE
+ bool "performance tests for RCU runnable by default"
+ depends on RCU_PERF_TEST = y
+ default n
+ help
+ This option provides a way to build the RCU performance tests
+ directly into the kernel without them starting up at boot time.
+ You can use /sys/module to manually override this setting.
+ This /sys/module parameter is available only when the RCU
+ performance tests have been built into the kernel.
+
+ Say Y here if you want the RCU performance tests to start during
+ boot (you probably don't).
+ Say N here if you want the RCU performance tests to start only
+ after being manually enabled via /sys/module.
+
config RCU_TORTURE_TEST
tristate "torture tests for RCU"
depends on DEBUG_KERNEL
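
Both new options follow the usual torture-test split: RCU_PERF_TEST builds the test itself (built-in or module), while RCU_PERF_TEST_RUNNABLE only decides whether a built-in test starts at boot or waits to be enabled through /sys/module. A minimal sketch of that "runnable" pattern follows; perf_runnable and perf_init are illustrative names, not the actual rcuperf module interface.

/* Sketch of the RUNNABLE pattern described in the help text above. */
#include <linux/module.h>
#include <linux/moduleparam.h>

static bool perf_runnable = IS_ENABLED(CONFIG_RCU_PERF_TEST_RUNNABLE);
module_param(perf_runnable, bool, 0644);
MODULE_PARM_DESC(perf_runnable, "Start performance tests at boot");

static int __init perf_init(void)
{
	if (!perf_runnable) {
		/* Built in but idle; the flag can be flipped later via
		 * /sys/module/<name>/parameters/perf_runnable (a real
		 * implementation would poll or hook that write).
		 */
		return 0;
	}
	/* ... create the measurement kthreads here ... */
	return 0;
}
module_init(perf_init);
MODULE_LICENSE("GPL");
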
diff --git a/lib/Makefile b/lib/Makefile
index 7bd6fd436c97..a65e9a861535 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -23,7 +23,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o dump_stack.o timerqueue.o\
idr.o int_sqrt.o extable.o \
sha1.o md5.o irq_regs.o argv_split.o \
- proportions.o flex_proportions.o ratelimit.o show_mem.o \
+ flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
earlycpio.o seq_buf.o nmi_backtrace.o
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 2b3f46c049d4..0bd8a611eb83 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -12,6 +12,7 @@
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
+#include <linux/module.h>
#include <linux/asn1_decoder.h>
#include <linux/asn1_ber_bytecode.h>
@@ -74,7 +75,7 @@ next_tag:
/* Extract a tag from the data */
tag = data[dp++];
- if (tag == 0) {
+ if (tag == ASN1_EOC) {
/* It appears to be an EOC. */
if (data[dp++] != 0)
goto invalid_eoc;
@@ -96,10 +97,8 @@ next_tag:
/* Extract the length */
len = data[dp++];
- if (len <= 0x7f) {
- dp += len;
- goto next_tag;
- }
+ if (len <= 0x7f)
+ goto check_length;
if (unlikely(len == ASN1_INDEFINITE_LENGTH)) {
/* Indefinite length */
@@ -110,14 +109,18 @@ next_tag:
}
n = len - 0x80;
- if (unlikely(n > sizeof(size_t) - 1))
+ if (unlikely(n > sizeof(len) - 1))
goto length_too_long;
if (unlikely(n > datalen - dp))
goto data_overrun_error;
- for (len = 0; n > 0; n--) {
+ len = 0;
+ for (; n > 0; n--) {
len <<= 8;
len |= data[dp++];
}
+check_length:
+ if (len > datalen - dp)
+ goto data_overrun_error;
dp += len;
goto next_tag;
@@ -504,3 +507,5 @@ error:
return -EBADMSG;
}
EXPORT_SYMBOL_GPL(asn1_ber_decoder);
+
+MODULE_LICENSE("GPL");
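
The decoder now funnels both the short and the long length form through one check_length test, so a corrupt or hostile length field can no longer advance dp past datalen; the size limit also uses sizeof(len) instead of sizeof(size_t), matching the variable that actually accumulates the length. A standalone sketch of the same guard, assuming a plain byte buffer rather than the kernel's bytecode-driven decoder (indefinite length, 0x80, is not handled here):

/* Hedged sketch of BER definite-length parsing with the overrun guard;
 * buf/buflen/dp are illustrative names, not the asn1_ber_decoder() state.
 */
#include <stddef.h>
#include <stdint.h>

static int ber_skip_value(const uint8_t *buf, size_t buflen, size_t *dp)
{
	size_t len, n;

	if (*dp >= buflen)
		return -1;
	len = buf[(*dp)++];
	if (len > 0x7f) {			/* long form: n length octets follow */
		n = len - 0x80;
		if (n > sizeof(len) - 1 || n > buflen - *dp)
			return -1;
		for (len = 0; n > 0; n--) {
			len <<= 8;
			len |= buf[(*dp)++];
		}
	}
	if (len > buflen - *dp)			/* the new check_length test */
		return -1;
	*dp += len;
	return 0;
}
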
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 03dd576e6773..59fd7c0b119c 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -524,7 +524,9 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
free_slot = i;
continue;
}
- if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
+ if (assoc_array_ptr_is_leaf(ptr) &&
+ ops->compare_object(assoc_array_ptr_to_leaf(ptr),
+ index_key)) {
pr_devel("replace in slot %d\n", i);
edit->leaf_p = &node->slots[i];
edit->dead_leaf = node->slots[i];
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 5fecddc32b1b..ca5316e0087b 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -569,6 +569,25 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
}
EXPORT_SYMBOL(iov_iter_alignment);
+unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
+{
+ unsigned long res = 0;
+ size_t size = i->count;
+ if (!size)
+ return 0;
+
+ iterate_all_kinds(i, size, v,
+ (res |= (!res ? 0 : (unsigned long)v.iov_base) |
+ (size != v.iov_len ? size : 0), 0),
+ (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
+ (size != v.bv_len ? size : 0)),
+ (res |= (!res ? 0 : (unsigned long)v.iov_base) |
+ (size != v.iov_len ? size : 0))
+ );
+ return res;
+}
+EXPORT_SYMBOL(iov_iter_gap_alignment);
+
ssize_t iov_iter_get_pages(struct iov_iter *i,
struct page **pages, size_t maxsize, unsigned maxpages,
size_t *start)
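
The new iov_iter_gap_alignment() ORs together the start offset of every segment after the first and the length of every segment except the last, so a result with low bits set means the iovec cannot be treated as one gap-free, aligned run. One plausible way a block-layer caller could use it, sketched with illustrative plumbing (queue_virt_boundary() is the existing accessor for a queue's virt boundary mask; the rest is not a real driver path):

#include <linux/blkdev.h>
#include <linux/uio.h>

/* Sketch: decide whether an iov_iter can be mapped directly or needs a
 * copy, based on the device's virt boundary.
 */
static bool iter_gaps_ok(struct request_queue *q, struct iov_iter *iter)
{
	unsigned long mask = queue_virt_boundary(q);	/* e.g. 0xfff */

	return !(iov_iter_gap_alignment(iter) & mask);
}
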
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index abcecdc2d0f2..c79d7ea8a38e 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -11,8 +11,7 @@
/*
* Detects 64 bits mode
*/
-#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
- || defined(__ppc64__) || defined(__LP64__))
+#if defined(CONFIG_64BIT)
#define LZ4_ARCH64 1
#else
#define LZ4_ARCH64 0
@@ -25,9 +24,7 @@
typedef struct _U16_S { u16 v; } U16_S;
typedef struct _U32_S { u32 v; } U32_S;
typedef struct _U64_S { u64 v; } U64_S;
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \
- || defined(CONFIG_ARM) && __LINUX_ARM_ARCH__ >= 6 \
- && defined(ARM_EFFICIENT_UNALIGNED_ACCESS)
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define A16(x) (((U16_S *)(x))->v)
#define A32(x) (((U32_S *)(x))->v)
@@ -35,6 +32,10 @@ typedef struct _U64_S { u64 v; } U64_S;
#define PUT4(s, d) (A32(d) = A32(s))
#define PUT8(s, d) (A64(d) = A64(s))
+
+#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
+ (d = s - A16(p))
+
#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
do { \
A16(p) = v; \
@@ -51,10 +52,13 @@ typedef struct _U64_S { u64 v; } U64_S;
#define PUT8(s, d) \
put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
-#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
- do { \
- put_unaligned(v, (u16 *)(p)); \
- p += 2; \
+#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
+ (d = s - get_unaligned_le16(p))
+
+#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
+ do { \
+ put_unaligned_le16(v, (u16 *)(p)); \
+ p += 2; \
} while (0)
#endif
@@ -140,9 +144,6 @@ typedef struct _U64_S { u64 v; } U64_S;
#endif
-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
- (d = s - get_unaligned_le16(p))
-
#define LZ4_WILDCOPY(s, d, e) \
do { \
LZ4_COPYPACKET(s, d); \
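
Besides switching the 64-bit and unaligned-access detection to CONFIG_64BIT and CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, the patch moves LZ4_READ_LITTLEENDIAN_16 into the two access branches, so the offset read uses a direct A16() load where that is cheap and get_unaligned_le16() elsewhere. What the macro computes, sketched as standalone C (not the kernel macros): the 16-bit match offset is stored little-endian after the token and is subtracted from the current position to find the match source.

/* Hedged sketch of LZ4_READ_LITTLEENDIAN_16(d, s, p): d = s - le16(p). */
#include <stdint.h>
#include <string.h>

static const uint8_t *lz4_match_src(const uint8_t *pos, const uint8_t *p)
{
	uint16_t off;

	memcpy(&off, p, sizeof(off));		/* alignment-safe load */
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	off = (uint16_t)((off >> 8) | (off << 8));	/* byteswap to host order */
#endif
	return pos - off;
}
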
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index eb15e7dc7b65..747606f9e4a3 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -20,6 +20,8 @@
#include <linux/bitops.h>
#include <linux/count_zeros.h>
+#include <linux/byteorder/generic.h>
+#include <linux/string.h>
#include "mpi-internal.h"
#define MAX_EXTERN_MPI_BITS 16384
@@ -163,7 +165,13 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
int *sign)
{
uint8_t *p;
- mpi_limb_t alimb;
+#if BYTES_PER_MPI_LIMB == 4
+ __be32 alimb;
+#elif BYTES_PER_MPI_LIMB == 8
+ __be64 alimb;
+#else
+#error please implement for this limb size.
+#endif
unsigned int n = mpi_get_size(a);
int i, lzeros;
@@ -183,38 +191,19 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
p = buf;
*nbytes = n - lzeros;
- for (i = a->nlimbs - 1; i >= 0; i--) {
- alimb = a->d[i];
+ for (i = a->nlimbs - 1 - lzeros / BYTES_PER_MPI_LIMB,
+ lzeros %= BYTES_PER_MPI_LIMB;
+ i >= 0; i--) {
#if BYTES_PER_MPI_LIMB == 4
- *p++ = alimb >> 24;
- *p++ = alimb >> 16;
- *p++ = alimb >> 8;
- *p++ = alimb;
+ alimb = cpu_to_be32(a->d[i]);
#elif BYTES_PER_MPI_LIMB == 8
- *p++ = alimb >> 56;
- *p++ = alimb >> 48;
- *p++ = alimb >> 40;
- *p++ = alimb >> 32;
- *p++ = alimb >> 24;
- *p++ = alimb >> 16;
- *p++ = alimb >> 8;
- *p++ = alimb;
+ alimb = cpu_to_be64(a->d[i]);
#else
#error please implement for this limb size.
#endif
-
- if (lzeros > 0) {
- if (lzeros >= sizeof(alimb)) {
- p -= sizeof(alimb);
- } else {
- mpi_limb_t *limb1 = (void *)p - sizeof(alimb);
- mpi_limb_t *limb2 = (void *)p - sizeof(alimb)
- + lzeros;
- *limb1 = *limb2;
- p -= lzeros;
- }
- lzeros -= sizeof(alimb);
- }
+ memcpy(p, (u8 *)&alimb + lzeros, BYTES_PER_MPI_LIMB - lzeros);
+ p += BYTES_PER_MPI_LIMB - lzeros;
+ lzeros = 0;
}
return 0;
}
@@ -359,7 +348,13 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
int *sign)
{
u8 *p, *p2;
- mpi_limb_t alimb, alimb2;
+#if BYTES_PER_MPI_LIMB == 4
+ __be32 alimb;
+#elif BYTES_PER_MPI_LIMB == 8
+ __be64 alimb;
+#else
+#error please implement for this limb size.
+#endif
unsigned int n = mpi_get_size(a);
int i, x, y = 0, lzeros, buf_len;
@@ -380,42 +375,22 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
buf_len = sgl->length;
p2 = sg_virt(sgl);
- for (i = a->nlimbs - 1; i >= 0; i--) {
- alimb = a->d[i];
- p = (u8 *)&alimb2;
+ for (i = a->nlimbs - 1 - lzeros / BYTES_PER_MPI_LIMB,
+ lzeros %= BYTES_PER_MPI_LIMB;
+ i >= 0; i--) {
#if BYTES_PER_MPI_LIMB == 4
- *p++ = alimb >> 24;
- *p++ = alimb >> 16;
- *p++ = alimb >> 8;
- *p++ = alimb;
+ alimb = cpu_to_be32(a->d[i]);
#elif BYTES_PER_MPI_LIMB == 8
- *p++ = alimb >> 56;
- *p++ = alimb >> 48;
- *p++ = alimb >> 40;
- *p++ = alimb >> 32;
- *p++ = alimb >> 24;
- *p++ = alimb >> 16;
- *p++ = alimb >> 8;
- *p++ = alimb;
+ alimb = cpu_to_be64(a->d[i]);
#else
#error please implement for this limb size.
#endif
- if (lzeros > 0) {
- if (lzeros >= sizeof(alimb)) {
- p -= sizeof(alimb);
- continue;
- } else {
- mpi_limb_t *limb1 = (void *)p - sizeof(alimb);
- mpi_limb_t *limb2 = (void *)p - sizeof(alimb)
- + lzeros;
- *limb1 = *limb2;
- p -= lzeros;
- y = lzeros;
- }
- lzeros -= sizeof(alimb);
+ if (lzeros) {
+ y = lzeros;
+ lzeros = 0;
}
- p = p - (sizeof(alimb) - y);
+ p = (u8 *)&alimb + y;
for (x = 0; x < sizeof(alimb) - y; x++) {
if (!buf_len) {
@@ -443,15 +418,15 @@ EXPORT_SYMBOL_GPL(mpi_write_to_sgl);
* a new MPI and reads the content of the sgl to the MPI.
*
* @sgl: scatterlist to read from
- * @len: number of bytes to read
+ * @nbytes: number of bytes to read
*
* Return: Pointer to a new MPI or NULL on error
*/
-MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len)
+MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
{
struct scatterlist *sg;
int x, i, j, z, lzeros, ents;
- unsigned int nbits, nlimbs, nbytes;
+ unsigned int nbits, nlimbs;
mpi_limb_t a;
MPI val = NULL;
@@ -472,16 +447,12 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len)
break;
ents--;
+ nbytes -= lzeros;
lzeros = 0;
}
sgl = sg;
-
- if (!ents)
- nbytes = 0;
- else
- nbytes = len - lzeros;
-
+ nbytes -= lzeros;
nbits = nbytes * 8;
if (nbits > MAX_EXTERN_MPI_BITS) {
pr_info("MPI: mpi too large (%u bits)\n", nbits);
@@ -489,9 +460,8 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len)
}
if (nbytes > 0)
- nbits -= count_leading_zeros(*(u8 *)(sg_virt(sgl) + lzeros));
- else
- nbits = 0;
+ nbits -= count_leading_zeros(*(u8 *)(sg_virt(sgl) + lzeros)) -
+ (BITS_PER_LONG - 8);
nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
val = mpi_alloc(nlimbs);
@@ -507,19 +477,14 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len)
j = nlimbs - 1;
a = 0;
- z = 0;
- x = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
- x %= BYTES_PER_MPI_LIMB;
+ z = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
+ z %= BYTES_PER_MPI_LIMB;
for_each_sg(sgl, sg, ents, i) {
const u8 *buffer = sg_virt(sg) + lzeros;
int len = sg->length - lzeros;
- int buf_shift = x;
-
- if (sg_is_last(sg) && (len % BYTES_PER_MPI_LIMB))
- len += BYTES_PER_MPI_LIMB - (len % BYTES_PER_MPI_LIMB);
- for (; x < len + buf_shift; x++) {
+ for (x = 0; x < len; x++) {
a <<= 8;
a |= *buffer++;
if (((z + x + 1) % BYTES_PER_MPI_LIMB) == 0) {
@@ -528,7 +493,6 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len)
}
}
z += x;
- x = 0;
lzeros = 0;
}
return val;
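
Both mpi_read_buffer() and mpi_write_to_sgl() now byteswap each limb once with cpu_to_be32()/cpu_to_be64() and memcpy() the big-endian bytes out, starting lzeros bytes into the first emitted limb, instead of shifting bytes out one at a time and patching the leading zeros afterwards. The same idea in standalone C, assuming 64-bit limbs (illustrative, not the kernel helpers):

/* Sketch: serialize native-endian 64-bit limbs (least-significant limb
 * at d[0], as in MPI) to big-endian bytes, skipping lzeros leading zeros.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t limbs_to_be(uint8_t *out, const uint64_t *d, int nlimbs,
			  unsigned int lzeros)
{
	uint8_t *p = out;
	int i = nlimbs - 1 - lzeros / 8;

	lzeros %= 8;
	for (; i >= 0; i--) {
		uint8_t be[8];
		int k;

		for (k = 0; k < 8; k++)		/* cpu_to_be64() by hand */
			be[k] = (uint8_t)(d[i] >> (56 - 8 * k));
		memcpy(p, be + lzeros, 8 - lzeros);
		p += 8 - lzeros;
		lzeros = 0;			/* only the first limb is trimmed */
	}
	return (size_t)(p - out);
}
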
diff --git a/lib/nlattr.c b/lib/nlattr.c
index f5907d23272d..fce1e9afc6d9 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -355,6 +355,30 @@ struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
EXPORT_SYMBOL(__nla_reserve);
/**
+ * __nla_reserve_64bit - reserve room for attribute on the skb and align it
+ * @skb: socket buffer to reserve room on
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @padattr: attribute type for the padding
+ *
+ * Adds a netlink attribute header to a socket buffer and reserves
+ * room for the payload but does not copy it. It also ensures that this
+ * attribute will have a 64-bit aligned nla_data() area.
+ *
+ * The caller is responsible to ensure that the skb provides enough
+ * tailroom for the attribute header and payload.
+ */
+struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
+ int attrlen, int padattr)
+{
+ if (nla_need_padding_for_64bit(skb))
+ nla_align_64bit(skb, padattr);
+
+ return __nla_reserve(skb, attrtype, attrlen);
+}
+EXPORT_SYMBOL(__nla_reserve_64bit);
+
+/**
* __nla_reserve_nohdr - reserve room for attribute without header
* @skb: socket buffer to reserve room on
* @attrlen: length of attribute payload
@@ -397,6 +421,36 @@ struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
EXPORT_SYMBOL(nla_reserve);
/**
+ * nla_reserve_64bit - reserve room for attribute on the skb and align it
+ * @skb: socket buffer to reserve room on
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @padattr: attribute type for the padding
+ *
+ * Adds a netlink attribute header to a socket buffer and reserves
+ * room for the payload but does not copy it. It also ensures that this
+ * attribute will have a 64-bit aligned nla_data() area.
+ *
+ * Returns NULL if the tailroom of the skb is insufficient to store
+ * the attribute header and payload.
+ */
+struct nlattr *nla_reserve_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+ int padattr)
+{
+ size_t len;
+
+ if (nla_need_padding_for_64bit(skb))
+ len = nla_total_size_64bit(attrlen);
+ else
+ len = nla_total_size(attrlen);
+ if (unlikely(skb_tailroom(skb) < len))
+ return NULL;
+
+ return __nla_reserve_64bit(skb, attrtype, attrlen, padattr);
+}
+EXPORT_SYMBOL(nla_reserve_64bit);
+
+/**
* nla_reserve_nohdr - reserve room for attribute without header
* @skb: socket buffer to reserve room on
* @attrlen: length of attribute payload
@@ -436,6 +490,27 @@ void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
EXPORT_SYMBOL(__nla_put);
/**
+ * __nla_put_64bit - Add a netlink attribute to a socket buffer and align it
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ * @padattr: attribute type for the padding
+ *
+ * The caller is responsible to ensure that the skb provides enough
+ * tailroom for the attribute header and payload.
+ */
+void __nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+ const void *data, int padattr)
+{
+ struct nlattr *nla;
+
+ nla = __nla_reserve_64bit(skb, attrtype, attrlen, padattr);
+ memcpy(nla_data(nla), data, attrlen);
+}
+EXPORT_SYMBOL(__nla_put_64bit);
+
+/**
* __nla_put_nohdr - Add a netlink attribute without header
* @skb: socket buffer to add attribute to
* @attrlen: length of attribute payload
@@ -474,6 +549,34 @@ int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
EXPORT_SYMBOL(nla_put);
/**
+ * nla_put_64bit - Add a netlink attribute to a socket buffer and align it
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ * @padattr: attribute type for the padding
+ *
+ * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
+ * the attribute header and payload.
+ */
+int nla_put_64bit(struct sk_buff *skb, int attrtype, int attrlen,
+ const void *data, int padattr)
+{
+ size_t len;
+
+ if (nla_need_padding_for_64bit(skb))
+ len = nla_total_size_64bit(attrlen);
+ else
+ len = nla_total_size(attrlen);
+ if (unlikely(skb_tailroom(skb) < len))
+ return -EMSGSIZE;
+
+ __nla_put_64bit(skb, attrtype, attrlen, data, padattr);
+ return 0;
+}
+EXPORT_SYMBOL(nla_put_64bit);
+
+/**
* nla_put_nohdr - Add a netlink attribute without header
* @skb: socket buffer to add attribute to
* @attrlen: length of attribute payload
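
The *_64bit variants exist so that a u64-sized payload ends up with an 8-byte-aligned nla_data(): when the current skb tail would misalign it, a zero-length pad attribute of type padattr is emitted first, which existing parsers simply skip. A hedged usage sketch; MY_ATTR_STATS64, MY_ATTR_PAD and struct my_stats64 are illustrative, not real uapi:

#include <net/netlink.h>

enum {
	MY_ATTR_UNSPEC,
	MY_ATTR_STATS64,
	MY_ATTR_PAD,		/* consumed as padding, never parsed */
};

struct my_stats64 {
	__u64 rx_packets;
	__u64 tx_packets;
};

/* Sketch: emit the struct with nla_data() guaranteed 64-bit aligned. */
static int fill_stats(struct sk_buff *skb, const struct my_stats64 *stats)
{
	if (nla_put_64bit(skb, MY_ATTR_STATS64, sizeof(*stats),
			  stats, MY_ATTR_PAD))
		return -EMSGSIZE;
	return 0;
}
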
diff --git a/lib/proportions.c b/lib/proportions.c
deleted file mode 100644
index efa54f259ea9..000000000000
--- a/lib/proportions.c
+++ /dev/null
@@ -1,407 +0,0 @@
-/*
- * Floating proportions
- *
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
- *
- * Description:
- *
- * The floating proportion is a time derivative with an exponentially decaying
- * history:
- *
- * p_{j} = \Sum_{i=0} (dx_{j}/dt_{-i}) / 2^(1+i)
- *
- * Where j is an element from {prop_local}, x_{j} is j's number of events,
- * and i the time period over which the differential is taken. So d/dt_{-i} is
- * the differential over the i-th last period.
- *
- * The decaying history gives smooth transitions. The time differential carries
- * the notion of speed.
- *
- * The denominator is 2^(1+i) because we want the series to be normalised, ie.
- *
- * \Sum_{i=0} 1/2^(1+i) = 1
- *
- * Further more, if we measure time (t) in the same events as x; so that:
- *
- * t = \Sum_{j} x_{j}
- *
- * we get that:
- *
- * \Sum_{j} p_{j} = 1
- *
- * Writing this in an iterative fashion we get (dropping the 'd's):
- *
- * if (++x_{j}, ++t > period)
- * t /= 2;
- * for_each (j)
- * x_{j} /= 2;
- *
- * so that:
- *
- * p_{j} = x_{j} / t;
- *
- * We optimize away the '/= 2' for the global time delta by noting that:
- *
- * if (++t > period) t /= 2:
- *
- * Can be approximated by:
- *
- * period/2 + (++t % period/2)
- *
- * [ Furthermore, when we choose period to be 2^n it can be written in terms of
- * binary operations and wraparound artefacts disappear. ]
- *
- * Also note that this yields a natural counter of the elapsed periods:
- *
- * c = t / (period/2)
- *
- * [ Its monotonic increasing property can be applied to mitigate the wrap-
- * around issue. ]
- *
- * This allows us to do away with the loop over all prop_locals on each period
- * expiration. By remembering the period count under which it was last accessed
- * as c_{j}, we can obtain the number of 'missed' cycles from:
- *
- * c - c_{j}
- *
- * We can then lazily catch up to the global period count every time we are
- * going to use x_{j}, by doing:
- *
- * x_{j} /= 2^(c - c_{j}), c_{j} = c
- */
-
-#include <linux/proportions.h>
-#include <linux/rcupdate.h>
-
-int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp)
-{
- int err;
-
- if (shift > PROP_MAX_SHIFT)
- shift = PROP_MAX_SHIFT;
-
- pd->index = 0;
- pd->pg[0].shift = shift;
- mutex_init(&pd->mutex);
- err = percpu_counter_init(&pd->pg[0].events, 0, gfp);
- if (err)
- goto out;
-
- err = percpu_counter_init(&pd->pg[1].events, 0, gfp);
- if (err)
- percpu_counter_destroy(&pd->pg[0].events);
-
-out:
- return err;
-}
-
-/*
- * We have two copies, and flip between them to make it seem like an atomic
- * update. The update is not really atomic wrt the events counter, but
- * it is internally consistent with the bit layout depending on shift.
- *
- * We copy the events count, move the bits around and flip the index.
- */
-void prop_change_shift(struct prop_descriptor *pd, int shift)
-{
- int index;
- int offset;
- u64 events;
- unsigned long flags;
-
- if (shift > PROP_MAX_SHIFT)
- shift = PROP_MAX_SHIFT;
-
- mutex_lock(&pd->mutex);
-
- index = pd->index ^ 1;
- offset = pd->pg[pd->index].shift - shift;
- if (!offset)
- goto out;
-
- pd->pg[index].shift = shift;
-
- local_irq_save(flags);
- events = percpu_counter_sum(&pd->pg[pd->index].events);
- if (offset < 0)
- events <<= -offset;
- else
- events >>= offset;
- percpu_counter_set(&pd->pg[index].events, events);
-
- /*
- * ensure the new pg is fully written before the switch
- */
- smp_wmb();
- pd->index = index;
- local_irq_restore(flags);
-
- synchronize_rcu();
-
-out:
- mutex_unlock(&pd->mutex);
-}
-
-/*
- * wrap the access to the data in an rcu_read_lock() section;
- * this is used to track the active references.
- */
-static struct prop_global *prop_get_global(struct prop_descriptor *pd)
-__acquires(RCU)
-{
- int index;
-
- rcu_read_lock();
- index = pd->index;
- /*
- * match the wmb from vcd_flip()
- */
- smp_rmb();
- return &pd->pg[index];
-}
-
-static void prop_put_global(struct prop_descriptor *pd, struct prop_global *pg)
-__releases(RCU)
-{
- rcu_read_unlock();
-}
-
-static void
-prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift)
-{
- int offset = *pl_shift - new_shift;
-
- if (!offset)
- return;
-
- if (offset < 0)
- *pl_period <<= -offset;
- else
- *pl_period >>= offset;
-
- *pl_shift = new_shift;
-}
-
-/*
- * PERCPU
- */
-
-#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
-
-int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp)
-{
- raw_spin_lock_init(&pl->lock);
- pl->shift = 0;
- pl->period = 0;
- return percpu_counter_init(&pl->events, 0, gfp);
-}
-
-void prop_local_destroy_percpu(struct prop_local_percpu *pl)
-{
- percpu_counter_destroy(&pl->events);
-}
-
-/*
- * Catch up with missed period expirations.
- *
- * until (c_{j} == c)
- * x_{j} -= x_{j}/2;
- * c_{j}++;
- */
-static
-void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
-{
- unsigned long period = 1UL << (pg->shift - 1);
- unsigned long period_mask = ~(period - 1);
- unsigned long global_period;
- unsigned long flags;
-
- global_period = percpu_counter_read(&pg->events);
- global_period &= period_mask;
-
- /*
- * Fast path - check if the local and global period count still match
- * outside of the lock.
- */
- if (pl->period == global_period)
- return;
-
- raw_spin_lock_irqsave(&pl->lock, flags);
- prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
-
- /*
- * For each missed period, we half the local counter.
- * basically:
- * pl->events >> (global_period - pl->period);
- */
- period = (global_period - pl->period) >> (pg->shift - 1);
- if (period < BITS_PER_LONG) {
- s64 val = percpu_counter_read(&pl->events);
-
- if (val < (nr_cpu_ids * PROP_BATCH))
- val = percpu_counter_sum(&pl->events);
-
- __percpu_counter_add(&pl->events, -val + (val >> period),
- PROP_BATCH);
- } else
- percpu_counter_set(&pl->events, 0);
-
- pl->period = global_period;
- raw_spin_unlock_irqrestore(&pl->lock, flags);
-}
-
-/*
- * ++x_{j}, ++t
- */
-void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
-{
- struct prop_global *pg = prop_get_global(pd);
-
- prop_norm_percpu(pg, pl);
- __percpu_counter_add(&pl->events, 1, PROP_BATCH);
- percpu_counter_add(&pg->events, 1);
- prop_put_global(pd, pg);
-}
-
-/*
- * identical to __prop_inc_percpu, except that it limits this pl's fraction to
- * @frac/PROP_FRAC_BASE by ignoring events when this limit has been exceeded.
- */
-void __prop_inc_percpu_max(struct prop_descriptor *pd,
- struct prop_local_percpu *pl, long frac)
-{
- struct prop_global *pg = prop_get_global(pd);
-
- prop_norm_percpu(pg, pl);
-
- if (unlikely(frac != PROP_FRAC_BASE)) {
- unsigned long period_2 = 1UL << (pg->shift - 1);
- unsigned long counter_mask = period_2 - 1;
- unsigned long global_count;
- long numerator, denominator;
-
- numerator = percpu_counter_read_positive(&pl->events);
- global_count = percpu_counter_read(&pg->events);
- denominator = period_2 + (global_count & counter_mask);
-
- if (numerator > ((denominator * frac) >> PROP_FRAC_SHIFT))
- goto out_put;
- }
-
- percpu_counter_add(&pl->events, 1);
- percpu_counter_add(&pg->events, 1);
-
-out_put:
- prop_put_global(pd, pg);
-}
-
-/*
- * Obtain a fraction of this proportion
- *
- * p_{j} = x_{j} / (period/2 + t % period/2)
- */
-void prop_fraction_percpu(struct prop_descriptor *pd,
- struct prop_local_percpu *pl,
- long *numerator, long *denominator)
-{
- struct prop_global *pg = prop_get_global(pd);
- unsigned long period_2 = 1UL << (pg->shift - 1);
- unsigned long counter_mask = period_2 - 1;
- unsigned long global_count;
-
- prop_norm_percpu(pg, pl);
- *numerator = percpu_counter_read_positive(&pl->events);
-
- global_count = percpu_counter_read(&pg->events);
- *denominator = period_2 + (global_count & counter_mask);
-
- prop_put_global(pd, pg);
-}
-
-/*
- * SINGLE
- */
-
-int prop_local_init_single(struct prop_local_single *pl)
-{
- raw_spin_lock_init(&pl->lock);
- pl->shift = 0;
- pl->period = 0;
- pl->events = 0;
- return 0;
-}
-
-void prop_local_destroy_single(struct prop_local_single *pl)
-{
-}
-
-/*
- * Catch up with missed period expirations.
- */
-static
-void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
-{
- unsigned long period = 1UL << (pg->shift - 1);
- unsigned long period_mask = ~(period - 1);
- unsigned long global_period;
- unsigned long flags;
-
- global_period = percpu_counter_read(&pg->events);
- global_period &= period_mask;
-
- /*
- * Fast path - check if the local and global period count still match
- * outside of the lock.
- */
- if (pl->period == global_period)
- return;
-
- raw_spin_lock_irqsave(&pl->lock, flags);
- prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
- /*
- * For each missed period, we half the local counter.
- */
- period = (global_period - pl->period) >> (pg->shift - 1);
- if (likely(period < BITS_PER_LONG))
- pl->events >>= period;
- else
- pl->events = 0;
- pl->period = global_period;
- raw_spin_unlock_irqrestore(&pl->lock, flags);
-}
-
-/*
- * ++x_{j}, ++t
- */
-void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
-{
- struct prop_global *pg = prop_get_global(pd);
-
- prop_norm_single(pg, pl);
- pl->events++;
- percpu_counter_add(&pg->events, 1);
- prop_put_global(pd, pg);
-}
-
-/*
- * Obtain a fraction of this proportion
- *
- * p_{j} = x_{j} / (period/2 + t % period/2)
- */
-void prop_fraction_single(struct prop_descriptor *pd,
- struct prop_local_single *pl,
- long *numerator, long *denominator)
-{
- struct prop_global *pg = prop_get_global(pd);
- unsigned long period_2 = 1UL << (pg->shift - 1);
- unsigned long counter_mask = period_2 - 1;
- unsigned long global_count;
-
- prop_norm_single(pg, pl);
- *numerator = pl->events;
-
- global_count = percpu_counter_read(&pg->events);
- *denominator = period_2 + (global_count & counter_mask);
-
- prop_put_global(pd, pg);
-}
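
The header comment of the deleted file was the only in-tree write-up of the floating-proportions math; for reference, the normalisation it appeals to is just a geometric series, which is why each p_{j} really is a fraction of 1:

\[
\sum_{i=0}^{\infty} \frac{1}{2^{1+i}}
  = \frac{1}{2}\sum_{i=0}^{\infty}\frac{1}{2^{i}}
  = \frac{1}{2}\cdot 2 = 1,
\qquad
p_{j} = \sum_{i=0}^{\infty}\frac{dx_{j}/dt_{-i}}{2^{1+i}},
\qquad
\sum_{j} p_{j} = 1 \ \text{when}\ t = \sum_{j} x_{j}.
\]
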
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index cc808707d1cf..5d845ffd7982 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -487,6 +487,7 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
* rhashtable_walk_init - Initialise an iterator
* @ht: Table to walk over
* @iter: Hash table Iterator
+ * @gfp: GFP flags for allocations
*
* This function prepares a hash table walk.
*
@@ -504,14 +505,15 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
* You must call rhashtable_walk_exit if this function returns
* successfully.
*/
-int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
+int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter,
+ gfp_t gfp)
{
iter->ht = ht;
iter->p = NULL;
iter->slot = 0;
iter->skip = 0;
- iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
+ iter->walker = kmalloc(sizeof(*iter->walker), gfp);
if (!iter->walker)
return -ENOMEM;
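
rhashtable_walk_init() now takes the allocation flags used for the walker object itself, so callers in atomic context can pass GFP_ATOMIC rather than being forced into GFP_KERNEL. A hedged sketch of a complete walk with the new signature (the table and the per-object handling are illustrative):

#include <linux/rhashtable.h>

/* Sketch: iterate every object in ht; object handling is elided. */
static void walk_table(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	void *obj;
	int err;

	if (rhashtable_walk_init(ht, &iter, GFP_KERNEL))
		return;
	err = rhashtable_walk_start(&iter);
	if (err && err != -EAGAIN)	/* -EAGAIN only flags a resize */
		goto out;
	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj))	/* -EAGAIN mid-walk: keep going */
			continue;
		/* ... look at obj ... */
	}
	rhashtable_walk_stop(&iter);
out:
	rhashtable_walk_exit(&iter);
}
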
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 654c9d87e83a..53ad6c0831ae 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -42,12 +42,14 @@
#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)
+#define STACK_ALLOC_NULL_PROTECTION_BITS 1
#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
#define STACK_ALLOC_ALIGN 4
#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
STACK_ALLOC_ALIGN)
-#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - STACK_ALLOC_OFFSET_BITS)
+#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
+ STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
#define STACK_ALLOC_SLABS_CAP 1024
#define STACK_ALLOC_MAX_SLABS \
(((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
@@ -59,6 +61,7 @@ union handle_parts {
struct {
u32 slabindex : STACK_ALLOC_INDEX_BITS;
u32 offset : STACK_ALLOC_OFFSET_BITS;
+ u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
};
};
@@ -136,6 +139,7 @@ static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
stack->size = size;
stack->handle.slabindex = depot_index;
stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
+ stack->handle.valid = 1;
memcpy(stack->entries, entries, size * sizeof(unsigned long));
depot_offset += required_size;
@@ -210,10 +214,6 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
goto fast_exit;
hash = hash_stack(trace->entries, trace->nr_entries);
- /* Bad luck, we won't store this stack. */
- if (hash == 0)
- goto exit;
-
bucket = &stack_table[hash & STACK_HASH_MASK];
/*
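
Reserving one handle bit as a "valid" flag guarantees that every stored stack encodes to a non-zero handle, which is why the old refusal to store stacks whose hash happened to be 0 can be dropped: a zero depot_stack_handle_t now always means "no stack recorded". A sketch of how such a handle decodes back into its record, mirroring the bitfield layout above (stack_slabs is the depot's slab array; treat this as an outline of the fetch path, not a verbatim copy):

/* Sketch: unpack a non-zero handle into its stack_record. */
static struct stack_record *handle_to_stack(depot_stack_handle_t handle)
{
	union handle_parts parts = { .handle = handle };
	void *slab = stack_slabs[parts.slabindex];

	return slab + (parts.offset << STACK_ALLOC_ALIGN);
}
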
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 27a7a26b1ece..93f45011a59d 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -2444,6 +2444,22 @@ static struct bpf_test tests[] = {
{ { 0, 4294967295U } },
},
{
+ "ALU_ADD_X: 2 + 4294967294 = 0",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_LD_IMM64(R1, 4294967294U),
+ BPF_ALU32_REG(BPF_ADD, R0, R1),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
"ALU64_ADD_X: 1 + 2 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
@@ -2467,6 +2483,23 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 4294967295U } },
},
+ {
+ "ALU64_ADD_X: 2 + 4294967294 = 4294967296",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 2),
+ BPF_LD_IMM64(R1, 4294967294U),
+ BPF_LD_IMM64(R2, 4294967296ULL),
+ BPF_ALU64_REG(BPF_ADD, R0, R1),
+ BPF_JMP_REG(BPF_JEQ, R0, R2, 2),
+ BPF_MOV32_IMM(R0, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_ALU | BPF_ADD | BPF_K */
{
"ALU_ADD_K: 1 + 2 = 3",
@@ -2502,6 +2535,21 @@ static struct bpf_test tests[] = {
{ { 0, 4294967295U } },
},
{
+ "ALU_ADD_K: 4294967294 + 2 = 0",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 4294967294U),
+ BPF_ALU32_IMM(BPF_ADD, R0, 2),
+ BPF_JMP_IMM(BPF_JEQ, R0, 0, 2),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
"ALU_ADD_K: 0 + (-1) = 0x00000000ffffffff",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x0),
@@ -2518,6 +2566,70 @@ static struct bpf_test tests[] = {
{ { 0, 0x1 } },
},
{
+ "ALU_ADD_K: 0 + 0xffff = 0xffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0),
+ BPF_LD_IMM64(R3, 0xffff),
+ BPF_ALU32_IMM(BPF_ADD, R2, 0xffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU_ADD_K: 0 + 0x7fffffff = 0x7fffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0),
+ BPF_LD_IMM64(R3, 0x7fffffff),
+ BPF_ALU32_IMM(BPF_ADD, R2, 0x7fffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU_ADD_K: 0 + 0x80000000 = 0x80000000",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0),
+ BPF_LD_IMM64(R3, 0x80000000),
+ BPF_ALU32_IMM(BPF_ADD, R2, 0x80000000),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU_ADD_K: 0 + 0x80008000 = 0x80008000",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0),
+ BPF_LD_IMM64(R3, 0x80008000),
+ BPF_ALU32_IMM(BPF_ADD, R2, 0x80008000),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
"ALU64_ADD_K: 1 + 2 = 3",
.u.insns_int = {
BPF_LD_IMM64(R0, 1),
@@ -2551,6 +2663,22 @@ static struct bpf_test tests[] = {
{ { 0, 2147483647 } },
},
{
+ "ALU64_ADD_K: 4294967294 + 2 = 4294967296",
+ .u.insns_int = {
+ BPF_LD_IMM64(R0, 4294967294U),
+ BPF_LD_IMM64(R1, 4294967296ULL),
+ BPF_ALU64_IMM(BPF_ADD, R0, 2),
+ BPF_JMP_REG(BPF_JEQ, R0, R1, 2),
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
"ALU64_ADD_K: 2147483646 + -2147483647 = -1",
.u.insns_int = {
BPF_LD_IMM64(R0, 2147483646),
@@ -2593,6 +2721,70 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0x1 } },
},
+ {
+ "ALU64_ADD_K: 0 + 0xffff = 0xffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0),
+ BPF_LD_IMM64(R3, 0xffff),
+ BPF_ALU64_IMM(BPF_ADD, R2, 0xffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_ADD_K: 0 + 0x7fffffff = 0x7fffffff",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0),
+ BPF_LD_IMM64(R3, 0x7fffffff),
+ BPF_ALU64_IMM(BPF_ADD, R2, 0x7fffffff),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU64_ADD_K: 0 + 0x80000000 = 0xffffffff80000000",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0),
+ BPF_LD_IMM64(R3, 0xffffffff80000000LL),
+ BPF_ALU64_IMM(BPF_ADD, R2, 0x80000000),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
+ {
+ "ALU_ADD_K: 0 + 0x80008000 = 0xffffffff80008000",
+ .u.insns_int = {
+ BPF_LD_IMM64(R2, 0x0),
+ BPF_LD_IMM64(R3, 0xffffffff80008000LL),
+ BPF_ALU64_IMM(BPF_ADD, R2, 0x80008000),
+ BPF_JMP_REG(BPF_JEQ, R2, R3, 2),
+ BPF_MOV32_IMM(R0, 2),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0x1 } },
+ },
/* BPF_ALU | BPF_SUB | BPF_X */
{
"ALU_SUB_X: 3 - 1 = 2",
@@ -4222,6 +4414,20 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ {
+ "JMP_JGT_K: Unsigned jump: if (-1 > 1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_JMP_IMM(BPF_JGT, R1, 1, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JGE | BPF_K */
{
"JMP_JGE_K: if (3 >= 2) return 1",
@@ -4303,7 +4509,7 @@ static struct bpf_test tests[] = {
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
- BPF_JMP_IMM(BPF_JNE, R1, 2, 1),
+ BPF_JMP_IMM(BPF_JSET, R1, 2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
@@ -4317,7 +4523,7 @@ static struct bpf_test tests[] = {
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
- BPF_JMP_IMM(BPF_JNE, R1, 0xffffffff, 1),
+ BPF_JMP_IMM(BPF_JSET, R1, 0xffffffff, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
@@ -4404,6 +4610,21 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
+ {
+ "JMP_JGT_X: Unsigned jump: if (-1 > 1) return 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, -1),
+ BPF_LD_IMM64(R2, 1),
+ BPF_JMP_REG(BPF_JGT, R1, R2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JGE | BPF_X */
{
"JMP_JGE_X: if (3 >= 2) return 1",
@@ -4474,7 +4695,7 @@ static struct bpf_test tests[] = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 2),
- BPF_JMP_REG(BPF_JNE, R1, R2, 1),
+ BPF_JMP_REG(BPF_JSET, R1, R2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
@@ -4489,7 +4710,7 @@ static struct bpf_test tests[] = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_LD_IMM64(R1, 3),
BPF_LD_IMM64(R2, 0xffffffff),
- BPF_JMP_REG(BPF_JNE, R1, R2, 1),
+ BPF_JMP_REG(BPF_JSET, R1, R2, 1),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
@@ -5400,7 +5621,10 @@ static struct bpf_prog *generate_filter(int which, int *err)
fp->type = BPF_PROG_TYPE_SOCKET_FILTER;
memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn));
- bpf_prog_select_runtime(fp);
+ /* We cannot error here as we don't need type compatibility
+ * checks.
+ */
+ fp = bpf_prog_select_runtime(fp, err);
break;
}
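
The new ADD test cases pin down two easy-to-miss semantics: BPF_ALU32 additions wrap at 32 bits (so 2 + 4294967294 must give 0) while BPF_ALU64 additions do not, and a 32-bit immediate is sign-extended before a 64-bit add, so ALU64_ADD_K with 0x80000000 must produce 0xffffffff80000000. (Two pairs of jump tests also swap BPF_JNE for BPF_JSET, and generate_filter() adapts to bpf_prog_select_runtime() returning the program and reporting failure through *err.) The sign-extension rule in plain C, for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Sketch: eBPF immediates are s32 and are sign-extended for 64-bit ALU ops. */
int main(void)
{
	int32_t imm = INT32_MIN;		/* bit pattern 0x80000000 */
	uint64_t r2 = 0;
	uint32_t r0;

	r2 += (int64_t)imm;			/* sign-extend, then 64-bit add */
	printf("%#llx\n", (unsigned long long)r2);	/* 0xffffffff80000000 */

	r0 = 2 + 4294967294U;			/* 32-bit add wraps */
	printf("%u\n", r0);			/* 0 */
	return 0;
}
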
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 270bf7289b1e..297fdb5e74bd 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -143,7 +143,7 @@ static void test_bucket_stats(struct rhashtable *ht)
struct rhashtable_iter hti;
struct rhash_head *pos;
- err = rhashtable_walk_init(ht, &hti);
+ err = rhashtable_walk_init(ht, &hti, GFP_KERNEL);
if (err) {
pr_warn("Test failed: allocation error");
return;