path: root/tools/testing
author     David S. Miller <davem@davemloft.net>  2019-06-20 00:06:27 -0400
committer  David S. Miller <davem@davemloft.net>  2019-06-20 00:06:27 -0400
commit     dca73a65a68329ee386d3ff473152bac66eaab39 (patch)
tree       97c41afb932bdd6cbe67e7ffc38bfe5952c97798 /tools/testing
parent     497ad9f5b2dc86b733761b9afa44ecfa2f17be65 (diff)
parent     94079b64255fe40b9b53fd2e4081f68b9b14f54a (diff)
download   linux-dca73a65a68329ee386d3ff473152bac66eaab39.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2019-06-19

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) new SO_REUSEPORT_DETACH_BPF setsockopt, from Martin.

2) BTF based map definition, from Andrii.

3) support bpf_map_lookup_elem for xskmap, from Jonathan.

4) bounded loops and scalar precision logic in the verifier, from Alexei.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
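The BTF based map definitions in (2) are the pattern most of the conversions below follow: instead of a struct bpf_map_def with key_size/value_size, each map is declared as an anonymous struct placed in the ".maps" section, and its key/value pointer members convey the key and value types via BTF. A minimal sketch of the convention — the "counters" map here is a hypothetical example, not part of this series:

    /* sketch of a BTF-defined map; "counters" is a hypothetical name */
    struct {
            __u32 type;
            __u32 max_entries;
            __u32 *key;     /* key type conveyed via BTF, not key_size */
            __u64 *value;   /* value type conveyed via BTF */
    } counters SEC(".maps") = {
            .type = BPF_MAP_TYPE_ARRAY,
            .max_entries = 1,
    };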
Diffstat (limited to 'tools/testing')
-rw-r--r--  tools/testing/selftests/bpf/Makefile | 3
-rw-r--r--  tools/testing/selftests/bpf/bpf_endian.h | 1
-rw-r--r--  tools/testing/selftests/bpf/bpf_helpers.h | 4
-rw-r--r--  tools/testing/selftests/bpf/bpf_util.h | 37
-rw-r--r--  tools/testing/selftests/bpf/cgroup_helpers.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c | 67
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_flow.c | 18
-rw-r--r--  tools/testing/selftests/bpf/progs/loop1.c | 28
-rw-r--r--  tools/testing/selftests/bpf/progs/loop2.c | 28
-rw-r--r--  tools/testing/selftests/bpf/progs/loop3.c | 22
-rw-r--r--  tools/testing/selftests/bpf/progs/netcnt_prog.c | 22
-rw-r--r--  tools/testing/selftests/bpf/progs/pyperf.h | 6
-rw-r--r--  tools/testing/selftests/bpf/progs/pyperf600.c | 9
-rw-r--r--  tools/testing/selftests/bpf/progs/pyperf600_nounroll.c | 8
-rw-r--r--  tools/testing/selftests/bpf/progs/socket_cookie_prog.c | 49
-rw-r--r--  tools/testing/selftests/bpf/progs/sockmap_parse_prog.c | 1
-rw-r--r--  tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c | 2
-rw-r--r--  tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c | 1
-rw-r--r--  tools/testing/selftests/bpf/progs/strobemeta.c | 10
-rw-r--r--  tools/testing/selftests/bpf/progs/strobemeta.h | 528
-rw-r--r--  tools/testing/selftests/bpf/progs/strobemeta_nounroll1.c | 9
-rw-r--r--  tools/testing/selftests/bpf/progs/strobemeta_nounroll2.c | 9
-rw-r--r--  tools/testing/selftests/bpf/progs/test_btf_newkv.c | 73
-rw-r--r--  tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c | 27
-rw-r--r--  tools/testing/selftests/bpf/progs/test_global_data.c | 27
-rw-r--r--  tools/testing/selftests/bpf/progs/test_l4lb.c | 45
-rw-r--r--  tools/testing/selftests/bpf/progs/test_l4lb_noinline.c | 45
-rw-r--r--  tools/testing/selftests/bpf/progs/test_map_lock.c | 22
-rw-r--r--  tools/testing/selftests/bpf/progs/test_seg6_loop.c | 261
-rw-r--r--  tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c | 45
-rw-r--r--  tools/testing/selftests/bpf/progs/test_send_signal_kern.c | 22
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sock_fields_kern.c | 60
-rw-r--r--  tools/testing/selftests/bpf/progs/test_spin_lock.c | 33
-rw-r--r--  tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c | 44
-rw-r--r--  tools/testing/selftests/bpf/progs/test_stacktrace_map.c | 40
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sysctl_loop1.c | 71
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sysctl_loop2.c | 72
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sysctl_prog.c | 5
-rw-r--r--  tools/testing/selftests/bpf/progs/test_tcp_estats.c | 9
-rw-r--r--  tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c | 18
-rw-r--r--  tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c | 18
-rw-r--r--  tools/testing/selftests/bpf/progs/test_xdp.c | 18
-rw-r--r--  tools/testing/selftests/bpf/progs/test_xdp_loop.c | 231
-rw-r--r--  tools/testing/selftests/bpf/progs/test_xdp_noinline.c | 60
-rw-r--r--  tools/testing/selftests/bpf/test_btf.c | 10
-rw-r--r--  tools/testing/selftests/bpf/test_select_reuseport.c | 54
-rw-r--r--  tools/testing/selftests/bpf/test_socket_cookie.c | 24
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c | 11
-rw-r--r--  tools/testing/selftests/bpf/verifier/calls.c | 22
-rw-r--r--  tools/testing/selftests/bpf/verifier/cfg.c | 11
-rw-r--r--  tools/testing/selftests/bpf/verifier/direct_packet_access.c | 3
-rw-r--r--  tools/testing/selftests/bpf/verifier/helper_access_var_len.c | 28
-rw-r--r--  tools/testing/selftests/bpf/verifier/loops1.c | 161
-rw-r--r--  tools/testing/selftests/bpf/verifier/prevent_map_lookup.c | 15
-rw-r--r--  tools/testing/selftests/bpf/verifier/sock.c | 18
55 files changed, 2119 insertions(+), 348 deletions(-)
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index d8df5c9b5b2f..fb5ce43e28b3 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -280,4 +280,5 @@ $(OUTPUT)/verifier/tests.h: $(VERIFIER_TESTS_DIR) $(VERIFIER_TEST_FILES)
) > $(VERIFIER_TESTS_H))
EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(ALU32_BUILD_DIR) \
- $(VERIFIER_TESTS_H) $(PROG_TESTS_H) $(MAP_TESTS_H)
+ $(VERIFIER_TESTS_H) $(PROG_TESTS_H) $(MAP_TESTS_H) \
+ feature
diff --git a/tools/testing/selftests/bpf/bpf_endian.h b/tools/testing/selftests/bpf/bpf_endian.h
index b25595ea4a78..05f036df8a4c 100644
--- a/tools/testing/selftests/bpf/bpf_endian.h
+++ b/tools/testing/selftests/bpf/bpf_endian.h
@@ -2,6 +2,7 @@
#ifndef __BPF_ENDIAN__
#define __BPF_ENDIAN__
+#include <linux/stddef.h>
#include <linux/swab.h>
/* LLVM's BPF target selects the endianness of the CPU
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index e6d243b7cd74..1a5b1accf091 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -31,7 +31,7 @@ static int (*bpf_map_pop_elem)(void *map, void *value) =
(void *) BPF_FUNC_map_pop_elem;
static int (*bpf_map_peek_elem)(void *map, void *value) =
(void *) BPF_FUNC_map_peek_elem;
-static int (*bpf_probe_read)(void *dst, int size, void *unsafe_ptr) =
+static int (*bpf_probe_read)(void *dst, int size, const void *unsafe_ptr) =
(void *) BPF_FUNC_probe_read;
static unsigned long long (*bpf_ktime_get_ns)(void) =
(void *) BPF_FUNC_ktime_get_ns;
@@ -62,7 +62,7 @@ static int (*bpf_perf_event_output)(void *ctx, void *map,
(void *) BPF_FUNC_perf_event_output;
static int (*bpf_get_stackid)(void *ctx, void *map, int flags) =
(void *) BPF_FUNC_get_stackid;
-static int (*bpf_probe_write_user)(void *dst, void *src, int size) =
+static int (*bpf_probe_write_user)(void *dst, const void *src, int size) =
(void *) BPF_FUNC_probe_write_user;
static int (*bpf_current_task_under_cgroup)(void *map, int index) =
(void *) BPF_FUNC_current_task_under_cgroup;
diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
index a29206ebbd13..ec219f84e041 100644
--- a/tools/testing/selftests/bpf/bpf_util.h
+++ b/tools/testing/selftests/bpf/bpf_util.h
@@ -6,44 +6,17 @@
#include <stdlib.h>
#include <string.h>
#include <errno.h>
+#include <libbpf.h> /* libbpf_num_possible_cpus */
static inline unsigned int bpf_num_possible_cpus(void)
{
- static const char *fcpu = "/sys/devices/system/cpu/possible";
- unsigned int start, end, possible_cpus = 0;
- char buff[128];
- FILE *fp;
- int len, n, i, j = 0;
+ int possible_cpus = libbpf_num_possible_cpus();
- fp = fopen(fcpu, "r");
- if (!fp) {
- printf("Failed to open %s: '%s'!\n", fcpu, strerror(errno));
+ if (possible_cpus < 0) {
+ printf("Failed to get # of possible cpus: '%s'!\n",
+ strerror(-possible_cpus));
exit(1);
}
-
- if (!fgets(buff, sizeof(buff), fp)) {
- printf("Failed to read %s!\n", fcpu);
- exit(1);
- }
-
- len = strlen(buff);
- for (i = 0; i <= len; i++) {
- if (buff[i] == ',' || buff[i] == '\0') {
- buff[i] = '\0';
- n = sscanf(&buff[j], "%u-%u", &start, &end);
- if (n <= 0) {
- printf("Failed to retrieve # possible CPUs!\n");
- exit(1);
- } else if (n == 1) {
- end = start;
- }
- possible_cpus += end - start + 1;
- j = i + 1;
- }
- }
-
- fclose(fp);
-
return possible_cpus;
}
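Most callers of bpf_num_possible_cpus() use it to size buffers for per-CPU map lookups, where userspace receives one value slot per possible CPU. A sketch under those assumptions (map_fd is an already-open BPF_MAP_TYPE_PERCPU_ARRAY fd; this is not code from this series):

    /* sketch: summing a per-CPU counter from userspace; map_fd assumed */
    unsigned int i, nr_cpus = bpf_num_possible_cpus();
    __u64 sum = 0, values[nr_cpus];
    __u32 key = 0;

    if (bpf_map_lookup_elem(map_fd, &key, values) == 0)
            for (i = 0; i < nr_cpus; i++)
                    sum += values[i];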
diff --git a/tools/testing/selftests/bpf/cgroup_helpers.c b/tools/testing/selftests/bpf/cgroup_helpers.c
index 0d89f0396be4..e95c33e333a4 100644
--- a/tools/testing/selftests/bpf/cgroup_helpers.c
+++ b/tools/testing/selftests/bpf/cgroup_helpers.c
@@ -47,7 +47,7 @@ int enable_all_controllers(char *cgroup_path)
char buf[PATH_MAX];
char *c, *c2;
int fd, cfd;
- size_t len;
+ ssize_t len;
snprintf(path, sizeof(path), "%s/cgroup.controllers", cgroup_path);
fd = open(path, O_RDONLY);
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
index c0091137074b..e1b55261526f 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
@@ -5,7 +5,7 @@ static int libbpf_debug_print(enum libbpf_print_level level,
const char *format, va_list args)
{
if (level != LIBBPF_DEBUG)
- return 0;
+ return vfprintf(stderr, format, args);
if (!strstr(format, "verifier log"))
return 0;
@@ -32,24 +32,69 @@ static int check_load(const char *file, enum bpf_prog_type type)
void test_bpf_verif_scale(void)
{
- const char *scale[] = {
- "./test_verif_scale1.o", "./test_verif_scale2.o", "./test_verif_scale3.o"
+ const char *sched_cls[] = {
+ "./test_verif_scale1.o", "./test_verif_scale2.o", "./test_verif_scale3.o",
};
- const char *pyperf[] = {
- "./pyperf50.o", "./pyperf100.o", "./pyperf180.o"
+ const char *raw_tp[] = {
+ /* full unroll by llvm */
+ "./pyperf50.o", "./pyperf100.o", "./pyperf180.o",
+
+ /* partial unroll. llvm will unroll loop ~150 times.
+ * C loop count -> 600.
+ * Asm loop count -> 4.
+ * 16k insns in loop body.
+ * Total of 5 such loops. Total program size ~82k insns.
+ */
+ "./pyperf600.o",
+
+ /* no unroll at all.
+ * C loop count -> 600.
+ * ASM loop count -> 600.
+ * ~110 insns in loop body.
+ * Total of 5 such loops. Total program size ~1500 insns.
+ */
+ "./pyperf600_nounroll.o",
+
+ "./loop1.o", "./loop2.o",
+
+ /* partial unroll. 19k insn in a loop.
+ * Total program size 20.8k insn.
+ * ~350k processed_insns
+ */
+ "./strobemeta.o",
+
+ /* no unroll, tiny loops */
+ "./strobemeta_nounroll1.o",
+ "./strobemeta_nounroll2.o",
+ };
+ const char *cg_sysctl[] = {
+ "./test_sysctl_loop1.o", "./test_sysctl_loop2.o",
};
int err, i;
if (verifier_stats)
libbpf_set_print(libbpf_debug_print);
- for (i = 0; i < ARRAY_SIZE(scale); i++) {
- err = check_load(scale[i], BPF_PROG_TYPE_SCHED_CLS);
- printf("test_scale:%s:%s\n", scale[i], err ? "FAIL" : "OK");
+ err = check_load("./loop3.o", BPF_PROG_TYPE_RAW_TRACEPOINT);
+ printf("test_scale:loop3:%s\n", err ? (error_cnt--, "OK") : "FAIL");
+
+ for (i = 0; i < ARRAY_SIZE(sched_cls); i++) {
+ err = check_load(sched_cls[i], BPF_PROG_TYPE_SCHED_CLS);
+ printf("test_scale:%s:%s\n", sched_cls[i], err ? "FAIL" : "OK");
}
- for (i = 0; i < ARRAY_SIZE(pyperf); i++) {
- err = check_load(pyperf[i], BPF_PROG_TYPE_RAW_TRACEPOINT);
- printf("test_scale:%s:%s\n", pyperf[i], err ? "FAIL" : "OK");
+ for (i = 0; i < ARRAY_SIZE(raw_tp); i++) {
+ err = check_load(raw_tp[i], BPF_PROG_TYPE_RAW_TRACEPOINT);
+ printf("test_scale:%s:%s\n", raw_tp[i], err ? "FAIL" : "OK");
}
+
+ for (i = 0; i < ARRAY_SIZE(cg_sysctl); i++) {
+ err = check_load(cg_sysctl[i], BPF_PROG_TYPE_CGROUP_SYSCTL);
+ printf("test_scale:%s:%s\n", cg_sysctl[i], err ? "FAIL" : "OK");
+ }
+ err = check_load("./test_xdp_loop.o", BPF_PROG_TYPE_XDP);
+ printf("test_scale:test_xdp_loop:%s\n", err ? "FAIL" : "OK");
+
+ err = check_load("./test_seg6_loop.o", BPF_PROG_TYPE_LWT_SEG6LOCAL);
+ printf("test_scale:test_seg6_loop:%s\n", err ? "FAIL" : "OK");
}
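After this change, non-debug libbpf messages always reach stderr, while LIBBPF_DEBUG output is still filtered down to verifier logs. The callback shape is libbpf's leveled print API, installed above via libbpf_set_print(); a standalone sketch of the same pattern (print_all is an illustrative name):

    /* sketch: forward all libbpf output, including LIBBPF_DEBUG, to stderr */
    static int print_all(enum libbpf_print_level level,
                         const char *format, va_list args)
    {
            return vfprintf(stderr, format, args);
    }

    /* install before loading programs */
    libbpf_set_print(print_all);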
diff --git a/tools/testing/selftests/bpf/progs/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c
index 81ad9a0b29d0..849f42e548b5 100644
--- a/tools/testing/selftests/bpf/progs/bpf_flow.c
+++ b/tools/testing/selftests/bpf/progs/bpf_flow.c
@@ -57,17 +57,25 @@ struct frag_hdr {
__be32 identification;
};
-struct bpf_map_def SEC("maps") jmp_table = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 key_size;
+ __u32 value_size;
+} jmp_table SEC(".maps") = {
.type = BPF_MAP_TYPE_PROG_ARRAY,
+ .max_entries = 8,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
- .max_entries = 8
};
-struct bpf_map_def SEC("maps") last_dissection = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ struct bpf_flow_keys *value;
+} last_dissection SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(struct bpf_flow_keys),
.max_entries = 1,
};
diff --git a/tools/testing/selftests/bpf/progs/loop1.c b/tools/testing/selftests/bpf/progs/loop1.c
new file mode 100644
index 000000000000..dea395af9ea9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/loop1.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("raw_tracepoint/kfree_skb")
+int nested_loops(volatile struct pt_regs* ctx)
+{
+ int i, j, sum = 0, m;
+
+ for (j = 0; j < 300; j++)
+ for (i = 0; i < j; i++) {
+ if (j & 1)
+ m = ctx->rax;
+ else
+ m = j;
+ sum += i * m;
+ }
+
+ return sum;
+}
diff --git a/tools/testing/selftests/bpf/progs/loop2.c b/tools/testing/selftests/bpf/progs/loop2.c
new file mode 100644
index 000000000000..0637bd8e8bcf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/loop2.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("raw_tracepoint/consume_skb")
+int while_true(volatile struct pt_regs* ctx)
+{
+ int i = 0;
+
+ while (true) {
+ if (ctx->rax & 1)
+ i += 3;
+ else
+ i += 7;
+ if (i > 40)
+ break;
+ }
+
+ return i;
+}
diff --git a/tools/testing/selftests/bpf/progs/loop3.c b/tools/testing/selftests/bpf/progs/loop3.c
new file mode 100644
index 000000000000..30a0f6cba080
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/loop3.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("raw_tracepoint/consume_skb")
+int while_true(volatile struct pt_regs* ctx)
+{
+ __u64 i = 0, sum = 0;
+ do {
+ i++;
+ sum += ctx->rax;
+ } while (i < 0x100000000ULL);
+ return sum;
+}
diff --git a/tools/testing/selftests/bpf/progs/netcnt_prog.c b/tools/testing/selftests/bpf/progs/netcnt_prog.c
index 9f741e69cebe..a25c82a5b7c8 100644
--- a/tools/testing/selftests/bpf/progs/netcnt_prog.c
+++ b/tools/testing/selftests/bpf/progs/netcnt_prog.c
@@ -10,24 +10,22 @@
#define REFRESH_TIME_NS 100000000
#define NS_PER_SEC 1000000000
-struct bpf_map_def SEC("maps") percpu_netcnt = {
+struct {
+ __u32 type;
+ struct bpf_cgroup_storage_key *key;
+ struct percpu_net_cnt *value;
+} percpu_netcnt SEC(".maps") = {
.type = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
- .key_size = sizeof(struct bpf_cgroup_storage_key),
- .value_size = sizeof(struct percpu_net_cnt),
};
-BPF_ANNOTATE_KV_PAIR(percpu_netcnt, struct bpf_cgroup_storage_key,
- struct percpu_net_cnt);
-
-struct bpf_map_def SEC("maps") netcnt = {
+struct {
+ __u32 type;
+ struct bpf_cgroup_storage_key *key;
+ struct net_cnt *value;
+} netcnt SEC(".maps") = {
.type = BPF_MAP_TYPE_CGROUP_STORAGE,
- .key_size = sizeof(struct bpf_cgroup_storage_key),
- .value_size = sizeof(struct net_cnt),
};
-BPF_ANNOTATE_KV_PAIR(netcnt, struct bpf_cgroup_storage_key,
- struct net_cnt);
-
SEC("cgroup/skb")
int bpf_nextcnt(struct __sk_buff *skb)
{
diff --git a/tools/testing/selftests/bpf/progs/pyperf.h b/tools/testing/selftests/bpf/progs/pyperf.h
index 0cc5e4ee90bd..6b0781391be5 100644
--- a/tools/testing/selftests/bpf/progs/pyperf.h
+++ b/tools/testing/selftests/bpf/progs/pyperf.h
@@ -220,7 +220,11 @@ static inline __attribute__((__always_inline__)) int __on_event(struct pt_regs *
int32_t* symbol_counter = bpf_map_lookup_elem(&symbolmap, &sym);
if (symbol_counter == NULL)
return 0;
-#pragma unroll
+#ifdef NO_UNROLL
+#pragma clang loop unroll(disable)
+#else
+#pragma clang loop unroll(full)
+#endif
/* Unwind python stack */
for (int i = 0; i < STACK_MAX_LEN; ++i) {
if (frame_ptr && get_frame_data(frame_ptr, pidData, &frame, &sym)) {
diff --git a/tools/testing/selftests/bpf/progs/pyperf600.c b/tools/testing/selftests/bpf/progs/pyperf600.c
new file mode 100644
index 000000000000..cb49b89e37cd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/pyperf600.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#define STACK_MAX_LEN 600
+/* clang will not unroll the loop 600 times.
+ * Instead it unrolls it as much as it deems
+ * appropriate, but the loop still executes 600 times.
+ * Total program size is around 90k insns
+ */
+#include "pyperf.h"
diff --git a/tools/testing/selftests/bpf/progs/pyperf600_nounroll.c b/tools/testing/selftests/bpf/progs/pyperf600_nounroll.c
new file mode 100644
index 000000000000..6beff7502f4d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/pyperf600_nounroll.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#define STACK_MAX_LEN 600
+#define NO_UNROLL
+/* clang will not unroll at all.
+ * Total program size is around 2k insns
+ */
+#include "pyperf.h"
diff --git a/tools/testing/selftests/bpf/progs/socket_cookie_prog.c b/tools/testing/selftests/bpf/progs/socket_cookie_prog.c
index 9ff8ac4b0bf6..6aabb681fb9a 100644
--- a/tools/testing/selftests/bpf/progs/socket_cookie_prog.c
+++ b/tools/testing/selftests/bpf/progs/socket_cookie_prog.c
@@ -7,25 +7,36 @@
#include "bpf_helpers.h"
#include "bpf_endian.h"
-struct bpf_map_def SEC("maps") socket_cookies = {
- .type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(__u64),
- .value_size = sizeof(__u32),
- .max_entries = 1 << 8,
+struct socket_cookie {
+ __u64 cookie_key;
+ __u32 cookie_value;
+};
+
+struct {
+ __u32 type;
+ __u32 map_flags;
+ int *key;
+ struct socket_cookie *value;
+} socket_cookies SEC(".maps") = {
+ .type = BPF_MAP_TYPE_SK_STORAGE,
+ .map_flags = BPF_F_NO_PREALLOC,
};
SEC("cgroup/connect6")
int set_cookie(struct bpf_sock_addr *ctx)
{
- __u32 cookie_value = 0xFF;
- __u64 cookie_key;
+ struct socket_cookie *p;
if (ctx->family != AF_INET6 || ctx->user_family != AF_INET6)
return 1;
- cookie_key = bpf_get_socket_cookie(ctx);
- if (bpf_map_update_elem(&socket_cookies, &cookie_key, &cookie_value, 0))
- return 0;
+ p = bpf_sk_storage_get(&socket_cookies, ctx->sk, 0,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ if (!p)
+ return 1;
+
+ p->cookie_value = 0xFF;
+ p->cookie_key = bpf_get_socket_cookie(ctx);
return 1;
}
@@ -33,9 +44,8 @@ int set_cookie(struct bpf_sock_addr *ctx)
SEC("sockops")
int update_cookie(struct bpf_sock_ops *ctx)
{
- __u32 new_cookie_value;
- __u32 *cookie_value;
- __u64 cookie_key;
+ struct bpf_sock *sk;
+ struct socket_cookie *p;
if (ctx->family != AF_INET6)
return 1;
@@ -43,14 +53,17 @@ int update_cookie(struct bpf_sock_ops *ctx)
if (ctx->op != BPF_SOCK_OPS_TCP_CONNECT_CB)
return 1;
- cookie_key = bpf_get_socket_cookie(ctx);
+ if (!ctx->sk)
+ return 1;
+
+ p = bpf_sk_storage_get(&socket_cookies, ctx->sk, 0, 0);
+ if (!p)
+ return 1;
- cookie_value = bpf_map_lookup_elem(&socket_cookies, &cookie_key);
- if (!cookie_value)
+ if (p->cookie_key != bpf_get_socket_cookie(ctx))
return 1;
- new_cookie_value = (ctx->local_port << 8) | *cookie_value;
- bpf_map_update_elem(&socket_cookies, &cookie_key, &new_cookie_value, 0);
+ p->cookie_value = (ctx->local_port << 8) | p->cookie_value;
return 1;
}
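The rewrite above moves the cookie out of a global hash keyed by socket cookie and into per-socket storage. From userspace, a BPF_MAP_TYPE_SK_STORAGE map is keyed by a socket file descriptor; a sketch (map_fd and client_fd are assumed to come from the test setup):

    /* sketch: userspace lookup into an SK_STORAGE map, keyed by socket fd */
    struct socket_cookie val;

    if (bpf_map_lookup_elem(map_fd, &client_fd, &val) == 0)
            printf("key=%llu value=%u\n", val.cookie_key, val.cookie_value);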
diff --git a/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c
index ed3e4a551c57..9390e0244259 100644
--- a/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_parse_prog.c
@@ -1,6 +1,5 @@
#include <linux/bpf.h>
#include "bpf_helpers.h"
-#include "bpf_util.h"
#include "bpf_endian.h"
int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c
index 65fbfdb6cd3a..e80484d98a1a 100644
--- a/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c
@@ -1,6 +1,6 @@
#include <linux/bpf.h>
+
#include "bpf_helpers.h"
-#include "bpf_util.h"
#include "bpf_endian.h"
int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
index bdc22be46f2e..d85c874ef25e 100644
--- a/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
+++ b/tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c
@@ -1,6 +1,5 @@
#include <linux/bpf.h>
#include "bpf_helpers.h"
-#include "bpf_util.h"
#include "bpf_endian.h"
int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/strobemeta.c b/tools/testing/selftests/bpf/progs/strobemeta.c
new file mode 100644
index 000000000000..d3df3d86f092
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/strobemeta.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+// Copyright (c) 2019 Facebook
+
+#define STROBE_MAX_INTS 2
+#define STROBE_MAX_STRS 25
+#define STROBE_MAX_MAPS 100
+#define STROBE_MAX_MAP_ENTRIES 20
+/* full unroll by llvm */
+#undef NO_UNROLL
+#include "strobemeta.h"
diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h
new file mode 100644
index 000000000000..1ff73f60a3e4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/strobemeta.h
@@ -0,0 +1,528 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include "bpf_helpers.h"
+
+typedef uint32_t pid_t;
+struct task_struct {};
+
+#define TASK_COMM_LEN 16
+#define PERF_MAX_STACK_DEPTH 127
+
+#define STROBE_TYPE_INVALID 0
+#define STROBE_TYPE_INT 1
+#define STROBE_TYPE_STR 2
+#define STROBE_TYPE_MAP 3
+
+#define STACK_TABLE_EPOCH_SHIFT 20
+#define STROBE_MAX_STR_LEN 1
+#define STROBE_MAX_CFGS 32
+#define STROBE_MAX_PAYLOAD \
+ (STROBE_MAX_STRS * STROBE_MAX_STR_LEN + \
+ STROBE_MAX_MAPS * (1 + STROBE_MAX_MAP_ENTRIES * 2) * STROBE_MAX_STR_LEN)
+
+struct strobe_value_header {
+ /*
+ * meaning depends on type:
+ * 1. int: 0, if value not set, 1 otherwise
+ * 2. str: 1 always, whether value is set or not is determined by ptr
+ * 3. map: 1 always, pointer points to additional struct with number
+ * of entries (up to STROBE_MAX_MAP_ENTRIES)
+ */
+ uint16_t len;
+ /*
+ * _reserved might be used for some future fields/flags, but we always
+ * want to keep strobe_value_header to be 8 bytes, so BPF can read 16
+ * bytes in one go and get both header and value
+ */
+ uint8_t _reserved[6];
+};
+
+/*
+ * strobe_value_generic is used from BPF probe only, but needs to be a union
+ * of strobe_value_int/strobe_value_str/strobe_value_map
+ */
+struct strobe_value_generic {
+ struct strobe_value_header header;
+ union {
+ int64_t val;
+ void *ptr;
+ };
+};
+
+struct strobe_value_int {
+ struct strobe_value_header header;
+ int64_t value;
+};
+
+struct strobe_value_str {
+ struct strobe_value_header header;
+ const char* value;
+};
+
+struct strobe_value_map {
+ struct strobe_value_header header;
+ const struct strobe_map_raw* value;
+};
+
+struct strobe_map_entry {
+ const char* key;
+ const char* val;
+};
+
+/*
+ * Map of C-string key/value pairs with fixed maximum capacity. Each map has
+ * corresponding int64 ID, which application can use (or ignore) in whatever
+ * way appropriate. The map is "write-only"; there is no way to get data
+ * back out of it. It is intended to provide metadata for profilers, not
+ * for internal in-app communication. All methods are thread-safe.
+ */
+struct strobe_map_raw {
+ /*
+ * general purpose unique ID that's up to application to decide
+ * whether and how to use; for request metadata use case id is unique
+ * request ID that's used to match metadata with stack traces on
+ * Strobelight backend side
+ */
+ int64_t id;
+ /* number of used entries in map */
+ int64_t cnt;
+ /*
+ * having volatile doesn't change anything on BPF side, but clang
+ * emits warnings for passing `volatile const char *` into
+ * bpf_probe_read_str that expects just `const char *`
+ */
+ const char* tag;
+ /*
+ * key/value entries, each consisting of 2 pointers to key and value
+ * C strings
+ */
+ struct strobe_map_entry entries[STROBE_MAX_MAP_ENTRIES];
+};
+
+/* Following values define supported values of TLS mode */
+#define TLS_NOT_SET -1
+#define TLS_LOCAL_EXEC 0
+#define TLS_IMM_EXEC 1
+#define TLS_GENERAL_DYN 2
+
+/*
+ * structure that universally represents TLS location (both for static
+ * executables and shared libraries)
+ */
+struct strobe_value_loc {
+ /*
+ * tls_mode defines what TLS mode was used for particular metavariable:
+ * - -1 (TLS_NOT_SET) - no metavariable;
+ * - 0 (TLS_LOCAL_EXEC) - Local Executable mode;
+ * - 1 (TLS_IMM_EXEC) - Immediate Executable mode;
+ * - 2 (TLS_GENERAL_DYN) - General Dynamic mode;
+ * Local Dynamic mode is not yet supported, as it has never been seen
+ * in practice. Mode defines how the offset field is interpreted. See
+ * calc_location() below for details.
+ */
+ int64_t tls_mode;
+ /*
+ * TLS_LOCAL_EXEC: offset from thread pointer (fs:0 for x86-64,
+ * tpidr_el0 for aarch64).
+ * TLS_IMM_EXEC: absolute address of GOT entry containing offset
+ * from thread pointer;
+ * TLS_GENERAL_DYN: absolute address of double GOT entry
+ * containing tls_index_t struct;
+ */
+ int64_t offset;
+};
+
+struct strobemeta_cfg {
+ int64_t req_meta_idx;
+ struct strobe_value_loc int_locs[STROBE_MAX_INTS];
+ struct strobe_value_loc str_locs[STROBE_MAX_STRS];
+ struct strobe_value_loc map_locs[STROBE_MAX_MAPS];
+};
+
+struct strobe_map_descr {
+ uint64_t id;
+ int16_t tag_len;
+ /*
+ * cnt <0 - map value isn't set;
+ * 0 - map has id set, but no key/value entries
+ */
+ int16_t cnt;
+ /*
+ * both key_lens[i] and val_lens[i] should be >0 for present key/value
+ * entry
+ */
+ uint16_t key_lens[STROBE_MAX_MAP_ENTRIES];
+ uint16_t val_lens[STROBE_MAX_MAP_ENTRIES];
+};
+
+struct strobemeta_payload {
+ /* req_id has valid request ID, if req_meta_valid == 1 */
+ int64_t req_id;
+ uint8_t req_meta_valid;
+ /*
+ * mask has Nth bit set to 1, if Nth metavar was present and
+ * successfully read
+ */
+ uint64_t int_vals_set_mask;
+ int64_t int_vals[STROBE_MAX_INTS];
+ /* len is >0 for present values */
+ uint16_t str_lens[STROBE_MAX_STRS];
+ /* if map_descrs[i].cnt == -1, metavar is not present/set */
+ struct strobe_map_descr map_descrs[STROBE_MAX_MAPS];
+ /*
+ * payload has compactly packed values of str and map variables in the
+ * form: strval1\0strval2\0map1key1\0map1val1\0map2key1\0map2val1\0
+ * (and so on); str_lens[i], key_lens[i] and val_lens[i] determines
+ * value length
+ */
+ char payload[STROBE_MAX_PAYLOAD];
+};
+
+struct strobelight_bpf_sample {
+ uint64_t ktime;
+ char comm[TASK_COMM_LEN];
+ pid_t pid;
+ int user_stack_id;
+ int kernel_stack_id;
+ int has_meta;
+ struct strobemeta_payload metadata;
+ /*
+ * makes it possible to pass (<real payload size> + 1) as data size to
+ * perf_submit() to avoid perf_submit's paranoia about passing zero as
+ * size, as it deduces that <real payload size> might be
+ * **theoretically** zero
+ */
+ char dummy_safeguard;
+};
+
+struct bpf_map_def SEC("maps") samples = {
+ .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .max_entries = 32,
+};
+
+struct bpf_map_def SEC("maps") stacks_0 = {
+ .type = BPF_MAP_TYPE_STACK_TRACE,
+ .key_size = sizeof(uint32_t),
+ .value_size = sizeof(uint64_t) * PERF_MAX_STACK_DEPTH,
+ .max_entries = 16,
+};
+
+struct bpf_map_def SEC("maps") stacks_1 = {
+ .type = BPF_MAP_TYPE_STACK_TRACE,
+ .key_size = sizeof(uint32_t),
+ .value_size = sizeof(uint64_t) * PERF_MAX_STACK_DEPTH,
+ .max_entries = 16,
+};
+
+struct bpf_map_def SEC("maps") sample_heap = {
+ .type = BPF_MAP_TYPE_PERCPU_ARRAY,
+ .key_size = sizeof(uint32_t),
+ .value_size = sizeof(struct strobelight_bpf_sample),
+ .max_entries = 1,
+};
+
+struct bpf_map_def SEC("maps") strobemeta_cfgs = {
+ .type = BPF_MAP_TYPE_PERCPU_ARRAY,
+ .key_size = sizeof(pid_t),
+ .value_size = sizeof(struct strobemeta_cfg),
+ .max_entries = STROBE_MAX_CFGS,
+};
+
+/* Type for the dtv. */
+/* https://github.com/lattera/glibc/blob/master/nptl/sysdeps/x86_64/tls.h#L34 */
+typedef union dtv {
+ size_t counter;
+ struct {
+ void* val;
+ bool is_static;
+ } pointer;
+} dtv_t;
+
+/* Partial definition for tcbhead_t */
+/* https://github.com/bminor/glibc/blob/master/sysdeps/x86_64/nptl/tls.h#L42 */
+struct tcbhead {
+ void* tcb;
+ dtv_t* dtv;
+};
+
+/*
+ * TLS module/offset information for shared library case.
+ * For x86-64, this is mapped onto two entries in GOT.
+ * For aarch64, this is pointed to by second GOT entry.
+ */
+struct tls_index {
+ uint64_t module;
+ uint64_t offset;
+};
+
+static inline __attribute__((always_inline))
+void *calc_location(struct strobe_value_loc *loc, void *tls_base)
+{
+ /*
+ * tls_mode value is:
+ * - -1 (TLS_NOT_SET), if no metavar is present;
+ * - 0 (TLS_LOCAL_EXEC), if metavar uses Local Executable mode of TLS
+ * (offset from fs:0 for x86-64 or tpidr_el0 for aarch64);
+ * - 1 (TLS_IMM_EXEC), if metavar uses Immediate Executable mode of TLS;
+ * - 2 (TLS_GENERAL_DYN), if metavar uses General Dynamic mode of TLS;
+ * This scheme allows using something like:
+ * (tls_mode + 1) * (tls_base + offset)
+ * to get NULL for "no metavar" location, or correct pointer for local
+ * executable mode without doing extra ifs.
+ */
+ if (loc->tls_mode <= TLS_LOCAL_EXEC) {
+ /* static executable is simple, we just have offset from
+ * tls_base */
+ void *addr = tls_base + loc->offset;
+ /* multiply by (tls_mode + 1) to get NULL, if we have no
+ * metavar in this slot */
+ return (void *)((loc->tls_mode + 1) * (int64_t)addr);
+ }
+ /*
+ * Other modes are more complicated, we need to jump through few hoops.
+ *
+ * For immediate executable mode (currently supported only for aarch64):
+ * - loc->offset is pointing to a GOT entry containing fixed offset
+ * relative to tls_base;
+ *
+ * For general dynamic mode:
+ * - loc->offset is pointing to a beginning of double GOT entries;
+ * - (for aarch64 only) second entry points to tls_index_t struct;
+ * - (for x86-64 only) two GOT entries are already tls_index_t;
+ * - tls_index_t->module is used to find start of TLS section in
+ * which variable resides;
+ * - tls_index_t->offset provides offset within that TLS section,
+ * pointing to value of variable.
+ */
+ struct tls_index tls_index;
+ dtv_t *dtv;
+ void *tls_ptr;
+
+ bpf_probe_read(&tls_index, sizeof(struct tls_index),
+ (void *)loc->offset);
+ /* valid module index is always positive */
+ if (tls_index.module > 0) {
+ /* dtv = ((struct tcbhead *)tls_base)->dtv[tls_index.module] */
+ bpf_probe_read(&dtv, sizeof(dtv),
+ &((struct tcbhead *)tls_base)->dtv);
+ dtv += tls_index.module;
+ } else {
+ dtv = NULL;
+ }
+ bpf_probe_read(&tls_ptr, sizeof(void *), dtv);
+ /* if pointer has (void *)-1 value, then TLS wasn't initialized yet */
+ return tls_ptr && tls_ptr != (void *)-1
+ ? tls_ptr + tls_index.offset
+ : NULL;
+}
+
+static inline __attribute__((always_inline))
+void read_int_var(struct strobemeta_cfg *cfg, size_t idx, void *tls_base,
+ struct strobe_value_generic *value,
+ struct strobemeta_payload *data)
+{
+ void *location = calc_location(&cfg->int_locs[idx], tls_base);
+ if (!location)
+ return;
+
+ bpf_probe_read(value, sizeof(struct strobe_value_generic), location);
+ data->int_vals[idx] = value->val;
+ if (value->header.len)
+ data->int_vals_set_mask |= (1 << idx);
+}
+
+static inline __attribute__((always_inline))
+uint64_t read_str_var(struct strobemeta_cfg* cfg, size_t idx, void *tls_base,
+ struct strobe_value_generic *value,
+ struct strobemeta_payload *data, void *payload)
+{
+ void *location;
+ uint32_t len;
+
+ data->str_lens[idx] = 0;
+ location = calc_location(&cfg->str_locs[idx], tls_base);
+ if (!location)
+ return 0;
+
+ bpf_probe_read(value, sizeof(struct strobe_value_generic), location);
+ len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN, value->ptr);
+ /*
+	 * if bpf_probe_read_str returns an error (<0), the cast to
+	 * unsigned int turns it into a big number, so the next check is
+	 * sufficient both to catch errors AND to prove to the BPF verifier that
+ * bpf_probe_read_str won't return anything bigger than
+ * STROBE_MAX_STR_LEN
+ */
+ if (len > STROBE_MAX_STR_LEN)
+ return 0;
+
+ data->str_lens[idx] = len;
+ return len;
+}
+
+static inline __attribute__((always_inline))
+void *read_map_var(struct strobemeta_cfg *cfg, size_t idx, void *tls_base,
+ struct strobe_value_generic *value,
+ struct strobemeta_payload* data, void *payload)
+{
+ struct strobe_map_descr* descr = &data->map_descrs[idx];
+ struct strobe_map_raw map;
+ void *location;
+ uint32_t len;
+ int i;
+
+ descr->tag_len = 0; /* presume no tag is set */
+ descr->cnt = -1; /* presume no value is set */
+
+ location = calc_location(&cfg->map_locs[idx], tls_base);
+ if (!location)
+ return payload;
+
+ bpf_probe_read(value, sizeof(struct strobe_value_generic), location);
+ if (bpf_probe_read(&map, sizeof(struct strobe_map_raw), value->ptr))
+ return payload;
+
+ descr->id = map.id;
+ descr->cnt = map.cnt;
+ if (cfg->req_meta_idx == idx) {
+ data->req_id = map.id;
+ data->req_meta_valid = 1;
+ }
+
+ len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN, map.tag);
+ if (len <= STROBE_MAX_STR_LEN) {
+ descr->tag_len = len;
+ payload += len;
+ }
+
+#ifdef NO_UNROLL
+#pragma clang loop unroll(disable)
+#else
+#pragma unroll
+#endif
+ for (int i = 0; i < STROBE_MAX_MAP_ENTRIES && i < map.cnt; ++i) {
+ descr->key_lens[i] = 0;
+ len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN,
+ map.entries[i].key);
+ if (len <= STROBE_MAX_STR_LEN) {
+ descr->key_lens[i] = len;
+ payload += len;
+ }
+ descr->val_lens[i] = 0;
+ len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN,
+ map.entries[i].val);
+ if (len <= STROBE_MAX_STR_LEN) {
+ descr->val_lens[i] = len;
+ payload += len;
+ }
+ }
+
+ return payload;
+}
+
+/*
+ * read_strobe_meta returns NULL, if no metadata was read; otherwise returns
+ * pointer to *right after* payload ends
+ */
+static inline __attribute__((always_inline))
+void *read_strobe_meta(struct task_struct* task,
+ struct strobemeta_payload* data) {
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+ struct strobe_value_generic value = {0};
+ struct strobemeta_cfg *cfg;
+ void *tls_base, *payload;
+
+ cfg = bpf_map_lookup_elem(&strobemeta_cfgs, &pid);
+ if (!cfg)
+ return NULL;
+
+ data->int_vals_set_mask = 0;
+ data->req_meta_valid = 0;
+ payload = data->payload;
+ /*
+ * we don't have struct task_struct definition, it should be:
+ * tls_base = (void *)task->thread.fsbase;
+ */
+ tls_base = (void *)task;
+
+#ifdef NO_UNROLL
+#pragma clang loop unroll(disable)
+#else
+#pragma unroll
+#endif
+ for (int i = 0; i < STROBE_MAX_INTS; ++i) {
+ read_int_var(cfg, i, tls_base, &value, data);
+ }
+#ifdef NO_UNROLL
+#pragma clang loop unroll(disable)
+#else
+#pragma unroll
+#endif
+ for (int i = 0; i < STROBE_MAX_STRS; ++i) {
+ payload += read_str_var(cfg, i, tls_base, &value, data, payload);
+ }
+#ifdef NO_UNROLL
+#pragma clang loop unroll(disable)
+#else
+#pragma unroll
+#endif
+ for (int i = 0; i < STROBE_MAX_MAPS; ++i) {
+ payload = read_map_var(cfg, i, tls_base, &value, data, payload);
+ }
+ /*
+ * return pointer right after end of payload, so it's possible to
+ * calculate exact amount of useful data that needs to be sent
+ */
+ return payload;
+}
+
+SEC("raw_tracepoint/kfree_skb")
+int on_event(struct pt_regs *ctx) {
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+ struct strobelight_bpf_sample* sample;
+ struct task_struct *task;
+ uint32_t zero = 0;
+ uint64_t ktime_ns;
+ void *sample_end;
+
+ sample = bpf_map_lookup_elem(&sample_heap, &zero);
+ if (!sample)
+ return 0; /* this will never happen */
+
+ sample->pid = pid;
+ bpf_get_current_comm(&sample->comm, TASK_COMM_LEN);
+ ktime_ns = bpf_ktime_get_ns();
+ sample->ktime = ktime_ns;
+
+ task = (struct task_struct *)bpf_get_current_task();
+ sample_end = read_strobe_meta(task, &sample->metadata);
+ sample->has_meta = sample_end != NULL;
+ sample_end = sample_end ? : &sample->metadata;
+
+ if ((ktime_ns >> STACK_TABLE_EPOCH_SHIFT) & 1) {
+ sample->kernel_stack_id = bpf_get_stackid(ctx, &stacks_1, 0);
+ sample->user_stack_id = bpf_get_stackid(ctx, &stacks_1, BPF_F_USER_STACK);
+ } else {
+ sample->kernel_stack_id = bpf_get_stackid(ctx, &stacks_0, 0);
+ sample->user_stack_id = bpf_get_stackid(ctx, &stacks_0, BPF_F_USER_STACK);
+ }
+
+ uint64_t sample_size = sample_end - (void *)sample;
+ /* should always be true */
+ if (sample_size < sizeof(struct strobelight_bpf_sample))
+ bpf_perf_event_output(ctx, &samples, 0, sample, 1 + sample_size);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
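The probe reads in calc_location() reconstruct glibc's General Dynamic TLS lookup. The same pointer chase written as direct dereferences, as a sketch of what is being traversed (types as defined in this header; a BPF program cannot dereference these directly and must go through bpf_probe_read):

    static void *resolve_general_dyn(struct tls_index *ti, struct tcbhead *tcb)
    {
            dtv_t *dtv = tcb->dtv + ti->module;  /* per-module DTV slot */
            void *block = dtv->pointer.val;

            /* (void *)-1 marks a TLS block that is not allocated yet */
            if (!block || block == (void *)-1)
                    return NULL;
            return (char *)block + ti->offset;
    }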
diff --git a/tools/testing/selftests/bpf/progs/strobemeta_nounroll1.c b/tools/testing/selftests/bpf/progs/strobemeta_nounroll1.c
new file mode 100644
index 000000000000..f0a1669e11d6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/strobemeta_nounroll1.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+// Copyright (c) 2019 Facebook
+
+#define STROBE_MAX_INTS 2
+#define STROBE_MAX_STRS 25
+#define STROBE_MAX_MAPS 13
+#define STROBE_MAX_MAP_ENTRIES 20
+#define NO_UNROLL
+#include "strobemeta.h"
diff --git a/tools/testing/selftests/bpf/progs/strobemeta_nounroll2.c b/tools/testing/selftests/bpf/progs/strobemeta_nounroll2.c
new file mode 100644
index 000000000000..4291a7d642e7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/strobemeta_nounroll2.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+// Copyright (c) 2019 Facebook
+
+#define STROBE_MAX_INTS 2
+#define STROBE_MAX_STRS 25
+#define STROBE_MAX_MAPS 30
+#define STROBE_MAX_MAP_ENTRIES 20
+#define NO_UNROLL
+#include "strobemeta.h"
diff --git a/tools/testing/selftests/bpf/progs/test_btf_newkv.c b/tools/testing/selftests/bpf/progs/test_btf_newkv.c
new file mode 100644
index 000000000000..28c16bb583b6
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_btf_newkv.c
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018 Facebook */
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+
+int _version SEC("version") = 1;
+
+struct ipv_counts {
+ unsigned int v4;
+ unsigned int v6;
+};
+
+/* just to validate we can handle maps in multiple sections */
+struct bpf_map_def SEC("maps") btf_map_legacy = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(long long),
+ .max_entries = 4,
+};
+
+BPF_ANNOTATE_KV_PAIR(btf_map_legacy, int, struct ipv_counts);
+
+struct {
+ int *key;
+ struct ipv_counts *value;
+ unsigned int type;
+ unsigned int max_entries;
+} btf_map SEC(".maps") = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .max_entries = 4,
+};
+
+struct dummy_tracepoint_args {
+ unsigned long long pad;
+ struct sock *sock;
+};
+
+__attribute__((noinline))
+static int test_long_fname_2(struct dummy_tracepoint_args *arg)
+{
+ struct ipv_counts *counts;
+ int key = 0;
+
+ if (!arg->sock)
+ return 0;
+
+ counts = bpf_map_lookup_elem(&btf_map, &key);
+ if (!counts)
+ return 0;
+
+ counts->v6++;
+
+ /* just verify we can reference both maps */
+ counts = bpf_map_lookup_elem(&btf_map_legacy, &key);
+ if (!counts)
+ return 0;
+
+ return 0;
+}
+
+__attribute__((noinline))
+static int test_long_fname_1(struct dummy_tracepoint_args *arg)
+{
+ return test_long_fname_2(arg);
+}
+
+SEC("dummy_tracepoint")
+int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
+{
+ return test_long_fname_1(arg);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c
index f6d9f238e00a..aaa6ec250e15 100644
--- a/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c
+++ b/tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c
@@ -15,17 +15,25 @@ struct stack_trace_t {
struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};
-struct bpf_map_def SEC("maps") perfmap = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 key_size;
+ __u32 value_size;
+} perfmap SEC(".maps") = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+ .max_entries = 2,
.key_size = sizeof(int),
.value_size = sizeof(__u32),
- .max_entries = 2,
};
-struct bpf_map_def SEC("maps") stackdata_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ struct stack_trace_t *value;
+} stackdata_map SEC(".maps") = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(struct stack_trace_t),
.max_entries = 1,
};
@@ -47,10 +55,13 @@ struct bpf_map_def SEC("maps") stackdata_map = {
* issue and avoid complicated C programming massaging.
* This is an acceptable workaround since there is one entry here.
*/
-struct bpf_map_def SEC("maps") rawdata_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ __u64 (*value)[2 * MAX_STACK_RAWTP];
+} rawdata_map SEC(".maps") = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = MAX_STACK_RAWTP * sizeof(__u64) * 2,
.max_entries = 1,
};
diff --git a/tools/testing/selftests/bpf/progs/test_global_data.c b/tools/testing/selftests/bpf/progs/test_global_data.c
index 5ab14e941980..866cc7ddbe43 100644
--- a/tools/testing/selftests/bpf/progs/test_global_data.c
+++ b/tools/testing/selftests/bpf/progs/test_global_data.c
@@ -7,17 +7,23 @@
#include "bpf_helpers.h"
-struct bpf_map_def SEC("maps") result_number = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ __u64 *value;
+} result_number SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(__u64),
.max_entries = 11,
};
-struct bpf_map_def SEC("maps") result_string = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ const char (*value)[32];
+} result_string SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = 32,
.max_entries = 5,
};
@@ -27,10 +33,13 @@ struct foo {
__u64 c;
};
-struct bpf_map_def SEC("maps") result_struct = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ struct foo *value;
+} result_struct SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(struct foo),
.max_entries = 5,
};
diff --git a/tools/testing/selftests/bpf/progs/test_l4lb.c b/tools/testing/selftests/bpf/progs/test_l4lb.c
index 1e10c9590991..848cbb90f581 100644
--- a/tools/testing/selftests/bpf/progs/test_l4lb.c
+++ b/tools/testing/selftests/bpf/progs/test_l4lb.c
@@ -169,38 +169,53 @@ struct eth_hdr {
unsigned short eth_proto;
};
-struct bpf_map_def SEC("maps") vip_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ struct vip *key;
+ struct vip_meta *value;
+} vip_map SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(struct vip),
- .value_size = sizeof(struct vip_meta),
.max_entries = MAX_VIPS,
};
-struct bpf_map_def SEC("maps") ch_rings = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ __u32 *value;
+} ch_rings SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(__u32),
.max_entries = CH_RINGS_SIZE,
};
-struct bpf_map_def SEC("maps") reals = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ struct real_definition *value;
+} reals SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(struct real_definition),
.max_entries = MAX_REALS,
};
-struct bpf_map_def SEC("maps") stats = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ struct vip_stats *value;
+} stats SEC(".maps") = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(struct vip_stats),
.max_entries = MAX_VIPS,
};
-struct bpf_map_def SEC("maps") ctl_array = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ struct ctl_value *value;
+} ctl_array SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(struct ctl_value),
.max_entries = CTL_MAP_SIZE,
};
diff --git a/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
index ba44a14e6dc4..c63ecf3ca573 100644
--- a/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
@@ -165,38 +165,53 @@ struct eth_hdr {
unsigned short eth_proto;
};
-struct bpf_map_def SEC("maps") vip_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ struct vip *key;
+ struct vip_meta *value;
+} vip_map SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(struct vip),
- .value_size = sizeof(struct vip_meta),
.max_entries = MAX_VIPS,
};
-struct bpf_map_def SEC("maps") ch_rings = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ __u32 *value;
+} ch_rings SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(__u32),
.max_entries = CH_RINGS_SIZE,
};
-struct bpf_map_def SEC("maps") reals = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ struct real_definition *value;
+} reals SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(struct real_definition),
.max_entries = MAX_REALS,
};
-struct bpf_map_def SEC("maps") stats = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ struct vip_stats *value;
+} stats SEC(".maps") = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(struct vip_stats),
.max_entries = MAX_VIPS,
};
-struct bpf_map_def SEC("maps") ctl_array = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ struct ctl_value *value;
+} ctl_array SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(struct ctl_value),
.max_entries = CTL_MAP_SIZE,
};
diff --git a/tools/testing/selftests/bpf/progs/test_map_lock.c b/tools/testing/selftests/bpf/progs/test_map_lock.c
index af8cc68ed2f9..40d9c2853393 100644
--- a/tools/testing/selftests/bpf/progs/test_map_lock.c
+++ b/tools/testing/selftests/bpf/progs/test_map_lock.c
@@ -11,29 +11,31 @@ struct hmap_elem {
int var[VAR_NUM];
};
-struct bpf_map_def SEC("maps") hash_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ struct hmap_elem *value;
+} hash_map SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(int),
- .value_size = sizeof(struct hmap_elem),
.max_entries = 1,
};
-BPF_ANNOTATE_KV_PAIR(hash_map, int, struct hmap_elem);
-
struct array_elem {
struct bpf_spin_lock lock;
int var[VAR_NUM];
};
-struct bpf_map_def SEC("maps") array_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ int *key;
+ struct array_elem *value;
+} array_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(struct array_elem),
.max_entries = 1,
};
-BPF_ANNOTATE_KV_PAIR(array_map, int, struct array_elem);
-
SEC("map_lock_demo")
int bpf_map_lock_test(struct __sk_buff *skb)
{
diff --git a/tools/testing/selftests/bpf/progs/test_seg6_loop.c b/tools/testing/selftests/bpf/progs/test_seg6_loop.c
new file mode 100644
index 000000000000..463964d79f73
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_seg6_loop.c
@@ -0,0 +1,261 @@
+#include <stddef.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <linux/seg6_local.h>
+#include <linux/bpf.h>
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+
+/* Packet parsing state machine helpers. */
+#define cursor_advance(_cursor, _len) \
+ ({ void *_tmp = _cursor; _cursor += _len; _tmp; })
+
+#define SR6_FLAG_ALERT (1 << 4)
+
+#define htonll(x) ((bpf_htonl(1)) == 1 ? (x) : ((uint64_t)bpf_htonl((x) & \
+ 0xFFFFFFFF) << 32) | bpf_htonl((x) >> 32))
+#define ntohll(x) ((bpf_ntohl(1)) == 1 ? (x) : ((uint64_t)bpf_ntohl((x) & \
+ 0xFFFFFFFF) << 32) | bpf_ntohl((x) >> 32))
+#define BPF_PACKET_HEADER __attribute__((packed))
+
+struct ip6_t {
+ unsigned int ver:4;
+ unsigned int priority:8;
+ unsigned int flow_label:20;
+ unsigned short payload_len;
+ unsigned char next_header;
+ unsigned char hop_limit;
+ unsigned long long src_hi;
+ unsigned long long src_lo;
+ unsigned long long dst_hi;
+ unsigned long long dst_lo;
+} BPF_PACKET_HEADER;
+
+struct ip6_addr_t {
+ unsigned long long hi;
+ unsigned long long lo;
+} BPF_PACKET_HEADER;
+
+struct ip6_srh_t {
+ unsigned char nexthdr;
+ unsigned char hdrlen;
+ unsigned char type;
+ unsigned char segments_left;
+ unsigned char first_segment;
+ unsigned char flags;
+ unsigned short tag;
+
+ struct ip6_addr_t segments[0];
+} BPF_PACKET_HEADER;
+
+struct sr6_tlv_t {
+ unsigned char type;
+ unsigned char len;
+ unsigned char value[0];
+} BPF_PACKET_HEADER;
+
+static __attribute__((always_inline)) struct ip6_srh_t *get_srh(struct __sk_buff *skb)
+{
+ void *cursor, *data_end;
+ struct ip6_srh_t *srh;
+ struct ip6_t *ip;
+ uint8_t *ipver;
+
+ data_end = (void *)(long)skb->data_end;
+ cursor = (void *)(long)skb->data;
+ ipver = (uint8_t *)cursor;
+
+ if ((void *)ipver + sizeof(*ipver) > data_end)
+ return NULL;
+
+ if ((*ipver >> 4) != 6)
+ return NULL;
+
+ ip = cursor_advance(cursor, sizeof(*ip));
+ if ((void *)ip + sizeof(*ip) > data_end)
+ return NULL;
+
+ if (ip->next_header != 43)
+ return NULL;
+
+ srh = cursor_advance(cursor, sizeof(*srh));
+ if ((void *)srh + sizeof(*srh) > data_end)
+ return NULL;
+
+ if (srh->type != 4)
+ return NULL;
+
+ return srh;
+}
+
+static __attribute__((always_inline))
+int update_tlv_pad(struct __sk_buff *skb, uint32_t new_pad,
+ uint32_t old_pad, uint32_t pad_off)
+{
+ int err;
+
+ if (new_pad != old_pad) {
+ err = bpf_lwt_seg6_adjust_srh(skb, pad_off,
+ (int) new_pad - (int) old_pad);
+ if (err)
+ return err;
+ }
+
+ if (new_pad > 0) {
+ char pad_tlv_buf[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0};
+ struct sr6_tlv_t *pad_tlv = (struct sr6_tlv_t *) pad_tlv_buf;
+
+ pad_tlv->type = SR6_TLV_PADDING;
+ pad_tlv->len = new_pad - 2;
+
+ err = bpf_lwt_seg6_store_bytes(skb, pad_off,
+ (void *)pad_tlv_buf, new_pad);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static __attribute__((always_inline))
+int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh,
+ uint32_t *tlv_off, uint32_t *pad_size,
+ uint32_t *pad_off)
+{
+ uint32_t srh_off, cur_off;
+ int offset_valid = 0;
+ int err;
+
+ srh_off = (char *)srh - (char *)(long)skb->data;
+ // cur_off = end of segments, start of possible TLVs
+ cur_off = srh_off + sizeof(*srh) +
+ sizeof(struct ip6_addr_t) * (srh->first_segment + 1);
+
+ *pad_off = 0;
+
+ // we can only go as far as ~10 TLVs due to the BPF max stack size
+ #pragma clang loop unroll(disable)
+ for (int i = 0; i < 100; i++) {
+ struct sr6_tlv_t tlv;
+
+ if (cur_off == *tlv_off)
+ offset_valid = 1;
+
+ if (cur_off >= srh_off + ((srh->hdrlen + 1) << 3))
+ break;
+
+ err = bpf_skb_load_bytes(skb, cur_off, &tlv, sizeof(tlv));
+ if (err)
+ return err;
+
+ if (tlv.type == SR6_TLV_PADDING) {
+ *pad_size = tlv.len + sizeof(tlv);
+ *pad_off = cur_off;
+
+ if (*tlv_off == srh_off) {
+ *tlv_off = cur_off;
+ offset_valid = 1;
+ }
+ break;
+
+ } else if (tlv.type == SR6_TLV_HMAC) {
+ break;
+ }
+
+ cur_off += sizeof(tlv) + tlv.len;
+ } // we reached the padding or HMAC TLVs, or the end of the SRH
+
+ if (*pad_off == 0)
+ *pad_off = cur_off;
+
+ if (*tlv_off == -1)
+ *tlv_off = cur_off;
+ else if (!offset_valid)
+ return -EINVAL;
+
+ return 0;
+}
+
+static __attribute__((always_inline))
+int add_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh, uint32_t tlv_off,
+ struct sr6_tlv_t *itlv, uint8_t tlv_size)
+{
+ uint32_t srh_off = (char *)srh - (char *)(long)skb->data;
+ uint8_t len_remaining, new_pad;
+ uint32_t pad_off = 0;
+ uint32_t pad_size = 0;
+ uint32_t partial_srh_len;
+ int err;
+
+ if (tlv_off != -1)
+ tlv_off += srh_off;
+
+ if (itlv->type == SR6_TLV_PADDING || itlv->type == SR6_TLV_HMAC)
+ return -EINVAL;
+
+ err = is_valid_tlv_boundary(skb, srh, &tlv_off, &pad_size, &pad_off);
+ if (err)
+ return err;
+
+ err = bpf_lwt_seg6_adjust_srh(skb, tlv_off, sizeof(*itlv) + itlv->len);
+ if (err)
+ return err;
+
+ err = bpf_lwt_seg6_store_bytes(skb, tlv_off, (void *)itlv, tlv_size);
+ if (err)
+ return err;
+
+ // the following can't be moved inside update_tlv_pad because the
+ // bpf verifier has some issues with it
+ pad_off += sizeof(*itlv) + itlv->len;
+ partial_srh_len = pad_off - srh_off;
+ len_remaining = partial_srh_len % 8;
+ new_pad = 8 - len_remaining;
+
+ if (new_pad == 1) // cannot pad for 1 byte only
+ new_pad = 9;
+ else if (new_pad == 8)
+ new_pad = 0;
+
+ return update_tlv_pad(skb, new_pad, pad_size, pad_off);
+}
+
+// Add an Egress TLV fc00::4, add the flag A,
+// and apply End.X action to fc42::1
+SEC("lwt_seg6local")
+int __add_egr_x(struct __sk_buff *skb)
+{
+ unsigned long long hi = 0xfc42000000000000;
+ unsigned long long lo = 0x1;
+ struct ip6_srh_t *srh = get_srh(skb);
+ uint8_t new_flags = SR6_FLAG_ALERT;
+ struct ip6_addr_t addr;
+ int err, offset;
+
+ if (srh == NULL)
+ return BPF_DROP;
+
+ uint8_t tlv[20] = {2, 18, 0, 0, 0xfd, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4};
+
+ err = add_tlv(skb, srh, (srh->hdrlen+1) << 3,
+ (struct sr6_tlv_t *)&tlv, 20);
+ if (err)
+ return BPF_DROP;
+
+ offset = sizeof(struct ip6_t) + offsetof(struct ip6_srh_t, flags);
+ err = bpf_lwt_seg6_store_bytes(skb, offset,
+ (void *)&new_flags, sizeof(new_flags));
+ if (err)
+ return BPF_DROP;
+
+ addr.lo = htonll(lo);
+ addr.hi = htonll(hi);
+ err = bpf_lwt_seg6_action(skb, SEG6_LOCAL_ACTION_END_X,
+ (void *)&addr, sizeof(addr));
+ if (err)
+ return BPF_DROP;
+ return BPF_REDIRECT;
+}
+char __license[] SEC("license") = "GPL";
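The (srh->hdrlen + 1) << 3 arithmetic above is the RFC 8200 routing-header length encoding: Hdr Ext Len counts 8-octet units beyond the first 8 octets. A worked example with illustrative values:

    /* an SRH carrying two 16-byte segments: 8 + 2 * 16 = 40 bytes,
     * so hdrlen = (40 - 8) / 8 = 4 and (hdrlen + 1) << 3 == 40 */
    unsigned char hdrlen = 4;
    unsigned int srh_bytes = (hdrlen + 1) << 3;  /* 40 */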
diff --git a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
index 5b54ec637ada..435a9527733e 100644
--- a/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c
@@ -21,38 +21,55 @@ int _version SEC("version") = 1;
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif
-struct bpf_map_def SEC("maps") outer_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 key_size;
+ __u32 value_size;
+} outer_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
+ .max_entries = 1,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
- .max_entries = 1,
};
-struct bpf_map_def SEC("maps") result_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ __u32 *value;
+} result_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(__u32),
.max_entries = NR_RESULTS,
};
-struct bpf_map_def SEC("maps") tmp_index_ovr_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ int *value;
+} tmp_index_ovr_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(int),
.max_entries = 1,
};
-struct bpf_map_def SEC("maps") linum_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ __u32 *value;
+} linum_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(__u32),
.max_entries = 1,
};
-struct bpf_map_def SEC("maps") data_check_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ struct data_check *value;
+} data_check_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(struct data_check),
.max_entries = 1,
};
diff --git a/tools/testing/selftests/bpf/progs/test_send_signal_kern.c b/tools/testing/selftests/bpf/progs/test_send_signal_kern.c
index 45a1a1a2c345..6ac68be5d68b 100644
--- a/tools/testing/selftests/bpf/progs/test_send_signal_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_send_signal_kern.c
@@ -4,24 +4,26 @@
#include <linux/version.h>
#include "bpf_helpers.h"
-struct bpf_map_def SEC("maps") info_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ __u64 *value;
+} info_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(__u64),
.max_entries = 1,
};
-BPF_ANNOTATE_KV_PAIR(info_map, __u32, __u64);
-
-struct bpf_map_def SEC("maps") status_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ __u64 *value;
+} status_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(__u64),
.max_entries = 1,
};
-BPF_ANNOTATE_KV_PAIR(status_map, __u32, __u64);
-
SEC("send_signal_demo")
int bpf_send_signal_test(void *ctx)
{
diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c b/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
index 1c39e4ccb7f1..c3d383d650cb 100644
--- a/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
@@ -27,31 +27,43 @@ enum bpf_linum_array_idx {
__NR_BPF_LINUM_ARRAY_IDX,
};
-struct bpf_map_def SEC("maps") addr_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ struct sockaddr_in6 *value;
+} addr_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(struct sockaddr_in6),
.max_entries = __NR_BPF_ADDR_ARRAY_IDX,
};
-struct bpf_map_def SEC("maps") sock_result_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ struct bpf_sock *value;
+} sock_result_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(struct bpf_sock),
.max_entries = __NR_BPF_RESULT_ARRAY_IDX,
};
-struct bpf_map_def SEC("maps") tcp_sock_result_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ struct bpf_tcp_sock *value;
+} tcp_sock_result_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(struct bpf_tcp_sock),
.max_entries = __NR_BPF_RESULT_ARRAY_IDX,
};
-struct bpf_map_def SEC("maps") linum_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ __u32 *value;
+} linum_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(__u32),
.max_entries = __NR_BPF_LINUM_ARRAY_IDX,
};
@@ -60,26 +72,26 @@ struct bpf_spinlock_cnt {
__u32 cnt;
};
-struct bpf_map_def SEC("maps") sk_pkt_out_cnt = {
+struct {
+ __u32 type;
+ __u32 map_flags;
+ int *key;
+ struct bpf_spinlock_cnt *value;
+} sk_pkt_out_cnt SEC(".maps") = {
.type = BPF_MAP_TYPE_SK_STORAGE,
- .key_size = sizeof(int),
- .value_size = sizeof(struct bpf_spinlock_cnt),
- .max_entries = 0,
.map_flags = BPF_F_NO_PREALLOC,
};
-BPF_ANNOTATE_KV_PAIR(sk_pkt_out_cnt, int, struct bpf_spinlock_cnt);
-
-struct bpf_map_def SEC("maps") sk_pkt_out_cnt10 = {
+struct {
+ __u32 type;
+ __u32 map_flags;
+ int *key;
+ struct bpf_spinlock_cnt *value;
+} sk_pkt_out_cnt10 SEC(".maps") = {
.type = BPF_MAP_TYPE_SK_STORAGE,
- .key_size = sizeof(int),
- .value_size = sizeof(struct bpf_spinlock_cnt),
- .max_entries = 0,
.map_flags = BPF_F_NO_PREALLOC,
};
-BPF_ANNOTATE_KV_PAIR(sk_pkt_out_cnt10, int, struct bpf_spinlock_cnt);
-
static bool is_loopback6(__u32 *a6)
{
return !a6[0] && !a6[1] && !a6[2] && a6[3] == bpf_htonl(1);
diff --git a/tools/testing/selftests/bpf/progs/test_spin_lock.c b/tools/testing/selftests/bpf/progs/test_spin_lock.c
index 40f904312090..0a77ae36d981 100644
--- a/tools/testing/selftests/bpf/progs/test_spin_lock.c
+++ b/tools/testing/selftests/bpf/progs/test_spin_lock.c
@@ -10,30 +10,29 @@ struct hmap_elem {
int test_padding;
};
-struct bpf_map_def SEC("maps") hmap = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ int *key;
+ struct hmap_elem *value;
+} hmap SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(int),
- .value_size = sizeof(struct hmap_elem),
.max_entries = 1,
};
-BPF_ANNOTATE_KV_PAIR(hmap, int, struct hmap_elem);
-
-
struct cls_elem {
struct bpf_spin_lock lock;
volatile int cnt;
};
-struct bpf_map_def SEC("maps") cls_map = {
+struct {
+ __u32 type;
+ struct bpf_cgroup_storage_key *key;
+ struct cls_elem *value;
+} cls_map SEC(".maps") = {
.type = BPF_MAP_TYPE_CGROUP_STORAGE,
- .key_size = sizeof(struct bpf_cgroup_storage_key),
- .value_size = sizeof(struct cls_elem),
};
-BPF_ANNOTATE_KV_PAIR(cls_map, struct bpf_cgroup_storage_key,
- struct cls_elem);
-
struct bpf_vqueue {
struct bpf_spin_lock lock;
/* 4 byte hole */
@@ -42,14 +41,16 @@ struct bpf_vqueue {
unsigned int rate;
};
-struct bpf_map_def SEC("maps") vqueue = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ int *key;
+ struct bpf_vqueue *value;
+} vqueue SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(struct bpf_vqueue),
.max_entries = 1,
};
-BPF_ANNOTATE_KV_PAIR(vqueue, int, struct bpf_vqueue);
#define CREDIT_PER_NS(delta, rate) (((delta) * rate) >> 20)
SEC("spin_lock_demo")
diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
index d86c281e957f..fcf2280bb60c 100644
--- a/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
+++ b/tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c
@@ -8,34 +8,50 @@
#define PERF_MAX_STACK_DEPTH 127
#endif
-struct bpf_map_def SEC("maps") control_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ __u32 *value;
+} control_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(__u32),
.max_entries = 1,
};
-struct bpf_map_def SEC("maps") stackid_hmap = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ __u32 *value;
+} stackid_hmap SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(__u32),
- .value_size = sizeof(__u32),
.max_entries = 16384,
};
-struct bpf_map_def SEC("maps") stackmap = {
+typedef struct bpf_stack_build_id stack_trace_t[PERF_MAX_STACK_DEPTH];
+
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 map_flags;
+ __u32 key_size;
+ __u32 value_size;
+} stackmap SEC(".maps") = {
.type = BPF_MAP_TYPE_STACK_TRACE,
- .key_size = sizeof(__u32),
- .value_size = sizeof(struct bpf_stack_build_id)
- * PERF_MAX_STACK_DEPTH,
.max_entries = 128,
.map_flags = BPF_F_STACK_BUILD_ID,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(stack_trace_t),
};
-struct bpf_map_def SEC("maps") stack_amap = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ /* there seems to be a bug in the kernel not handling typedefs properly */
+ struct bpf_stack_build_id (*value)[PERF_MAX_STACK_DEPTH];
+} stack_amap SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(struct bpf_stack_build_id)
- * PERF_MAX_STACK_DEPTH,
.max_entries = 128,
};
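
The typedef above exists so that value_size can be written as a single sizeof; the in-source comment on stack_amap notes that a BTF-typed array value still needs the pointer-to-array spelling. A quick sanity check on the size identity (a sketch; assumes <linux/bpf.h> provides struct bpf_stack_build_id):

	#include <linux/bpf.h>

	#define PERF_MAX_STACK_DEPTH 127

	typedef struct bpf_stack_build_id stack_trace_t[PERF_MAX_STACK_DEPTH];

	_Static_assert(sizeof(stack_trace_t) ==
		       sizeof(struct bpf_stack_build_id) * PERF_MAX_STACK_DEPTH,
		       "typedef'd array equals the old value_size expression");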
diff --git a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
index af111af7ca1a..7ad09adbf648 100644
--- a/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
+++ b/tools/testing/selftests/bpf/progs/test_stacktrace_map.c
@@ -8,31 +8,47 @@
#define PERF_MAX_STACK_DEPTH 127
#endif
-struct bpf_map_def SEC("maps") control_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ __u32 *value;
+} control_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(__u32),
.max_entries = 1,
};
-struct bpf_map_def SEC("maps") stackid_hmap = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ __u32 *value;
+} stackid_hmap SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(__u32),
- .value_size = sizeof(__u32),
.max_entries = 16384,
};
-struct bpf_map_def SEC("maps") stackmap = {
+typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];
+
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 key_size;
+ __u32 value_size;
+} stackmap SEC(".maps") = {
.type = BPF_MAP_TYPE_STACK_TRACE,
- .key_size = sizeof(__u32),
- .value_size = sizeof(__u64) * PERF_MAX_STACK_DEPTH,
.max_entries = 16384,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(stack_trace_t),
};
-struct bpf_map_def SEC("maps") stack_amap = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ __u64 (*value)[PERF_MAX_STACK_DEPTH];
+} stack_amap SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(__u64) * PERF_MAX_STACK_DEPTH,
.max_entries = 16384,
};
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
new file mode 100644
index 000000000000..608a06871572
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+#include <stdint.h>
+#include <string.h>
+
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+
+#include "bpf_helpers.h"
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+/* tcp_mem sysctl has only 3 ints, but this test is doing TCP_MEM_LOOPS */
+#define TCP_MEM_LOOPS 28 /* because 30 doesn't fit into 512 bytes of stack */
+#define MAX_ULONG_STR_LEN 7
+#define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN)
+
+static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
+{
+ volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string";
+ unsigned char i;
+ char name[64];
+ int ret;
+
+ memset(name, 0, sizeof(name));
+ ret = bpf_sysctl_get_name(ctx, name, sizeof(name), 0);
+ if (ret < 0 || ret != sizeof(tcp_mem_name) - 1)
+ return 0;
+
+#pragma clang loop unroll(disable)
+ for (i = 0; i < sizeof(tcp_mem_name); ++i)
+ if (name[i] != tcp_mem_name[i])
+ return 0;
+
+ return 1;
+}
+
+SEC("cgroup/sysctl")
+int sysctl_tcp_mem(struct bpf_sysctl *ctx)
+{
+ unsigned long tcp_mem[TCP_MEM_LOOPS] = {};
+ char value[MAX_VALUE_STR_LEN];
+ unsigned char i, off = 0;
+ int ret;
+
+ if (ctx->write)
+ return 0;
+
+ if (!is_tcp_mem(ctx))
+ return 0;
+
+ ret = bpf_sysctl_get_current_value(ctx, value, MAX_VALUE_STR_LEN);
+ if (ret < 0 || ret >= MAX_VALUE_STR_LEN)
+ return 0;
+
+#pragma clang loop unroll(disable)
+ for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) {
+ ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0,
+ tcp_mem + i);
+ if (ret <= 0 || ret > MAX_ULONG_STR_LEN)
+ return 0;
+ off += ret & MAX_ULONG_STR_LEN;
+ }
+
+ return tcp_mem[0] < tcp_mem[1] && tcp_mem[1] < tcp_mem[2];
+}
+
+char _license[] SEC("license") = "GPL";
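
The "28 because 30 doesn't fit" comment above is a stack-budget choice: tcp_mem[], value[], and the name[] buffer of the always-inlined is_tcp_mem() all land in the program's 512-byte BPF stack frame. A rough check of that arithmetic (a sketch; assumes 8-byte unsigned long and that the inlined helper's frame merges with the caller's):

	#define LOOP_STACK(loops) ((loops) * 8	/* tcp_mem[] */		\
				 + (loops) * 7	/* value[] */		\
				 + 64		/* name[] in is_tcp_mem() */)

	_Static_assert(LOOP_STACK(28) <= 512, "28 iterations fit the frame");
	_Static_assert(LOOP_STACK(30) > 512, "30 iterations would not");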
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
new file mode 100644
index 000000000000..cb201cbe11e7
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+#include <stdint.h>
+#include <string.h>
+
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+
+#include "bpf_helpers.h"
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+/* tcp_mem sysctl has only 3 ints, but this test is doing TCP_MEM_LOOPS */
+#define TCP_MEM_LOOPS 20 /* because 30 doesn't fit into 512 bytes of stack */
+#define MAX_ULONG_STR_LEN 7
+#define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN)
+
+static __attribute__((noinline)) int is_tcp_mem(struct bpf_sysctl *ctx)
+{
+ volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop";
+ unsigned char i;
+ char name[64];
+ int ret;
+
+ memset(name, 0, sizeof(name));
+ ret = bpf_sysctl_get_name(ctx, name, sizeof(name), 0);
+ if (ret < 0 || ret != sizeof(tcp_mem_name) - 1)
+ return 0;
+
+#pragma clang loop unroll(disable)
+ for (i = 0; i < sizeof(tcp_mem_name); ++i)
+ if (name[i] != tcp_mem_name[i])
+ return 0;
+
+ return 1;
+}
+
+SEC("cgroup/sysctl")
+int sysctl_tcp_mem(struct bpf_sysctl *ctx)
+{
+ unsigned long tcp_mem[TCP_MEM_LOOPS] = {};
+ char value[MAX_VALUE_STR_LEN];
+ unsigned char i, off = 0;
+ int ret;
+
+ if (ctx->write)
+ return 0;
+
+ if (!is_tcp_mem(ctx))
+ return 0;
+
+ ret = bpf_sysctl_get_current_value(ctx, value, MAX_VALUE_STR_LEN);
+ if (ret < 0 || ret >= MAX_VALUE_STR_LEN)
+ return 0;
+
+#pragma clang loop unroll(disable)
+ for (i = 0; i < ARRAY_SIZE(tcp_mem); ++i) {
+ ret = bpf_strtoul(value + off, MAX_ULONG_STR_LEN, 0,
+ tcp_mem + i);
+ if (ret <= 0 || ret > MAX_ULONG_STR_LEN)
+ return 0;
+ off += ret & MAX_ULONG_STR_LEN;
+ }
+
+ return tcp_mem[0] < tcp_mem[1] && tcp_mem[1] < tcp_mem[2];
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
index a295cad805d7..5cbbff416998 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
@@ -8,7 +8,6 @@
#include <linux/bpf.h>
#include "bpf_helpers.h"
-#include "bpf_util.h"
/* Max supported length of a string with unsigned long in base 10 (pow2 - 1). */
#define MAX_ULONG_STR_LEN 0xF
@@ -16,6 +15,10 @@
/* Max supported length of sysctl value string (pow2). */
#define MAX_VALUE_STR_LEN 0x40
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
{
char tcp_mem_name[] = "net/ipv4/tcp_mem";
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_estats.c b/tools/testing/selftests/bpf/progs/test_tcp_estats.c
index bee3bbecc0c4..df98f7e32832 100644
--- a/tools/testing/selftests/bpf/progs/test_tcp_estats.c
+++ b/tools/testing/selftests/bpf/progs/test_tcp_estats.c
@@ -148,10 +148,13 @@ struct tcp_estats_basic_event {
struct tcp_estats_conn_id conn_id;
};
-struct bpf_map_def SEC("maps") ev_record_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ struct tcp_estats_basic_event *value;
+} ev_record_map SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(__u32),
- .value_size = sizeof(struct tcp_estats_basic_event),
.max_entries = 1024,
};
diff --git a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
index c7c3240e0dd4..38e10c9fd996 100644
--- a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c
@@ -14,17 +14,23 @@
#include "bpf_endian.h"
#include "test_tcpbpf.h"
-struct bpf_map_def SEC("maps") global_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ struct tcpbpf_globals *value;
+} global_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(struct tcpbpf_globals),
.max_entries = 4,
};
-struct bpf_map_def SEC("maps") sockopt_results = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ int *value;
+} sockopt_results SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(int),
.max_entries = 2,
};
diff --git a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
index ec6db6e64c41..d073d37d4e27 100644
--- a/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c
@@ -14,18 +14,26 @@
#include "bpf_endian.h"
#include "test_tcpnotify.h"
-struct bpf_map_def SEC("maps") global_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ struct tcpnotify_globals *value;
+} global_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(struct tcpnotify_globals),
.max_entries = 4,
};
-struct bpf_map_def SEC("maps") perf_event_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 key_size;
+ __u32 value_size;
+} perf_event_map SEC(".maps") = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+ .max_entries = 2,
.key_size = sizeof(int),
.value_size = sizeof(__u32),
- .max_entries = 2,
};
int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/test_xdp.c b/tools/testing/selftests/bpf/progs/test_xdp.c
index 5e7df8bb5b5d..ec3d2c1c8cf9 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp.c
@@ -22,17 +22,23 @@
int _version SEC("version") = 1;
-struct bpf_map_def SEC("maps") rxcnt = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ __u64 *value;
+} rxcnt SEC(".maps") = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(__u64),
.max_entries = 256,
};
-struct bpf_map_def SEC("maps") vip2tnl = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ struct vip *key;
+ struct iptnl_info *value;
+} vip2tnl SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(struct vip),
- .value_size = sizeof(struct iptnl_info),
.max_entries = MAX_IPTNL_ENTRIES,
};
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_loop.c b/tools/testing/selftests/bpf/progs/test_xdp_loop.c
new file mode 100644
index 000000000000..7fa4677df22e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_xdp_loop.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#include <stddef.h>
+#include <string.h>
+#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/pkt_cls.h>
+#include <sys/socket.h>
+#include "bpf_helpers.h"
+#include "bpf_endian.h"
+#include "test_iptunnel_common.h"
+
+int _version SEC("version") = 1;
+
+struct bpf_map_def SEC("maps") rxcnt = {
+ .type = BPF_MAP_TYPE_PERCPU_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u64),
+ .max_entries = 256,
+};
+
+struct bpf_map_def SEC("maps") vip2tnl = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(struct vip),
+ .value_size = sizeof(struct iptnl_info),
+ .max_entries = MAX_IPTNL_ENTRIES,
+};
+
+static __always_inline void count_tx(__u32 protocol)
+{
+ __u64 *rxcnt_count;
+
+ rxcnt_count = bpf_map_lookup_elem(&rxcnt, &protocol);
+ if (rxcnt_count)
+ *rxcnt_count += 1;
+}
+
+static __always_inline int get_dport(void *trans_data, void *data_end,
+ __u8 protocol)
+{
+ struct tcphdr *th;
+ struct udphdr *uh;
+
+ switch (protocol) {
+ case IPPROTO_TCP:
+ th = (struct tcphdr *)trans_data;
+ if (th + 1 > data_end)
+ return -1;
+ return th->dest;
+ case IPPROTO_UDP:
+ uh = (struct udphdr *)trans_data;
+ if (uh + 1 > data_end)
+ return -1;
+ return uh->dest;
+ default:
+ return 0;
+ }
+}
+
+static __always_inline void set_ethhdr(struct ethhdr *new_eth,
+ const struct ethhdr *old_eth,
+ const struct iptnl_info *tnl,
+ __be16 h_proto)
+{
+ memcpy(new_eth->h_source, old_eth->h_dest, sizeof(new_eth->h_source));
+ memcpy(new_eth->h_dest, tnl->dmac, sizeof(new_eth->h_dest));
+ new_eth->h_proto = h_proto;
+}
+
+static __always_inline int handle_ipv4(struct xdp_md *xdp)
+{
+ void *data_end = (void *)(long)xdp->data_end;
+ void *data = (void *)(long)xdp->data;
+ struct iptnl_info *tnl;
+ struct ethhdr *new_eth;
+ struct ethhdr *old_eth;
+ struct iphdr *iph = data + sizeof(struct ethhdr);
+ __u16 *next_iph;
+ __u16 payload_len;
+ struct vip vip = {};
+ int dport;
+ __u32 csum = 0;
+ int i;
+
+ if (iph + 1 > data_end)
+ return XDP_DROP;
+
+ dport = get_dport(iph + 1, data_end, iph->protocol);
+ if (dport == -1)
+ return XDP_DROP;
+
+ vip.protocol = iph->protocol;
+ vip.family = AF_INET;
+ vip.daddr.v4 = iph->daddr;
+ vip.dport = dport;
+ payload_len = bpf_ntohs(iph->tot_len);
+
+ tnl = bpf_map_lookup_elem(&vip2tnl, &vip);
+ /* It only does v4-in-v4 */
+ if (!tnl || tnl->family != AF_INET)
+ return XDP_PASS;
+
+ if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr)))
+ return XDP_DROP;
+
+ data = (void *)(long)xdp->data;
+ data_end = (void *)(long)xdp->data_end;
+
+ new_eth = data;
+ iph = data + sizeof(*new_eth);
+ old_eth = data + sizeof(*iph);
+
+ if (new_eth + 1 > data_end ||
+ old_eth + 1 > data_end ||
+ iph + 1 > data_end)
+ return XDP_DROP;
+
+ set_ethhdr(new_eth, old_eth, tnl, bpf_htons(ETH_P_IP));
+
+ iph->version = 4;
+ iph->ihl = sizeof(*iph) >> 2;
+ iph->frag_off = 0;
+ iph->protocol = IPPROTO_IPIP;
+ iph->check = 0;
+ iph->tos = 0;
+ iph->tot_len = bpf_htons(payload_len + sizeof(*iph));
+ iph->daddr = tnl->daddr.v4;
+ iph->saddr = tnl->saddr.v4;
+ iph->ttl = 8;
+
+ next_iph = (__u16 *)iph;
+#pragma clang loop unroll(disable)
+ for (i = 0; i < sizeof(*iph) >> 1; i++)
+ csum += *next_iph++;
+
+ iph->check = ~((csum & 0xffff) + (csum >> 16));
+
+ count_tx(vip.protocol);
+
+ return XDP_TX;
+}
+
+static __always_inline int handle_ipv6(struct xdp_md *xdp)
+{
+ void *data_end = (void *)(long)xdp->data_end;
+ void *data = (void *)(long)xdp->data;
+ struct iptnl_info *tnl;
+ struct ethhdr *new_eth;
+ struct ethhdr *old_eth;
+ struct ipv6hdr *ip6h = data + sizeof(struct ethhdr);
+ __u16 payload_len;
+ struct vip vip = {};
+ int dport;
+
+ if (ip6h + 1 > data_end)
+ return XDP_DROP;
+
+ dport = get_dport(ip6h + 1, data_end, ip6h->nexthdr);
+ if (dport == -1)
+ return XDP_DROP;
+
+ vip.protocol = ip6h->nexthdr;
+ vip.family = AF_INET6;
+ memcpy(vip.daddr.v6, ip6h->daddr.s6_addr32, sizeof(vip.daddr));
+ vip.dport = dport;
+ payload_len = ip6h->payload_len;
+
+ tnl = bpf_map_lookup_elem(&vip2tnl, &vip);
+ /* It only does v6-in-v6 */
+ if (!tnl || tnl->family != AF_INET6)
+ return XDP_PASS;
+
+ if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr)))
+ return XDP_DROP;
+
+ data = (void *)(long)xdp->data;
+ data_end = (void *)(long)xdp->data_end;
+
+ new_eth = data;
+ ip6h = data + sizeof(*new_eth);
+ old_eth = data + sizeof(*ip6h);
+
+ if (new_eth + 1 > data_end || old_eth + 1 > data_end ||
+ ip6h + 1 > data_end)
+ return XDP_DROP;
+
+ set_ethhdr(new_eth, old_eth, tnl, bpf_htons(ETH_P_IPV6));
+
+ ip6h->version = 6;
+ ip6h->priority = 0;
+ memset(ip6h->flow_lbl, 0, sizeof(ip6h->flow_lbl));
+ ip6h->payload_len = bpf_htons(bpf_ntohs(payload_len) + sizeof(*ip6h));
+ ip6h->nexthdr = IPPROTO_IPV6;
+ ip6h->hop_limit = 8;
+ memcpy(ip6h->saddr.s6_addr32, tnl->saddr.v6, sizeof(tnl->saddr.v6));
+ memcpy(ip6h->daddr.s6_addr32, tnl->daddr.v6, sizeof(tnl->daddr.v6));
+
+ count_tx(vip.protocol);
+
+ return XDP_TX;
+}
+
+SEC("xdp_tx_iptunnel")
+int _xdp_tx_iptunnel(struct xdp_md *xdp)
+{
+ void *data_end = (void *)(long)xdp->data_end;
+ void *data = (void *)(long)xdp->data;
+ struct ethhdr *eth = data;
+ __u16 h_proto;
+
+ if (eth + 1 > data_end)
+ return XDP_DROP;
+
+ h_proto = eth->h_proto;
+
+ if (h_proto == bpf_htons(ETH_P_IP))
+ return handle_ipv4(xdp);
+ else if (h_proto == bpf_htons(ETH_P_IPV6))
+ return handle_ipv6(xdp);
+ else
+ return XDP_DROP;
+}
+
+char _license[] SEC("license") = "GPL";
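
The checksum loop in handle_ipv4() above sums the ten 16-bit words of the rebuilt IPv4 header and folds the carries once. A standalone sketch of why a single fold suffices for an options-less header (hypothetical helper name):

	#include <stdint.h>

	/* Ten 16-bit words sum to at most 0x9fff6, so one fold of the
	 * carry bits already fits in 16 bits. The checksum field must
	 * be zeroed before summing, as the XDP program does above. */
	static uint16_t iph_csum_sketch(const uint16_t *hdr)
	{
		uint32_t csum = 0;
		int i;

		for (i = 0; i < 10; i++)
			csum += hdr[i];
		return (uint16_t)~((csum & 0xffff) + (csum >> 16));
	}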
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
index 4fe6aaad22a4..d2eddb5553d1 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
@@ -163,52 +163,66 @@ struct lb_stats {
__u64 v1;
};
-struct bpf_map_def __attribute__ ((section("maps"), used)) vip_map = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ struct vip_definition *key;
+ struct vip_meta *value;
+} vip_map SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(struct vip_definition),
- .value_size = sizeof(struct vip_meta),
.max_entries = 512,
- .map_flags = 0,
};
-struct bpf_map_def __attribute__ ((section("maps"), used)) lru_cache = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 map_flags;
+ struct flow_key *key;
+ struct real_pos_lru *value;
+} lru_cache SEC(".maps") = {
.type = BPF_MAP_TYPE_LRU_HASH,
- .key_size = sizeof(struct flow_key),
- .value_size = sizeof(struct real_pos_lru),
.max_entries = 300,
.map_flags = 1U << 1,
};
-struct bpf_map_def __attribute__ ((section("maps"), used)) ch_rings = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ __u32 *value;
+} ch_rings SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(__u32),
.max_entries = 12 * 655,
- .map_flags = 0,
};
-struct bpf_map_def __attribute__ ((section("maps"), used)) reals = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ struct real_definition *value;
+} reals SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(struct real_definition),
.max_entries = 40,
- .map_flags = 0,
};
-struct bpf_map_def __attribute__ ((section("maps"), used)) stats = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ struct lb_stats *value;
+} stats SEC(".maps") = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(struct lb_stats),
.max_entries = 515,
- .map_flags = 0,
};
-struct bpf_map_def __attribute__ ((section("maps"), used)) ctl_array = {
+struct {
+ __u32 type;
+ __u32 max_entries;
+ __u32 *key;
+ struct ctl_value *value;
+} ctl_array SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(__u32),
- .value_size = sizeof(struct ctl_value),
.max_entries = 16,
- .map_flags = 0,
};
struct eth_hdr {
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index 289daf54dec4..8351cb5f4a20 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -4016,13 +4016,9 @@ struct btf_file_test {
};
static struct btf_file_test file_tests[] = {
-{
- .file = "test_btf_haskv.o",
-},
-{
- .file = "test_btf_nokv.o",
- .btf_kv_notfound = true,
-},
+ { .file = "test_btf_haskv.o", },
+ { .file = "test_btf_newkv.o", },
+ { .file = "test_btf_nokv.o", .btf_kv_notfound = true, },
};
static int do_test_file(unsigned int test_num)
diff --git a/tools/testing/selftests/bpf/test_select_reuseport.c b/tools/testing/selftests/bpf/test_select_reuseport.c
index 75646d9b34aa..7566c13eb51a 100644
--- a/tools/testing/selftests/bpf/test_select_reuseport.c
+++ b/tools/testing/selftests/bpf/test_select_reuseport.c
@@ -523,6 +523,58 @@ static void test_pass_on_err(int type, sa_family_t family)
printf("OK\n");
}
+static void test_detach_bpf(int type, sa_family_t family)
+{
+#ifdef SO_DETACH_REUSEPORT_BPF
+ __u32 nr_run_before = 0, nr_run_after = 0, tmp, i;
+ struct epoll_event ev;
+ int cli_fd, err, nev;
+ struct cmd cmd = {};
+ int optvalue = 0;
+
+ printf("%s: ", __func__);
+ err = setsockopt(sk_fds[0], SOL_SOCKET, SO_DETACH_REUSEPORT_BPF,
+ &optvalue, sizeof(optvalue));
+ CHECK(err == -1, "setsockopt(SO_DETACH_REUSEPORT_BPF)",
+ "err:%d errno:%d\n", err, errno);
+
+ err = setsockopt(sk_fds[1], SOL_SOCKET, SO_DETACH_REUSEPORT_BPF,
+ &optvalue, sizeof(optvalue));
+ CHECK(err == 0 || errno != ENOENT, "setsockopt(SO_DETACH_REUSEPORT_BPF)",
+ "err:%d errno:%d\n", err, errno);
+
+ for (i = 0; i < NR_RESULTS; i++) {
+ err = bpf_map_lookup_elem(result_map, &i, &tmp);
+ CHECK(err == -1, "lookup_elem(result_map)",
+ "i:%u err:%d errno:%d\n", i, err, errno);
+ nr_run_before += tmp;
+ }
+
+ cli_fd = send_data(type, family, &cmd, sizeof(cmd), PASS);
+ nev = epoll_wait(epfd, &ev, 1, 5);
+ CHECK(nev <= 0, "nev <= 0",
+ "nev:%d expected:1 type:%d family:%d data:(0, 0)\n",
+ nev, type, family);
+
+ for (i = 0; i < NR_RESULTS; i++) {
+ err = bpf_map_lookup_elem(result_map, &i, &tmp);
+ CHECK(err == -1, "lookup_elem(result_map)",
+ "i:%u err:%d errno:%d\n", i, err, errno);
+ nr_run_after += tmp;
+ }
+
+ CHECK(nr_run_before != nr_run_after,
+ "nr_run_before != nr_run_after",
+ "nr_run_before:%u nr_run_after:%u\n",
+ nr_run_before, nr_run_after);
+
+ printf("OK\n");
+ close(cli_fd);
+#else
+ printf("%s: SKIP\n", __func__);
+#endif
+}
+
static void prepare_sk_fds(int type, sa_family_t family, bool inany)
{
const int first = REUSEPORT_ARRAY_SIZE - 1;
@@ -664,6 +716,8 @@ static void test_all(void)
test_pass(type, family);
test_syncookie(type, family);
test_pass_on_err(type, family);
+ /* Must be the last test */
+ test_detach_bpf(type, family);
cleanup_per_test();
printf("\n");
diff --git a/tools/testing/selftests/bpf/test_socket_cookie.c b/tools/testing/selftests/bpf/test_socket_cookie.c
index cac8ee57a013..15653b0e26eb 100644
--- a/tools/testing/selftests/bpf/test_socket_cookie.c
+++ b/tools/testing/selftests/bpf/test_socket_cookie.c
@@ -18,6 +18,11 @@
#define CG_PATH "/foo"
#define SOCKET_COOKIE_PROG "./socket_cookie_prog.o"
+struct socket_cookie {
+ __u64 cookie_key;
+ __u32 cookie_value;
+};
+
static int start_server(void)
{
struct sockaddr_in6 addr;
@@ -89,8 +94,7 @@ static int validate_map(struct bpf_map *map, int client_fd)
__u32 cookie_expected_value;
struct sockaddr_in6 addr;
socklen_t len = sizeof(addr);
- __u32 cookie_value;
- __u64 cookie_key;
+ struct socket_cookie val;
int err = 0;
int map_fd;
@@ -101,17 +105,7 @@ static int validate_map(struct bpf_map *map, int client_fd)
map_fd = bpf_map__fd(map);
- err = bpf_map_get_next_key(map_fd, NULL, &cookie_key);
- if (err) {
- log_err("Can't get cookie key from map");
- goto out;
- }
-
- err = bpf_map_lookup_elem(map_fd, &cookie_key, &cookie_value);
- if (err) {
- log_err("Can't get cookie value from map");
- goto out;
- }
+ err = bpf_map_lookup_elem(map_fd, &client_fd, &val);
err = getsockname(client_fd, (struct sockaddr *)&addr, &len);
if (err) {
@@ -120,8 +114,8 @@ static int validate_map(struct bpf_map *map, int client_fd)
}
cookie_expected_value = (ntohs(addr.sin6_port) << 8) | 0xFF;
- if (cookie_value != cookie_expected_value) {
- log_err("Unexpected value in map: %x != %x", cookie_value,
+ if (val.cookie_value != cookie_expected_value) {
+ log_err("Unexpected value in map: %x != %x", val.cookie_value,
cookie_expected_value);
goto err;
}
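
The userspace side now looks the cookie up directly by the client socket's fd, presumably because the cookie map was converted to per-socket storage, where the lookup key from userspace is a socket fd rather than an index. A sketch of just that call (struct socket_cookie mirrors the declaration added above):

	#include <stdio.h>
	#include <linux/types.h>
	#include <bpf/bpf.h>

	struct socket_cookie {
		__u64 cookie_key;
		__u32 cookie_value;
	};

	/* For sk_storage-style maps the lookup key is the socket's fd. */
	static void dump_cookie(int map_fd, int sock_fd)
	{
		struct socket_cookie val = {};

		if (bpf_map_lookup_elem(map_fd, &sock_fd, &val) == 0)
			printf("key %llx value %x\n",
			       (unsigned long long)val.cookie_key,
			       val.cookie_value);
	}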
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 6cb307201958..c5514daf8865 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -234,10 +234,10 @@ static void bpf_fill_scale1(struct bpf_test *self)
insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
-8 * (k % 64 + 1));
}
- /* every jump adds 1 step to insn_processed, so to stay exactly
- * within 1m limit add MAX_TEST_INSNS - MAX_JMP_SEQ - 1 MOVs and 1 EXIT
+ /* is_state_visited() doesn't allocate state for pruning for every jump.
+ * Hence multiply jmps by 4 to accommodate that heuristic
*/
- while (i < MAX_TEST_INSNS - MAX_JMP_SEQ - 1)
+ while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
insn[i] = BPF_EXIT_INSN();
self->prog_len = i + 1;
@@ -266,10 +266,7 @@ static void bpf_fill_scale2(struct bpf_test *self)
insn[i++] = BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6,
-8 * (k % (64 - 4 * FUNC_NEST) + 1));
}
- /* every jump adds 1 step to insn_processed, so to stay exactly
- * within 1m limit add MAX_TEST_INSNS - MAX_JMP_SEQ - 1 MOVs and 1 EXIT
- */
- while (i < MAX_TEST_INSNS - MAX_JMP_SEQ - 1)
+ while (i < MAX_TEST_INSNS - MAX_JMP_SEQ * 4)
insn[i++] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 42);
insn[i] = BPF_EXIT_INSN();
self->prog_len = i + 1;
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index 9093a8f64dc6..2d752c4f8d9d 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -215,9 +215,11 @@
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_JMP_IMM(BPF_JA, 0, 0, -6),
},
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge from insn",
- .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+ .errstr_unpriv = "back-edge from insn",
+ .result_unpriv = REJECT,
+ .result = ACCEPT,
+ .retval = 1,
},
{
"calls: conditional call 4",
@@ -250,22 +252,24 @@
BPF_MOV64_IMM(BPF_REG_0, 3),
BPF_EXIT_INSN(),
},
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge from insn",
- .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .retval = 1,
},
{
"calls: conditional call 6",
.insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3),
BPF_EXIT_INSN(),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
- .prog_type = BPF_PROG_TYPE_TRACEPOINT,
- .errstr = "back-edge from insn",
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .errstr = "infinite loop detected",
.result = REJECT,
},
{
diff --git a/tools/testing/selftests/bpf/verifier/cfg.c b/tools/testing/selftests/bpf/verifier/cfg.c
index 349c0862fb4c..4eb76ed739ce 100644
--- a/tools/testing/selftests/bpf/verifier/cfg.c
+++ b/tools/testing/selftests/bpf/verifier/cfg.c
@@ -41,7 +41,8 @@
BPF_JMP_IMM(BPF_JA, 0, 0, -1),
BPF_EXIT_INSN(),
},
- .errstr = "back-edge",
+ .errstr = "unreachable insn 1",
+ .errstr_unpriv = "back-edge",
.result = REJECT,
},
{
@@ -53,18 +54,20 @@
BPF_JMP_IMM(BPF_JA, 0, 0, -4),
BPF_EXIT_INSN(),
},
- .errstr = "back-edge",
+ .errstr = "unreachable insn 4",
+ .errstr_unpriv = "back-edge",
.result = REJECT,
},
{
"conditional loop",
.insns = {
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
BPF_EXIT_INSN(),
},
- .errstr = "back-edge",
+ .errstr = "infinite loop detected",
+ .errstr_unpriv = "back-edge",
.result = REJECT,
},
diff --git a/tools/testing/selftests/bpf/verifier/direct_packet_access.c b/tools/testing/selftests/bpf/verifier/direct_packet_access.c
index d5c596fdc4b9..2c5fbe7bcd27 100644
--- a/tools/testing/selftests/bpf/verifier/direct_packet_access.c
+++ b/tools/testing/selftests/bpf/verifier/direct_packet_access.c
@@ -511,7 +511,8 @@
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
- BPF_MOV64_IMM(BPF_REG_0, 0xffffffff),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff),
diff --git a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
index 1f39d845c64f..67ab12410050 100644
--- a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
+++ b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c
@@ -29,9 +29,9 @@
{
"helper access to variable memory: stack, bitwise AND, zero included",
.insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
@@ -46,9 +46,9 @@
{
"helper access to variable memory: stack, bitwise AND + JMP, wrong max",
.insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 65),
@@ -122,9 +122,9 @@
{
"helper access to variable memory: stack, JMP, bounds + offset",
.insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 5),
@@ -143,9 +143,9 @@
{
"helper access to variable memory: stack, JMP, wrong max",
.insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 65, 4),
@@ -163,9 +163,9 @@
{
"helper access to variable memory: stack, JMP, no max check",
.insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_MOV64_IMM(BPF_REG_4, 0),
@@ -183,9 +183,9 @@
{
"helper access to variable memory: stack, JMP, no min check",
.insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 64, 3),
@@ -201,9 +201,9 @@
{
"helper access to variable memory: stack, JMP (signed), no min check",
.insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
- BPF_MOV64_IMM(BPF_REG_2, 16),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, -128),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, 64, 3),
@@ -244,6 +244,7 @@
{
"helper access to variable memory: map, JMP, wrong max",
.insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
@@ -251,7 +252,7 @@
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) + 1, 4),
@@ -262,7 +263,7 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map_hash_48b = { 3 },
+ .fixup_map_hash_48b = { 4 },
.errstr = "invalid access to map value, value_size=48 off=0 size=49",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -296,6 +297,7 @@
{
"helper access to variable memory: map adjusted, JMP, wrong max",
.insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
@@ -304,7 +306,7 @@
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 20),
- BPF_MOV64_IMM(BPF_REG_2, sizeof(struct test_val)),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_2, sizeof(struct test_val) - 19, 4),
@@ -315,7 +317,7 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
- .fixup_map_hash_48b = { 3 },
+ .fixup_map_hash_48b = { 4 },
.errstr = "R1 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
@@ -337,8 +339,8 @@
{
"helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
@@ -562,6 +564,7 @@
{
"helper access to variable memory: 8 bytes leak",
.insns = {
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -64),
BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -572,7 +575,6 @@
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
- BPF_MOV64_IMM(BPF_REG_2, 1),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
diff --git a/tools/testing/selftests/bpf/verifier/loops1.c b/tools/testing/selftests/bpf/verifier/loops1.c
new file mode 100644
index 000000000000..5e980a5ab69d
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/loops1.c
@@ -0,0 +1,161 @@
+{
+ "bounded loop, count to 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .retval = 4,
+},
+{
+ "bounded loop, count to 20",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 3),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 20, -2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "bounded loop, count from positive unknown to 4",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_0, 0, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .retval = 4,
+},
+{
+ "bounded loop, count from totally unknown to 4",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "bounded loop, count to 4 with equality",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 4, -2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "bounded loop, start in the middle",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_A(1),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "back-edge",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .retval = 4,
+},
+{
+ "bounded loop containing a forward jump",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -3),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ .retval = 4,
+},
+{
+ "bounded loop that jumps out rather than in",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_6, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_6, 10000, 2),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_JMP_A(-4),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "infinite loop after a conditional jump",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 5),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, 2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_JMP_A(-2),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "program is too large",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "bounded recursion",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 4, 1),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -5),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "back-edge",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "infinite loop in two jumps",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_A(0),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 4, -2),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "loop detected",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "infinite loop: three-jump trick",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 2, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 2, -11),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .errstr = "loop detected",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
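
These cases are the instruction-level forms of ordinary counting loops, now accepted thanks to the verifier's bounded-loop support. The first test, "bounded loop, count to 4", is roughly this C loop (a sketch, not part of the suite):

	/* The verifier walks the loop body a bounded number of times
	 * and can prove the counter equals 4 at exit, matching the
	 * test's .retval = 4. */
	int count_to_4(void)
	{
		int i = 0;

		do {
			i++;
		} while (i < 4);
		return i;
	}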
diff --git a/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c b/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c
index bbdba990fefb..da7a4b37cb98 100644
--- a/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c
+++ b/tools/testing/selftests/bpf/verifier/prevent_map_lookup.c
@@ -29,21 +29,6 @@
.prog_type = BPF_PROG_TYPE_SOCK_OPS,
},
{
- "prevent map lookup in xskmap",
- .insns = {
- BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
- BPF_LD_MAP_FD(BPF_REG_1, 0),
- BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
- BPF_EXIT_INSN(),
- },
- .fixup_map_xskmap = { 3 },
- .result = REJECT,
- .errstr = "cannot pass map_type 17 into func bpf_map_lookup_elem",
- .prog_type = BPF_PROG_TYPE_XDP,
-},
-{
"prevent map lookup in stack trace",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c
index b31cd2cf50d0..9ed192e14f5f 100644
--- a/tools/testing/selftests/bpf/verifier/sock.c
+++ b/tools/testing/selftests/bpf/verifier/sock.c
@@ -498,3 +498,21 @@
.result = REJECT,
.errstr = "cannot pass map_type 24 into func bpf_map_lookup_elem",
},
+{
+ "bpf_map_lookup_elem(xskmap, &key); xs->queue_id",
+ .insns = {
+ BPF_ST_MEM(BPF_W, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, offsetof(struct bpf_xdp_sock, queue_id)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_xskmap = { 3 },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = ACCEPT,
+},
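
In C, this new ACCEPT test corresponds roughly to the following XDP fragment (a sketch; xsks_map is a hypothetical xskmap, defined here with the literal key_size/value_size members used elsewhere in this patch):

	#include <linux/bpf.h>
	#include "bpf_helpers.h"

	struct {
		__u32 type;
		__u32 max_entries;
		__u32 key_size;
		__u32 value_size;
	} xsks_map SEC(".maps") = {
		.type = BPF_MAP_TYPE_XSKMAP,
		.max_entries = 4,
		.key_size = sizeof(__u32),
		.value_size = sizeof(__u32),
	};

	SEC("xdp")
	int read_queue_id(struct xdp_md *ctx)
	{
		struct bpf_xdp_sock *xs;
		__u32 key = 0;

		/* xskmap lookups now return a bpf_xdp_sock pointer whose
		 * queue_id field is readable after the NULL check. */
		xs = bpf_map_lookup_elem(&xsks_map, &key);
		if (!xs)
			return XDP_PASS;
		return xs->queue_id ? XDP_PASS : XDP_DROP;
	}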