From b03e19465b972bd06104207380e0e42e7f03ab29 Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko
Date: Sat, 19 Feb 2022 20:27:20 -0800
Subject: selftests/bpf: Fix btfgen tests

There turned out to be a few problems with the btfgen selftests.

First, the core_btfgen tests are failing in BPF CI due to the use of the full-featured bpftool, which has extra dependencies on libbfd, libcap, etc. Those are present in BPF CI's build environment, but the corresponding shared libraries are missing from the QEMU image in which test_progs runs. To fix this problem, use the minimal bootstrap version of bpftool instead. It depends only on libelf and libz, same as libbpf, so it doesn't add any new requirements (and bootstrap bpftool still implements the entire `bpftool gen` functionality, which is quite convenient).

The second problem is even more interesting. Both core_btfgen and core_reloc reuse the same array of struct core_reloc_test_case test case definitions. That in itself is not a problem, but the btfgen test replaces the test_case->btf_src_file property with the path to a temporary file into which the minimized BTF is output by bpftool. This interferes with the original core_reloc tests, depending on the order of test execution (in sequential mode, core_btfgen runs first and screws up the subsequent core_reloc run by pointing it at an already-deleted temporary file instead of the original BTF file) and on whether the two runs share the same process (in parallel mode the chances are high that they run in two separate processes and so don't interfere with each other). To prevent this interference, create and use a local copy of the test definition. Mark the original array as constant to catch accidental modifications.

Note that setup_type_id_case_success() and setup_type_id_case_failure() still modify the common test_case->output memory area, but that is OK, as each setup function has to re-initialize it completely anyway. In sequential mode this leads to deterministic and correct initialization. In parallel mode the two runners will either each have their own process, or, if core_reloc and core_btfgen happen to be run by the same worker process, they will still do this sequentially within that worker. If they are sharded across multiple processes, they don't really share anything anyway.

Also, rename core_btfgen to core_reloc_btfgen to make it more obvious that it is just a "flavor" of the core_reloc test, not an independent set of tests.

The last problem that needed solving was that the location of bpftool differs between test_progs and test_progs' flavors (e.g., test_progs-no_alu32). To keep it simple, create a symlink to bpftool both inside the selftests/bpf/ directory and inside each flavor's subdirectory. That way, from inside the core_reloc test, the bpftool location is always just "./bpftool".

v2->v3:
- fix bpftool location relative to test_progs-no_alu32;
v1->v2:
- fix corruption of core_reloc_test_case.
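For illustration only, here is a minimal sketch of the local-copy pattern the fix uses. The names here are hypothetical; the real code lives in run_core_reloc_tests() in the diff below:

	struct test_def {
		const char *name;
		const char *btf_src_file;
	};

	/* const: any accidental in-place modification now fails to compile */
	static const struct test_def test_defs[] = {
		{ .name = "kernel", .btf_src_file = "/path/to/orig.btf" },
	};

	static void run_one(int i, bool use_btfgen)
	{
		struct test_def copy = test_defs[i];	/* shallow copy of the shared definition */
		struct test_def *t = &copy;

		if (use_btfgen)
			t->btf_src_file = "/tmp/minimized.btf";	/* mutates only the copy */
		/* ... run the test through *t ... */
	}

Note that the copy is shallow; that is fine here because, as the message above explains, each setup function re-initializes the shared output area completely anyway.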
Fixes: 704c91e59fe0 ("selftests/bpf: Test "bpftool gen min_core_btf")
Signed-off-by: Andrii Nakryiko
Signed-off-by: Alexei Starovoitov
Acked-by: Yucong Sun
Link: https://lore.kernel.org/bpf/20220220042720.3336684-1-andrii@kernel.org
---
tools/testing/selftests/bpf/prog_tests/core_reloc.c | 17 +++++++++++------
1 file changed, 11 insertions(+), 6 deletions(-)
(limited to 'tools/testing/selftests/bpf/prog_tests')

diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
index 8fbb40a832d5..f28f75aa9154 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
@@ -512,7 +512,7 @@ static int __trigger_module_test_read(const struct core_reloc_test_case *test) } -static struct core_reloc_test_case test_cases[] = { +static const struct core_reloc_test_case test_cases[] = { /* validate we can find kernel image and use its BTF for relocs */ { .case_name = "kernel",
@@ -843,7 +843,7 @@ static int run_btfgen(const char *src_btf, const char *dst_btf, const char *objp int n; n = snprintf(command, sizeof(command), - "./tools/build/bpftool/bpftool gen min_core_btf %s %s %s", + "./bpftool gen min_core_btf %s %s %s", src_btf, dst_btf, objpath); if (n < 0 || n >= sizeof(command)) return -1;
@@ -855,7 +855,7 @@ static void run_core_reloc_tests(bool use_btfgen) { const size_t mmap_sz = roundup_page(sizeof(struct data)); DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts); - struct core_reloc_test_case *test_case; + struct core_reloc_test_case *test_case, test_case_copy; const char *tp_name, *probe_name; int err, i, equal, fd; struct bpf_link *link = NULL;
@@ -870,7 +870,10 @@ static void run_core_reloc_tests(bool use_btfgen) for (i = 0; i < ARRAY_SIZE(test_cases); i++) { char btf_file[] = "/tmp/core_reloc.btf.XXXXXX"; - test_case = &test_cases[i]; + + test_case_copy = test_cases[i]; + test_case = &test_case_copy; + if (!test__start_subtest(test_case->case_name)) continue;
@@ -881,6 +884,7 @@ static void run_core_reloc_tests(bool use_btfgen) /* generate a "minimal" BTF file and use it as source */ if (use_btfgen) { + if (!test_case->btf_src_file || test_case->fails) { test__skip(); continue; }
@@ -989,7 +993,8 @@ cleanup: CHECK_FAIL(munmap(mmap_data, mmap_sz)); mmap_data = NULL; } - remove(btf_file); + if (use_btfgen) + remove(test_case->btf_src_file); bpf_link__destroy(link); link = NULL; bpf_object__close(obj);
@@ -1001,7 +1006,7 @@ void test_core_reloc(void) run_core_reloc_tests(false); } -void test_core_btfgen(void) +void test_core_reloc_btfgen(void) { run_core_reloc_tests(true); }
-- cgit v1.2.3

From 07609c193a0cfd1e3532a7dd81383c9d458f485c Mon Sep 17 00:00:00 2001
From: Hou Tao
Date: Thu, 17 Feb 2022 15:22:32 +0800
Subject: bpf, selftests: Use raw_tp program for atomic test

Currently the atomic tests attach an fentry program and run it through bpf_prog_test_run_opts(), but attaching an fentry program depends on the BPF trampoline, which is only available on x86-64. Since many archs have atomic support, use a raw_tp program instead.
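To make the mechanics concrete, here is a minimal sketch (not the actual selftest; file and variable names are illustrative) of a raw_tp program exercising an atomic add, driven directly through bpf_prog_test_run_opts() with no attach step:

	/* BPF side: raw_tp programs need no trampoline, so this works on any arch */
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	__u64 add64_value = 1;
	__u32 pid = 0;	/* set by userspace so only the test process counts */

	SEC("raw_tp/sys_enter")
	int add(const void *ctx)
	{
		if (pid != (bpf_get_current_pid_tgid() >> 32))
			return 0;
		__sync_fetch_and_add(&add64_value, 2);	/* compiles to a BPF atomic add (needs clang -mcpu=v3) */
		return 0;
	}

	char _license[] SEC("license") = "GPL";

	/*
	 * Userspace side (fragment, assuming a loaded lskel): no attach step,
	 * each bpf_prog_test_run_opts() call runs the program once:
	 *
	 *	LIBBPF_OPTS(bpf_test_run_opts, topts);
	 *	skel->bss->pid = getpid();
	 *	err = bpf_prog_test_run_opts(skel->progs.add.prog_fd, &topts);
	 */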
Signed-off-by: Hou Tao Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220217072232.1186625-5-houtao1@huawei.com --- tools/testing/selftests/bpf/prog_tests/atomics.c | 91 ++++++------------------ tools/testing/selftests/bpf/progs/atomics.c | 28 ++++---- 2 files changed, 36 insertions(+), 83 deletions(-) (limited to 'tools/testing/selftests/bpf/prog_tests') diff --git a/tools/testing/selftests/bpf/prog_tests/atomics.c b/tools/testing/selftests/bpf/prog_tests/atomics.c index ab62aba10e2b..13e101f370a1 100644 --- a/tools/testing/selftests/bpf/prog_tests/atomics.c +++ b/tools/testing/selftests/bpf/prog_tests/atomics.c @@ -7,19 +7,15 @@ static void test_add(struct atomics_lskel *skel) { int err, prog_fd; - int link_fd; LIBBPF_OPTS(bpf_test_run_opts, topts); - link_fd = atomics_lskel__add__attach(skel); - if (!ASSERT_GT(link_fd, 0, "attach(add)")) - return; - + /* No need to attach it, just run it directly */ prog_fd = skel->progs.add.prog_fd; err = bpf_prog_test_run_opts(prog_fd, &topts); if (!ASSERT_OK(err, "test_run_opts err")) - goto cleanup; + return; if (!ASSERT_OK(topts.retval, "test_run_opts retval")) - goto cleanup; + return; ASSERT_EQ(skel->data->add64_value, 3, "add64_value"); ASSERT_EQ(skel->bss->add64_result, 1, "add64_result"); @@ -31,27 +27,20 @@ static void test_add(struct atomics_lskel *skel) ASSERT_EQ(skel->bss->add_stack_result, 1, "add_stack_result"); ASSERT_EQ(skel->data->add_noreturn_value, 3, "add_noreturn_value"); - -cleanup: - close(link_fd); } static void test_sub(struct atomics_lskel *skel) { int err, prog_fd; - int link_fd; LIBBPF_OPTS(bpf_test_run_opts, topts); - link_fd = atomics_lskel__sub__attach(skel); - if (!ASSERT_GT(link_fd, 0, "attach(sub)")) - return; - + /* No need to attach it, just run it directly */ prog_fd = skel->progs.sub.prog_fd; err = bpf_prog_test_run_opts(prog_fd, &topts); if (!ASSERT_OK(err, "test_run_opts err")) - goto cleanup; + return; if (!ASSERT_OK(topts.retval, "test_run_opts retval")) - goto cleanup; + return; ASSERT_EQ(skel->data->sub64_value, -1, "sub64_value"); ASSERT_EQ(skel->bss->sub64_result, 1, "sub64_result"); @@ -63,27 +52,20 @@ static void test_sub(struct atomics_lskel *skel) ASSERT_EQ(skel->bss->sub_stack_result, 1, "sub_stack_result"); ASSERT_EQ(skel->data->sub_noreturn_value, -1, "sub_noreturn_value"); - -cleanup: - close(link_fd); } static void test_and(struct atomics_lskel *skel) { int err, prog_fd; - int link_fd; LIBBPF_OPTS(bpf_test_run_opts, topts); - link_fd = atomics_lskel__and__attach(skel); - if (!ASSERT_GT(link_fd, 0, "attach(and)")) - return; - + /* No need to attach it, just run it directly */ prog_fd = skel->progs.and.prog_fd; err = bpf_prog_test_run_opts(prog_fd, &topts); if (!ASSERT_OK(err, "test_run_opts err")) - goto cleanup; + return; if (!ASSERT_OK(topts.retval, "test_run_opts retval")) - goto cleanup; + return; ASSERT_EQ(skel->data->and64_value, 0x010ull << 32, "and64_value"); ASSERT_EQ(skel->bss->and64_result, 0x110ull << 32, "and64_result"); @@ -92,26 +74,20 @@ static void test_and(struct atomics_lskel *skel) ASSERT_EQ(skel->bss->and32_result, 0x110, "and32_result"); ASSERT_EQ(skel->data->and_noreturn_value, 0x010ull << 32, "and_noreturn_value"); -cleanup: - close(link_fd); } static void test_or(struct atomics_lskel *skel) { int err, prog_fd; - int link_fd; LIBBPF_OPTS(bpf_test_run_opts, topts); - link_fd = atomics_lskel__or__attach(skel); - if (!ASSERT_GT(link_fd, 0, "attach(or)")) - return; - + /* No need to attach it, just run it directly */ prog_fd = 
skel->progs.or.prog_fd; err = bpf_prog_test_run_opts(prog_fd, &topts); if (!ASSERT_OK(err, "test_run_opts err")) - goto cleanup; + return; if (!ASSERT_OK(topts.retval, "test_run_opts retval")) - goto cleanup; + return; ASSERT_EQ(skel->data->or64_value, 0x111ull << 32, "or64_value"); ASSERT_EQ(skel->bss->or64_result, 0x110ull << 32, "or64_result"); @@ -120,26 +96,20 @@ static void test_or(struct atomics_lskel *skel) ASSERT_EQ(skel->bss->or32_result, 0x110, "or32_result"); ASSERT_EQ(skel->data->or_noreturn_value, 0x111ull << 32, "or_noreturn_value"); -cleanup: - close(link_fd); } static void test_xor(struct atomics_lskel *skel) { int err, prog_fd; - int link_fd; LIBBPF_OPTS(bpf_test_run_opts, topts); - link_fd = atomics_lskel__xor__attach(skel); - if (!ASSERT_GT(link_fd, 0, "attach(xor)")) - return; - + /* No need to attach it, just run it directly */ prog_fd = skel->progs.xor.prog_fd; err = bpf_prog_test_run_opts(prog_fd, &topts); if (!ASSERT_OK(err, "test_run_opts err")) - goto cleanup; + return; if (!ASSERT_OK(topts.retval, "test_run_opts retval")) - goto cleanup; + return; ASSERT_EQ(skel->data->xor64_value, 0x101ull << 32, "xor64_value"); ASSERT_EQ(skel->bss->xor64_result, 0x110ull << 32, "xor64_result"); @@ -148,26 +118,20 @@ static void test_xor(struct atomics_lskel *skel) ASSERT_EQ(skel->bss->xor32_result, 0x110, "xor32_result"); ASSERT_EQ(skel->data->xor_noreturn_value, 0x101ull << 32, "xor_nxoreturn_value"); -cleanup: - close(link_fd); } static void test_cmpxchg(struct atomics_lskel *skel) { int err, prog_fd; - int link_fd; LIBBPF_OPTS(bpf_test_run_opts, topts); - link_fd = atomics_lskel__cmpxchg__attach(skel); - if (!ASSERT_GT(link_fd, 0, "attach(cmpxchg)")) - return; - + /* No need to attach it, just run it directly */ prog_fd = skel->progs.cmpxchg.prog_fd; err = bpf_prog_test_run_opts(prog_fd, &topts); if (!ASSERT_OK(err, "test_run_opts err")) - goto cleanup; + return; if (!ASSERT_OK(topts.retval, "test_run_opts retval")) - goto cleanup; + return; ASSERT_EQ(skel->data->cmpxchg64_value, 2, "cmpxchg64_value"); ASSERT_EQ(skel->bss->cmpxchg64_result_fail, 1, "cmpxchg_result_fail"); @@ -176,45 +140,34 @@ static void test_cmpxchg(struct atomics_lskel *skel) ASSERT_EQ(skel->data->cmpxchg32_value, 2, "lcmpxchg32_value"); ASSERT_EQ(skel->bss->cmpxchg32_result_fail, 1, "cmpxchg_result_fail"); ASSERT_EQ(skel->bss->cmpxchg32_result_succeed, 1, "cmpxchg_result_succeed"); - -cleanup: - close(link_fd); } static void test_xchg(struct atomics_lskel *skel) { int err, prog_fd; - int link_fd; LIBBPF_OPTS(bpf_test_run_opts, topts); - link_fd = atomics_lskel__xchg__attach(skel); - if (!ASSERT_GT(link_fd, 0, "attach(xchg)")) - return; - + /* No need to attach it, just run it directly */ prog_fd = skel->progs.xchg.prog_fd; err = bpf_prog_test_run_opts(prog_fd, &topts); if (!ASSERT_OK(err, "test_run_opts err")) - goto cleanup; + return; if (!ASSERT_OK(topts.retval, "test_run_opts retval")) - goto cleanup; + return; ASSERT_EQ(skel->data->xchg64_value, 2, "xchg64_value"); ASSERT_EQ(skel->bss->xchg64_result, 1, "xchg64_result"); ASSERT_EQ(skel->data->xchg32_value, 2, "xchg32_value"); ASSERT_EQ(skel->bss->xchg32_result, 1, "xchg32_result"); - -cleanup: - close(link_fd); } void test_atomics(void) { struct atomics_lskel *skel; - __u32 duration = 0; skel = atomics_lskel__open_and_load(); - if (CHECK(!skel, "skel_load", "atomics skeleton failed\n")) + if (!ASSERT_OK_PTR(skel, "atomics skeleton load")) return; if (skel->data->skip_tests) { diff --git a/tools/testing/selftests/bpf/progs/atomics.c 
b/tools/testing/selftests/bpf/progs/atomics.c index 16e57313204a..f89c7f0cc53b 100644 --- a/tools/testing/selftests/bpf/progs/atomics.c +++ b/tools/testing/selftests/bpf/progs/atomics.c @@ -20,8 +20,8 @@ __u64 add_stack_value_copy = 0; __u64 add_stack_result = 0; __u64 add_noreturn_value = 1; -SEC("fentry/bpf_fentry_test1") -int BPF_PROG(add, int a) +SEC("raw_tp/sys_enter") +int add(const void *ctx) { if (pid != (bpf_get_current_pid_tgid() >> 32)) return 0; @@ -46,8 +46,8 @@ __s64 sub_stack_value_copy = 0; __s64 sub_stack_result = 0; __s64 sub_noreturn_value = 1; -SEC("fentry/bpf_fentry_test1") -int BPF_PROG(sub, int a) +SEC("raw_tp/sys_enter") +int sub(const void *ctx) { if (pid != (bpf_get_current_pid_tgid() >> 32)) return 0; @@ -70,8 +70,8 @@ __u32 and32_value = 0x110; __u32 and32_result = 0; __u64 and_noreturn_value = (0x110ull << 32); -SEC("fentry/bpf_fentry_test1") -int BPF_PROG(and, int a) +SEC("raw_tp/sys_enter") +int and(const void *ctx) { if (pid != (bpf_get_current_pid_tgid() >> 32)) return 0; @@ -91,8 +91,8 @@ __u32 or32_value = 0x110; __u32 or32_result = 0; __u64 or_noreturn_value = (0x110ull << 32); -SEC("fentry/bpf_fentry_test1") -int BPF_PROG(or, int a) +SEC("raw_tp/sys_enter") +int or(const void *ctx) { if (pid != (bpf_get_current_pid_tgid() >> 32)) return 0; @@ -111,8 +111,8 @@ __u32 xor32_value = 0x110; __u32 xor32_result = 0; __u64 xor_noreturn_value = (0x110ull << 32); -SEC("fentry/bpf_fentry_test1") -int BPF_PROG(xor, int a) +SEC("raw_tp/sys_enter") +int xor(const void *ctx) { if (pid != (bpf_get_current_pid_tgid() >> 32)) return 0; @@ -132,8 +132,8 @@ __u32 cmpxchg32_value = 1; __u32 cmpxchg32_result_fail = 0; __u32 cmpxchg32_result_succeed = 0; -SEC("fentry/bpf_fentry_test1") -int BPF_PROG(cmpxchg, int a) +SEC("raw_tp/sys_enter") +int cmpxchg(const void *ctx) { if (pid != (bpf_get_current_pid_tgid() >> 32)) return 0; @@ -153,8 +153,8 @@ __u64 xchg64_result = 0; __u32 xchg32_value = 1; __u32 xchg32_result = 0; -SEC("fentry/bpf_fentry_test1") -int BPF_PROG(xchg, int a) +SEC("raw_tp/sys_enter") +int xchg(const void *ctx) { if (pid != (bpf_get_current_pid_tgid() >> 32)) return 0; -- cgit v1.2.3 From bd004cad78c04d762a99127af2f8208b9579af21 Mon Sep 17 00:00:00 2001 From: Xu Kuohai Date: Tue, 1 Mar 2022 00:32:50 -0500 Subject: selftests/bpf: Update btf_dump case for conflicting names Update btf_dump case for conflicting names caused by forward declaration. 
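As background for the expected-output changes below: btf_dump emits a forward declaration as its own statement ("struct s;", "enum x;"), and when several named types collide, later ones are disambiguated with a ___N suffix (s___2, Y___2, and so on). A rough sketch of constructing such conflicting BTF, using the same libbpf calls the test itself uses (error handling omitted):

	#include <bpf/btf.h>

	struct btf *btf = btf__new_empty();
	int int_id;

	btf__add_fwd(btf, "s", BTF_FWD_STRUCT);		/* dumped as "struct s;" */
	int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
	btf__add_struct(btf, "s", 4);			/* full type, same name as the fwd */
	btf__add_field(btf, "x", int_id, 0, 0);
	/* adding yet another struct "s" later would be dumped as "struct s___2" */
	btf__free(btf);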
Signed-off-by: Xu Kuohai Signed-off-by: Daniel Borkmann Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20220301053250.1464204-3-xukuohai@huawei.com --- tools/testing/selftests/bpf/prog_tests/btf_dump.c | 54 +++++++++++++++++------ 1 file changed, 41 insertions(+), 13 deletions(-) (limited to 'tools/testing/selftests/bpf/prog_tests') diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c index 9e26903f9170..5fce7008d1ff 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -148,22 +148,38 @@ static void test_btf_dump_incremental(void) /* First, generate BTF corresponding to the following C code: * - * enum { VAL = 1 }; + * enum x; + * + * enum x { X = 1 }; + * + * enum { Y = 1 }; + * + * struct s; * * struct s { int x; }; * */ + id = btf__add_enum(btf, "x", 4); + ASSERT_EQ(id, 1, "enum_declaration_id"); + id = btf__add_enum(btf, "x", 4); + ASSERT_EQ(id, 2, "named_enum_id"); + err = btf__add_enum_value(btf, "X", 1); + ASSERT_OK(err, "named_enum_val_ok"); + id = btf__add_enum(btf, NULL, 4); - ASSERT_EQ(id, 1, "enum_id"); - err = btf__add_enum_value(btf, "VAL", 1); - ASSERT_OK(err, "enum_val_ok"); + ASSERT_EQ(id, 3, "anon_enum_id"); + err = btf__add_enum_value(btf, "Y", 1); + ASSERT_OK(err, "anon_enum_val_ok"); id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED); - ASSERT_EQ(id, 2, "int_id"); + ASSERT_EQ(id, 4, "int_id"); + + id = btf__add_fwd(btf, "s", BTF_FWD_STRUCT); + ASSERT_EQ(id, 5, "fwd_id"); id = btf__add_struct(btf, "s", 4); - ASSERT_EQ(id, 3, "struct_id"); - err = btf__add_field(btf, "x", 2, 0, 0); + ASSERT_EQ(id, 6, "struct_id"); + err = btf__add_field(btf, "x", 4, 0, 0); ASSERT_OK(err, "field_ok"); for (i = 1; i < btf__type_cnt(btf); i++) { @@ -173,11 +189,20 @@ static void test_btf_dump_incremental(void) fflush(dump_buf_file); dump_buf[dump_buf_sz] = 0; /* some libc implementations don't do this */ + ASSERT_STREQ(dump_buf, +"enum x;\n" +"\n" +"enum x {\n" +" X = 1,\n" +"};\n" +"\n" "enum {\n" -" VAL = 1,\n" +" Y = 1,\n" "};\n" "\n" +"struct s;\n" +"\n" "struct s {\n" " int x;\n" "};\n\n", "c_dump1"); @@ -199,10 +224,12 @@ static void test_btf_dump_incremental(void) fseek(dump_buf_file, 0, SEEK_SET); id = btf__add_struct(btf, "s", 4); - ASSERT_EQ(id, 4, "struct_id"); - err = btf__add_field(btf, "x", 1, 0, 0); + ASSERT_EQ(id, 7, "struct_id"); + err = btf__add_field(btf, "x", 2, 0, 0); + ASSERT_OK(err, "field_ok"); + err = btf__add_field(btf, "y", 3, 32, 0); ASSERT_OK(err, "field_ok"); - err = btf__add_field(btf, "s", 3, 32, 0); + err = btf__add_field(btf, "s", 6, 64, 0); ASSERT_OK(err, "field_ok"); for (i = 1; i < btf__type_cnt(btf); i++) { @@ -214,9 +241,10 @@ static void test_btf_dump_incremental(void) dump_buf[dump_buf_sz] = 0; /* some libc implementations don't do this */ ASSERT_STREQ(dump_buf, "struct s___2 {\n" +" enum x x;\n" " enum {\n" -" VAL___2 = 1,\n" -" } x;\n" +" Y___2 = 1,\n" +" } y;\n" " struct s s;\n" "};\n\n" , "c_dump1"); -- cgit v1.2.3 From 7df5072cc05fd1aab5823bbc465d033cd292fca8 Mon Sep 17 00:00:00 2001 From: Mykola Lysenko Date: Tue, 1 Mar 2022 14:27:45 -0800 Subject: bpf: Small BPF verifier log improvements In particular these include: 1) Remove output of inv for scalars in print_verifier_state 2) Replace inv with scalar in verifier error messages 3) Remove _value suffixes for umin/umax/s32_min/etc (except map_value) 4) Remove output of id=0 5) Remove output of ref_obj_id=0 Signed-off-by: Mykola Lysenko Signed-off-by: Daniel 
Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220301222745.1667206-1-mykolal@fb.com --- kernel/bpf/verifier.c | 64 +++--- tools/testing/selftests/bpf/prog_tests/align.c | 218 ++++++++++----------- tools/testing/selftests/bpf/prog_tests/log_buf.c | 4 +- .../selftests/bpf/verifier/atomic_invalid.c | 6 +- tools/testing/selftests/bpf/verifier/bounds.c | 4 +- tools/testing/selftests/bpf/verifier/calls.c | 6 +- tools/testing/selftests/bpf/verifier/ctx.c | 4 +- .../selftests/bpf/verifier/direct_packet_access.c | 2 +- .../selftests/bpf/verifier/helper_access_var_len.c | 6 +- tools/testing/selftests/bpf/verifier/jmp32.c | 16 +- tools/testing/selftests/bpf/verifier/precise.c | 4 +- tools/testing/selftests/bpf/verifier/raw_stack.c | 4 +- .../testing/selftests/bpf/verifier/ref_tracking.c | 6 +- .../selftests/bpf/verifier/search_pruning.c | 2 +- tools/testing/selftests/bpf/verifier/sock.c | 2 +- tools/testing/selftests/bpf/verifier/spill_fill.c | 38 ++-- tools/testing/selftests/bpf/verifier/unpriv.c | 4 +- .../selftests/bpf/verifier/value_illegal_alu.c | 4 +- .../selftests/bpf/verifier/value_ptr_arith.c | 4 +- tools/testing/selftests/bpf/verifier/var_off.c | 2 +- 20 files changed, 203 insertions(+), 197 deletions(-) (limited to 'tools/testing/selftests/bpf/prog_tests') diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index d7473fee247c..a57db4b2803c 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -539,7 +539,7 @@ static const char *reg_type_str(struct bpf_verifier_env *env, char postfix[16] = {0}, prefix[32] = {0}; static const char * const str[] = { [NOT_INIT] = "?", - [SCALAR_VALUE] = "inv", + [SCALAR_VALUE] = "scalar", [PTR_TO_CTX] = "ctx", [CONST_PTR_TO_MAP] = "map_ptr", [PTR_TO_MAP_VALUE] = "map_value", @@ -685,74 +685,80 @@ static void print_verifier_state(struct bpf_verifier_env *env, continue; verbose(env, " R%d", i); print_liveness(env, reg->live); - verbose(env, "=%s", reg_type_str(env, t)); + verbose(env, "="); if (t == SCALAR_VALUE && reg->precise) verbose(env, "P"); if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && tnum_is_const(reg->var_off)) { /* reg->off should be 0 for SCALAR_VALUE */ + verbose(env, "%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t)); verbose(env, "%lld", reg->var_off.value + reg->off); } else { + const char *sep = ""; + + verbose(env, "%s", reg_type_str(env, t)); if (base_type(t) == PTR_TO_BTF_ID || base_type(t) == PTR_TO_PERCPU_BTF_ID) verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id)); - verbose(env, "(id=%d", reg->id); - if (reg_type_may_be_refcounted_or_null(t)) - verbose(env, ",ref_obj_id=%d", reg->ref_obj_id); + verbose(env, "("); +/* + * _a stands for append, was shortened to avoid multiline statements below. + * This macro is used to output a comma separated list of attributes. + */ +#define verbose_a(fmt, ...) 
({ verbose(env, "%s" fmt, sep, __VA_ARGS__); sep = ","; }) + + if (reg->id) + verbose_a("id=%d", reg->id); + if (reg_type_may_be_refcounted_or_null(t) && reg->ref_obj_id) + verbose_a("ref_obj_id=%d", reg->ref_obj_id); if (t != SCALAR_VALUE) - verbose(env, ",off=%d", reg->off); + verbose_a("off=%d", reg->off); if (type_is_pkt_pointer(t)) - verbose(env, ",r=%d", reg->range); + verbose_a("r=%d", reg->range); else if (base_type(t) == CONST_PTR_TO_MAP || base_type(t) == PTR_TO_MAP_KEY || base_type(t) == PTR_TO_MAP_VALUE) - verbose(env, ",ks=%d,vs=%d", - reg->map_ptr->key_size, - reg->map_ptr->value_size); + verbose_a("ks=%d,vs=%d", + reg->map_ptr->key_size, + reg->map_ptr->value_size); if (tnum_is_const(reg->var_off)) { /* Typically an immediate SCALAR_VALUE, but * could be a pointer whose offset is too big * for reg->off */ - verbose(env, ",imm=%llx", reg->var_off.value); + verbose_a("imm=%llx", reg->var_off.value); } else { if (reg->smin_value != reg->umin_value && reg->smin_value != S64_MIN) - verbose(env, ",smin_value=%lld", - (long long)reg->smin_value); + verbose_a("smin=%lld", (long long)reg->smin_value); if (reg->smax_value != reg->umax_value && reg->smax_value != S64_MAX) - verbose(env, ",smax_value=%lld", - (long long)reg->smax_value); + verbose_a("smax=%lld", (long long)reg->smax_value); if (reg->umin_value != 0) - verbose(env, ",umin_value=%llu", - (unsigned long long)reg->umin_value); + verbose_a("umin=%llu", (unsigned long long)reg->umin_value); if (reg->umax_value != U64_MAX) - verbose(env, ",umax_value=%llu", - (unsigned long long)reg->umax_value); + verbose_a("umax=%llu", (unsigned long long)reg->umax_value); if (!tnum_is_unknown(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); - verbose(env, ",var_off=%s", tn_buf); + verbose_a("var_off=%s", tn_buf); } if (reg->s32_min_value != reg->smin_value && reg->s32_min_value != S32_MIN) - verbose(env, ",s32_min_value=%d", - (int)(reg->s32_min_value)); + verbose_a("s32_min=%d", (int)(reg->s32_min_value)); if (reg->s32_max_value != reg->smax_value && reg->s32_max_value != S32_MAX) - verbose(env, ",s32_max_value=%d", - (int)(reg->s32_max_value)); + verbose_a("s32_max=%d", (int)(reg->s32_max_value)); if (reg->u32_min_value != reg->umin_value && reg->u32_min_value != U32_MIN) - verbose(env, ",u32_min_value=%d", - (int)(reg->u32_min_value)); + verbose_a("u32_min=%d", (int)(reg->u32_min_value)); if (reg->u32_max_value != reg->umax_value && reg->u32_max_value != U32_MAX) - verbose(env, ",u32_max_value=%d", - (int)(reg->u32_max_value)); + verbose_a("u32_max=%d", (int)(reg->u32_max_value)); } +#undef verbose_a + verbose(env, ")"); } } @@ -777,7 +783,7 @@ static void print_verifier_state(struct bpf_verifier_env *env, if (is_spilled_reg(&state->stack[i])) { reg = &state->stack[i].spilled_ptr; t = reg->type; - verbose(env, "=%s", reg_type_str(env, t)); + verbose(env, "=%s", t == SCALAR_VALUE ? 
"" : reg_type_str(env, t)); if (t == SCALAR_VALUE && reg->precise) verbose(env, "P"); if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c index 0ee29e11eaee..970f09156eb4 100644 --- a/tools/testing/selftests/bpf/prog_tests/align.c +++ b/tools/testing/selftests/bpf/prog_tests/align.c @@ -39,13 +39,13 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - {0, "R1=ctx(id=0,off=0,imm=0)"}, + {0, "R1=ctx(off=0,imm=0)"}, {0, "R10=fp0"}, - {0, "R3_w=inv2"}, - {1, "R3_w=inv4"}, - {2, "R3_w=inv8"}, - {3, "R3_w=inv16"}, - {4, "R3_w=inv32"}, + {0, "R3_w=2"}, + {1, "R3_w=4"}, + {2, "R3_w=8"}, + {3, "R3_w=16"}, + {4, "R3_w=32"}, }, }, { @@ -67,19 +67,19 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - {0, "R1=ctx(id=0,off=0,imm=0)"}, + {0, "R1=ctx(off=0,imm=0)"}, {0, "R10=fp0"}, - {0, "R3_w=inv1"}, - {1, "R3_w=inv2"}, - {2, "R3_w=inv4"}, - {3, "R3_w=inv8"}, - {4, "R3_w=inv16"}, - {5, "R3_w=inv1"}, - {6, "R4_w=inv32"}, - {7, "R4_w=inv16"}, - {8, "R4_w=inv8"}, - {9, "R4_w=inv4"}, - {10, "R4_w=inv2"}, + {0, "R3_w=1"}, + {1, "R3_w=2"}, + {2, "R3_w=4"}, + {3, "R3_w=8"}, + {4, "R3_w=16"}, + {5, "R3_w=1"}, + {6, "R4_w=32"}, + {7, "R4_w=16"}, + {8, "R4_w=8"}, + {9, "R4_w=4"}, + {10, "R4_w=2"}, }, }, { @@ -96,14 +96,14 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - {0, "R1=ctx(id=0,off=0,imm=0)"}, + {0, "R1=ctx(off=0,imm=0)"}, {0, "R10=fp0"}, - {0, "R3_w=inv4"}, - {1, "R3_w=inv8"}, - {2, "R3_w=inv10"}, - {3, "R4_w=inv8"}, - {4, "R4_w=inv12"}, - {5, "R4_w=inv14"}, + {0, "R3_w=4"}, + {1, "R3_w=8"}, + {2, "R3_w=10"}, + {3, "R4_w=8"}, + {4, "R4_w=12"}, + {5, "R4_w=14"}, }, }, { @@ -118,12 +118,12 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - {0, "R1=ctx(id=0,off=0,imm=0)"}, + {0, "R1=ctx(off=0,imm=0)"}, {0, "R10=fp0"}, - {0, "R3_w=inv7"}, - {1, "R3_w=inv7"}, - {2, "R3_w=inv14"}, - {3, "R3_w=inv56"}, + {0, "R3_w=7"}, + {1, "R3_w=7"}, + {2, "R3_w=14"}, + {3, "R3_w=56"}, }, }, @@ -161,19 +161,19 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - {6, "R0_w=pkt(id=0,off=8,r=8,imm=0)"}, - {6, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, - {7, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"}, - {8, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, - {9, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"}, - {10, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"}, - {12, "R3_w=pkt_end(id=0,off=0,imm=0)"}, - {17, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, - {18, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"}, - {19, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"}, - {20, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"}, - {21, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, - {22, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"}, + {6, "R0_w=pkt(off=8,r=8,imm=0)"}, + {6, "R3_w=scalar(umax=255,var_off=(0x0; 0xff))"}, + {7, "R3_w=scalar(umax=510,var_off=(0x0; 0x1fe))"}, + {8, "R3_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, + {9, "R3_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"}, + {10, "R3_w=scalar(umax=4080,var_off=(0x0; 0xff0))"}, + {12, "R3_w=pkt_end(off=0,imm=0)"}, + {17, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"}, + {18, "R4_w=scalar(umax=8160,var_off=(0x0; 0x1fe0))"}, + {19, 
"R4_w=scalar(umax=4080,var_off=(0x0; 0xff0))"}, + {20, "R4_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"}, + {21, "R4_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, + {22, "R4_w=scalar(umax=510,var_off=(0x0; 0x1fe))"}, }, }, { @@ -194,16 +194,16 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - {6, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, - {7, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"}, - {8, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, - {9, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"}, - {10, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"}, - {11, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"}, - {12, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, - {13, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"}, - {14, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"}, - {15, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"}, + {6, "R3_w=scalar(umax=255,var_off=(0x0; 0xff))"}, + {7, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"}, + {8, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"}, + {9, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"}, + {10, "R4_w=scalar(umax=510,var_off=(0x0; 0x1fe))"}, + {11, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"}, + {12, "R4_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, + {13, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"}, + {14, "R4_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"}, + {15, "R4_w=scalar(umax=4080,var_off=(0x0; 0xff0))"}, }, }, { @@ -234,14 +234,14 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - {2, "R5_w=pkt(id=0,off=0,r=0,imm=0)"}, - {4, "R5_w=pkt(id=0,off=14,r=0,imm=0)"}, - {5, "R4_w=pkt(id=0,off=14,r=0,imm=0)"}, - {9, "R2=pkt(id=0,off=0,r=18,imm=0)"}, - {10, "R5=pkt(id=0,off=14,r=18,imm=0)"}, - {10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, - {13, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"}, - {14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"}, + {2, "R5_w=pkt(off=0,r=0,imm=0)"}, + {4, "R5_w=pkt(off=14,r=0,imm=0)"}, + {5, "R4_w=pkt(off=14,r=0,imm=0)"}, + {9, "R2=pkt(off=0,r=18,imm=0)"}, + {10, "R5=pkt(off=14,r=18,imm=0)"}, + {10, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"}, + {13, "R4_w=scalar(umax=65535,var_off=(0x0; 0xffff))"}, + {14, "R4_w=scalar(umax=65535,var_off=(0x0; 0xffff))"}, }, }, { @@ -296,59 +296,59 @@ static struct bpf_align_test tests[] = { /* Calculated offset in R6 has unknown value, but known * alignment of 4. */ - {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"}, - {7, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {6, "R2_w=pkt(off=0,r=8,imm=0)"}, + {7, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, /* Offset is added to packet pointer R5, resulting in * known fixed offset, and variable offset from R6. */ - {11, "R5_w=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {11, "R5_w=pkt(id=1,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, /* At the time the word size load is performed from R5, * it's total offset is NET_IP_ALIGN + reg->off (0) + * reg->aux_off (14) which is 16. Then the variable * offset is considered using reg->aux_off_align which * is 4 and meets the load's requirements. 
*/ - {15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, - {15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {15, "R4=pkt(id=1,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, + {15, "R5=pkt(id=1,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, /* Variable offset is added to R5 packet pointer, * resulting in auxiliary alignment of 4. */ - {17, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {17, "R5_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, /* Constant offset is added to R5, resulting in * reg->off of 14. */ - {18, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {18, "R5_w=pkt(id=2,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, /* At the time the word size load is performed from R5, * its total fixed offset is NET_IP_ALIGN + reg->off * (14) which is 16. Then the variable offset is 4-byte * aligned, so the total offset is 4-byte aligned and * meets the load's requirements. */ - {23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, - {23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {23, "R4=pkt(id=2,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, + {23, "R5=pkt(id=2,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"}, /* Constant offset is added to R5 packet pointer, * resulting in reg->off value of 14. */ - {25, "R5_w=pkt(id=0,off=14,r=8"}, + {25, "R5_w=pkt(off=14,r=8"}, /* Variable offset is added to R5, resulting in a * variable offset of (4n). */ - {26, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {26, "R5_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, /* Constant is added to R5 again, setting reg->off to 18. */ - {27, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {27, "R5_w=pkt(id=3,off=18,r=0,umax=1020,var_off=(0x0; 0x3fc))"}, /* And once more we add a variable; resulting var_off * is still (4n), fixed offset is not changed. * Also, we create a new reg->id. */ - {28, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"}, + {28, "R5_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"}, /* At the time the word size load is performed from R5, * its total fixed offset is NET_IP_ALIGN + reg->off (18) * which is 20. Then the variable offset is (4n), so * the total offset is 4-byte aligned and meets the * load's requirements. */ - {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, - {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, + {33, "R4=pkt(id=4,off=22,r=22,umax=2040,var_off=(0x0; 0x7fc)"}, + {33, "R5=pkt(id=4,off=18,r=22,umax=2040,var_off=(0x0; 0x7fc)"}, }, }, { @@ -386,36 +386,36 @@ static struct bpf_align_test tests[] = { /* Calculated offset in R6 has unknown value, but known * alignment of 4. 
*/ - {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"}, - {7, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {6, "R2_w=pkt(off=0,r=8,imm=0)"}, + {7, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, /* Adding 14 makes R6 be (4n+2) */ - {8, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"}, + {8, "R6_w=scalar(umin=14,umax=1034,var_off=(0x2; 0x7fc))"}, /* Packet pointer has (4n+2) offset */ - {11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"}, - {12, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"}, + {11, "R5_w=pkt(id=1,off=0,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"}, + {12, "R4=pkt(id=1,off=4,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"}, /* At the time the word size load is performed from R5, * its total fixed offset is NET_IP_ALIGN + reg->off (0) * which is 2. Then the variable offset is (4n+2), so * the total offset is 4-byte aligned and meets the * load's requirements. */ - {15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"}, + {15, "R5=pkt(id=1,off=0,r=4,umin=14,umax=1034,var_off=(0x2; 0x7fc)"}, /* Newly read value in R6 was shifted left by 2, so has * known alignment of 4. */ - {17, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {17, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, /* Added (4n) to packet pointer's (4n+2) var_off, giving * another (4n+2). */ - {19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"}, - {20, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"}, + {19, "R5_w=pkt(id=2,off=0,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"}, + {20, "R4=pkt(id=2,off=4,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"}, /* At the time the word size load is performed from R5, * its total fixed offset is NET_IP_ALIGN + reg->off (0) * which is 2. Then the variable offset is (4n+2), so * the total offset is 4-byte aligned and meets the * load's requirements. */ - {23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"}, + {23, "R5=pkt(id=2,off=0,r=4,umin=14,umax=2054,var_off=(0x2; 0xffc)"}, }, }, { @@ -448,18 +448,18 @@ static struct bpf_align_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, .result = REJECT, .matches = { - {3, "R5_w=pkt_end(id=0,off=0,imm=0)"}, + {3, "R5_w=pkt_end(off=0,imm=0)"}, /* (ptr - ptr) << 2 == unknown, (4n) */ - {5, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"}, + {5, "R5_w=scalar(smax=9223372036854775804,umax=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"}, /* (4n) + 14 == (4n+2). We blow our bounds, because * the add could overflow. 
*/ - {6, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"}, + {6, "R5_w=scalar(smin=-9223372036854775806,smax=9223372036854775806,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"}, /* Checked s>=0 */ - {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, + {9, "R5=scalar(umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, /* packet pointer + nonnegative (4n+2) */ - {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, - {12, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, + {11, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, + {12, "R4_w=pkt(id=1,off=4,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine. * We checked the bounds, but it might have been able * to overflow if the packet pointer started in the @@ -467,7 +467,7 @@ static struct bpf_align_test tests[] = { * So we did not get a 'range' on R6, and the access * attempt will fail. */ - {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, + {15, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"}, } }, { @@ -502,23 +502,23 @@ static struct bpf_align_test tests[] = { /* Calculated offset in R6 has unknown value, but known * alignment of 4. */ - {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"}, - {8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {6, "R2_w=pkt(off=0,r=8,imm=0)"}, + {8, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, /* Adding 14 makes R6 be (4n+2) */ - {9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"}, + {9, "R6_w=scalar(umin=14,umax=1034,var_off=(0x2; 0x7fc))"}, /* New unknown value in R7 is (4n) */ - {10, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {10, "R7_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"}, /* Subtracting it from R6 blows our unsigned bounds */ - {11, "R6=inv(id=0,smin_value=-1006,smax_value=1034,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"}, + {11, "R6=scalar(smin=-1006,smax=1034,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"}, /* Checked s>= 0 */ - {14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"}, + {14, "R6=scalar(umin=2,umax=1034,var_off=(0x2; 0x7fc))"}, /* At the time the word size load is performed from R5, * its total fixed offset is NET_IP_ALIGN + reg->off (0) * which is 2. Then the variable offset is (4n+2), so * the total offset is 4-byte aligned and meets the * load's requirements. */ - {20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc)"}, + {20, "R5=pkt(id=2,off=0,r=4,umin=2,umax=1034,var_off=(0x2; 0x7fc)"}, }, }, @@ -556,23 +556,23 @@ static struct bpf_align_test tests[] = { /* Calculated offset in R6 has unknown value, but known * alignment of 4. 
*/ - {6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"}, - {9, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"}, + {6, "R2_w=pkt(off=0,r=8,imm=0)"}, + {9, "R6_w=scalar(umax=60,var_off=(0x0; 0x3c))"}, /* Adding 14 makes R6 be (4n+2) */ - {10, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"}, + {10, "R6_w=scalar(umin=14,umax=74,var_off=(0x2; 0x7c))"}, /* Subtracting from packet pointer overflows ubounds */ - {13, "R5_w=pkt(id=2,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"}, + {13, "R5_w=pkt(id=2,off=0,r=8,umin=18446744073709551542,umax=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"}, /* New unknown value in R7 is (4n), >= 76 */ - {14, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"}, + {14, "R7_w=scalar(umin=76,umax=1096,var_off=(0x0; 0x7fc))"}, /* Adding it to packet pointer gives nice bounds again */ - {16, "R5_w=pkt(id=3,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"}, + {16, "R5_w=pkt(id=3,off=0,r=0,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"}, /* At the time the word size load is performed from R5, * its total fixed offset is NET_IP_ALIGN + reg->off (0) * which is 2. Then the variable offset is (4n+2), so * the total offset is 4-byte aligned and meets the * load's requirements. */ - {20, "R5=pkt(id=3,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"}, + {20, "R5=pkt(id=3,off=0,r=4,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"}, }, }, }; @@ -648,8 +648,8 @@ static int do_test_single(struct bpf_align_test *test) /* Check the next line as well in case the previous line * did not have a corresponding bpf insn. Example: * func#0 @0 - * 0: R1=ctx(id=0,off=0,imm=0) R10=fp0 - * 0: (b7) r3 = 2 ; R3_w=inv2 + * 0: R1=ctx(off=0,imm=0) R10=fp0 + * 0: (b7) r3 = 2 ; R3_w=2 */ if (!strstr(line_ptr, m.match)) { cur_line = -1; diff --git a/tools/testing/selftests/bpf/prog_tests/log_buf.c b/tools/testing/selftests/bpf/prog_tests/log_buf.c index 1ef377a7e731..fe9a23e65ef4 100644 --- a/tools/testing/selftests/bpf/prog_tests/log_buf.c +++ b/tools/testing/selftests/bpf/prog_tests/log_buf.c @@ -78,7 +78,7 @@ static void obj_load_log_buf(void) ASSERT_OK_PTR(strstr(libbpf_log_buf, "prog 'bad_prog': BPF program load failed"), "libbpf_log_not_empty"); ASSERT_OK_PTR(strstr(obj_log_buf, "DATASEC license"), "obj_log_not_empty"); - ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx(id=0,off=0,imm=0) R10=fp0"), + ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"), "good_log_verbose"); ASSERT_OK_PTR(strstr(bad_log_buf, "invalid access to map value, value_size=16 off=16000 size=4"), "bad_log_not_empty"); @@ -175,7 +175,7 @@ static void bpf_prog_load_log_buf(void) opts.log_level = 2; fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL", good_prog_insns, good_prog_insn_cnt, &opts); - ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx(id=0,off=0,imm=0) R10=fp0"), "good_log_2"); + ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"), "good_log_2"); ASSERT_GE(fd, 0, "good_fd2"); if (fd >= 0) close(fd); diff --git a/tools/testing/selftests/bpf/verifier/atomic_invalid.c b/tools/testing/selftests/bpf/verifier/atomic_invalid.c index 39272720b2f6..25f4ac1c69ab 100644 --- a/tools/testing/selftests/bpf/verifier/atomic_invalid.c +++ b/tools/testing/selftests/bpf/verifier/atomic_invalid.c @@ -1,6 +1,6 @@ -#define __INVALID_ATOMIC_ACCESS_TEST(op) \ +#define __INVALID_ATOMIC_ACCESS_TEST(op) \ { \ - "atomic " #op " access through non-pointer ", \ + "atomic " #op " access 
through non-pointer ", \ .insns = { \ BPF_MOV64_IMM(BPF_REG_0, 1), \ BPF_MOV64_IMM(BPF_REG_1, 0), \ @@ -9,7 +9,7 @@ BPF_EXIT_INSN(), \ }, \ .result = REJECT, \ - .errstr = "R1 invalid mem access 'inv'" \ + .errstr = "R1 invalid mem access 'scalar'" \ } __INVALID_ATOMIC_ACCESS_TEST(BPF_ADD), __INVALID_ATOMIC_ACCESS_TEST(BPF_ADD | BPF_FETCH), diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c index e061e8799ce2..33125d5f6772 100644 --- a/tools/testing/selftests/bpf/verifier/bounds.c +++ b/tools/testing/selftests/bpf/verifier/bounds.c @@ -508,7 +508,7 @@ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'inv'", + .errstr_unpriv = "R0 invalid mem access 'scalar'", .result_unpriv = REJECT, .result = ACCEPT }, @@ -530,7 +530,7 @@ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'inv'", + .errstr_unpriv = "R0 invalid mem access 'scalar'", .result_unpriv = REJECT, .result = ACCEPT }, diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c index 0a8ea60c2a80..f890333259ad 100644 --- a/tools/testing/selftests/bpf/verifier/calls.c +++ b/tools/testing/selftests/bpf/verifier/calls.c @@ -188,7 +188,7 @@ }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .result = REJECT, - .errstr = "R0 invalid mem access 'inv'", + .errstr = "R0 invalid mem access 'scalar'", }, { "calls: multiple ret types in subprog 2", @@ -491,7 +491,7 @@ BPF_EXIT_INSN(), }, .result = REJECT, - .errstr = "R6 invalid mem access 'inv'", + .errstr = "R6 invalid mem access 'scalar'", .prog_type = BPF_PROG_TYPE_XDP, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, @@ -1697,7 +1697,7 @@ .prog_type = BPF_PROG_TYPE_SCHED_CLS, .fixup_map_hash_8b = { 12, 22 }, .result = REJECT, - .errstr = "R0 invalid mem access 'inv'", + .errstr = "R0 invalid mem access 'scalar'", }, { "calls: pkt_ptr spill into caller stack", diff --git a/tools/testing/selftests/bpf/verifier/ctx.c b/tools/testing/selftests/bpf/verifier/ctx.c index 23080862aafd..60f6fbe03f19 100644 --- a/tools/testing/selftests/bpf/verifier/ctx.c +++ b/tools/testing/selftests/bpf/verifier/ctx.c @@ -127,7 +127,7 @@ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR, .expected_attach_type = BPF_CGROUP_UDP6_SENDMSG, .result = REJECT, - .errstr = "R1 type=inv expected=ctx", + .errstr = "R1 type=scalar expected=ctx", }, { "pass ctx or null check, 4: ctx - const", @@ -193,5 +193,5 @@ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, .expected_attach_type = BPF_CGROUP_INET4_POST_BIND, .result = REJECT, - .errstr = "R1 type=inv expected=ctx", + .errstr = "R1 type=scalar expected=ctx", }, diff --git a/tools/testing/selftests/bpf/verifier/direct_packet_access.c b/tools/testing/selftests/bpf/verifier/direct_packet_access.c index ac1e19d0f520..11acd1855acf 100644 --- a/tools/testing/selftests/bpf/verifier/direct_packet_access.c +++ b/tools/testing/selftests/bpf/verifier/direct_packet_access.c @@ -339,7 +339,7 @@ BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr = "R2 invalid mem access 'inv'", + .errstr = "R2 invalid mem access 'scalar'", .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, diff --git a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c index 0ab7f1dfc97a..a6c869a7319c 100644 --- a/tools/testing/selftests/bpf/verifier/helper_access_var_len.c +++ 
b/tools/testing/selftests/bpf/verifier/helper_access_var_len.c @@ -350,7 +350,7 @@ BPF_EMIT_CALL(BPF_FUNC_csum_diff), BPF_EXIT_INSN(), }, - .errstr = "R1 type=inv expected=fp", + .errstr = "R1 type=scalar expected=fp", .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, @@ -471,7 +471,7 @@ BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), BPF_EXIT_INSN(), }, - .errstr = "R1 type=inv expected=fp", + .errstr = "R1 type=scalar expected=fp", .result = REJECT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, @@ -484,7 +484,7 @@ BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel), BPF_EXIT_INSN(), }, - .errstr = "R1 type=inv expected=fp", + .errstr = "R1 type=scalar expected=fp", .result = REJECT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, diff --git a/tools/testing/selftests/bpf/verifier/jmp32.c b/tools/testing/selftests/bpf/verifier/jmp32.c index 1c857b2fbdf0..6ddc418fdfaf 100644 --- a/tools/testing/selftests/bpf/verifier/jmp32.c +++ b/tools/testing/selftests/bpf/verifier/jmp32.c @@ -286,7 +286,7 @@ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'inv'", + .errstr_unpriv = "R0 invalid mem access 'scalar'", .result_unpriv = REJECT, .result = ACCEPT, .retval = 2, @@ -356,7 +356,7 @@ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'inv'", + .errstr_unpriv = "R0 invalid mem access 'scalar'", .result_unpriv = REJECT, .result = ACCEPT, .retval = 2, @@ -426,7 +426,7 @@ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'inv'", + .errstr_unpriv = "R0 invalid mem access 'scalar'", .result_unpriv = REJECT, .result = ACCEPT, .retval = 2, @@ -496,7 +496,7 @@ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'inv'", + .errstr_unpriv = "R0 invalid mem access 'scalar'", .result_unpriv = REJECT, .result = ACCEPT, .retval = 2, @@ -566,7 +566,7 @@ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'inv'", + .errstr_unpriv = "R0 invalid mem access 'scalar'", .result_unpriv = REJECT, .result = ACCEPT, .retval = 2, @@ -636,7 +636,7 @@ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'inv'", + .errstr_unpriv = "R0 invalid mem access 'scalar'", .result_unpriv = REJECT, .result = ACCEPT, .retval = 2, @@ -706,7 +706,7 @@ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'inv'", + .errstr_unpriv = "R0 invalid mem access 'scalar'", .result_unpriv = REJECT, .result = ACCEPT, .retval = 2, @@ -776,7 +776,7 @@ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R0 invalid mem access 'inv'", + .errstr_unpriv = "R0 invalid mem access 'scalar'", .result_unpriv = REJECT, .result = ACCEPT, .retval = 2, diff --git a/tools/testing/selftests/bpf/verifier/precise.c b/tools/testing/selftests/bpf/verifier/precise.c index 6dc8003ffc70..9e754423fa8b 100644 --- a/tools/testing/selftests/bpf/verifier/precise.c +++ b/tools/testing/selftests/bpf/verifier/precise.c @@ -27,7 +27,7 @@ BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 8, 1), BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=inv(umin=1, umax=8) */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=scalar(umin=1, umax=8) */ BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), BPF_MOV64_IMM(BPF_REG_3, 0), @@ -87,7 +87,7 @@ BPF_JMP_IMM(BPF_JLT, 
BPF_REG_2, 8, 1), BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=inv(umin=1, umax=8) */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=scalar(umin=1, umax=8) */ BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), BPF_MOV64_IMM(BPF_REG_3, 0), diff --git a/tools/testing/selftests/bpf/verifier/raw_stack.c b/tools/testing/selftests/bpf/verifier/raw_stack.c index cc8e8c3cdc03..eb5ed936580b 100644 --- a/tools/testing/selftests/bpf/verifier/raw_stack.c +++ b/tools/testing/selftests/bpf/verifier/raw_stack.c @@ -132,7 +132,7 @@ BPF_EXIT_INSN(), }, .result = REJECT, - .errstr = "R0 invalid mem access 'inv'", + .errstr = "R0 invalid mem access 'scalar'", .prog_type = BPF_PROG_TYPE_SCHED_CLS, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, @@ -162,7 +162,7 @@ BPF_EXIT_INSN(), }, .result = REJECT, - .errstr = "R3 invalid mem access 'inv'", + .errstr = "R3 invalid mem access 'scalar'", .prog_type = BPF_PROG_TYPE_SCHED_CLS, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c index 3b6ee009c00b..fbd682520e47 100644 --- a/tools/testing/selftests/bpf/verifier/ref_tracking.c +++ b/tools/testing/selftests/bpf/verifier/ref_tracking.c @@ -162,7 +162,7 @@ BPF_EXIT_INSN(), }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "type=inv expected=sock", + .errstr = "type=scalar expected=sock", .result = REJECT, }, { @@ -178,7 +178,7 @@ BPF_EXIT_INSN(), }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "type=inv expected=sock", + .errstr = "type=scalar expected=sock", .result = REJECT, }, { @@ -274,7 +274,7 @@ BPF_EXIT_INSN(), }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, - .errstr = "type=inv expected=sock", + .errstr = "type=scalar expected=sock", .result = REJECT, }, { diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c index 682519769fe3..68b14fdfebdb 100644 --- a/tools/testing/selftests/bpf/verifier/search_pruning.c +++ b/tools/testing/selftests/bpf/verifier/search_pruning.c @@ -104,7 +104,7 @@ BPF_EXIT_INSN(), }, .fixup_map_hash_8b = { 3 }, - .errstr = "R6 invalid mem access 'inv'", + .errstr = "R6 invalid mem access 'scalar'", .result = REJECT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c index 8c224eac93df..86b24cad27a7 100644 --- a/tools/testing/selftests/bpf/verifier/sock.c +++ b/tools/testing/selftests/bpf/verifier/sock.c @@ -502,7 +502,7 @@ .fixup_sk_storage_map = { 11 }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .result = REJECT, - .errstr = "R3 type=inv expected=fp", + .errstr = "R3 type=scalar expected=fp", }, { "sk_storage_get(map, skb->sk, &stack_value, 1): stack_value", diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c index 8cfc5349d2a8..e23f07175e1b 100644 --- a/tools/testing/selftests/bpf/verifier/spill_fill.c +++ b/tools/testing/selftests/bpf/verifier/spill_fill.c @@ -102,7 +102,7 @@ BPF_EXIT_INSN(), }, .errstr_unpriv = "attempt to corrupt spilled", - .errstr = "R0 invalid mem access 'inv", + .errstr = "R0 invalid mem access 'scalar'", .result = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, @@ -147,11 +147,11 @@ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8), /* r0 = r2 */ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv20 */ + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */ 
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), - /* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=inv20 */ + /* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - /* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=inv20 */ + /* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=20 */ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), @@ -190,11 +190,11 @@ BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8), /* r0 = r2 */ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */ + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), - /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */ + /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */ + /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), @@ -222,11 +222,11 @@ BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8), /* r0 = r2 */ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */ + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), - /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */ + /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */ + /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), @@ -250,11 +250,11 @@ BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -6), /* r0 = r2 */ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */ + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), - /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */ + /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */ + /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), @@ -280,11 +280,11 @@ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4), /* r0 = r2 */ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), - /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=U32_MAX */ + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=U32_MAX */ BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), - /* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=inv */ + /* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), - /* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=inv */ + /* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), @@ -305,13 +305,13 @@ BPF_JMP_IMM(BPF_JLE, BPF_REG_4, 40, 2), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), - /* *(u32 *)(r10 -8) = r4 R4=inv,umax=40 */ + /* *(u32 *)(r10 -8) = r4 R4=umax=40 */ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8), /* r4 = (*u32 *)(r10 - 8) */ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, 
-8), - /* r2 += r4 R2=pkt R4=inv,umax=40 */ + /* r2 += r4 R2=pkt R4=umax=40 */ BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_4), - /* r0 = r2 R2=pkt,umax=40 R4=inv,umax=40 */ + /* r0 = r2 R2=pkt,umax=40 R4=umax=40 */ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), /* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 20), diff --git a/tools/testing/selftests/bpf/verifier/unpriv.c b/tools/testing/selftests/bpf/verifier/unpriv.c index 111801aea5e3..878ca26c3f0a 100644 --- a/tools/testing/selftests/bpf/verifier/unpriv.c +++ b/tools/testing/selftests/bpf/verifier/unpriv.c @@ -214,7 +214,7 @@ BPF_EXIT_INSN(), }, .result = REJECT, - .errstr = "R1 type=inv expected=ctx", + .errstr = "R1 type=scalar expected=ctx", .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { @@ -420,7 +420,7 @@ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R7 invalid mem access 'inv'", + .errstr_unpriv = "R7 invalid mem access 'scalar'", .result_unpriv = REJECT, .result = ACCEPT, .retval = 0, diff --git a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c index 489062867218..d6f29eb4bd57 100644 --- a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c +++ b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c @@ -64,7 +64,7 @@ }, .fixup_map_hash_48b = { 3 }, .errstr_unpriv = "R0 pointer arithmetic prohibited", - .errstr = "invalid mem access 'inv'", + .errstr = "invalid mem access 'scalar'", .result = REJECT, .result_unpriv = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, @@ -89,7 +89,7 @@ }, .fixup_map_hash_48b = { 3 }, .errstr_unpriv = "leaking pointer from stack off -8", - .errstr = "R0 invalid mem access 'inv'", + .errstr = "R0 invalid mem access 'scalar'", .result = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c index 359f3e8f8b60..249187d3c530 100644 --- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c +++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c @@ -397,7 +397,7 @@ .fixup_map_array_48b = { 1 }, .result = ACCEPT, .result_unpriv = REJECT, - .errstr_unpriv = "R0 invalid mem access 'inv'", + .errstr_unpriv = "R0 invalid mem access 'scalar'", .retval = 0, }, { @@ -1074,7 +1074,7 @@ }, .fixup_map_array_48b = { 3 }, .result = REJECT, - .errstr = "R0 invalid mem access 'inv'", + .errstr = "R0 invalid mem access 'scalar'", .errstr_unpriv = "R0 pointer -= pointer prohibited", }, { diff --git a/tools/testing/selftests/bpf/verifier/var_off.c b/tools/testing/selftests/bpf/verifier/var_off.c index eab1f7f56e2f..187c6f6e32bc 100644 --- a/tools/testing/selftests/bpf/verifier/var_off.c +++ b/tools/testing/selftests/bpf/verifier/var_off.c @@ -131,7 +131,7 @@ * write might have overwritten the spilled pointer (i.e. we lose track * of the spilled register when we analyze the write). */ - .errstr = "R2 invalid mem access 'inv'", + .errstr = "R2 invalid mem access 'scalar'", .result = REJECT, }, { -- cgit v1.2.3
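The verbose_a() helper added in kernel/bpf/verifier.c above is an instance of a general separator-swapping idiom for printing a comma-separated attribute list without leading or trailing commas. A standalone userspace sketch of the same idiom (it relies on the GNU statement-expression extension, just as the kernel macro does):

	#include <stdio.h>

	/* print "<sep><attr>", then switch the separator to "," after the first use */
	#define verbose_a(fmt, ...) ({ printf("%s" fmt, sep, __VA_ARGS__); sep = ","; })

	int main(void)
	{
		const char *sep = "";
		int id = 1, umax = 255;

		printf("scalar(");
		if (id)
			verbose_a("id=%d", id);		/* skipped entirely when id == 0 */
		if (umax)
			verbose_a("umax=%d", umax);
		printf(")\n");				/* prints: scalar(id=1,umax=255) */
		return 0;
	}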