From 63fe3fd393dc4e7ea3948e79947362ffbb0fd616 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Mon, 10 Aug 2020 20:08:52 -0700 Subject: libbpf: Do not use __builtin_offsetof for offsetof Commit 5fbc220862fc ("tools/libpf: Add offsetof/container_of macro in bpf_helpers.h") added a macro offsetof() to get the offset of a structure member: #define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER) In certain use cases, the size_t type may not be available, so commit da7a35062bcc ("libbpf bpf_helpers: Use __builtin_offsetof for offsetof") changed it to use __builtin_offsetof, which removed the dependency on size_t; that change was my suggestion. But using __builtin_offsetof prevents CO-RE relocation generation when, e.g., TYPE is annotated with "preserve_access_index", where a relocation is desirable in case the member offset changes in a different kernel version. So this patch reverts to the original macro, but uses "unsigned long" instead of "size_t". Fixes: da7a35062bcc ("libbpf bpf_helpers: Use __builtin_offsetof for offsetof") Signed-off-by: Yonghong Song Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Acked-by: Ian Rogers Link: https://lore.kernel.org/bpf/20200811030852.3396929-1-yhs@fb.com --- tools/lib/bpf/bpf_helpers.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h index bc14db706b88..e9a4ecddb7a5 100644 --- a/tools/lib/bpf/bpf_helpers.h +++ b/tools/lib/bpf/bpf_helpers.h @@ -40,7 +40,7 @@ * Helper macro to manipulate data structures */ #ifndef offsetof -#define offsetof(TYPE, MEMBER) __builtin_offsetof(TYPE, MEMBER) +#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER) #endif #ifndef container_of #define container_of(ptr, type, member) \ -- cgit v1.2.3 From 0390c429dbed4068bd2cd8dded937d9a5ec24cd2 Mon Sep 17 00:00:00 2001 From: Jianlin Lv Date: Mon, 10 Aug 2020 23:39:40 +0800 Subject: selftests/bpf: Fix segmentation fault in test_progs test_progs reports a segmentation fault as below: $ sudo ./test_progs -t mmap --verbose test_mmap:PASS:skel_open_and_load 0 nsec [...] test_mmap:PASS:adv_mmap1 0 nsec test_mmap:PASS:adv_mmap2 0 nsec test_mmap:PASS:adv_mmap3 0 nsec test_mmap:PASS:adv_mmap4 0 nsec Segmentation fault This issue was triggered because mmap() and munmap() used inconsistent length parameters; mmap() creates a new mapping of 3 * page_size, but the length parameter set in the subsequent re-map and munmap() calls is 4 * page_size; this corrupts the process address space. To fix this issue, first create a 4-page anonymous mapping, then do all the mmap() calls with MAP_FIXED. Another issue is that when unmapping the second page fails, the length parameter used to delete the tmp1 mappings should be 4 * page_size.
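For illustration, here is a minimal user-space sketch of the reserve-then-overlay pattern the fix uses (the helper name and error handling are hypothetical, not taken from the test; the actual test changes follow below):

  #include <sys/mman.h>

  /* Reserve a fixed 4-page window with an anonymous mapping, then overlay
   * the first 3 pages from fd with MAP_FIXED, so that every later munmap()
   * and MAP_FIXED re-mmap() can safely use the same 4 * page_size math.
   */
  static void *reserve_and_map(int fd, long page_size)
  {
          void *base = mmap(NULL, 4 * page_size, PROT_READ,
                            MAP_SHARED | MAP_ANONYMOUS, -1, 0);

          if (base == MAP_FAILED)
                  return NULL;
          if (mmap(base, 3 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
                   fd, 0) == MAP_FAILED) {
                  munmap(base, 4 * page_size);
                  return NULL;
          }
          return base;
  }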
Signed-off-by: Jianlin Lv Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200810153940.125508-1-Jianlin.Lv@arm.com --- tools/testing/selftests/bpf/prog_tests/mmap.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/prog_tests/mmap.c b/tools/testing/selftests/bpf/prog_tests/mmap.c index 43d0b5578f46..9c3c5c0f068f 100644 --- a/tools/testing/selftests/bpf/prog_tests/mmap.c +++ b/tools/testing/selftests/bpf/prog_tests/mmap.c @@ -21,7 +21,7 @@ void test_mmap(void) const long page_size = sysconf(_SC_PAGE_SIZE); int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd, rdmap_fd; struct bpf_map *data_map, *bss_map; - void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2; + void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp0, *tmp1, *tmp2; struct test_mmap__bss *bss_data; struct bpf_map_info map_info; __u32 map_info_sz = sizeof(map_info); @@ -183,16 +183,23 @@ void test_mmap(void) /* check some more advanced mmap() manipulations */ + tmp0 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, + -1, 0); + if (CHECK(tmp0 == MAP_FAILED, "adv_mmap0", "errno %d\n", errno)) + goto cleanup; + /* map all but last page: pages 1-3 mapped */ - tmp1 = mmap(NULL, 3 * page_size, PROT_READ, MAP_SHARED, + tmp1 = mmap(tmp0, 3 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED, data_map_fd, 0); - if (CHECK(tmp1 == MAP_FAILED, "adv_mmap1", "errno %d\n", errno)) + if (CHECK(tmp0 != tmp1, "adv_mmap1", "tmp0: %p, tmp1: %p\n", tmp0, tmp1)) { + munmap(tmp0, 4 * page_size); goto cleanup; + } /* unmap second page: pages 1, 3 mapped */ err = munmap(tmp1 + page_size, page_size); if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) { - munmap(tmp1, map_sz); + munmap(tmp1, 4 * page_size); goto cleanup; } @@ -201,7 +208,7 @@ void test_mmap(void) MAP_SHARED | MAP_FIXED, data_map_fd, 0); if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) { munmap(tmp1, page_size); - munmap(tmp1 + 2*page_size, page_size); + munmap(tmp1 + 2*page_size, 2 * page_size); goto cleanup; } CHECK(tmp1 + page_size != tmp2, "adv_mmap4", @@ -211,7 +218,7 @@ void test_mmap(void) tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED, data_map_fd, 0); if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) { - munmap(tmp1, 3 * page_size); /* unmap page 1 */ + munmap(tmp1, 4 * page_size); /* unmap page 1 */ goto cleanup; } CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2); -- cgit v1.2.3 From da7bdfdd23b858e6d97a1e4b461548e23d16977f Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Fri, 7 Aug 2020 15:38:46 -0700 Subject: selftests/bpf: Fix v4_to_v6 in sk_lookup I'm getting some garbage in bytes 8 and 9 when doing conversion from sockaddr_in to sockaddr_in6 (leftover from AF_INET?). Let's explicitly clear the higher bytes. 
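For context, a from-scratch conversion that cannot leak stale AF_INET bytes could look like the sketch below (hypothetical helper, assuming <string.h> and <netinet/in.h>; the actual fix simply adds one memset() to the existing in-place helper, as the diff that follows shows):

  static void v4_to_v6_sketch(struct sockaddr_storage *ss)
  {
          struct sockaddr_in v4 = *(struct sockaddr_in *)ss;
          struct sockaddr_in6 *v6 = (struct sockaddr_in6 *)ss;

          memset(v6, 0, sizeof(*v6));   /* no leftover AF_INET bytes */
          v6->sin6_family = AF_INET6;
          v6->sin6_port = v4.sin_port;
          /* IPv4-mapped IPv6 address ::ffff:a.b.c.d */
          v6->sin6_addr.s6_addr[10] = 0xff;
          v6->sin6_addr.s6_addr[11] = 0xff;
          memcpy(&v6->sin6_addr.s6_addr[12], &v4.sin_addr.s_addr, 4);
  }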
Fixes: 0ab5539f8584 ("selftests/bpf: Tests for BPF_SK_LOOKUP attach point") Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann Reviewed-by: Jakub Sitnicki Link: https://lore.kernel.org/bpf/20200807223846.4190917-1-sdf@google.com --- tools/testing/selftests/bpf/prog_tests/sk_lookup.c | 1 + 1 file changed, 1 insertion(+) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c index c571584c00f5..9ff0412e1fd3 100644 --- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c +++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c @@ -309,6 +309,7 @@ static void v4_to_v6(struct sockaddr_storage *ss) v6->sin6_addr.s6_addr[10] = 0xff; v6->sin6_addr.s6_addr[11] = 0xff; memcpy(&v6->sin6_addr.s6_addr[12], &v4.sin_addr.s_addr, 4); + memset(&v6->sin6_addr.s6_addr[0], 0, 10); } static int udp_recv_send(int server_fd) -- cgit v1.2.3 From 8faf7fc597d59b142af41ddd4a2d59485f75f88a Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 11 Aug 2020 19:59:07 -0700 Subject: tools/bpftool: Make skeleton code C++17-friendly by dropping typeof() Seems like C++17 standard mode doesn't recognize typeof() anymore. This can be tested by compiling test_cpp test with -std=c++17 or -std=c++1z options. The use of typeof in skeleton generated code is unnecessary, all types are well-known at the time of code generation, so remove all typeof()'s to make skeleton code more future-proof when interacting with C++ compilers. Fixes: 985ead416df3 ("bpftool: Add skeleton codegen command") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20200812025907.1371956-1-andriin@fb.com --- tools/bpf/bpftool/gen.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'tools') diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c index 8a4c2b3b0cd6..db80e836816e 100644 --- a/tools/bpf/bpftool/gen.c +++ b/tools/bpf/bpftool/gen.c @@ -397,7 +397,7 @@ static int do_skeleton(int argc, char **argv) { \n\ struct %1$s *obj; \n\ \n\ - obj = (typeof(obj))calloc(1, sizeof(*obj)); \n\ + obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\ if (!obj) \n\ return NULL; \n\ if (%1$s__create_skeleton(obj)) \n\ @@ -461,7 +461,7 @@ static int do_skeleton(int argc, char **argv) { \n\ struct bpf_object_skeleton *s; \n\ \n\ - s = (typeof(s))calloc(1, sizeof(*s)); \n\ + s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\ if (!s) \n\ return -1; \n\ obj->skeleton = s; \n\ @@ -479,7 +479,7 @@ static int do_skeleton(int argc, char **argv) /* maps */ \n\ s->map_cnt = %zu; \n\ s->map_skel_sz = sizeof(*s->maps); \n\ - s->maps = (typeof(s->maps))calloc(s->map_cnt, s->map_skel_sz);\n\ + s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\ if (!s->maps) \n\ goto err; \n\ ", @@ -515,7 +515,7 @@ static int do_skeleton(int argc, char **argv) /* programs */ \n\ s->prog_cnt = %zu; \n\ s->prog_skel_sz = sizeof(*s->progs); \n\ - s->progs = (typeof(s->progs))calloc(s->prog_cnt, s->prog_skel_sz);\n\ + s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\ if (!s->progs) \n\ goto err; \n\ ", -- cgit v1.2.3 From 702eddc77a905782083b14ccd05b23840675fd18 Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Wed, 12 Aug 2020 16:39:10 +0200 Subject: libbpf: Handle GCC built-in types for Arm NEON When building Arm NEON (SIMD) code from lib/raid6/neon.uc, GCC emits DWARF information using a base type "__Poly8_t", which is internal to GCC and 
not recognized by Clang. This causes build failures when building with Clang a vmlinux.h generated from an arm64 kernel that was built with GCC. vmlinux.h:47284:9: error: unknown type name '__Poly8_t' typedef __Poly8_t poly8x16_t[16]; ^~~~~~~~~ The polyX_t types are defined as unsigned integers in the "Arm C Language Extension" document (101028_Q220_00_en). Emit typedefs based on standard integer types for the GCC internal types, similar to those emitted by Clang. Including linux/kernel.h to use ARRAY_SIZE() incidentally redefined max(), causing a build bug due to different types, hence the seemingly unrelated change. Reported-by: Jakov Petrina Signed-off-by: Jean-Philippe Brucker Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200812143909.3293280-1-jean-philippe@linaro.org --- tools/lib/bpf/btf_dump.c | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index cf711168d34a..ac81f3f8957a 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "btf.h" #include "hashmap.h" #include "libbpf.h" @@ -549,6 +550,9 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr) } } +static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id, + const struct btf_type *t); + static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id, const struct btf_type *t); static void btf_dump_emit_struct_def(struct btf_dump *d, __u32 id, @@ -671,6 +675,9 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id) switch (kind) { case BTF_KIND_INT: + /* Emit type alias definitions if necessary */ + btf_dump_emit_missing_aliases(d, id, t); + tstate->emit_state = EMITTED; break; case BTF_KIND_ENUM: @@ -870,7 +877,7 @@ static void btf_dump_emit_struct_def(struct btf_dump *d, btf_dump_printf(d, ": %d", m_sz); off = m_off + m_sz; } else { - m_sz = max(0, btf__resolve_size(d->btf, m->type)); + m_sz = max(0LL, btf__resolve_size(d->btf, m->type)); off = m_off + m_sz * 8; } btf_dump_printf(d, ";"); @@ -890,6 +897,32 @@ static void btf_dump_emit_struct_def(struct btf_dump *d, btf_dump_printf(d, " __attribute__((packed))"); } +static const char *missing_base_types[][2] = { + /* + * GCC emits typedefs to its internal __PolyX_t types when compiling Arm + * SIMD intrinsics. Alias them to standard base types. 
+ */ + { "__Poly8_t", "unsigned char" }, + { "__Poly16_t", "unsigned short" }, + { "__Poly64_t", "unsigned long long" }, + { "__Poly128_t", "unsigned __int128" }, +}; + +static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id, + const struct btf_type *t) +{ + const char *name = btf_dump_type_name(d, id); + int i; + + for (i = 0; i < ARRAY_SIZE(missing_base_types); i++) { + if (strcmp(name, missing_base_types[i][0]) == 0) { + btf_dump_printf(d, "typedef %s %s;\n\n", + missing_base_types[i][1], name); + break; + } + } +} + static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id, const struct btf_type *t) { -- cgit v1.2.3 From 23ab656be263813acc3c20623757d3cd1496d9e1 Mon Sep 17 00:00:00 2001 From: Toke Høiland-Jørgensen Date: Thu, 13 Aug 2020 16:29:05 +0200 Subject: libbpf: Prevent overriding errno when logging errors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Turns out there were a few more instances where libbpf didn't save the errno before writing an error message, causing errno to be overridden by the printf() return and the error disappearing if logging is enabled. Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200813142905.160381-1-toke@redhat.com --- tools/lib/bpf/libbpf.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'tools') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 0a06124f7999..0d48c18d5030 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -3478,10 +3478,11 @@ bpf_object__probe_global_data(struct bpf_object *obj) map = bpf_create_map_xattr(&map_attr); if (map < 0) { - cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); + ret = -errno; + cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n", - __func__, cp, errno); - return -errno; + __func__, cp, -ret); + return ret; } insns[0].imm = map; @@ -6012,9 +6013,10 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path, } if (bpf_obj_pin(prog->instances.fds[instance], path)) { - cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); + err = -errno; + cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); pr_warn("failed to pin program: %s\n", cp); - return -errno; + return err; } pr_debug("pinned program '%s'\n", path); -- cgit v1.2.3 From 86ed4be68fdee23df4843a59f91c1ac7fc05e860 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 11 Aug 2020 15:05:14 -0700 Subject: bpf, selftests: Add tests for ctx access in sock_ops with single register To verify fix ("bpf: sock_ops ctx access may stomp registers in corner case") we want to force compiler to generate the following code when accessing a field with BPF_TCP_SOCK_GET_COMMON, r1 = *(u32 *)(r1 + 96) // r1 is skops ptr Rather than depend on clang to do this we add the test with inline asm to the tcpbpf test. This saves us from having to create another runner and ensures that if we break this again test_tcpbpf will crash. With above code we get the xlated code, 11: (7b) *(u64 *)(r1 +32) = r9 12: (61) r9 = *(u32 *)(r1 +28) 13: (15) if r9 == 0x0 goto pc+4 14: (79) r9 = *(u64 *)(r1 +32) 15: (79) r1 = *(u64 *)(r1 +0) 16: (61) r1 = *(u32 *)(r1 +2348) 17: (05) goto pc+1 18: (79) r9 = *(u64 *)(r1 +32) We also add the normal case where src_reg != dst_reg so we can compare code generation easily from llvm-objdump and ensure that case continues to work correctly. 
The normal code is xlated to, 20: (b7) r1 = 0 21: (61) r1 = *(u32 *)(r3 +28) 22: (15) if r1 == 0x0 goto pc+2 23: (79) r1 = *(u64 *)(r3 +0) 24: (61) r1 = *(u32 *)(r1 +2348) Where the temp variable is not used. Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann Acked-by: Song Liu Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/159718351457.4728.3295119261717842496.stgit@john-Precision-5820-Tower --- tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c index 1f1966e86e9f..f8b136827fcc 100644 --- a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c @@ -54,6 +54,7 @@ SEC("sockops") int bpf_testcb(struct bpf_sock_ops *skops) { char header[sizeof(struct ipv6hdr) + sizeof(struct tcphdr)]; + struct bpf_sock_ops *reuse = skops; struct tcphdr *thdr; int good_call_rv = 0; int bad_call_rv = 0; @@ -62,6 +63,18 @@ int bpf_testcb(struct bpf_sock_ops *skops) int v = 0; int op; + /* Test reading fields in bpf_sock_ops using single register */ + asm volatile ( + "%[reuse] = *(u32 *)(%[reuse] +96)" + : [reuse] "+r"(reuse) + :); + + asm volatile ( + "%[op] = *(u32 *)(%[skops] +96)" + : [op] "+r"(op) + : [skops] "r"(skops) + :); + op = (int) skops->op; update_event_map(op); -- cgit v1.2.3 From 8e0c1517565f06027b68caf2875620ddf6914eee Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 11 Aug 2020 15:05:33 -0700 Subject: bpf, selftests: Add tests for sock_ops load with r9, r8.r7 registers Loads in sock_ops case when using high registers requires extra logic to ensure the correct temporary value is used. We need to ensure the temp register does not use either the src_reg or dst_reg. Lets add an asm test to force the logic is triggered. The xlated code is here, 30: (7b) *(u64 *)(r9 +32) = r7 31: (61) r7 = *(u32 *)(r9 +28) 32: (15) if r7 == 0x0 goto pc+2 33: (79) r7 = *(u64 *)(r9 +0) 34: (63) *(u32 *)(r7 +916) = r8 35: (79) r7 = *(u64 *)(r9 +32) Notice r9 and r8 are not used for temp registers and r7 is chosen. Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann Acked-by: Song Liu Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/159718353345.4728.8805043614257933227.stgit@john-Precision-5820-Tower --- tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c index f8b136827fcc..6420b61fbbc8 100644 --- a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c @@ -75,6 +75,13 @@ int bpf_testcb(struct bpf_sock_ops *skops) : [skops] "r"(skops) :); + asm volatile ( + "r9 = %[skops];\n" + "r8 = *(u32 *)(r9 +164);\n" + "*(u32 *)(r9 +164) = r8;\n" + :: [skops] "r"(skops) + : "r9", "r8"); + op = (int) skops->op; update_event_map(op); -- cgit v1.2.3 From 9efa9e499799f939968aff1123cc7e8184960e48 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 11 Aug 2020 15:05:53 -0700 Subject: bpf, selftests: Add tests to sock_ops for loading sk Add tests to directly accesse sock_ops sk field. Then use it to ensure a bad pointer access will fault if something goes wrong. 
We do three tests: The first test ensures when we read sock_ops sk pointer into the same register that we don't fault as described earlier. Here r9 is chosen as the temp register. The xlated code is, 36: (7b) *(u64 *)(r1 +32) = r9 37: (61) r9 = *(u32 *)(r1 +28) 38: (15) if r9 == 0x0 goto pc+3 39: (79) r9 = *(u64 *)(r1 +32) 40: (79) r1 = *(u64 *)(r1 +0) 41: (05) goto pc+1 42: (79) r9 = *(u64 *)(r1 +32) The second test ensures the temp register selection does not collide with in-use register r9. Shown here r8 is chosen because r9 is the sock_ops pointer. The xlated code is as follows, 46: (7b) *(u64 *)(r9 +32) = r8 47: (61) r8 = *(u32 *)(r9 +28) 48: (15) if r8 == 0x0 goto pc+3 49: (79) r8 = *(u64 *)(r9 +32) 50: (79) r9 = *(u64 *)(r9 +0) 51: (05) goto pc+1 52: (79) r8 = *(u64 *)(r9 +32) And finally, ensure we didn't break the base case where dst_reg does not equal the source register, 56: (61) r2 = *(u32 *)(r1 +28) 57: (15) if r2 == 0x0 goto pc+1 58: (79) r2 = *(u64 *)(r1 +0) Notice it takes us an extra four instructions when src reg is the same as dst reg. One to save the reg, two to restore depending on the branch taken and a goto to jump over the second restore. Signed-off-by: John Fastabend Signed-off-by: Daniel Borkmann Acked-by: Song Liu Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/159718355325.4728.4163036953345999636.stgit@john-Precision-5820-Tower --- .../testing/selftests/bpf/progs/test_tcpbpf_kern.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c index 6420b61fbbc8..3e6912e4df3d 100644 --- a/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c +++ b/tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c @@ -82,6 +82,27 @@ int bpf_testcb(struct bpf_sock_ops *skops) :: [skops] "r"(skops) : "r9", "r8"); + asm volatile ( + "r1 = %[skops];\n" + "r1 = *(u64 *)(r1 +184);\n" + "if r1 == 0 goto +1;\n" + "r1 = *(u32 *)(r1 +4);\n" + :: [skops] "r"(skops):"r1"); + + asm volatile ( + "r9 = %[skops];\n" + "r9 = *(u64 *)(r9 +184);\n" + "if r9 == 0 goto +1;\n" + "r9 = *(u32 *)(r9 +4);\n" + :: [skops] "r"(skops):"r9"); + + asm volatile ( + "r1 = %[skops];\n" + "r2 = *(u64 *)(r1 +184);\n" + "if r2 == 0 goto +1;\n" + "r2 = *(u32 *)(r2 +4);\n" + :: [skops] "r"(skops):"r1", "r2"); + op = (int) skops->op; update_event_map(op); -- cgit v1.2.3 From 09f44b753a7d120becc80213c3459183c8acd26b Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 13 Aug 2020 13:49:37 -0700 Subject: tools/bpftool: Fix compilation warnings in 32-bit mode Fix few compilation warnings in bpftool when compiling in 32-bit mode. Abstract away u64 to pointer conversion into a helper function. 
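For reference, the conversion helpers this boils down to (added to main.h in the hunk below) cast through unsigned long, i.e. the host pointer width, which is what avoids the integer/pointer size-mismatch warnings on 32-bit builds:

  static inline __u64 ptr_to_u64(const void *ptr)
  {
          return (__u64)(unsigned long)ptr;
  }

  static inline void *u64_to_ptr(__u64 ptr)
  {
          return (void *)(unsigned long)ptr;
  }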
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200813204945.1020225-2-andriin@fb.com --- tools/bpf/bpftool/btf_dumper.c | 2 +- tools/bpf/bpftool/link.c | 4 ++-- tools/bpf/bpftool/main.h | 10 +++++++++- tools/bpf/bpftool/prog.c | 16 ++++++++-------- 4 files changed, 20 insertions(+), 12 deletions(-) (limited to 'tools') diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c index ede162f83eea..0e9310727281 100644 --- a/tools/bpf/bpftool/btf_dumper.c +++ b/tools/bpf/bpftool/btf_dumper.c @@ -67,7 +67,7 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d, if (!info->btf_id || !info->nr_func_info || btf__get_from_id(info->btf_id, &prog_btf)) goto print; - finfo = (struct bpf_func_info *)info->func_info; + finfo = u64_to_ptr(info->func_info); func_type = btf__type_by_id(prog_btf, finfo->type_id); if (!func_type || !btf_is_func(func_type)) goto print; diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c index 1b793759170e..a89f09e3c848 100644 --- a/tools/bpf/bpftool/link.c +++ b/tools/bpf/bpftool/link.c @@ -106,7 +106,7 @@ static int show_link_close_json(int fd, struct bpf_link_info *info) switch (info->type) { case BPF_LINK_TYPE_RAW_TRACEPOINT: jsonw_string_field(json_wtr, "tp_name", - (const char *)info->raw_tracepoint.tp_name); + u64_to_ptr(info->raw_tracepoint.tp_name)); break; case BPF_LINK_TYPE_TRACING: err = get_prog_info(info->prog_id, &prog_info); @@ -185,7 +185,7 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info) switch (info->type) { case BPF_LINK_TYPE_RAW_TRACEPOINT: printf("\n\ttp '%s' ", - (const char *)info->raw_tracepoint.tp_name); + (const char *)u64_to_ptr(info->raw_tracepoint.tp_name)); break; case BPF_LINK_TYPE_TRACING: err = get_prog_info(info->prog_id, &prog_info); diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index e3a79b5a9960..c46e52137b87 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -21,7 +21,15 @@ /* Make sure we do not use kernel-only integer typedefs */ #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64 -#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr)) +static inline __u64 ptr_to_u64(const void *ptr) +{ + return (__u64)(unsigned long)ptr; +} + +static inline void *u64_to_ptr(__u64 ptr) +{ + return (void *)(unsigned long)ptr; +} #define NEXT_ARG() ({ argc--; argv++; if (argc < 0) usage(); }) #define NEXT_ARGP() ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); }) diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 158995d853b0..d393eb8263a6 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -428,14 +428,14 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode, p_info("no instructions returned"); return -1; } - buf = (unsigned char *)(info->jited_prog_insns); + buf = u64_to_ptr(info->jited_prog_insns); member_len = info->jited_prog_len; } else { /* DUMP_XLATED */ if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) { p_err("error retrieving insn dump: kernel.kptr_restrict set?"); return -1; } - buf = (unsigned char *)info->xlated_prog_insns; + buf = u64_to_ptr(info->xlated_prog_insns); member_len = info->xlated_prog_len; } @@ -444,7 +444,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode, return -1; } - func_info = (void *)info->func_info; + func_info = u64_to_ptr(info->func_info); if (info->nr_line_info) { prog_linfo = bpf_prog_linfo__new(info); @@ -462,7 +462,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode, n = write(fd, 
buf, member_len); close(fd); - if (n != member_len) { + if (n != (ssize_t)member_len) { p_err("error writing output file: %s", n < 0 ? strerror(errno) : "short write"); return -1; @@ -492,13 +492,13 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode, __u32 i; if (info->nr_jited_ksyms) { kernel_syms_load(&dd); - ksyms = (__u64 *) info->jited_ksyms; + ksyms = u64_to_ptr(info->jited_ksyms); } if (json_output) jsonw_start_array(json_wtr); - lens = (__u32 *) info->jited_func_lens; + lens = u64_to_ptr(info->jited_func_lens); for (i = 0; i < info->nr_jited_func_lens; i++) { if (ksyms) { sym = kernel_syms_search(&dd, ksyms[i]); @@ -559,7 +559,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode, } else { kernel_syms_load(&dd); dd.nr_jited_ksyms = info->nr_jited_ksyms; - dd.jited_ksyms = (__u64 *) info->jited_ksyms; + dd.jited_ksyms = u64_to_ptr(info->jited_ksyms); dd.btf = btf; dd.func_info = func_info; dd.finfo_rec_size = info->func_info_rec_size; @@ -1681,7 +1681,7 @@ static char *profile_target_name(int tgt_fd) goto out; } - func_info = (struct bpf_func_info *)(info_linear->info.func_info); + func_info = u64_to_ptr(info_linear->info.func_info); t = btf__type_by_id(btf, func_info[0].type_id); if (!t) { p_err("btf %d doesn't have type %d", -- cgit v1.2.3 From 9028bbcc3e12510cac13a9554f1a1e39667a4387 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 13 Aug 2020 13:49:38 -0700 Subject: selftest/bpf: Fix compilation warnings in 32-bit mode Fix compilation warnings emitted when compiling selftests for 32-bit platform (x86 in my case). Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200813204945.1020225-3-andriin@fb.com --- tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c | 8 ++++---- tools/testing/selftests/bpf/prog_tests/core_extern.c | 4 ++-- tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c | 6 +++--- tools/testing/selftests/bpf/prog_tests/flow_dissector.c | 2 +- tools/testing/selftests/bpf/prog_tests/global_data.c | 6 +++--- tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c | 2 +- tools/testing/selftests/bpf/prog_tests/skb_ctx.c | 2 +- tools/testing/selftests/bpf/test_btf.c | 8 ++++---- tools/testing/selftests/bpf/test_progs.h | 5 +++++ 9 files changed, 24 insertions(+), 19 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c index 7afa4160416f..284d5921c345 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c @@ -159,15 +159,15 @@ void test_bpf_obj_id(void) /* Check getting link info */ info_len = sizeof(struct bpf_link_info) * 2; bzero(&link_infos[i], info_len); - link_infos[i].raw_tracepoint.tp_name = (__u64)&tp_name; + link_infos[i].raw_tracepoint.tp_name = ptr_to_u64(&tp_name); link_infos[i].raw_tracepoint.tp_name_len = sizeof(tp_name); err = bpf_obj_get_info_by_fd(bpf_link__fd(links[i]), &link_infos[i], &info_len); if (CHECK(err || link_infos[i].type != BPF_LINK_TYPE_RAW_TRACEPOINT || link_infos[i].prog_id != prog_infos[i].id || - link_infos[i].raw_tracepoint.tp_name != (__u64)&tp_name || - strcmp((char *)link_infos[i].raw_tracepoint.tp_name, + link_infos[i].raw_tracepoint.tp_name != ptr_to_u64(&tp_name) || + strcmp(u64_to_ptr(link_infos[i].raw_tracepoint.tp_name), "sys_enter") || info_len != sizeof(struct bpf_link_info), "get-link-info(fd)", @@ -178,7 +178,7 @@ void test_bpf_obj_id(void) link_infos[i].type, 
BPF_LINK_TYPE_RAW_TRACEPOINT, link_infos[i].id, link_infos[i].prog_id, prog_infos[i].id, - (char *)link_infos[i].raw_tracepoint.tp_name, + (const char *)u64_to_ptr(link_infos[i].raw_tracepoint.tp_name), "sys_enter")) goto done; diff --git a/tools/testing/selftests/bpf/prog_tests/core_extern.c b/tools/testing/selftests/bpf/prog_tests/core_extern.c index b093787e9448..1931a158510e 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_extern.c +++ b/tools/testing/selftests/bpf/prog_tests/core_extern.c @@ -159,8 +159,8 @@ void test_core_extern(void) exp = (uint64_t *)&t->data; for (j = 0; j < n; j++) { CHECK(got[j] != exp[j], "check_res", - "result #%d: expected %lx, but got %lx\n", - j, exp[j], got[j]); + "result #%d: expected %llx, but got %llx\n", + j, (__u64)exp[j], (__u64)got[j]); } cleanup: test_core_extern__destroy(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c index a895bfed55db..197d0d217b56 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c @@ -16,7 +16,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file, __u32 duration = 0, retval; struct bpf_map *data_map; const int zero = 0; - u64 *result = NULL; + __u64 *result = NULL; err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC, &pkt_obj, &pkt_fd); @@ -29,7 +29,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file, link = calloc(sizeof(struct bpf_link *), prog_cnt); prog = calloc(sizeof(struct bpf_program *), prog_cnt); - result = malloc((prog_cnt + 32 /* spare */) * sizeof(u64)); + result = malloc((prog_cnt + 32 /* spare */) * sizeof(__u64)); if (CHECK(!link || !prog || !result, "alloc_memory", "failed to alloc memory")) goto close_prog; @@ -72,7 +72,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file, goto close_prog; for (i = 0; i < prog_cnt; i++) - if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %ld\n", + if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %llu\n", result[i])) goto close_prog; diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c index f11f187990e9..cd6dc80edf18 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c @@ -591,7 +591,7 @@ void test_flow_dissector(void) CHECK_ATTR(tattr.data_size_out != sizeof(flow_keys) || err || tattr.retval != 1, tests[i].name, - "err %d errno %d retval %d duration %d size %u/%lu\n", + "err %d errno %d retval %d duration %d size %u/%zu\n", err, errno, tattr.retval, tattr.duration, tattr.data_size_out, sizeof(flow_keys)); CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys); diff --git a/tools/testing/selftests/bpf/prog_tests/global_data.c b/tools/testing/selftests/bpf/prog_tests/global_data.c index e3cb62b0a110..9efa7e50eab2 100644 --- a/tools/testing/selftests/bpf/prog_tests/global_data.c +++ b/tools/testing/selftests/bpf/prog_tests/global_data.c @@ -5,7 +5,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration) { int i, err, map_fd; - uint64_t num; + __u64 num; map_fd = bpf_find_map(__func__, obj, "result_number"); if (CHECK_FAIL(map_fd < 0)) @@ -14,7 +14,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration) struct { char *name; uint32_t key; - uint64_t num; + __u64 num; } tests[] = { { "relocate .bss reference", 0, 0 }, { "relocate .data reference", 1, 42 
}, @@ -32,7 +32,7 @@ static void test_global_data_number(struct bpf_object *obj, __u32 duration) for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) { err = bpf_map_lookup_elem(map_fd, &tests[i].key, &num); CHECK(err || num != tests[i].num, tests[i].name, - "err %d result %lx expected %lx\n", + "err %d result %llx expected %llx\n", err, num, tests[i].num); } } diff --git a/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c b/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c index dde2b7ae7bc9..935a294f049a 100644 --- a/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c +++ b/tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c @@ -28,7 +28,7 @@ void test_prog_run_xattr(void) "err %d errno %d retval %d\n", err, errno, tattr.retval); CHECK_ATTR(tattr.data_size_out != sizeof(pkt_v4), "data_size_out", - "incorrect output size, want %lu have %u\n", + "incorrect output size, want %zu have %u\n", sizeof(pkt_v4), tattr.data_size_out); CHECK_ATTR(buf[5] != 0, "overflow", diff --git a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c index 25de86af2d03..fafeddaad6a9 100644 --- a/tools/testing/selftests/bpf/prog_tests/skb_ctx.c +++ b/tools/testing/selftests/bpf/prog_tests/skb_ctx.c @@ -81,7 +81,7 @@ void test_skb_ctx(void) CHECK_ATTR(tattr.ctx_size_out != sizeof(skb), "ctx_size_out", - "incorrect output size, want %lu have %u\n", + "incorrect output size, want %zu have %u\n", sizeof(skb), tattr.ctx_size_out); for (i = 0; i < 5; i++) diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index 305fae8f80a9..c75fc6447186 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c @@ -3883,7 +3883,7 @@ static int test_big_btf_info(unsigned int test_num) info_garbage.garbage = 0; err = bpf_obj_get_info_by_fd(btf_fd, info, &info_len); if (CHECK(err || info_len != sizeof(*info), - "err:%d errno:%d info_len:%u sizeof(*info):%lu", + "err:%d errno:%d info_len:%u sizeof(*info):%zu", err, errno, info_len, sizeof(*info))) { err = -1; goto done; @@ -4094,7 +4094,7 @@ static int do_test_get_info(unsigned int test_num) if (CHECK(err || !info.id || info_len != sizeof(info) || info.btf_size != raw_btf_size || (ret = memcmp(raw_btf, user_btf, expected_nbytes)), - "err:%d errno:%d info.id:%u info_len:%u sizeof(info):%lu raw_btf_size:%u info.btf_size:%u expected_nbytes:%u memcmp:%d", + "err:%d errno:%d info.id:%u info_len:%u sizeof(info):%zu raw_btf_size:%u info.btf_size:%u expected_nbytes:%u memcmp:%d", err, errno, info.id, info_len, sizeof(info), raw_btf_size, info.btf_size, expected_nbytes, ret)) { err = -1; @@ -4730,7 +4730,7 @@ ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind, nexpected_line = snprintf(expected_line, line_size, "%s%u: {%u,0,%d,0x%x,0x%x,0x%x," - "{%lu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s," + "{%llu|[%u,%u,%u,%u,%u,%u,%u,%u]},%s," "%u,0x%x,[[%d,%d],[%d,%d]]}\n", percpu_map ? "\tcpu" : "", percpu_map ? 
cpu : next_key, @@ -4738,7 +4738,7 @@ ssize_t get_pprint_expected_line(enum pprint_mapv_kind_t mapv_kind, v->unused_bits2a, v->bits28, v->unused_bits2b, - v->ui64, + (__u64)v->ui64, v->ui8a[0], v->ui8a[1], v->ui8a[2], v->ui8a[3], v->ui8a[4], v->ui8a[5], diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index 6e09bf738473..dbb820dde138 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -135,6 +135,11 @@ static inline __u64 ptr_to_u64(const void *ptr) return (__u64) (unsigned long) ptr; } +static inline void *u64_to_ptr(__u64 ptr) +{ + return (void *) (unsigned long) ptr; +} + int bpf_find_map(const char *test, struct bpf_object *obj, const char *name); int compare_map_keys(int map1_fd, int map2_fd); int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len); -- cgit v1.2.3 From 15728ad3e71c120278105f20fa65b3735e715e0f Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 13 Aug 2020 13:49:39 -0700 Subject: libbpf: Fix BTF-defined map-in-map initialization on 32-bit host arches Libbpf built in 32-bit mode should be careful about not conflating 64-bit BPF pointers in BPF ELF file and host architecture pointers. This patch fixes issue of incorrect initializating of map-in-map inner map slots due to such difference. Fixes: 646f02ffdd49 ("libbpf: Add BTF-defined map-in-map support") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200813204945.1020225-4-andriin@fb.com --- tools/lib/bpf/libbpf.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'tools') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 0d48c18d5030..6accddaaedab 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -5195,7 +5195,8 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, static int bpf_object__collect_map_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data *data) { - int i, j, nrels, new_sz, ptr_sz = sizeof(void *); + const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *); + int i, j, nrels, new_sz; const struct btf_var_secinfo *vi = NULL; const struct btf_type *sec, *var, *def; const struct btf_member *member; @@ -5244,7 +5245,7 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj, vi = btf_var_secinfos(sec) + map->btf_var_idx; if (vi->offset <= rel.r_offset && - rel.r_offset + sizeof(void *) <= vi->offset + vi->size) + rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size) break; } if (j == obj->nr_maps) { @@ -5280,17 +5281,20 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj, return -EINVAL; moff = rel.r_offset - vi->offset - moff; - if (moff % ptr_sz) + /* here we use BPF pointer size, which is always 64 bit, as we + * are parsing ELF that was built for BPF target + */ + if (moff % bpf_ptr_sz) return -EINVAL; - moff /= ptr_sz; + moff /= bpf_ptr_sz; if (moff >= map->init_slots_sz) { new_sz = moff + 1; - tmp = realloc(map->init_slots, new_sz * ptr_sz); + tmp = realloc(map->init_slots, new_sz * host_ptr_sz); if (!tmp) return -ENOMEM; map->init_slots = tmp; memset(map->init_slots + map->init_slots_sz, 0, - (new_sz - map->init_slots_sz) * ptr_sz); + (new_sz - map->init_slots_sz) * host_ptr_sz); map->init_slots_sz = new_sz; } map->init_slots[moff] = targ_map; -- cgit v1.2.3 From 44ad23dfbccbcd26d6ca504eba1ac55755864969 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 13 Aug 2020 13:49:40 -0700 Subject: libbpf: Handle BTF pointer sizes 
more carefully With libbpf and BTF it is pretty common to have libbpf built for one architecture, while BTF information was generated for a different architecture (typically, but not always, BPF). In such case, the size of a pointer might differ betweem architectures. libbpf previously was always making an assumption that pointer size for BTF is the same as native architecture pointer size, but that breaks for cases where libbpf is built as 32-bit library, while BTF is for 64-bit architecture. To solve this, add heuristic to determine pointer size by searching for `long` or `unsigned long` integer type and using its size as a pointer size. Also, allow to override the pointer size with a new API btf__set_pointer_size(), for cases where application knows which pointer size should be used. User application can check what libbpf "guessed" by looking at the result of btf__pointer_size(). If it's not 0, then libbpf successfully determined a pointer size, otherwise native arch pointer size will be used. For cases where BTF is parsed from ELF file, use ELF's class (32-bit or 64-bit) to determine pointer size. Fixes: 8a138aed4a80 ("bpf: btf: Add BTF support to libbpf") Fixes: 351131b51c7a ("libbpf: add btf_dump API for BTF-to-C conversion") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200813204945.1020225-5-andriin@fb.com --- tools/lib/bpf/btf.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++-- tools/lib/bpf/btf.h | 2 ++ tools/lib/bpf/btf_dump.c | 4 ++- tools/lib/bpf/libbpf.map | 2 ++ 4 files changed, 87 insertions(+), 4 deletions(-) (limited to 'tools') diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 4843e44916f7..7dfca7016aaa 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -41,6 +41,7 @@ struct btf { __u32 types_size; __u32 data_size; int fd; + int ptr_sz; }; static inline __u64 ptr_to_u64(const void *ptr) @@ -221,6 +222,70 @@ const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id) return btf->types[type_id]; } +static int determine_ptr_size(const struct btf *btf) +{ + const struct btf_type *t; + const char *name; + int i; + + for (i = 1; i <= btf->nr_types; i++) { + t = btf__type_by_id(btf, i); + if (!btf_is_int(t)) + continue; + + name = btf__name_by_offset(btf, t->name_off); + if (!name) + continue; + + if (strcmp(name, "long int") == 0 || + strcmp(name, "long unsigned int") == 0) { + if (t->size != 4 && t->size != 8) + continue; + return t->size; + } + } + + return -1; +} + +static size_t btf_ptr_sz(const struct btf *btf) +{ + if (!btf->ptr_sz) + ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf); + return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz; +} + +/* Return pointer size this BTF instance assumes. The size is heuristically + * determined by looking for 'long' or 'unsigned long' integer type and + * recording its size in bytes. If BTF type information doesn't have any such + * type, this function returns 0. In the latter case, native architecture's + * pointer size is assumed, so will be either 4 or 8, depending on + * architecture that libbpf was compiled for. It's possible to override + * guessed value by using btf__set_pointer_size() API. + */ +size_t btf__pointer_size(const struct btf *btf) +{ + if (!btf->ptr_sz) + ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf); + + if (btf->ptr_sz < 0) + /* not enough BTF type info to guess */ + return 0; + + return btf->ptr_sz; +} + +/* Override or set pointer size in bytes. Only values of 4 and 8 are + * supported. 
+ */ +int btf__set_pointer_size(struct btf *btf, size_t ptr_sz) +{ + if (ptr_sz != 4 && ptr_sz != 8) + return -EINVAL; + btf->ptr_sz = ptr_sz; + return 0; +} + static bool btf_type_is_void(const struct btf_type *t) { return t == &btf_void || btf_is_fwd(t); @@ -253,7 +318,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id) size = t->size; goto done; case BTF_KIND_PTR: - size = sizeof(void *); + size = btf_ptr_sz(btf); goto done; case BTF_KIND_TYPEDEF: case BTF_KIND_VOLATILE: @@ -293,9 +358,9 @@ int btf__align_of(const struct btf *btf, __u32 id) switch (kind) { case BTF_KIND_INT: case BTF_KIND_ENUM: - return min(sizeof(void *), (size_t)t->size); + return min(btf_ptr_sz(btf), (size_t)t->size); case BTF_KIND_PTR: - return sizeof(void *); + return btf_ptr_sz(btf); case BTF_KIND_TYPEDEF: case BTF_KIND_VOLATILE: case BTF_KIND_CONST: @@ -533,6 +598,18 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext) if (IS_ERR(btf)) goto done; + switch (gelf_getclass(elf)) { + case ELFCLASS32: + btf__set_pointer_size(btf, 4); + break; + case ELFCLASS64: + btf__set_pointer_size(btf, 8); + break; + default: + pr_warn("failed to get ELF class (bitness) for %s\n", path); + break; + } + if (btf_ext && btf_ext_data) { *btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size); diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index f4a1a1d2b9a3..1ca14448df4c 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -76,6 +76,8 @@ LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf, LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf); LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 id); +LIBBPF_API size_t btf__pointer_size(const struct btf *btf); +LIBBPF_API int btf__set_pointer_size(struct btf *btf, size_t ptr_sz); LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id); LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id); LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id); diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index ac81f3f8957a..fe39bd774697 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -61,6 +61,7 @@ struct btf_dump { const struct btf_ext *btf_ext; btf_dump_printf_fn_t printf_fn; struct btf_dump_opts opts; + int ptr_sz; bool strip_mods; /* per-type auxiliary state */ @@ -139,6 +140,7 @@ struct btf_dump *btf_dump__new(const struct btf *btf, d->btf_ext = btf_ext; d->printf_fn = printf_fn; d->opts.ctx = opts ? opts->ctx : NULL; + d->ptr_sz = btf__pointer_size(btf) ? 
: sizeof(void *); d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL); if (IS_ERR(d->type_names)) { @@ -804,7 +806,7 @@ static void btf_dump_emit_bit_padding(const struct btf_dump *d, int align, int lvl) { int off_diff = m_off - cur_off; - int ptr_bits = sizeof(void *) * 8; + int ptr_bits = d->ptr_sz * 8; if (off_diff <= 0) /* no gap */ diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 0c4722bfdd0a..e35bd6cdbdbf 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -295,5 +295,7 @@ LIBBPF_0.1.0 { bpf_program__set_sk_lookup; btf__parse; btf__parse_raw; + btf__pointer_size; btf__set_fd; + btf__set_pointer_size; } LIBBPF_0.0.9; -- cgit v1.2.3 From eed7818adf03e874994b966aa33bc00204dd275a Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 13 Aug 2020 13:49:41 -0700 Subject: selftests/bpf: Fix btf_dump test cases on 32-bit arches Fix btf_dump test cases by hard-coding BPF's pointer size of 8 bytes for cases where it's impossible to deterimne the pointer size (no long type in BTF). In cases where it's known, validate libbpf correctly determines it as 8. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200813204945.1020225-6-andriin@fb.com --- tools/testing/selftests/bpf/prog_tests/btf_dump.c | 27 +++++++++++++++++------ 1 file changed, 20 insertions(+), 7 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c index cb33a7ee4e04..39fb81d9daeb 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c @@ -12,15 +12,16 @@ void btf_dump_printf(void *ctx, const char *fmt, va_list args) static struct btf_dump_test_case { const char *name; const char *file; + bool known_ptr_sz; struct btf_dump_opts opts; } btf_dump_test_cases[] = { - {"btf_dump: syntax", "btf_dump_test_case_syntax", {}}, - {"btf_dump: ordering", "btf_dump_test_case_ordering", {}}, - {"btf_dump: padding", "btf_dump_test_case_padding", {}}, - {"btf_dump: packing", "btf_dump_test_case_packing", {}}, - {"btf_dump: bitfields", "btf_dump_test_case_bitfields", {}}, - {"btf_dump: multidim", "btf_dump_test_case_multidim", {}}, - {"btf_dump: namespacing", "btf_dump_test_case_namespacing", {}}, + {"btf_dump: syntax", "btf_dump_test_case_syntax", true, {}}, + {"btf_dump: ordering", "btf_dump_test_case_ordering", false, {}}, + {"btf_dump: padding", "btf_dump_test_case_padding", true, {}}, + {"btf_dump: packing", "btf_dump_test_case_packing", true, {}}, + {"btf_dump: bitfields", "btf_dump_test_case_bitfields", true, {}}, + {"btf_dump: multidim", "btf_dump_test_case_multidim", false, {}}, + {"btf_dump: namespacing", "btf_dump_test_case_namespacing", false, {}}, }; static int btf_dump_all_types(const struct btf *btf, @@ -62,6 +63,18 @@ static int test_btf_dump_case(int n, struct btf_dump_test_case *t) goto done; } + /* tests with t->known_ptr_sz have no "long" or "unsigned long" type, + * so it's impossible to determine correct pointer size; but if they + * do, it should be 8 regardless of host architecture, becaues BPF + * target is always 64-bit + */ + if (!t->known_ptr_sz) { + btf__set_pointer_size(btf, 8); + } else { + CHECK(btf__pointer_size(btf) != 8, "ptr_sz", "exp %d, got %zu\n", + 8, btf__pointer_size(btf)); + } + snprintf(out_file, sizeof(out_file), "/tmp/%s.output.XXXXXX", t->file); fd = mkstemp(out_file); if (CHECK(fd < 0, "create_tmp", "failed to create file: %d\n", fd)) { -- cgit 
v1.2.3 From 4c01925f583eaa7d9d003dc87a4b75b8140b4ff6 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 13 Aug 2020 13:49:42 -0700 Subject: libbpf: Enforce 64-bitness of BTF for BPF object files BPF object files are always targeting 64-bit BPF target architecture, so enforce that at BTF level as well. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200813204945.1020225-7-andriin@fb.com --- tools/lib/bpf/libbpf.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'tools') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 6accddaaedab..5d20b2da4427 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2434,6 +2434,8 @@ static int bpf_object__init_btf(struct bpf_object *obj, BTF_ELF_SEC, err); goto out; } + /* enforce 8-byte pointers for BPF-targeted BTFs */ + btf__set_pointer_size(obj->btf, 8); err = 0; } if (btf_ext_data) { @@ -2542,6 +2544,8 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) if (IS_ERR(kern_btf)) return PTR_ERR(kern_btf); + /* enforce 8-byte pointers for BPF-targeted BTFs */ + btf__set_pointer_size(obj->btf, 8); bpf_object__sanitize_btf(obj, kern_btf); } -- cgit v1.2.3 From 5705d705832f74395c5465ce93192688f543006a Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 13 Aug 2020 13:49:43 -0700 Subject: selftests/bpf: Correct various core_reloc 64-bit assumptions Ensure that types are memory layout- and field alignment-compatible regardless of 32/64-bitness mix of libbpf and BPF architecture. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200813204945.1020225-8-andriin@fb.com --- .../testing/selftests/bpf/prog_tests/core_reloc.c | 20 +++---- .../testing/selftests/bpf/progs/core_reloc_types.h | 69 ++++++++++++---------- 2 files changed, 47 insertions(+), 42 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c index 084ed26a7d78..a54eafc5e4b3 100644 --- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c +++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c @@ -237,8 +237,8 @@ .union_sz = sizeof(((type *)0)->union_field), \ .arr_sz = sizeof(((type *)0)->arr_field), \ .arr_elem_sz = sizeof(((type *)0)->arr_field[0]), \ - .ptr_sz = sizeof(((type *)0)->ptr_field), \ - .enum_sz = sizeof(((type *)0)->enum_field), \ + .ptr_sz = 8, /* always 8-byte pointer for BPF */ \ + .enum_sz = sizeof(((type *)0)->enum_field), \ } #define SIZE_CASE(name) { \ @@ -432,20 +432,20 @@ static struct core_reloc_test_case test_cases[] = { .sb4 = -1, .sb20 = -0x17654321, .u32 = 0xBEEF, - .s32 = -0x3FEDCBA987654321, + .s32 = -0x3FEDCBA987654321LL, }), BITFIELDS_CASE(bitfields___bitfield_vs_int, { - .ub1 = 0xFEDCBA9876543210, + .ub1 = 0xFEDCBA9876543210LL, .ub2 = 0xA6, - .ub7 = -0x7EDCBA987654321, - .sb4 = -0x6123456789ABCDE, - .sb20 = 0xD00D, + .ub7 = -0x7EDCBA987654321LL, + .sb4 = -0x6123456789ABCDELL, + .sb20 = 0xD00DLL, .u32 = -0x76543, - .s32 = 0x0ADEADBEEFBADB0B, + .s32 = 0x0ADEADBEEFBADB0BLL, }), BITFIELDS_CASE(bitfields___just_big_enough, { - .ub1 = 0xF, - .ub2 = 0x0812345678FEDCBA, + .ub1 = 0xFLL, + .ub2 = 0x0812345678FEDCBALL, }), BITFIELDS_ERR_CASE(bitfields___err_too_big_bitfield), diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h index 34d84717c946..69139ed66216 100644 --- a/tools/testing/selftests/bpf/progs/core_reloc_types.h +++ 
b/tools/testing/selftests/bpf/progs/core_reloc_types.h @@ -1,5 +1,10 @@ #include #include + +void preserce_ptr_sz_fn(long x) {} + +#define __bpf_aligned __attribute__((aligned(8))) + /* * KERNEL */ @@ -444,51 +449,51 @@ struct core_reloc_primitives { char a; int b; enum core_reloc_primitives_enum c; - void *d; - int (*f)(const char *); + void *d __bpf_aligned; + int (*f)(const char *) __bpf_aligned; }; struct core_reloc_primitives___diff_enum_def { char a; int b; - void *d; - int (*f)(const char *); + void *d __bpf_aligned; + int (*f)(const char *) __bpf_aligned; enum { X = 100, Y = 200, - } c; /* inline enum def with differing set of values */ + } c __bpf_aligned; /* inline enum def with differing set of values */ }; struct core_reloc_primitives___diff_func_proto { - void (*f)(int); /* incompatible function prototype */ - void *d; - enum core_reloc_primitives_enum c; + void (*f)(int) __bpf_aligned; /* incompatible function prototype */ + void *d __bpf_aligned; + enum core_reloc_primitives_enum c __bpf_aligned; int b; char a; }; struct core_reloc_primitives___diff_ptr_type { - const char * const d; /* different pointee type + modifiers */ - char a; + const char * const d __bpf_aligned; /* different pointee type + modifiers */ + char a __bpf_aligned; int b; enum core_reloc_primitives_enum c; - int (*f)(const char *); + int (*f)(const char *) __bpf_aligned; }; struct core_reloc_primitives___err_non_enum { char a[1]; int b; int c; /* int instead of enum */ - void *d; - int (*f)(const char *); + void *d __bpf_aligned; + int (*f)(const char *) __bpf_aligned; }; struct core_reloc_primitives___err_non_int { char a[1]; - int *b; /* ptr instead of int */ - enum core_reloc_primitives_enum c; - void *d; - int (*f)(const char *); + int *b __bpf_aligned; /* ptr instead of int */ + enum core_reloc_primitives_enum c __bpf_aligned; + void *d __bpf_aligned; + int (*f)(const char *) __bpf_aligned; }; struct core_reloc_primitives___err_non_ptr { @@ -496,7 +501,7 @@ struct core_reloc_primitives___err_non_ptr { int b; enum core_reloc_primitives_enum c; int d; /* int instead of ptr */ - int (*f)(const char *); + int (*f)(const char *) __bpf_aligned; }; /* @@ -507,7 +512,7 @@ struct core_reloc_mods_output { }; typedef const int int_t; -typedef const char *char_ptr_t; +typedef const char *char_ptr_t __bpf_aligned; typedef const int arr_t[7]; struct core_reloc_mods_substruct { @@ -523,9 +528,9 @@ typedef struct { struct core_reloc_mods { int a; int_t b; - char *c; + char *c __bpf_aligned; char_ptr_t d; - int e[3]; + int e[3] __bpf_aligned; arr_t f; struct core_reloc_mods_substruct g; core_reloc_mods_substruct_t h; @@ -535,9 +540,9 @@ struct core_reloc_mods { struct core_reloc_mods___mod_swap { int b; int_t a; - char *d; + char *d __bpf_aligned; char_ptr_t c; - int f[3]; + int f[3] __bpf_aligned; arr_t e; struct { int y; @@ -555,7 +560,7 @@ typedef arr1_t arr2_t; typedef arr2_t arr3_t; typedef arr3_t arr4_t; -typedef const char * const volatile fancy_char_ptr_t; +typedef const char * const volatile fancy_char_ptr_t __bpf_aligned; typedef core_reloc_mods_substruct_t core_reloc_mods_substruct_tt; @@ -567,7 +572,7 @@ struct core_reloc_mods___typedefs { arr4_t e; fancy_char_ptr_t d; fancy_char_ptr_t c; - int3_t b; + int3_t b __bpf_aligned; int3_t a; }; @@ -739,19 +744,19 @@ struct core_reloc_bitfields___bit_sz_change { int8_t sb4: 1; /* 4 -> 1 */ int32_t sb20: 30; /* 20 -> 30 */ /* non-bitfields */ - uint16_t u32; /* 32 -> 16 */ - int64_t s32; /* 32 -> 64 */ + uint16_t u32; /* 32 -> 16 */ + int64_t s32 __bpf_aligned; 
/* 32 -> 64 */ }; /* turn bitfield into non-bitfield and vice versa */ struct core_reloc_bitfields___bitfield_vs_int { uint64_t ub1; /* 3 -> 64 non-bitfield */ uint8_t ub2; /* 20 -> 8 non-bitfield */ - int64_t ub7; /* 7 -> 64 non-bitfield signed */ - int64_t sb4; /* 4 -> 64 non-bitfield signed */ - uint64_t sb20; /* 20 -> 16 non-bitfield unsigned */ - int32_t u32: 20; /* 32 non-bitfield -> 20 bitfield */ - uint64_t s32: 60; /* 32 non-bitfield -> 60 bitfield */ + int64_t ub7 __bpf_aligned; /* 7 -> 64 non-bitfield signed */ + int64_t sb4 __bpf_aligned; /* 4 -> 64 non-bitfield signed */ + uint64_t sb20 __bpf_aligned; /* 20 -> 16 non-bitfield unsigned */ + int32_t u32: 20; /* 32 non-bitfield -> 20 bitfield */ + uint64_t s32: 60 __bpf_aligned; /* 32 non-bitfield -> 60 bitfield */ }; struct core_reloc_bitfields___just_big_enough { -- cgit v1.2.3 From 0f993845d723c87656552837b412994d6086f086 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 13 Aug 2020 13:49:44 -0700 Subject: tools/bpftool: Generate data section struct with conservative alignment The comment in the code describes this in good details. Generate such a memory layout that would work both on 32-bit and 64-bit architectures for user-space. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200813204945.1020225-9-andriin@fb.com --- tools/bpf/bpftool/gen.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'tools') diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c index db80e836816e..f61184653633 100644 --- a/tools/bpf/bpftool/gen.c +++ b/tools/bpf/bpftool/gen.c @@ -143,6 +143,20 @@ static int codegen_datasec_def(struct bpf_object *obj, var_name, align); return -EINVAL; } + /* Assume 32-bit architectures when generating data section + * struct memory layout. Given bpftool can't know which target + * host architecture it's emitting skeleton for, we need to be + * conservative and assume 32-bit one to ensure enough padding + * bytes are generated for pointer and long types. This will + * still work correctly for 64-bit architectures, because in + * the worst case we'll generate unnecessary padding field, + * which on 64-bit architectures is not strictly necessary and + * would be handled by natural 8-byte alignment. But it still + * will be a correct memory layout, based on recorded offsets + * in BTF. + */ + if (align > 4) + align = 4; align_off = (off + align - 1) / align * align; if (align_off != need_off) { -- cgit v1.2.3 From 4fccd2ff74fbad222c69c7604307e0773a37ab8d Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 13 Aug 2020 13:49:45 -0700 Subject: selftests/bpf: Make test_varlen work with 32-bit user-space arch Despite bpftool generating data section memory layout that will work for 32-bit architectures on user-space side, BPF programs should be careful to not use ambiguous types like `long`, which have different size in 32-bit and 64-bit environments. Fix that in test by using __u64 explicitly, which is a recommended approach anyway. 
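To make the ambiguity concrete: the BPF target always has an 8-byte long, while an ILP32 user-space build sees a 4-byte long, so a data-section counter declared as long (e.g. total1, converted in the diff below) is mirrored with a different size than BTF recorded, whereas __u64 is 8 bytes on both sides. A quick host-side check (hypothetical snippet, not part of the selftest) makes the difference visible:

  #include <stdio.h>

  int main(void)
  {
          /* Prints 4 on an ILP32 (32-bit) build and 8 on LP64; the BPF
           * target always uses 8, hence the switch to __u64 in the test.
           */
          printf("sizeof(long) = %zu\n", sizeof(long));
          return 0;
  }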
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200813204945.1020225-10-andriin@fb.com --- tools/testing/selftests/bpf/prog_tests/varlen.c | 8 ++++---- tools/testing/selftests/bpf/progs/test_varlen.c | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/prog_tests/varlen.c b/tools/testing/selftests/bpf/prog_tests/varlen.c index c75525eab02c..dd324b4933db 100644 --- a/tools/testing/selftests/bpf/prog_tests/varlen.c +++ b/tools/testing/selftests/bpf/prog_tests/varlen.c @@ -44,25 +44,25 @@ void test_varlen(void) CHECK_VAL(bss->payload1_len2, size2); CHECK_VAL(bss->total1, size1 + size2); CHECK(memcmp(bss->payload1, exp_str, size1 + size2), "content_check", - "doesn't match!"); + "doesn't match!\n"); CHECK_VAL(data->payload2_len1, size1); CHECK_VAL(data->payload2_len2, size2); CHECK_VAL(data->total2, size1 + size2); CHECK(memcmp(data->payload2, exp_str, size1 + size2), "content_check", - "doesn't match!"); + "doesn't match!\n"); CHECK_VAL(data->payload3_len1, size1); CHECK_VAL(data->payload3_len2, size2); CHECK_VAL(data->total3, size1 + size2); CHECK(memcmp(data->payload3, exp_str, size1 + size2), "content_check", - "doesn't match!"); + "doesn't match!\n"); CHECK_VAL(data->payload4_len1, size1); CHECK_VAL(data->payload4_len2, size2); CHECK_VAL(data->total4, size1 + size2); CHECK(memcmp(data->payload4, exp_str, size1 + size2), "content_check", - "doesn't match!"); + "doesn't match!\n"); cleanup: test_varlen__destroy(skel); } diff --git a/tools/testing/selftests/bpf/progs/test_varlen.c b/tools/testing/selftests/bpf/progs/test_varlen.c index cd4b72c55dfe..913acdffd90f 100644 --- a/tools/testing/selftests/bpf/progs/test_varlen.c +++ b/tools/testing/selftests/bpf/progs/test_varlen.c @@ -15,9 +15,9 @@ int test_pid = 0; bool capture = false; /* .bss */ -long payload1_len1 = 0; -long payload1_len2 = 0; -long total1 = 0; +__u64 payload1_len1 = 0; +__u64 payload1_len2 = 0; +__u64 total1 = 0; char payload1[MAX_LEN + MAX_LEN] = {}; /* .data */ -- cgit v1.2.3