From 313e7f6fb1d9a8814424c2c6878848648c9c090f Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Mon, 28 Oct 2019 11:20:49 +0100 Subject: selftest/bpf: Use -m{little, big}-endian for clang When cross-compiling tests from x86 to s390, the resulting BPF objects fail to load due to endianness mismatch. Fix by using BPF-GCC endianness check for clang as well. Signed-off-by: Ilya Leoshkevich Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20191028102049.7489-1-iii@linux.ibm.com --- tools/testing/selftests/bpf/Makefile | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 933f39381039..3209c208f3b3 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -131,8 +131,13 @@ $(shell $(1) -v -E - &1 \ | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') endef +# Determine target endianness. +IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - Date: Mon, 28 Oct 2019 11:21:10 +0100 Subject: selftests/bpf: Restore $(OUTPUT)/test_stub.o rule `make O=/linux-build kselftest TARGETS=bpf` fails with make[3]: *** No rule to make target '/linux-build/bpf/test_stub.o', needed by '/linux-build/bpf/test_verifier' The same command without the O= part works, presumably thanks to the implicit rule. Fix by restoring the explicit $(OUTPUT)/test_stub.o rule. Fixes: 74b5a5968fe8 ("selftests/bpf: Replace test_progs and test_maps w/ general rule") Signed-off-by: Ilya Leoshkevich Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20191028102110.7545-1-iii@linux.ibm.com --- tools/testing/selftests/bpf/Makefile | 3 +++ 1 file changed, 3 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 3209c208f3b3..b334a6db15c1 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -89,6 +89,9 @@ $(notdir $(TEST_GEN_PROGS) \ $(OUTPUT)/urandom_read: urandom_read.c $(CC) -o $@ $< -Wl,--build-id +$(OUTPUT)/test_stub.o: test_stub.c + $(CC) -c $(CFLAGS) -o $@ $< + BPFOBJ := $(OUTPUT)/libbpf.a $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/test_stub.o $(BPFOBJ) -- cgit v1.2.3 From 94ff9ebb49a546b7f009ed840bafa235c96d4c4b Mon Sep 17 00:00:00 2001 From: Magnus Karlsson Date: Fri, 25 Oct 2019 11:17:15 +0200 Subject: libbpf: Fix compatibility for kernels without need_wakeup When the need_wakeup flag was added to AF_XDP, the format of the XDP_MMAP_OFFSETS getsockopt was extended. Code was added to the kernel to take care of compatibility issues arrising from running applications using any of the two formats. However, libbpf was not extended to take care of the case when the application/libbpf uses the new format but the kernel only supports the old format. This patch adds support in libbpf for parsing the old format, before the need_wakeup flag was added, and emulating a set of static need_wakeup flags that will always work for the application. 
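For context, applications consume the need_wakeup flags through the ring helpers in xsk.h. A minimal sketch of the usual TX kick pattern (here 'tx' is the application's struct xsk_ring_prod for the TX ring and 'xsk' its struct xsk_socket; both names are illustrative):

    /* Only kick the kernel when it asks for it; with the static flags
     * emulated as described above, this same code path is intended to
     * keep working on kernels that predate need_wakeup.
     */
    if (xsk_ring_prod__needs_wakeup(&tx))
            sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);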
v2 -> v3: * Incorporated code improvements suggested by Jonathan Lemon v1 -> v2: * Rebased to bpf-next * Rewrote the code as the previous version made you blind Fixes: a4500432c2587cb2a ("libbpf: add support for need_wakeup flag in AF_XDP part") Reported-by: Eloy Degen Signed-off-by: Magnus Karlsson Signed-off-by: Alexei Starovoitov Acked-by: Jonathan Lemon Link: https://lore.kernel.org/bpf/1571995035-21889-1-git-send-email-magnus.karlsson@intel.com --- tools/lib/bpf/xsk.c | 83 +++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 71 insertions(+), 12 deletions(-) (limited to 'tools') diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c index 0b6287d1b15e..d54111133123 100644 --- a/tools/lib/bpf/xsk.c +++ b/tools/lib/bpf/xsk.c @@ -73,6 +73,21 @@ struct xsk_nl_info { int fd; }; +/* Up until and including Linux 5.3 */ +struct xdp_ring_offset_v1 { + __u64 producer; + __u64 consumer; + __u64 desc; +}; + +/* Up until and including Linux 5.3 */ +struct xdp_mmap_offsets_v1 { + struct xdp_ring_offset_v1 rx; + struct xdp_ring_offset_v1 tx; + struct xdp_ring_offset_v1 fr; + struct xdp_ring_offset_v1 cr; +}; + int xsk_umem__fd(const struct xsk_umem *umem) { return umem ? umem->fd : -EINVAL; @@ -133,6 +148,58 @@ static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg, return 0; } +static void xsk_mmap_offsets_v1(struct xdp_mmap_offsets *off) +{ + struct xdp_mmap_offsets_v1 off_v1; + + /* getsockopt on a kernel <= 5.3 has no flags fields. + * Copy over the offsets to the correct places in the >=5.4 format + * and put the flags where they would have been on that kernel. + */ + memcpy(&off_v1, off, sizeof(off_v1)); + + off->rx.producer = off_v1.rx.producer; + off->rx.consumer = off_v1.rx.consumer; + off->rx.desc = off_v1.rx.desc; + off->rx.flags = off_v1.rx.consumer + sizeof(u32); + + off->tx.producer = off_v1.tx.producer; + off->tx.consumer = off_v1.tx.consumer; + off->tx.desc = off_v1.tx.desc; + off->tx.flags = off_v1.tx.consumer + sizeof(u32); + + off->fr.producer = off_v1.fr.producer; + off->fr.consumer = off_v1.fr.consumer; + off->fr.desc = off_v1.fr.desc; + off->fr.flags = off_v1.fr.consumer + sizeof(u32); + + off->cr.producer = off_v1.cr.producer; + off->cr.consumer = off_v1.cr.consumer; + off->cr.desc = off_v1.cr.desc; + off->cr.flags = off_v1.cr.consumer + sizeof(u32); +} + +static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off) +{ + socklen_t optlen; + int err; + + optlen = sizeof(*off); + err = getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, off, &optlen); + if (err) + return err; + + if (optlen == sizeof(*off)) + return 0; + + if (optlen == sizeof(struct xdp_mmap_offsets_v1)) { + xsk_mmap_offsets_v1(off); + return 0; + } + + return -EINVAL; +} + int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area, __u64 size, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, @@ -141,7 +208,6 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area, struct xdp_mmap_offsets off; struct xdp_umem_reg mr; struct xsk_umem *umem; - socklen_t optlen; void *map; int err; @@ -190,8 +256,7 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area, goto out_socket; } - optlen = sizeof(off); - err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen); + err = xsk_get_mmap_offsets(umem->fd, &off); if (err) { err = -errno; goto out_socket; @@ -514,7 +579,6 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, struct sockaddr_xdp sxdp = {}; struct xdp_mmap_offsets off; struct xsk_socket *xsk; - 
socklen_t optlen; int err; if (!umem || !xsk_ptr || !rx || !tx) @@ -573,8 +637,7 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, } } - optlen = sizeof(off); - err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen); + err = xsk_get_mmap_offsets(xsk->fd, &off); if (err) { err = -errno; goto out_socket; @@ -660,7 +723,6 @@ out_xsk_alloc: int xsk_umem__delete(struct xsk_umem *umem) { struct xdp_mmap_offsets off; - socklen_t optlen; int err; if (!umem) @@ -669,8 +731,7 @@ int xsk_umem__delete(struct xsk_umem *umem) if (umem->refcount) return -EBUSY; - optlen = sizeof(off); - err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen); + err = xsk_get_mmap_offsets(umem->fd, &off); if (!err) { munmap(umem->fill->ring - off.fr.desc, off.fr.desc + umem->config.fill_size * sizeof(__u64)); @@ -688,7 +749,6 @@ void xsk_socket__delete(struct xsk_socket *xsk) { size_t desc_sz = sizeof(struct xdp_desc); struct xdp_mmap_offsets off; - socklen_t optlen; int err; if (!xsk) @@ -699,8 +759,7 @@ void xsk_socket__delete(struct xsk_socket *xsk) close(xsk->prog_fd); } - optlen = sizeof(off); - err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen); + err = xsk_get_mmap_offsets(xsk->fd, &off); if (!err) { if (xsk->rx) { munmap(xsk->rx->ring - off.rx.desc, -- cgit v1.2.3 From d3a3aa0c59e83d8c13a758128b10dd459ce0d866 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 28 Oct 2019 16:37:27 -0700 Subject: libbpf: Fix off-by-one error in ELF sanity check libbpf's bpf_object__elf_collect() does simple sanity check after iterating over all ELF sections, if checks that .strtab index is correct. Unfortunately, due to section indices being 1-based, the check breaks for cases when .strtab ends up being the very last section in ELF. Fixes: 77ba9a5b48a7 ("tools lib bpf: Fetch map names from correct strtab") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20191028233727.1286699-1-andriin@fb.com --- tools/lib/bpf/libbpf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index d71631a01926..5d15cc4dfcd6 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1664,7 +1664,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, bool relaxed_maps) } } - if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) { + if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) { pr_warn("Corrupted ELF file: index of strtab invalid\n"); return -LIBBPF_ERRNO__FORMAT; } -- cgit v1.2.3 From a566e35f1e8b4b3be1e96a804d1cca38b578167c Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 28 Oct 2019 22:59:53 -0700 Subject: libbpf: Don't use kernel-side u32 type in xsk.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit u32 is a kernel-side typedef. User-space library is supposed to use __u32. This breaks Github's projection of libbpf. Do u32 -> __u32 fix. 
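A minimal user-space illustration of the distinction (a sketch for a plain translation unit built against the uapi headers):

    #include <linux/types.h>    /* user-space source of the __u32/__u64 fixed-width types */

    __u32 ok_flags;             /* always available to user space */
    /* "u32" is a kernel-internal typedef with no uapi counterpart; code using
     * it only builds when kernel-tree headers happen to be on the include
     * path, which the standalone GitHub build of libbpf does not have.
     */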
Fixes: 94ff9ebb49a5 ("libbpf: Fix compatibility for kernels without need_wakeup") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Björn Töpel Cc: Magnus Karlsson Link: https://lore.kernel.org/bpf/20191029055953.2461336-1-andriin@fb.com --- tools/lib/bpf/xsk.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'tools') diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c index d54111133123..74d84f36a5b2 100644 --- a/tools/lib/bpf/xsk.c +++ b/tools/lib/bpf/xsk.c @@ -161,22 +161,22 @@ static void xsk_mmap_offsets_v1(struct xdp_mmap_offsets *off) off->rx.producer = off_v1.rx.producer; off->rx.consumer = off_v1.rx.consumer; off->rx.desc = off_v1.rx.desc; - off->rx.flags = off_v1.rx.consumer + sizeof(u32); + off->rx.flags = off_v1.rx.consumer + sizeof(__u32); off->tx.producer = off_v1.tx.producer; off->tx.consumer = off_v1.tx.consumer; off->tx.desc = off_v1.tx.desc; - off->tx.flags = off_v1.tx.consumer + sizeof(u32); + off->tx.flags = off_v1.tx.consumer + sizeof(__u32); off->fr.producer = off_v1.fr.producer; off->fr.consumer = off_v1.fr.consumer; off->fr.desc = off_v1.fr.desc; - off->fr.flags = off_v1.fr.consumer + sizeof(u32); + off->fr.flags = off_v1.fr.consumer + sizeof(__u32); off->cr.producer = off_v1.cr.producer; off->cr.consumer = off_v1.cr.consumer; off->cr.desc = off_v1.cr.desc; - off->cr.flags = off_v1.cr.consumer + sizeof(u32); + off->cr.flags = off_v1.cr.consumer + sizeof(__u32); } static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off) -- cgit v1.2.3 From 9ffccb76062ab882e45bbfb9d370e366c27fa04b Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 29 Oct 2019 15:30:27 +0100 Subject: selftests/bpf: Test narrow load from bpf_sysctl.write There are tests for full and narrows loads from bpf_sysctl.file_pos, but for bpf_sysctl.write only full load is tested. Add the missing test. 
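For reference, a user-space sketch of what the narrow load boils down to (the struct mirrors the layout of struct bpf_sysctl from the uapi header; function and type names are illustrative):

    #include <endian.h>
    #include <linux/types.h>

    struct sysctl_ctx { __u32 write; __u32 file_pos; }; /* mirrors struct bpf_sysctl */

    static __u16 narrow_load_write(const struct sysctl_ctx *ctx)
    {
            /* a 2-byte load of the 4-byte 'write' field: the half-word that
             * carries the value sits at offset 0 on little-endian but at
             * offset 2 on big-endian, hence the #if in the test below
             */
    #if __BYTE_ORDER == __LITTLE_ENDIAN
            return *(const __u16 *)&ctx->write;
    #else
            return *(const __u16 *)((const char *)&ctx->write + 2);
    #endif
    }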
Suggested-by: Andrey Ignatov Signed-off-by: Ilya Leoshkevich Signed-off-by: Daniel Borkmann Acked-by: Andrey Ignatov Link: https://lore.kernel.org/bpf/20191029143027.28681-1-iii@linux.ibm.com --- tools/testing/selftests/bpf/test_sysctl.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/test_sysctl.c b/tools/testing/selftests/bpf/test_sysctl.c index a320e3844b17..7aff907003d3 100644 --- a/tools/testing/selftests/bpf/test_sysctl.c +++ b/tools/testing/selftests/bpf/test_sysctl.c @@ -120,6 +120,29 @@ static struct sysctl_test tests[] = { .newval = "(none)", /* same as default, should fail anyway */ .result = OP_EPERM, }, + { + .descr = "ctx:write sysctl:write read ok narrow", + .insns = { + /* u64 w = (u16)write & 1; */ +#if __BYTE_ORDER == __LITTLE_ENDIAN + BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_1, + offsetof(struct bpf_sysctl, write)), +#else + BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_1, + offsetof(struct bpf_sysctl, write) + 2), +#endif + BPF_ALU64_IMM(BPF_AND, BPF_REG_7, 1), + /* return 1 - w; */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_7), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SYSCTL, + .sysctl = "kernel/domainname", + .open_flags = O_WRONLY, + .newval = "(none)", /* same as default, should fail anyway */ + .result = OP_EPERM, + }, { .descr = "ctx:write sysctl:read write reject", .insns = { -- cgit v1.2.3 From 12a8654b2e5aab37b22c9608d008f9f0565862c0 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 30 Oct 2019 15:32:12 -0700 Subject: libbpf: Add support for prog_tracing Cleanup libbpf from expected_attach_type == attach_btf_id hack and introduce BPF_PROG_TYPE_TRACING. Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20191030223212.953010-3-ast@kernel.org --- tools/include/uapi/linux/bpf.h | 2 ++ tools/lib/bpf/bpf.c | 8 ++--- tools/lib/bpf/bpf.h | 5 ++- tools/lib/bpf/libbpf.c | 79 ++++++++++++++++++++++++++++++------------ tools/lib/bpf/libbpf.h | 2 ++ tools/lib/bpf/libbpf.map | 2 ++ tools/lib/bpf/libbpf_probes.c | 1 + 7 files changed, 71 insertions(+), 28 deletions(-) (limited to 'tools') diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 4af8b0819a32..a6bf19dabaab 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -173,6 +173,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, BPF_PROG_TYPE_CGROUP_SOCKOPT, + BPF_PROG_TYPE_TRACING, }; enum bpf_attach_type { @@ -199,6 +200,7 @@ enum bpf_attach_type { BPF_CGROUP_UDP6_RECVMSG, BPF_CGROUP_GETSOCKOPT, BPF_CGROUP_SETSOCKOPT, + BPF_TRACE_RAW_TP, __MAX_BPF_ATTACH_TYPE }; diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 79046067720f..ca0d635b1d5e 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -228,9 +228,10 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, memset(&attr, 0, sizeof(attr)); attr.prog_type = load_attr->prog_type; attr.expected_attach_type = load_attr->expected_attach_type; - if (attr.prog_type == BPF_PROG_TYPE_RAW_TRACEPOINT) - /* expected_attach_type is ignored for tracing progs */ - attr.attach_btf_id = attr.expected_attach_type; + if (attr.prog_type == BPF_PROG_TYPE_TRACING) + attr.attach_btf_id = load_attr->attach_btf_id; + else + attr.prog_ifindex = load_attr->prog_ifindex; attr.insn_cnt = (__u32)load_attr->insns_cnt; attr.insns = 
ptr_to_u64(load_attr->insns); attr.license = ptr_to_u64(load_attr->license); @@ -245,7 +246,6 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, } attr.kern_version = load_attr->kern_version; - attr.prog_ifindex = load_attr->prog_ifindex; attr.prog_btf_fd = load_attr->prog_btf_fd; attr.func_info_rec_size = load_attr->func_info_rec_size; attr.func_info_cnt = load_attr->func_info_cnt; diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 0db01334740f..1c53bc5b4b3c 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -78,7 +78,10 @@ struct bpf_load_program_attr { size_t insns_cnt; const char *license; __u32 kern_version; - __u32 prog_ifindex; + union { + __u32 prog_ifindex; + __u32 attach_btf_id; + }; __u32 prog_btf_fd; __u32 func_info_rec_size; const void *func_info; diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 5d15cc4dfcd6..c80f316f1320 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -188,6 +188,7 @@ struct bpf_program { bpf_program_clear_priv_t clear_priv; enum bpf_attach_type expected_attach_type; + __u32 attach_btf_id; void *func_info; __u32 func_info_rec_size; __u32 func_info_cnt; @@ -3446,6 +3447,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, load_attr.line_info_cnt = prog->line_info_cnt; load_attr.log_level = prog->log_level; load_attr.prog_flags = prog->prog_flags; + load_attr.attach_btf_id = prog->attach_btf_id; retry_load: log_buf = malloc(log_buf_size); @@ -3607,6 +3609,8 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level) return 0; } +static int libbpf_attach_btf_id_by_name(const char *name, __u32 *btf_id); + static struct bpf_object * __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, struct bpf_object_open_opts *opts) @@ -3656,6 +3660,7 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, bpf_object__for_each_program(prog, obj) { enum bpf_prog_type prog_type; enum bpf_attach_type attach_type; + __u32 btf_id; err = libbpf_prog_type_by_name(prog->section_name, &prog_type, &attach_type); @@ -3667,6 +3672,12 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, bpf_program__set_type(prog, prog_type); bpf_program__set_expected_attach_type(prog, attach_type); + if (prog_type == BPF_PROG_TYPE_TRACING) { + err = libbpf_attach_btf_id_by_name(prog->section_name, &btf_id); + if (err) + goto out; + prog->attach_btf_id = btf_id; + } } return obj; @@ -4518,6 +4529,7 @@ BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT); BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT); BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP); BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT); +BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING); enum bpf_attach_type bpf_program__get_expected_attach_type(struct bpf_program *prog) @@ -4546,7 +4558,8 @@ void bpf_program__set_expected_attach_type(struct bpf_program *prog, BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, 0, eatype) /* Programs that use BTF to identify attach point */ -#define BPF_PROG_BTF(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 1, 0) +#define BPF_PROG_BTF(string, ptype, eatype) \ + BPF_PROG_SEC_IMPL(string, ptype, eatype, 0, 1, 0) /* Programs that can be attached but attach type can't be identified by section * name. Kept for backward compatibility. 
@@ -4573,7 +4586,8 @@ static const struct { BPF_PROG_SEC("tp/", BPF_PROG_TYPE_TRACEPOINT), BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT), BPF_PROG_SEC("raw_tp/", BPF_PROG_TYPE_RAW_TRACEPOINT), - BPF_PROG_BTF("tp_btf/", BPF_PROG_TYPE_RAW_TRACEPOINT), + BPF_PROG_BTF("tp_btf/", BPF_PROG_TYPE_TRACING, + BPF_TRACE_RAW_TP), BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP), BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT), BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN), @@ -4678,27 +4692,6 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, continue; *prog_type = section_names[i].prog_type; *expected_attach_type = section_names[i].expected_attach_type; - if (section_names[i].is_attach_btf) { - struct btf *btf = bpf_core_find_kernel_btf(); - char raw_tp_btf_name[128] = "btf_trace_"; - char *dst = raw_tp_btf_name + sizeof("btf_trace_") - 1; - int ret; - - if (IS_ERR(btf)) { - pr_warn("vmlinux BTF is not found\n"); - return -EINVAL; - } - /* prepend "btf_trace_" prefix per kernel convention */ - strncat(dst, name + section_names[i].len, - sizeof(raw_tp_btf_name) - sizeof("btf_trace_")); - ret = btf__find_by_name(btf, raw_tp_btf_name); - btf__free(btf); - if (ret <= 0) { - pr_warn("%s is not found in vmlinux BTF\n", dst); - return -EINVAL; - } - *expected_attach_type = ret; - } return 0; } pr_warn("failed to guess program type based on ELF section name '%s'\n", name); @@ -4711,6 +4704,46 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, return -ESRCH; } +#define BTF_PREFIX "btf_trace_" +static int libbpf_attach_btf_id_by_name(const char *name, __u32 *btf_id) +{ + struct btf *btf = bpf_core_find_kernel_btf(); + char raw_tp_btf_name[128] = BTF_PREFIX; + char *dst = raw_tp_btf_name + sizeof(BTF_PREFIX) - 1; + int ret, i, err = -EINVAL; + + if (IS_ERR(btf)) { + pr_warn("vmlinux BTF is not found\n"); + return -EINVAL; + } + + if (!name) + goto out; + + for (i = 0; i < ARRAY_SIZE(section_names); i++) { + if (!section_names[i].is_attach_btf) + continue; + if (strncmp(name, section_names[i].sec, section_names[i].len)) + continue; + /* prepend "btf_trace_" prefix per kernel convention */ + strncat(dst, name + section_names[i].len, + sizeof(raw_tp_btf_name) - sizeof(BTF_PREFIX)); + ret = btf__find_by_name(btf, raw_tp_btf_name); + if (ret <= 0) { + pr_warn("%s is not found in vmlinux BTF\n", dst); + goto out; + } + *btf_id = ret; + err = 0; + goto out; + } + pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name); + err = -ESRCH; +out: + btf__free(btf); + return err; +} + int libbpf_attach_type_by_name(const char *name, enum bpf_attach_type *attach_type) { diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index c63e2ff84abc..2b126ee5e173 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -307,6 +307,7 @@ LIBBPF_API int bpf_program__set_sched_cls(struct bpf_program *prog); LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog); LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog); LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog); +LIBBPF_API int bpf_program__set_tracing(struct bpf_program *prog); LIBBPF_API enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog); LIBBPF_API void bpf_program__set_type(struct bpf_program *prog, @@ -326,6 +327,7 @@ LIBBPF_API bool bpf_program__is_sched_cls(const struct bpf_program *prog); LIBBPF_API bool bpf_program__is_sched_act(const struct bpf_program *prog); LIBBPF_API bool bpf_program__is_xdp(const struct 
bpf_program *prog); LIBBPF_API bool bpf_program__is_perf_event(const struct bpf_program *prog); +LIBBPF_API bool bpf_program__is_tracing(const struct bpf_program *prog); /* * No need for __attribute__((packed)), all members of 'bpf_map_def' diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index d1473ea4d7a5..69dded5af00b 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -197,4 +197,6 @@ LIBBPF_0.0.6 { bpf_object__open_mem; bpf_program__get_expected_attach_type; bpf_program__get_type; + bpf_program__is_tracing; + bpf_program__set_tracing; } LIBBPF_0.0.5; diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c index 4b0b0364f5fc..a9eb8b322671 100644 --- a/tools/lib/bpf/libbpf_probes.c +++ b/tools/lib/bpf/libbpf_probes.c @@ -102,6 +102,7 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns, case BPF_PROG_TYPE_FLOW_DISSECTOR: case BPF_PROG_TYPE_CGROUP_SYSCTL: case BPF_PROG_TYPE_CGROUP_SOCKOPT: + case BPF_PROG_TYPE_TRACING: default: break; } -- cgit v1.2.3 From 75b0bfd2e1a7d0c698b617f5e5ceaf056bdcaef7 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 31 Oct 2019 17:51:27 -0700 Subject: Revert "selftests: bpf: Don't try to read files without read permission" This reverts commit 5bc60de50dfe ("selftests: bpf: Don't try to read files without read permission"). Quoted commit does not work at all, and was never tested. Script requires root permissions (and tests for them) and os.access() will always return true for root. The correct fix is needed in the bpf tree, so let's just revert and save ourselves the merge conflict. Signed-off-by: Jakub Kicinski Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Cc: Jiri Pirko Link: https://lore.kernel.org/bpf/20191101005127.1355-1-jakub.kicinski@netronome.com --- tools/testing/selftests/bpf/test_offload.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py index c44c650bde3a..15a666329a34 100755 --- a/tools/testing/selftests/bpf/test_offload.py +++ b/tools/testing/selftests/bpf/test_offload.py @@ -312,7 +312,7 @@ class DebugfsDir: if f == "ports": continue p = os.path.join(path, f) - if os.path.isfile(p) and os.access(p, os.R_OK): + if os.path.isfile(p): _, out = cmd('cat %s/%s' % (path, f)) dfs[f] = out.strip() elif os.path.isdir(p): -- cgit v1.2.3 From d1b4574a4b86565325ef2e545eda8dfc9aa07c60 Mon Sep 17 00:00:00 2001 From: Toke Høiland-Jørgensen Date: Sat, 2 Nov 2019 12:09:37 +0100 Subject: libbpf: Fix error handling in bpf_map__reuse_fd() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit bpf_map__reuse_fd() was calling close() in the error path before returning an error value based on errno. However, close can change errno, so that can lead to potentially misleading error messages. Instead, explicitly store errno in the err variable before each goto. 
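The pattern being applied, as a stand-alone sketch (the function and the particular syscalls are illustrative, not part of the patch):

    #include <errno.h>
    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>

    static int open_and_configure(const char *path)
    {
            int fd, err;

            fd = open(path, O_RDONLY | O_CLOEXEC);
            if (fd < 0)
                    return -errno;

            if (fchmod(fd, 0600)) {
                    err = -errno;   /* save before close(), which may overwrite errno */
                    close(fd);
                    return err;
            }
            return fd;
    }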
Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/157269297769.394725.12634985106772698611.stgit@toke.dk --- tools/lib/bpf/libbpf.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'tools') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index c80f316f1320..36152c8729ea 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1917,16 +1917,22 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd) return -errno; new_fd = open("/", O_RDONLY | O_CLOEXEC); - if (new_fd < 0) + if (new_fd < 0) { + err = -errno; goto err_free_new_name; + } new_fd = dup3(fd, new_fd, O_CLOEXEC); - if (new_fd < 0) + if (new_fd < 0) { + err = -errno; goto err_close_new_fd; + } err = zclose(map->fd); - if (err) + if (err) { + err = -errno; goto err_close_new_fd; + } free(map->name); map->fd = new_fd; @@ -1945,7 +1951,7 @@ err_close_new_fd: close(new_fd); err_free_new_name: free(new_name); - return -errno; + return err; } int bpf_map__resize(struct bpf_map *map, __u32 max_entries) -- cgit v1.2.3 From 4580b25fcee5347327aaffcec31c615ec28a889a Mon Sep 17 00:00:00 2001 From: Toke Høiland-Jørgensen Date: Sat, 2 Nov 2019 12:09:38 +0100 Subject: libbpf: Store map pin path and status in struct bpf_map MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Support storing and setting a pin path in struct bpf_map, which can be used for automatic pinning. Also store the pin status so we can avoid attempts to re-pin a map that has already been pinned (or reused from a previous pinning). The behaviour of bpf_object__{un,}pin_maps() is changed so that if it is called with a NULL path argument (which was previously illegal), it will (un)pin only those maps that have a pin_path set. 
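A usage sketch of the resulting API (error handling omitted; the object, map and path names are illustrative):

    struct bpf_object *obj = bpf_object__open("prog.o");
    struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");

    /* record where this one map should live in bpffs */
    bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map");

    bpf_object__load(obj);

    /* NULL path: pin only the maps that have a pin_path set */
    bpf_object__pin_maps(obj, NULL);

    /* NULL path again unpins that same set of maps */
    bpf_object__unpin_maps(obj, NULL);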
Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/157269297876.394725.14782206533681896279.stgit@toke.dk --- tools/lib/bpf/libbpf.c | 164 +++++++++++++++++++++++++++++++++++------------ tools/lib/bpf/libbpf.h | 8 +++ tools/lib/bpf/libbpf.map | 3 + 3 files changed, 134 insertions(+), 41 deletions(-) (limited to 'tools') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 36152c8729ea..22f6dd18de6f 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -227,6 +227,8 @@ struct bpf_map { void *priv; bpf_map_clear_priv_t clear_priv; enum libbpf_map_type libbpf_type; + char *pin_path; + bool pinned; }; struct bpf_secdata { @@ -4036,47 +4038,119 @@ int bpf_map__pin(struct bpf_map *map, const char *path) char *cp, errmsg[STRERR_BUFSIZE]; int err; - err = check_path(path); - if (err) - return err; - if (map == NULL) { pr_warn("invalid map pointer\n"); return -EINVAL; } - if (bpf_obj_pin(map->fd, path)) { - cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); - pr_warn("failed to pin map: %s\n", cp); - return -errno; + if (map->pin_path) { + if (path && strcmp(path, map->pin_path)) { + pr_warn("map '%s' already has pin path '%s' different from '%s'\n", + bpf_map__name(map), map->pin_path, path); + return -EINVAL; + } else if (map->pinned) { + pr_debug("map '%s' already pinned at '%s'; not re-pinning\n", + bpf_map__name(map), map->pin_path); + return 0; + } + } else { + if (!path) { + pr_warn("missing a path to pin map '%s' at\n", + bpf_map__name(map)); + return -EINVAL; + } else if (map->pinned) { + pr_warn("map '%s' already pinned\n", bpf_map__name(map)); + return -EEXIST; + } + + map->pin_path = strdup(path); + if (!map->pin_path) { + err = -errno; + goto out_err; + } } - pr_debug("pinned map '%s'\n", path); + err = check_path(map->pin_path); + if (err) + return err; + + if (bpf_obj_pin(map->fd, map->pin_path)) { + err = -errno; + goto out_err; + } + + map->pinned = true; + pr_debug("pinned map '%s'\n", map->pin_path); return 0; + +out_err: + cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); + pr_warn("failed to pin map: %s\n", cp); + return err; } int bpf_map__unpin(struct bpf_map *map, const char *path) { int err; - err = check_path(path); - if (err) - return err; - if (map == NULL) { pr_warn("invalid map pointer\n"); return -EINVAL; } + if (map->pin_path) { + if (path && strcmp(path, map->pin_path)) { + pr_warn("map '%s' already has pin path '%s' different from '%s'\n", + bpf_map__name(map), map->pin_path, path); + return -EINVAL; + } + path = map->pin_path; + } else if (!path) { + pr_warn("no path to unpin map '%s' from\n", + bpf_map__name(map)); + return -EINVAL; + } + + err = check_path(path); + if (err) + return err; + err = unlink(path); if (err != 0) return -errno; - pr_debug("unpinned map '%s'\n", path); + + map->pinned = false; + pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path); return 0; } +int bpf_map__set_pin_path(struct bpf_map *map, const char *path) +{ + char *new = NULL; + + if (path) { + new = strdup(path); + if (!new) + return -errno; + } + + free(map->pin_path); + map->pin_path = new; + return 0; +} + +const char *bpf_map__get_pin_path(const struct bpf_map *map) +{ + return map->pin_path; +} + +bool bpf_map__is_pinned(const struct bpf_map *map) +{ + return map->pinned; +} + int bpf_object__pin_maps(struct bpf_object *obj, const char *path) { struct bpf_map *map; @@ -4095,20 +4169,27 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char 
*path) return err; bpf_object__for_each_map(map, obj) { + char *pin_path = NULL; char buf[PATH_MAX]; - int len; - len = snprintf(buf, PATH_MAX, "%s/%s", path, - bpf_map__name(map)); - if (len < 0) { - err = -EINVAL; - goto err_unpin_maps; - } else if (len >= PATH_MAX) { - err = -ENAMETOOLONG; - goto err_unpin_maps; + if (path) { + int len; + + len = snprintf(buf, PATH_MAX, "%s/%s", path, + bpf_map__name(map)); + if (len < 0) { + err = -EINVAL; + goto err_unpin_maps; + } else if (len >= PATH_MAX) { + err = -ENAMETOOLONG; + goto err_unpin_maps; + } + pin_path = buf; + } else if (!map->pin_path) { + continue; } - err = bpf_map__pin(map, buf); + err = bpf_map__pin(map, pin_path); if (err) goto err_unpin_maps; } @@ -4117,17 +4198,10 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path) err_unpin_maps: while ((map = bpf_map__prev(map, obj))) { - char buf[PATH_MAX]; - int len; - - len = snprintf(buf, PATH_MAX, "%s/%s", path, - bpf_map__name(map)); - if (len < 0) - continue; - else if (len >= PATH_MAX) + if (!map->pin_path) continue; - bpf_map__unpin(map, buf); + bpf_map__unpin(map, NULL); } return err; @@ -4142,17 +4216,24 @@ int bpf_object__unpin_maps(struct bpf_object *obj, const char *path) return -ENOENT; bpf_object__for_each_map(map, obj) { + char *pin_path = NULL; char buf[PATH_MAX]; - int len; - len = snprintf(buf, PATH_MAX, "%s/%s", path, - bpf_map__name(map)); - if (len < 0) - return -EINVAL; - else if (len >= PATH_MAX) - return -ENAMETOOLONG; + if (path) { + int len; + + len = snprintf(buf, PATH_MAX, "%s/%s", path, + bpf_map__name(map)); + if (len < 0) + return -EINVAL; + else if (len >= PATH_MAX) + return -ENAMETOOLONG; + pin_path = buf; + } else if (!map->pin_path) { + continue; + } - err = bpf_map__unpin(map, buf); + err = bpf_map__unpin(map, pin_path); if (err) return err; } @@ -4277,6 +4358,7 @@ void bpf_object__close(struct bpf_object *obj) for (i = 0; i < obj->nr_maps; i++) { zfree(&obj->maps[i].name); + zfree(&obj->maps[i].pin_path); if (obj->maps[i].clear_priv) obj->maps[i].clear_priv(&obj->maps[i], obj->maps[i].priv); diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 2b126ee5e173..e71773a6bfdf 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -124,6 +124,11 @@ int bpf_object__section_size(const struct bpf_object *obj, const char *name, __u32 *size); int bpf_object__variable_offset(const struct bpf_object *obj, const char *name, __u32 *off); + +/* pin_maps and unpin_maps can both be called with a NULL path, in which case + * they will use the pin_path attribute of each map (and ignore all maps that + * don't have a pin_path set). 
+ */ LIBBPF_API int bpf_object__pin_maps(struct bpf_object *obj, const char *path); LIBBPF_API int bpf_object__unpin_maps(struct bpf_object *obj, const char *path); @@ -387,6 +392,9 @@ LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries); LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map); LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map); LIBBPF_API void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex); +LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path); +LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map); +LIBBPF_API bool bpf_map__is_pinned(const struct bpf_map *map); LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path); LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path); diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 69dded5af00b..86173cbb159d 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -193,6 +193,9 @@ LIBBPF_0.0.5 { LIBBPF_0.0.6 { global: + bpf_map__get_pin_path; + bpf_map__is_pinned; + bpf_map__set_pin_path; bpf_object__open_file; bpf_object__open_mem; bpf_program__get_expected_attach_type; -- cgit v1.2.3 From 196f8487f51ee6e2a46f51e10ac3f4ca67574ba9 Mon Sep 17 00:00:00 2001 From: Toke Høiland-Jørgensen Date: Sat, 2 Nov 2019 12:09:39 +0100 Subject: libbpf: Move directory creation into _pin() functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The existing pin_*() functions all try to create the parent directory before pinning. Move this check into the per-object _pin() functions instead. This ensures consistent behaviour when auto-pinning is added (which doesn't go through the top-level pin_maps() function), at the cost of a few more calls to mkdir(). 
Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/157269297985.394725.5882630952992598610.stgit@toke.dk --- tools/lib/bpf/libbpf.c | 61 ++++++++++++++++++++++++++++---------------------- 1 file changed, 34 insertions(+), 27 deletions(-) (limited to 'tools') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 22f6dd18de6f..fa15f3b315ba 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -3816,6 +3816,28 @@ int bpf_object__load(struct bpf_object *obj) return bpf_object__load_xattr(&attr); } +static int make_parent_dir(const char *path) +{ + char *cp, errmsg[STRERR_BUFSIZE]; + char *dname, *dir; + int err = 0; + + dname = strdup(path); + if (dname == NULL) + return -ENOMEM; + + dir = dirname(dname); + if (mkdir(dir, 0700) && errno != EEXIST) + err = -errno; + + free(dname); + if (err) { + cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); + pr_warn("failed to mkdir %s: %s\n", path, cp); + } + return err; +} + static int check_path(const char *path) { char *cp, errmsg[STRERR_BUFSIZE]; @@ -3852,6 +3874,10 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path, char *cp, errmsg[STRERR_BUFSIZE]; int err; + err = make_parent_dir(path); + if (err) + return err; + err = check_path(path); if (err) return err; @@ -3905,25 +3931,14 @@ int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, return 0; } -static int make_dir(const char *path) -{ - char *cp, errmsg[STRERR_BUFSIZE]; - int err = 0; - - if (mkdir(path, 0700) && errno != EEXIST) - err = -errno; - - if (err) { - cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); - pr_warn("failed to mkdir %s: %s\n", path, cp); - } - return err; -} - int bpf_program__pin(struct bpf_program *prog, const char *path) { int i, err; + err = make_parent_dir(path); + if (err) + return err; + err = check_path(path); if (err) return err; @@ -3944,10 +3959,6 @@ int bpf_program__pin(struct bpf_program *prog, const char *path) return bpf_program__pin_instance(prog, path, 0); } - err = make_dir(path); - if (err) - return err; - for (i = 0; i < prog->instances.nr; i++) { char buf[PATH_MAX]; int len; @@ -4070,6 +4081,10 @@ int bpf_map__pin(struct bpf_map *map, const char *path) } } + err = make_parent_dir(map->pin_path); + if (err) + return err; + err = check_path(map->pin_path); if (err) return err; @@ -4164,10 +4179,6 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path) return -ENOENT; } - err = make_dir(path); - if (err) - return err; - bpf_object__for_each_map(map, obj) { char *pin_path = NULL; char buf[PATH_MAX]; @@ -4254,10 +4265,6 @@ int bpf_object__pin_programs(struct bpf_object *obj, const char *path) return -ENOENT; } - err = make_dir(path); - if (err) - return err; - bpf_object__for_each_program(prog, obj) { char buf[PATH_MAX]; int len; -- cgit v1.2.3 From 57a00f41644f20b11c12a27061d814655f633544 Mon Sep 17 00:00:00 2001 From: Toke Høiland-Jørgensen Date: Sat, 2 Nov 2019 12:09:41 +0100 Subject: libbpf: Add auto-pinning of maps when loading BPF objects MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds support to libbpf for setting map pinning information as part of the BTF map declaration, to get automatic map pinning (and reuse) on load. The pinning type currently only supports a single PIN_BY_NAME mode, where each map will be pinned by its name in a path that can be overridden, but defaults to /sys/fs/bpf. 
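Concretely, a map opts in from the BPF program side via the new enum in bpf_helpers.h, and the default /sys/fs/bpf root can be overridden when the object is opened (a sketch; map, object file and path names are illustrative, and the remaining behaviour is described below):

    struct {
            __uint(type, BPF_MAP_TYPE_HASH);
            __uint(max_entries, 16);
            __type(key, __u32);
            __type(value, __u64);
            __uint(pinning, LIBBPF_PIN_BY_NAME);
    } my_map SEC(".maps");

and on the user-space side:

    DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
            .pin_root_path = "/sys/fs/bpf/myapp",
    );
    struct bpf_object *obj = bpf_object__open_file("prog.o", &opts);

    /* pins my_map at /sys/fs/bpf/myapp/my_map, or reuses a compatible
     * map already pinned there
     */
    bpf_object__load(obj);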
Since auto-pinning only does something if any maps actually have a 'pinning' BTF attribute set, we default the new option to enabled, on the assumption that seamless pinning is what most callers want. When a map has a pin_path set at load time, libbpf will compare the map pinned at that location (if any), and if the attributes match, will re-use that map instead of creating a new one. If no existing map is found, the newly created map will instead be pinned at the location. Programs wanting to customise the pinning can override the pinning paths using bpf_map__set_pin_path() before calling bpf_object__load() (including setting it to NULL to disable pinning of a particular map). Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/157269298092.394725.3966306029218559681.stgit@toke.dk --- tools/lib/bpf/bpf_helpers.h | 6 ++ tools/lib/bpf/libbpf.c | 146 +++++++++++++++++++++++++++++++++++++++++--- tools/lib/bpf/libbpf.h | 13 +++- 3 files changed, 156 insertions(+), 9 deletions(-) (limited to 'tools') diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h index 2203595f38c3..0c7d28292898 100644 --- a/tools/lib/bpf/bpf_helpers.h +++ b/tools/lib/bpf/bpf_helpers.h @@ -38,4 +38,10 @@ struct bpf_map_def { unsigned int map_flags; }; +enum libbpf_pin_type { + LIBBPF_PIN_NONE, + /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */ + LIBBPF_PIN_BY_NAME, +}; + #endif diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index fa15f3b315ba..7aa2a2a22cef 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1093,10 +1093,32 @@ static bool get_map_field_int(const char *map_name, const struct btf *btf, return true; } +static int build_map_pin_path(struct bpf_map *map, const char *path) +{ + char buf[PATH_MAX]; + int err, len; + + if (!path) + path = "/sys/fs/bpf"; + + len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map)); + if (len < 0) + return -EINVAL; + else if (len >= PATH_MAX) + return -ENAMETOOLONG; + + err = bpf_map__set_pin_path(map, buf); + if (err) + return err; + + return 0; +} + static int bpf_object__init_user_btf_map(struct bpf_object *obj, const struct btf_type *sec, int var_idx, int sec_idx, - const Elf_Data *data, bool strict) + const Elf_Data *data, bool strict, + const char *pin_root_path) { const struct btf_type *var, *def, *t; const struct btf_var_secinfo *vi; @@ -1271,6 +1293,30 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj, } map->def.value_size = sz; map->btf_value_type_id = t->type; + } else if (strcmp(name, "pinning") == 0) { + __u32 val; + int err; + + if (!get_map_field_int(map_name, obj->btf, def, m, + &val)) + return -EINVAL; + pr_debug("map '%s': found pinning = %u.\n", + map_name, val); + + if (val != LIBBPF_PIN_NONE && + val != LIBBPF_PIN_BY_NAME) { + pr_warn("map '%s': invalid pinning value %u.\n", + map_name, val); + return -EINVAL; + } + if (val == LIBBPF_PIN_BY_NAME) { + err = build_map_pin_path(map, pin_root_path); + if (err) { + pr_warn("map '%s': couldn't build pin path.\n", + map_name); + return err; + } + } } else { if (strict) { pr_warn("map '%s': unknown field '%s'.\n", @@ -1290,7 +1336,8 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj, return 0; } -static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict) +static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict, + const char *pin_root_path) { const struct btf_type *sec = NULL; int nr_types, i, 
vlen, err; @@ -1332,7 +1379,7 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict) for (i = 0; i < vlen; i++) { err = bpf_object__init_user_btf_map(obj, sec, i, obj->efile.btf_maps_shndx, - data, strict); + data, strict, pin_root_path); if (err) return err; } @@ -1340,7 +1387,8 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict) return 0; } -static int bpf_object__init_maps(struct bpf_object *obj, bool relaxed_maps) +static int bpf_object__init_maps(struct bpf_object *obj, bool relaxed_maps, + const char *pin_root_path) { bool strict = !relaxed_maps; int err; @@ -1349,7 +1397,7 @@ static int bpf_object__init_maps(struct bpf_object *obj, bool relaxed_maps) if (err) return err; - err = bpf_object__init_user_btf_maps(obj, strict); + err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path); if (err) return err; @@ -1538,7 +1586,8 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) return 0; } -static int bpf_object__elf_collect(struct bpf_object *obj, bool relaxed_maps) +static int bpf_object__elf_collect(struct bpf_object *obj, bool relaxed_maps, + const char *pin_root_path) { Elf *elf = obj->efile.elf; GElf_Ehdr *ep = &obj->efile.ehdr; @@ -1673,7 +1722,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, bool relaxed_maps) } err = bpf_object__init_btf(obj, btf_data, btf_ext_data); if (!err) - err = bpf_object__init_maps(obj, relaxed_maps); + err = bpf_object__init_maps(obj, relaxed_maps, pin_root_path); if (!err) err = bpf_object__sanitize_and_load_btf(obj); if (!err) @@ -2129,6 +2178,66 @@ bpf_object__probe_caps(struct bpf_object *obj) return 0; } +static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd) +{ + struct bpf_map_info map_info = {}; + char msg[STRERR_BUFSIZE]; + __u32 map_info_len; + + map_info_len = sizeof(map_info); + + if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) { + pr_warn("failed to get map info for map FD %d: %s\n", + map_fd, libbpf_strerror_r(errno, msg, sizeof(msg))); + return false; + } + + return (map_info.type == map->def.type && + map_info.key_size == map->def.key_size && + map_info.value_size == map->def.value_size && + map_info.max_entries == map->def.max_entries && + map_info.map_flags == map->def.map_flags); +} + +static int +bpf_object__reuse_map(struct bpf_map *map) +{ + char *cp, errmsg[STRERR_BUFSIZE]; + int err, pin_fd; + + pin_fd = bpf_obj_get(map->pin_path); + if (pin_fd < 0) { + err = -errno; + if (err == -ENOENT) { + pr_debug("found no pinned map to reuse at '%s'\n", + map->pin_path); + return 0; + } + + cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); + pr_warn("couldn't retrieve pinned map '%s': %s\n", + map->pin_path, cp); + return err; + } + + if (!map_is_reuse_compat(map, pin_fd)) { + pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n", + map->pin_path); + close(pin_fd); + return -EINVAL; + } + + err = bpf_map__reuse_fd(map, pin_fd); + if (err) { + close(pin_fd); + return err; + } + map->pinned = true; + pr_debug("reused pinned map at '%s'\n", map->pin_path); + + return 0; +} + static int bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) { @@ -2171,6 +2280,15 @@ bpf_object__create_maps(struct bpf_object *obj) char *cp, errmsg[STRERR_BUFSIZE]; int *pfd = &map->fd; + if (map->pin_path) { + err = bpf_object__reuse_map(map); + if (err) { + pr_warn("error reusing pinned map %s\n", + map->name); + return err; + } + } + if (map->fd >= 0) { pr_debug("skip map create (preset) %s: 
fd=%d\n", map->name, map->fd); @@ -2249,6 +2367,15 @@ err_out: } } + if (map->pin_path && !map->pinned) { + err = bpf_map__pin(map, NULL); + if (err) { + pr_warn("failed to auto-pin map name '%s' at '%s'\n", + map->name, map->pin_path); + return err; + } + } + pr_debug("created map %s: fd=%d\n", map->name, *pfd); } @@ -3623,6 +3750,7 @@ static struct bpf_object * __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, struct bpf_object_open_opts *opts) { + const char *pin_root_path; struct bpf_program *prog; struct bpf_object *obj; const char *obj_name; @@ -3657,11 +3785,13 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, obj->relaxed_core_relocs = OPTS_GET(opts, relaxed_core_relocs, false); relaxed_maps = OPTS_GET(opts, relaxed_maps, false); + pin_root_path = OPTS_GET(opts, pin_root_path, NULL); CHECK_ERR(bpf_object__elf_init(obj), err, out); CHECK_ERR(bpf_object__check_endianness(obj), err, out); CHECK_ERR(bpf_object__probe_caps(obj), err, out); - CHECK_ERR(bpf_object__elf_collect(obj, relaxed_maps), err, out); + CHECK_ERR(bpf_object__elf_collect(obj, relaxed_maps, pin_root_path), + err, out); CHECK_ERR(bpf_object__collect_reloc(obj), err, out); bpf_object__elf_finish(obj); diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index e71773a6bfdf..6ddc0419337b 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -103,8 +103,13 @@ struct bpf_object_open_opts { bool relaxed_maps; /* process CO-RE relocations non-strictly, allowing them to fail */ bool relaxed_core_relocs; + /* maps that set the 'pinning' attribute in their definition will have + * their pin_path attribute set to a file in this directory, and be + * auto-pinned to that path on load; defaults to "/sys/fs/bpf". + */ + const char *pin_root_path; }; -#define bpf_object_open_opts__last_field relaxed_core_relocs +#define bpf_object_open_opts__last_field pin_root_path LIBBPF_API struct bpf_object *bpf_object__open(const char *path); LIBBPF_API struct bpf_object * @@ -125,6 +130,12 @@ int bpf_object__section_size(const struct bpf_object *obj, const char *name, int bpf_object__variable_offset(const struct bpf_object *obj, const char *name, __u32 *off); +enum libbpf_pin_type { + LIBBPF_PIN_NONE, + /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */ + LIBBPF_PIN_BY_NAME, +}; + /* pin_maps and unpin_maps can both be called with a NULL path, in which case * they will use the pin_path attribute of each map (and ignore all maps that * don't have a pin_path set). -- cgit v1.2.3 From 2f4a32cc83a584ac3669d44fa2aa1d7f115d44c1 Mon Sep 17 00:00:00 2001 From: Toke Høiland-Jørgensen Date: Sat, 2 Nov 2019 12:09:42 +0100 Subject: selftests: Add tests for automatic map pinning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds a new BPF selftest to exercise the new automatic map pinning code. 
Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/157269298209.394725.15420085139296213182.stgit@toke.dk --- tools/testing/selftests/bpf/prog_tests/pinning.c | 210 +++++++++++++++++++++ tools/testing/selftests/bpf/progs/test_pinning.c | 31 +++ .../selftests/bpf/progs/test_pinning_invalid.c | 16 ++ 3 files changed, 257 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/pinning.c create mode 100644 tools/testing/selftests/bpf/progs/test_pinning.c create mode 100644 tools/testing/selftests/bpf/progs/test_pinning_invalid.c (limited to 'tools') diff --git a/tools/testing/selftests/bpf/prog_tests/pinning.c b/tools/testing/selftests/bpf/prog_tests/pinning.c new file mode 100644 index 000000000000..525388971e08 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/pinning.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include + +__u32 get_map_id(struct bpf_object *obj, const char *name) +{ + struct bpf_map_info map_info = {}; + __u32 map_info_len, duration = 0; + struct bpf_map *map; + int err; + + map_info_len = sizeof(map_info); + + map = bpf_object__find_map_by_name(obj, name); + if (CHECK(!map, "find map", "NULL map")) + return 0; + + err = bpf_obj_get_info_by_fd(bpf_map__fd(map), + &map_info, &map_info_len); + CHECK(err, "get map info", "err %d errno %d", err, errno); + return map_info.id; +} + +void test_pinning(void) +{ + const char *file_invalid = "./test_pinning_invalid.o"; + const char *custpinpath = "/sys/fs/bpf/custom/pinmap"; + const char *nopinpath = "/sys/fs/bpf/nopinmap"; + const char *nopinpath2 = "/sys/fs/bpf/nopinmap2"; + const char *custpath = "/sys/fs/bpf/custom"; + const char *pinpath = "/sys/fs/bpf/pinmap"; + const char *file = "./test_pinning.o"; + __u32 map_id, map_id2, duration = 0; + struct stat statbuf = {}; + struct bpf_object *obj; + struct bpf_map *map; + int err; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, + .pin_root_path = custpath, + ); + + /* check that opening fails with invalid pinning value in map def */ + obj = bpf_object__open_file(file_invalid, NULL); + err = libbpf_get_error(obj); + if (CHECK(err != -EINVAL, "invalid open", "err %d errno %d\n", err, errno)) { + obj = NULL; + goto out; + } + + /* open the valid object file */ + obj = bpf_object__open_file(file, NULL); + err = libbpf_get_error(obj); + if (CHECK(err, "default open", "err %d errno %d\n", err, errno)) { + obj = NULL; + goto out; + } + + err = bpf_object__load(obj); + if (CHECK(err, "default load", "err %d errno %d\n", err, errno)) + goto out; + + /* check that pinmap was pinned */ + err = stat(pinpath, &statbuf); + if (CHECK(err, "stat pinpath", "err %d errno %d\n", err, errno)) + goto out; + + /* check that nopinmap was *not* pinned */ + err = stat(nopinpath, &statbuf); + if (CHECK(!err || errno != ENOENT, "stat nopinpath", + "err %d errno %d\n", err, errno)) + goto out; + + /* check that nopinmap2 was *not* pinned */ + err = stat(nopinpath2, &statbuf); + if (CHECK(!err || errno != ENOENT, "stat nopinpath2", + "err %d errno %d\n", err, errno)) + goto out; + + map_id = get_map_id(obj, "pinmap"); + if (!map_id) + goto out; + + bpf_object__close(obj); + + obj = bpf_object__open_file(file, NULL); + if (CHECK_FAIL(libbpf_get_error(obj))) { + obj = NULL; + goto out; + } + + err = bpf_object__load(obj); + if (CHECK(err, "default load", "err %d errno %d\n", err, errno)) + goto out; + + /* check that same map ID was reused for second load */ + 
map_id2 = get_map_id(obj, "pinmap"); + if (CHECK(map_id != map_id2, "check reuse", + "err %d errno %d id %d id2 %d\n", err, errno, map_id, map_id2)) + goto out; + + /* should be no-op to re-pin same map */ + map = bpf_object__find_map_by_name(obj, "pinmap"); + if (CHECK(!map, "find map", "NULL map")) + goto out; + + err = bpf_map__pin(map, NULL); + if (CHECK(err, "re-pin map", "err %d errno %d\n", err, errno)) + goto out; + + /* but error to pin at different location */ + err = bpf_map__pin(map, "/sys/fs/bpf/other"); + if (CHECK(!err, "pin map different", "err %d errno %d\n", err, errno)) + goto out; + + /* unpin maps with a pin_path set */ + err = bpf_object__unpin_maps(obj, NULL); + if (CHECK(err, "unpin maps", "err %d errno %d\n", err, errno)) + goto out; + + /* and re-pin them... */ + err = bpf_object__pin_maps(obj, NULL); + if (CHECK(err, "pin maps", "err %d errno %d\n", err, errno)) + goto out; + + /* set pinning path of other map and re-pin all */ + map = bpf_object__find_map_by_name(obj, "nopinmap"); + if (CHECK(!map, "find map", "NULL map")) + goto out; + + err = bpf_map__set_pin_path(map, custpinpath); + if (CHECK(err, "set pin path", "err %d errno %d\n", err, errno)) + goto out; + + /* should only pin the one unpinned map */ + err = bpf_object__pin_maps(obj, NULL); + if (CHECK(err, "pin maps", "err %d errno %d\n", err, errno)) + goto out; + + /* check that nopinmap was pinned at the custom path */ + err = stat(custpinpath, &statbuf); + if (CHECK(err, "stat custpinpath", "err %d errno %d\n", err, errno)) + goto out; + + /* remove the custom pin path to re-test it with auto-pinning below */ + err = unlink(custpinpath); + if (CHECK(err, "unlink custpinpath", "err %d errno %d\n", err, errno)) + goto out; + + err = rmdir(custpath); + if (CHECK(err, "rmdir custpindir", "err %d errno %d\n", err, errno)) + goto out; + + bpf_object__close(obj); + + /* open the valid object file again */ + obj = bpf_object__open_file(file, NULL); + err = libbpf_get_error(obj); + if (CHECK(err, "default open", "err %d errno %d\n", err, errno)) { + obj = NULL; + goto out; + } + + /* swap pin paths of the two maps */ + bpf_object__for_each_map(map, obj) { + if (!strcmp(bpf_map__name(map), "nopinmap")) + err = bpf_map__set_pin_path(map, pinpath); + else if (!strcmp(bpf_map__name(map), "pinmap")) + err = bpf_map__set_pin_path(map, NULL); + else + continue; + + if (CHECK(err, "set pin path", "err %d errno %d\n", err, errno)) + goto out; + } + + /* should fail because of map parameter mismatch */ + err = bpf_object__load(obj); + if (CHECK(err != -EINVAL, "param mismatch load", "err %d errno %d\n", err, errno)) + goto out; + + bpf_object__close(obj); + + /* test auto-pinning at custom path with open opt */ + obj = bpf_object__open_file(file, &opts); + if (CHECK_FAIL(libbpf_get_error(obj))) { + obj = NULL; + goto out; + } + + err = bpf_object__load(obj); + if (CHECK(err, "custom load", "err %d errno %d\n", err, errno)) + goto out; + + /* check that pinmap was pinned at the custom path */ + err = stat(custpinpath, &statbuf); + if (CHECK(err, "stat custpinpath", "err %d errno %d\n", err, errno)) + goto out; + +out: + unlink(pinpath); + unlink(nopinpath); + unlink(nopinpath2); + unlink(custpinpath); + rmdir(custpath); + if (obj) + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/progs/test_pinning.c b/tools/testing/selftests/bpf/progs/test_pinning.c new file mode 100644 index 000000000000..f69a4a50d056 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_pinning.c @@ -0,0 +1,31 @@ +// 
SPDX-License-Identifier: GPL-2.0 + +#include +#include "bpf_helpers.h" + +int _version SEC("version") = 1; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); + __uint(pinning, LIBBPF_PIN_BY_NAME); +} pinmap SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} nopinmap SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); + __uint(pinning, LIBBPF_PIN_NONE); +} nopinmap2 SEC(".maps"); + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_pinning_invalid.c b/tools/testing/selftests/bpf/progs/test_pinning_invalid.c new file mode 100644 index 000000000000..51b38abe7ba1 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_pinning_invalid.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include "bpf_helpers.h" + +int _version SEC("version") = 1; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); + __uint(pinning, 2); /* invalid */ +} nopinmap3 SEC(".maps"); + +char _license[] SEC("license") = "GPL"; -- cgit v1.2.3 From 6ae08ae3dea2cfa03dd3665a3c8475c2d429ef47 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sat, 2 Nov 2019 00:17:59 +0100 Subject: bpf: Add probe_read_{user, kernel} and probe_read_{user, kernel}_str helpers The current bpf_probe_read() and bpf_probe_read_str() helpers are broken in that they assume they can be used for probing memory access for kernel space addresses /as well as/ user space addresses. However, plain use of probe_kernel_read() for both cases will attempt to always access kernel space address space given access is performed under KERNEL_DS and some archs in-fact have overlapping address spaces where a kernel pointer and user pointer would have the /same/ address value and therefore accessing application memory via bpf_probe_read{,_str}() would read garbage values. Lets fix BPF side by making use of recently added 3d7081822f7f ("uaccess: Add non-pagefault user-space read functions"). Unfortunately, the only way to fix this status quo is to add dedicated bpf_probe_read_{user,kernel}() and bpf_probe_read_{user,kernel}_str() helpers. The bpf_probe_read{,_str}() helpers are kept as-is to retain their current behavior. The two *_user() variants attempt the access always under USER_DS set, the two *_kernel() variants will -EFAULT when accessing user memory if the underlying architecture has non-overlapping address ranges, also avoiding throwing the kernel warning via 00c42373d397 ("x86-64: add warning for non-canonical user access address dereferences"). 
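As an illustration of how a tracing program picks the right variant (a fragment modelled on the snippet in the helper documentation below; x86-64 calling convention assumed for ctx->di):

    SEC("kprobe/sys_open")
    void bpf_sys_open(struct pt_regs *ctx)
    {
            char path[256];

            /* the filename argument is a pointer into the application's
             * address space, so the _user variant is the right one
             */
            bpf_probe_read_user_str(path, sizeof(path), (const void *)ctx->di);

            /* pointers into kernel structures (task_struct, sk_buff, ...)
             * would instead go through bpf_probe_read_kernel() or
             * bpf_probe_read_kernel_str()
             */
    }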
Fixes: a5e8c07059d0 ("bpf: add bpf_probe_read_str helper") Fixes: 2541517c32be ("tracing, perf: Implement BPF programs attached to kprobes") Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/796ee46e948bc808d54891a1108435f8652c6ca4.1572649915.git.daniel@iogearbox.net --- include/uapi/linux/bpf.h | 122 ++++++++++++++++++--------- kernel/trace/bpf_trace.c | 181 ++++++++++++++++++++++++++++++----------- tools/include/uapi/linux/bpf.h | 122 ++++++++++++++++++--------- 3 files changed, 299 insertions(+), 126 deletions(-) (limited to 'tools') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index a6bf19dabaab..df6809a76404 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -563,10 +563,13 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read(void *dst, u32 size, const void *src) + * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) * Description * For tracing programs, safely attempt to read *size* bytes from - * address *src* and store the data in *dst*. + * kernel space address *unsafe_ptr* and store the data in *dst*. + * + * Generally, use bpf_probe_read_user() or bpf_probe_read_kernel() + * instead. * Return * 0 on success, or a negative error in case of failure. * @@ -1428,45 +1431,14 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr) + * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) * Description - * Copy a NUL terminated string from an unsafe address - * *unsafe_ptr* to *dst*. The *size* should include the - * terminating NUL byte. In case the string length is smaller than - * *size*, the target is not padded with further NUL bytes. If the - * string length is larger than *size*, just *size*-1 bytes are - * copied and the last byte is set to NUL. - * - * On success, the length of the copied string is returned. This - * makes this helper useful in tracing programs for reading - * strings, and more importantly to get its length at runtime. See - * the following snippet: - * - * :: - * - * SEC("kprobe/sys_open") - * void bpf_sys_open(struct pt_regs *ctx) - * { - * char buf[PATHLEN]; // PATHLEN is defined to 256 - * int res = bpf_probe_read_str(buf, sizeof(buf), - * ctx->di); - * - * // Consume buf, for example push it to - * // userspace via bpf_perf_event_output(); we - * // can use res (the string length) as event - * // size, after checking its boundaries. - * } - * - * In comparison, using **bpf_probe_read()** helper here instead - * to read the string would require to estimate the length at - * compile time, and would often result in copying more memory - * than necessary. + * Copy a NUL terminated string from an unsafe kernel address + * *unsafe_ptr* to *dst*. See bpf_probe_read_kernel_str() for + * more details. * - * Another useful use case is when parsing individual process - * arguments or individual environment variables navigating - * *current*\ **->mm->arg_start** and *current*\ - * **->mm->env_start**: using this helper and the return value, - * one can quickly iterate at the right offset of the memory area. + * Generally, use bpf_probe_read_user_str() or bpf_probe_read_kernel_str() + * instead. * Return * On success, the strictly positive length of the string, * including the trailing NUL character. 
On error, a negative @@ -2777,6 +2749,72 @@ union bpf_attr { * restricted to raw_tracepoint bpf programs. * Return * 0 on success, or a negative error in case of failure. + * + * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) + * Description + * Safely attempt to read *size* bytes from user space address + * *unsafe_ptr* and store the data in *dst*. + * Return + * 0 on success, or a negative error in case of failure. + * + * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) + * Description + * Safely attempt to read *size* bytes from kernel space address + * *unsafe_ptr* and store the data in *dst*. + * Return + * 0 on success, or a negative error in case of failure. + * + * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) + * Description + * Copy a NUL terminated string from an unsafe user address + * *unsafe_ptr* to *dst*. The *size* should include the + * terminating NUL byte. In case the string length is smaller than + * *size*, the target is not padded with further NUL bytes. If the + * string length is larger than *size*, just *size*-1 bytes are + * copied and the last byte is set to NUL. + * + * On success, the length of the copied string is returned. This + * makes this helper useful in tracing programs for reading + * strings, and more importantly to get its length at runtime. See + * the following snippet: + * + * :: + * + * SEC("kprobe/sys_open") + * void bpf_sys_open(struct pt_regs *ctx) + * { + * char buf[PATHLEN]; // PATHLEN is defined to 256 + * int res = bpf_probe_read_user_str(buf, sizeof(buf), + * ctx->di); + * + * // Consume buf, for example push it to + * // userspace via bpf_perf_event_output(); we + * // can use res (the string length) as event + * // size, after checking its boundaries. + * } + * + * In comparison, using **bpf_probe_read_user()** helper here + * instead to read the string would require to estimate the length + * at compile time, and would often result in copying more memory + * than necessary. + * + * Another useful use case is when parsing individual process + * arguments or individual environment variables navigating + * *current*\ **->mm->arg_start** and *current*\ + * **->mm->env_start**: using this helper and the return value, + * one can quickly iterate at the right offset of the memory area. + * Return + * On success, the strictly positive length of the string, + * including the trailing NUL character. On error, a negative + * value. + * + * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) + * Description + * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* + * to *dst*. Same semantics as with bpf_probe_read_user_str() apply. + * Return + * On success, the strictly positive length of the string, including + * the trailing NUL character. On error, a negative value. 
*/ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -2890,7 +2928,11 @@ union bpf_attr { FN(sk_storage_delete), \ FN(send_signal), \ FN(tcp_gen_syncookie), \ - FN(skb_output), + FN(skb_output), \ + FN(probe_read_user), \ + FN(probe_read_kernel), \ + FN(probe_read_user_str), \ + FN(probe_read_kernel_str), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 2d87fcdcb19b..ffc91d4935ac 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -138,24 +138,140 @@ static const struct bpf_func_proto bpf_override_return_proto = { }; #endif -BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr) +BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size, + const void __user *, unsafe_ptr) { - int ret; + int ret = probe_user_read(dst, unsafe_ptr, size); - ret = security_locked_down(LOCKDOWN_BPF_READ); - if (ret < 0) - goto out; + if (unlikely(ret < 0)) + memset(dst, 0, size); + + return ret; +} + +static const struct bpf_func_proto bpf_probe_read_user_proto = { + .func = bpf_probe_read_user, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, + .arg3_type = ARG_ANYTHING, +}; + +BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size, + const void __user *, unsafe_ptr) +{ + int ret = strncpy_from_unsafe_user(dst, unsafe_ptr, size); + + if (unlikely(ret < 0)) + memset(dst, 0, size); + + return ret; +} + +static const struct bpf_func_proto bpf_probe_read_user_str_proto = { + .func = bpf_probe_read_user_str, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, + .arg3_type = ARG_ANYTHING, +}; - ret = probe_kernel_read(dst, unsafe_ptr, size); +static __always_inline int +bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr, + const bool compat) +{ + int ret = security_locked_down(LOCKDOWN_BPF_READ); + + if (unlikely(ret < 0)) + goto out; + ret = compat ? 
probe_kernel_read(dst, unsafe_ptr, size) : + probe_kernel_read_strict(dst, unsafe_ptr, size); if (unlikely(ret < 0)) out: memset(dst, 0, size); + return ret; +} + +BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size, + const void *, unsafe_ptr) +{ + return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, false); +} + +static const struct bpf_func_proto bpf_probe_read_kernel_proto = { + .func = bpf_probe_read_kernel, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, + .arg3_type = ARG_ANYTHING, +}; + +BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size, + const void *, unsafe_ptr) +{ + return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, true); +} +static const struct bpf_func_proto bpf_probe_read_compat_proto = { + .func = bpf_probe_read_compat, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, + .arg3_type = ARG_ANYTHING, +}; + +static __always_inline int +bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr, + const bool compat) +{ + int ret = security_locked_down(LOCKDOWN_BPF_READ); + + if (unlikely(ret < 0)) + goto out; + /* + * The strncpy_from_unsafe_*() call will likely not fill the entire + * buffer, but that's okay in this circumstance as we're probing + * arbitrary memory anyway similar to bpf_probe_read_*() and might + * as well probe the stack. Thus, memory is explicitly cleared + * only in error case, so that improper users ignoring return + * code altogether don't copy garbage; otherwise length of string + * is returned that can be used for bpf_perf_event_output() et al. + */ + ret = compat ? strncpy_from_unsafe(dst, unsafe_ptr, size) : + strncpy_from_unsafe_strict(dst, unsafe_ptr, size); + if (unlikely(ret < 0)) +out: + memset(dst, 0, size); return ret; } -static const struct bpf_func_proto bpf_probe_read_proto = { - .func = bpf_probe_read, +BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size, + const void *, unsafe_ptr) +{ + return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, false); +} + +static const struct bpf_func_proto bpf_probe_read_kernel_str_proto = { + .func = bpf_probe_read_kernel_str, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, + .arg3_type = ARG_ANYTHING, +}; + +BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size, + const void *, unsafe_ptr) +{ + return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, true); +} + +static const struct bpf_func_proto bpf_probe_read_compat_str_proto = { + .func = bpf_probe_read_compat_str, .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_UNINIT_MEM, @@ -583,41 +699,6 @@ static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = { .arg2_type = ARG_ANYTHING, }; -BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size, - const void *, unsafe_ptr) -{ - int ret; - - ret = security_locked_down(LOCKDOWN_BPF_READ); - if (ret < 0) - goto out; - - /* - * The strncpy_from_unsafe() call will likely not fill the entire - * buffer, but that's okay in this circumstance as we're probing - * arbitrary memory anyway similar to bpf_probe_read() and might - * as well probe the stack. Thus, memory is explicitly cleared - * only in error case, so that improper users ignoring return - * code altogether don't copy garbage; otherwise length of string - * is returned that can be used for bpf_perf_event_output() et al. 
- */ - ret = strncpy_from_unsafe(dst, unsafe_ptr, size); - if (unlikely(ret < 0)) -out: - memset(dst, 0, size); - - return ret; -} - -static const struct bpf_func_proto bpf_probe_read_str_proto = { - .func = bpf_probe_read_str, - .gpl_only = true, - .ret_type = RET_INTEGER, - .arg1_type = ARG_PTR_TO_UNINIT_MEM, - .arg2_type = ARG_CONST_SIZE_OR_ZERO, - .arg3_type = ARG_ANYTHING, -}; - struct send_signal_irq_work { struct irq_work irq_work; struct task_struct *task; @@ -697,8 +778,6 @@ tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_map_pop_elem_proto; case BPF_FUNC_map_peek_elem: return &bpf_map_peek_elem_proto; - case BPF_FUNC_probe_read: - return &bpf_probe_read_proto; case BPF_FUNC_ktime_get_ns: return &bpf_ktime_get_ns_proto; case BPF_FUNC_tail_call: @@ -725,8 +804,18 @@ tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_current_task_under_cgroup_proto; case BPF_FUNC_get_prandom_u32: return &bpf_get_prandom_u32_proto; + case BPF_FUNC_probe_read_user: + return &bpf_probe_read_user_proto; + case BPF_FUNC_probe_read_kernel: + return &bpf_probe_read_kernel_proto; + case BPF_FUNC_probe_read: + return &bpf_probe_read_compat_proto; + case BPF_FUNC_probe_read_user_str: + return &bpf_probe_read_user_str_proto; + case BPF_FUNC_probe_read_kernel_str: + return &bpf_probe_read_kernel_str_proto; case BPF_FUNC_probe_read_str: - return &bpf_probe_read_str_proto; + return &bpf_probe_read_compat_str_proto; #ifdef CONFIG_CGROUPS case BPF_FUNC_get_current_cgroup_id: return &bpf_get_current_cgroup_id_proto; diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index a6bf19dabaab..df6809a76404 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -563,10 +563,13 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read(void *dst, u32 size, const void *src) + * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) * Description * For tracing programs, safely attempt to read *size* bytes from - * address *src* and store the data in *dst*. + * kernel space address *unsafe_ptr* and store the data in *dst*. + * + * Generally, use bpf_probe_read_user() or bpf_probe_read_kernel() + * instead. * Return * 0 on success, or a negative error in case of failure. * @@ -1428,45 +1431,14 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr) + * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) * Description - * Copy a NUL terminated string from an unsafe address - * *unsafe_ptr* to *dst*. The *size* should include the - * terminating NUL byte. In case the string length is smaller than - * *size*, the target is not padded with further NUL bytes. If the - * string length is larger than *size*, just *size*-1 bytes are - * copied and the last byte is set to NUL. - * - * On success, the length of the copied string is returned. This - * makes this helper useful in tracing programs for reading - * strings, and more importantly to get its length at runtime. 
See - * the following snippet: - * - * :: - * - * SEC("kprobe/sys_open") - * void bpf_sys_open(struct pt_regs *ctx) - * { - * char buf[PATHLEN]; // PATHLEN is defined to 256 - * int res = bpf_probe_read_str(buf, sizeof(buf), - * ctx->di); - * - * // Consume buf, for example push it to - * // userspace via bpf_perf_event_output(); we - * // can use res (the string length) as event - * // size, after checking its boundaries. - * } - * - * In comparison, using **bpf_probe_read()** helper here instead - * to read the string would require to estimate the length at - * compile time, and would often result in copying more memory - * than necessary. + * Copy a NUL terminated string from an unsafe kernel address + * *unsafe_ptr* to *dst*. See bpf_probe_read_kernel_str() for + * more details. * - * Another useful use case is when parsing individual process - * arguments or individual environment variables navigating - * *current*\ **->mm->arg_start** and *current*\ - * **->mm->env_start**: using this helper and the return value, - * one can quickly iterate at the right offset of the memory area. + * Generally, use bpf_probe_read_user_str() or bpf_probe_read_kernel_str() + * instead. * Return * On success, the strictly positive length of the string, * including the trailing NUL character. On error, a negative @@ -2777,6 +2749,72 @@ union bpf_attr { * restricted to raw_tracepoint bpf programs. * Return * 0 on success, or a negative error in case of failure. + * + * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) + * Description + * Safely attempt to read *size* bytes from user space address + * *unsafe_ptr* and store the data in *dst*. + * Return + * 0 on success, or a negative error in case of failure. + * + * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) + * Description + * Safely attempt to read *size* bytes from kernel space address + * *unsafe_ptr* and store the data in *dst*. + * Return + * 0 on success, or a negative error in case of failure. + * + * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) + * Description + * Copy a NUL terminated string from an unsafe user address + * *unsafe_ptr* to *dst*. The *size* should include the + * terminating NUL byte. In case the string length is smaller than + * *size*, the target is not padded with further NUL bytes. If the + * string length is larger than *size*, just *size*-1 bytes are + * copied and the last byte is set to NUL. + * + * On success, the length of the copied string is returned. This + * makes this helper useful in tracing programs for reading + * strings, and more importantly to get its length at runtime. See + * the following snippet: + * + * :: + * + * SEC("kprobe/sys_open") + * void bpf_sys_open(struct pt_regs *ctx) + * { + * char buf[PATHLEN]; // PATHLEN is defined to 256 + * int res = bpf_probe_read_user_str(buf, sizeof(buf), + * ctx->di); + * + * // Consume buf, for example push it to + * // userspace via bpf_perf_event_output(); we + * // can use res (the string length) as event + * // size, after checking its boundaries. + * } + * + * In comparison, using **bpf_probe_read_user()** helper here + * instead to read the string would require to estimate the length + * at compile time, and would often result in copying more memory + * than necessary. 
+ * + * Another useful use case is when parsing individual process + * arguments or individual environment variables navigating + * *current*\ **->mm->arg_start** and *current*\ + * **->mm->env_start**: using this helper and the return value, + * one can quickly iterate at the right offset of the memory area. + * Return + * On success, the strictly positive length of the string, + * including the trailing NUL character. On error, a negative + * value. + * + * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) + * Description + * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* + * to *dst*. Same semantics as with bpf_probe_read_user_str() apply. + * Return + * On success, the strictly positive length of the string, including + * the trailing NUL character. On error, a negative value. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -2890,7 +2928,11 @@ union bpf_attr { FN(sk_storage_delete), \ FN(send_signal), \ FN(tcp_gen_syncookie), \ - FN(skb_output), + FN(skb_output), \ + FN(probe_read_user), \ + FN(probe_read_kernel), \ + FN(probe_read_user_str), \ + FN(probe_read_kernel_str), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call -- cgit v1.2.3 From 50f9aa44cac7256551b2e0901831e432a6c52b7f Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sat, 2 Nov 2019 00:18:02 +0100 Subject: bpf, testing: Convert prog tests to probe_read_{user, kernel}{, _str} helper Use probe read *_{kernel,user}{,_str}() helpers instead of bpf_probe_read() or bpf_probe_read_user_str() for program tests where appropriate. Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/4a61d4b71ce3765587d8ef5cb93afa18515e5b3e.1572649915.git.daniel@iogearbox.net --- tools/testing/selftests/bpf/progs/kfree_skb.c | 4 +- tools/testing/selftests/bpf/progs/pyperf.h | 67 ++++++++++++---------- tools/testing/selftests/bpf/progs/strobemeta.h | 36 ++++++------ .../testing/selftests/bpf/progs/test_tcp_estats.c | 2 +- 4 files changed, 57 insertions(+), 52 deletions(-) (limited to 'tools') diff --git a/tools/testing/selftests/bpf/progs/kfree_skb.c b/tools/testing/selftests/bpf/progs/kfree_skb.c index 89af8a921ee4..489319ea1d6a 100644 --- a/tools/testing/selftests/bpf/progs/kfree_skb.c +++ b/tools/testing/selftests/bpf/progs/kfree_skb.c @@ -79,11 +79,11 @@ int trace_kfree_skb(struct trace_kfree_skb *ctx) func = ptr->func; })); - bpf_probe_read(&pkt_type, sizeof(pkt_type), _(&skb->__pkt_type_offset)); + bpf_probe_read_kernel(&pkt_type, sizeof(pkt_type), _(&skb->__pkt_type_offset)); pkt_type &= 7; /* read eth proto */ - bpf_probe_read(&pkt_data, sizeof(pkt_data), data + 12); + bpf_probe_read_kernel(&pkt_data, sizeof(pkt_data), data + 12); bpf_printk("rcuhead.next %llx func %llx\n", ptr, func); bpf_printk("skb->len %d users %d pkt_type %x\n", diff --git a/tools/testing/selftests/bpf/progs/pyperf.h b/tools/testing/selftests/bpf/progs/pyperf.h index 003fe106fc70..71d383cc9b85 100644 --- a/tools/testing/selftests/bpf/progs/pyperf.h +++ b/tools/testing/selftests/bpf/progs/pyperf.h @@ -72,9 +72,9 @@ static __always_inline void *get_thread_state(void *tls_base, PidData *pidData) void* thread_state; int key; - bpf_probe_read(&key, sizeof(key), (void*)(long)pidData->tls_key_addr); - bpf_probe_read(&thread_state, sizeof(thread_state), - tls_base + 0x310 + key * 0x10 + 0x08); + bpf_probe_read_user(&key, sizeof(key), (void*)(long)pidData->tls_key_addr); + 
bpf_probe_read_user(&thread_state, sizeof(thread_state), + tls_base + 0x310 + key * 0x10 + 0x08); return thread_state; } @@ -82,31 +82,33 @@ static __always_inline bool get_frame_data(void *frame_ptr, PidData *pidData, FrameData *frame, Symbol *symbol) { // read data from PyFrameObject - bpf_probe_read(&frame->f_back, - sizeof(frame->f_back), - frame_ptr + pidData->offsets.PyFrameObject_back); - bpf_probe_read(&frame->f_code, - sizeof(frame->f_code), - frame_ptr + pidData->offsets.PyFrameObject_code); + bpf_probe_read_user(&frame->f_back, + sizeof(frame->f_back), + frame_ptr + pidData->offsets.PyFrameObject_back); + bpf_probe_read_user(&frame->f_code, + sizeof(frame->f_code), + frame_ptr + pidData->offsets.PyFrameObject_code); // read data from PyCodeObject if (!frame->f_code) return false; - bpf_probe_read(&frame->co_filename, - sizeof(frame->co_filename), - frame->f_code + pidData->offsets.PyCodeObject_filename); - bpf_probe_read(&frame->co_name, - sizeof(frame->co_name), - frame->f_code + pidData->offsets.PyCodeObject_name); + bpf_probe_read_user(&frame->co_filename, + sizeof(frame->co_filename), + frame->f_code + pidData->offsets.PyCodeObject_filename); + bpf_probe_read_user(&frame->co_name, + sizeof(frame->co_name), + frame->f_code + pidData->offsets.PyCodeObject_name); // read actual names into symbol if (frame->co_filename) - bpf_probe_read_str(&symbol->file, - sizeof(symbol->file), - frame->co_filename + pidData->offsets.String_data); + bpf_probe_read_user_str(&symbol->file, + sizeof(symbol->file), + frame->co_filename + + pidData->offsets.String_data); if (frame->co_name) - bpf_probe_read_str(&symbol->name, - sizeof(symbol->name), - frame->co_name + pidData->offsets.String_data); + bpf_probe_read_user_str(&symbol->name, + sizeof(symbol->name), + frame->co_name + + pidData->offsets.String_data); return true; } @@ -174,9 +176,9 @@ static __always_inline int __on_event(struct pt_regs *ctx) event->kernel_stack_id = bpf_get_stackid(ctx, &stackmap, 0); void* thread_state_current = (void*)0; - bpf_probe_read(&thread_state_current, - sizeof(thread_state_current), - (void*)(long)pidData->current_state_addr); + bpf_probe_read_user(&thread_state_current, + sizeof(thread_state_current), + (void*)(long)pidData->current_state_addr); struct task_struct* task = (struct task_struct*)bpf_get_current_task(); void* tls_base = (void*)task; @@ -188,11 +190,13 @@ static __always_inline int __on_event(struct pt_regs *ctx) if (pidData->use_tls) { uint64_t pthread_created; uint64_t pthread_self; - bpf_probe_read(&pthread_self, sizeof(pthread_self), tls_base + 0x10); + bpf_probe_read_user(&pthread_self, sizeof(pthread_self), + tls_base + 0x10); - bpf_probe_read(&pthread_created, - sizeof(pthread_created), - thread_state + pidData->offsets.PyThreadState_thread); + bpf_probe_read_user(&pthread_created, + sizeof(pthread_created), + thread_state + + pidData->offsets.PyThreadState_thread); event->pthread_match = pthread_created == pthread_self; } else { event->pthread_match = 1; @@ -204,9 +208,10 @@ static __always_inline int __on_event(struct pt_regs *ctx) Symbol sym = {}; int cur_cpu = bpf_get_smp_processor_id(); - bpf_probe_read(&frame_ptr, - sizeof(frame_ptr), - thread_state + pidData->offsets.PyThreadState_frame); + bpf_probe_read_user(&frame_ptr, + sizeof(frame_ptr), + thread_state + + pidData->offsets.PyThreadState_frame); int32_t* symbol_counter = bpf_map_lookup_elem(&symbolmap, &sym); if (symbol_counter == NULL) diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h 
b/tools/testing/selftests/bpf/progs/strobemeta.h index 067eb625d01c..4bf16e0a1b0e 100644 --- a/tools/testing/selftests/bpf/progs/strobemeta.h +++ b/tools/testing/selftests/bpf/progs/strobemeta.h @@ -98,7 +98,7 @@ struct strobe_map_raw { /* * having volatile doesn't change anything on BPF side, but clang * emits warnings for passing `volatile const char *` into - * bpf_probe_read_str that expects just `const char *` + * bpf_probe_read_user_str that expects just `const char *` */ const char* tag; /* @@ -309,18 +309,18 @@ static __always_inline void *calc_location(struct strobe_value_loc *loc, dtv_t *dtv; void *tls_ptr; - bpf_probe_read(&tls_index, sizeof(struct tls_index), - (void *)loc->offset); + bpf_probe_read_user(&tls_index, sizeof(struct tls_index), + (void *)loc->offset); /* valid module index is always positive */ if (tls_index.module > 0) { /* dtv = ((struct tcbhead *)tls_base)->dtv[tls_index.module] */ - bpf_probe_read(&dtv, sizeof(dtv), - &((struct tcbhead *)tls_base)->dtv); + bpf_probe_read_user(&dtv, sizeof(dtv), + &((struct tcbhead *)tls_base)->dtv); dtv += tls_index.module; } else { dtv = NULL; } - bpf_probe_read(&tls_ptr, sizeof(void *), dtv); + bpf_probe_read_user(&tls_ptr, sizeof(void *), dtv); /* if pointer has (void *)-1 value, then TLS wasn't initialized yet */ return tls_ptr && tls_ptr != (void *)-1 ? tls_ptr + tls_index.offset @@ -336,7 +336,7 @@ static __always_inline void read_int_var(struct strobemeta_cfg *cfg, if (!location) return; - bpf_probe_read(value, sizeof(struct strobe_value_generic), location); + bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location); data->int_vals[idx] = value->val; if (value->header.len) data->int_vals_set_mask |= (1 << idx); @@ -356,13 +356,13 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg, if (!location) return 0; - bpf_probe_read(value, sizeof(struct strobe_value_generic), location); - len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN, value->ptr); + bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location); + len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, value->ptr); /* - * if bpf_probe_read_str returns error (<0), due to casting to + * if bpf_probe_read_user_str returns error (<0), due to casting to * unsinged int, it will become big number, so next check is * sufficient to check for errors AND prove to BPF verifier, that - * bpf_probe_read_str won't return anything bigger than + * bpf_probe_read_user_str won't return anything bigger than * STROBE_MAX_STR_LEN */ if (len > STROBE_MAX_STR_LEN) @@ -391,8 +391,8 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg, if (!location) return payload; - bpf_probe_read(value, sizeof(struct strobe_value_generic), location); - if (bpf_probe_read(&map, sizeof(struct strobe_map_raw), value->ptr)) + bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location); + if (bpf_probe_read_user(&map, sizeof(struct strobe_map_raw), value->ptr)) return payload; descr->id = map.id; @@ -402,7 +402,7 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg, data->req_meta_valid = 1; } - len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN, map.tag); + len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, map.tag); if (len <= STROBE_MAX_STR_LEN) { descr->tag_len = len; payload += len; @@ -418,15 +418,15 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg, break; descr->key_lens[i] = 0; - len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN, - map.entries[i].key); 
+ len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, + map.entries[i].key); if (len <= STROBE_MAX_STR_LEN) { descr->key_lens[i] = len; payload += len; } descr->val_lens[i] = 0; - len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN, - map.entries[i].val); + len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, + map.entries[i].val); if (len <= STROBE_MAX_STR_LEN) { descr->val_lens[i] = len; payload += len; diff --git a/tools/testing/selftests/bpf/progs/test_tcp_estats.c b/tools/testing/selftests/bpf/progs/test_tcp_estats.c index c8c595da38d4..87b7d934ce73 100644 --- a/tools/testing/selftests/bpf/progs/test_tcp_estats.c +++ b/tools/testing/selftests/bpf/progs/test_tcp_estats.c @@ -38,7 +38,7 @@ #include #include "bpf_helpers.h" -#define _(P) ({typeof(P) val = 0; bpf_probe_read(&val, sizeof(val), &P); val;}) +#define _(P) ({typeof(P) val = 0; bpf_probe_read_kernel(&val, sizeof(val), &P); val;}) #define TCP_ESTATS_MAGIC 0xBAADBEEF /* This test case needs "sock" and "pt_regs" data structure. -- cgit v1.2.3 From fa553d9b57d4a98a160d1926b4e263e7a78c0cf3 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sat, 2 Nov 2019 00:18:03 +0100 Subject: bpf, testing: Add selftest to read/write sockaddr from user space Tested on x86-64 and Ilya was also kind enough to give it a spin on s390x, both passing with probe_user:OK there. The test is using the newly added bpf_probe_read_user() to dump sockaddr from connect call into .bss BPF map and overrides the user buffer via bpf_probe_write_user(): # ./test_progs [...] #17 pkt_md_access:OK #18 probe_user:OK #19 prog_run_xattr:OK [...] Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Tested-by: Ilya Leoshkevich Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/90f449d8af25354e05080e82fc6e2d3179da30ea.1572649915.git.daniel@iogearbox.net --- .../testing/selftests/bpf/prog_tests/probe_user.c | 78 ++++++++++++++++++++++ .../testing/selftests/bpf/progs/test_probe_user.c | 26 ++++++++ 2 files changed, 104 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/probe_user.c create mode 100644 tools/testing/selftests/bpf/progs/test_probe_user.c (limited to 'tools') diff --git a/tools/testing/selftests/bpf/prog_tests/probe_user.c b/tools/testing/selftests/bpf/prog_tests/probe_user.c new file mode 100644 index 000000000000..8a3187dec048 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/probe_user.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +void test_probe_user(void) +{ +#define kprobe_name "__sys_connect" + const char *prog_name = "kprobe/" kprobe_name; + const char *obj_file = "./test_probe_user.o"; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, ); + int err, results_map_fd, sock_fd, duration = 0; + struct sockaddr curr, orig, tmp; + struct sockaddr_in *in = (struct sockaddr_in *)&curr; + struct bpf_link *kprobe_link = NULL; + struct bpf_program *kprobe_prog; + struct bpf_object *obj; + static const int zero = 0; + + obj = bpf_object__open_file(obj_file, &opts); + if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj))) + return; + + kprobe_prog = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK(!kprobe_prog, "find_probe", + "prog '%s' not found\n", prog_name)) + goto cleanup; + + err = bpf_object__load(obj); + if (CHECK(err, "obj_load", "err %d\n", err)) + goto cleanup; + + results_map_fd = bpf_find_map(__func__, obj, "test_pro.bss"); + if (CHECK(results_map_fd < 0, "find_bss_map", + "err %d\n", results_map_fd)) + goto cleanup; + + kprobe_link = 
bpf_program__attach_kprobe(kprobe_prog, false, + kprobe_name); + if (CHECK(IS_ERR(kprobe_link), "attach_kprobe", + "err %ld\n", PTR_ERR(kprobe_link))) { + kprobe_link = NULL; + goto cleanup; + } + + memset(&curr, 0, sizeof(curr)); + in->sin_family = AF_INET; + in->sin_port = htons(5555); + in->sin_addr.s_addr = inet_addr("255.255.255.255"); + memcpy(&orig, &curr, sizeof(curr)); + + sock_fd = socket(AF_INET, SOCK_STREAM, 0); + if (CHECK(sock_fd < 0, "create_sock_fd", "err %d\n", sock_fd)) + goto cleanup; + + connect(sock_fd, &curr, sizeof(curr)); + close(sock_fd); + + err = bpf_map_lookup_elem(results_map_fd, &zero, &tmp); + if (CHECK(err, "get_kprobe_res", + "failed to get kprobe res: %d\n", err)) + goto cleanup; + + in = (struct sockaddr_in *)&tmp; + if (CHECK(memcmp(&tmp, &orig, sizeof(orig)), "check_kprobe_res", + "wrong kprobe res from probe read: %s:%u\n", + inet_ntoa(in->sin_addr), ntohs(in->sin_port))) + goto cleanup; + + memset(&tmp, 0xab, sizeof(tmp)); + + in = (struct sockaddr_in *)&curr; + if (CHECK(memcmp(&curr, &tmp, sizeof(tmp)), "check_kprobe_res", + "wrong kprobe res from probe write: %s:%u\n", + inet_ntoa(in->sin_addr), ntohs(in->sin_port))) + goto cleanup; +cleanup: + bpf_link__destroy(kprobe_link); + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/progs/test_probe_user.c b/tools/testing/selftests/bpf/progs/test_probe_user.c new file mode 100644 index 000000000000..1871e2ece0c4 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_probe_user.c @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +#include + +#include "bpf_helpers.h" +#include "bpf_tracing.h" + +static struct sockaddr_in old; + +SEC("kprobe/__sys_connect") +int handle_sys_connect(struct pt_regs *ctx) +{ + void *ptr = (void *)PT_REGS_PARM2(ctx); + struct sockaddr_in new; + + bpf_probe_read_user(&old, sizeof(old), ptr); + __builtin_memset(&new, 0xab, sizeof(new)); + bpf_probe_write_user(ptr, &new, sizeof(new)); + + return 0; +} + +char _license[] SEC("license") = "GPL"; -- cgit v1.2.3
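For completeness, here is a hypothetical user-space counterpart (not part of the series) illustrating the effect the probe_user selftest above checks for: with test_probe_user.o attached to __sys_connect, the sockaddr handed to connect() comes back overwritten with 0xab bytes via bpf_probe_write_user(). The port and address values mirror the selftest but are otherwise arbitrary:

  // SPDX-License-Identifier: GPL-2.0
  /* Demo: observe the BPF kprobe rewriting our connect() sockaddr. */
  #include <arpa/inet.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/socket.h>
  #include <unistd.h>

  int main(void)
  {
  	struct sockaddr_in sa = {
  		.sin_family = AF_INET,
  		.sin_port   = htons(5555),
  	};
  	struct sockaddr_in orig;
  	int fd;

  	inet_pton(AF_INET, "255.255.255.255", &sa.sin_addr);
  	memcpy(&orig, &sa, sizeof(sa));

  	fd = socket(AF_INET, SOCK_STREAM, 0);
  	if (fd < 0)
  		return 1;

  	/* connect() itself is expected to fail; only the side effect on
  	 * the user buffer matters here.
  	 */
  	connect(fd, (struct sockaddr *)&sa, sizeof(sa));
  	close(fd);

  	printf("sockaddr %s by the kprobe\n",
  	       memcmp(&sa, &orig, sizeof(sa)) ? "was rewritten" : "was not touched");
  	return 0;
  }
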