From fba60b171a0322830b446dd28170092c47243d39 Mon Sep 17 00:00:00 2001 From: Mauricio Vásquez Date: Fri, 7 Jan 2022 10:26:19 -0500 Subject: libbpf: Use IS_ERR_OR_NULL() in hashmap__free() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit hashmap__new() uses ERR_PTR() to return an error so it's better to use IS_ERR_OR_NULL() in order to check the pointer before calling free(). This will prevent freeing an invalid pointer if somebody calls hashmap__free() with the result of a failed hashmap__new() call. Signed-off-by: Mauricio Vásquez Signed-off-by: Andrii Nakryiko Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20220107152620.192327-1-mauricio@kinvolk.io --- tools/lib/bpf/hashmap.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/hashmap.c b/tools/lib/bpf/hashmap.c index 3c20b126d60d..aeb09c288716 100644 --- a/tools/lib/bpf/hashmap.c +++ b/tools/lib/bpf/hashmap.c @@ -75,7 +75,7 @@ void hashmap__clear(struct hashmap *map) void hashmap__free(struct hashmap *map) { - if (!map) + if (IS_ERR_OR_NULL(map)) return; hashmap__clear(map); @@ -238,4 +238,3 @@ bool hashmap__delete(struct hashmap *map, const void *key, return true; } - -- cgit v1.2.3 From d6c9c24e891216890264320f5534051fd196ace8 Mon Sep 17 00:00:00 2001 From: Christy Lee Date: Fri, 7 Jan 2022 10:46:03 -0800 Subject: libbpf: Rename bpf_prog_attach_xattr() to bpf_prog_attach_opts() All xattr APIs are being dropped, let's converge to the convention used in high-level APIs and rename bpf_prog_attach_xattr to bpf_prog_attach_opts. Signed-off-by: Christy Lee Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220107184604.3668544-2-christylee@fb.com --- tools/lib/bpf/bpf.c | 9 +++++++-- tools/lib/bpf/bpf.h | 4 ++++ tools/lib/bpf/libbpf.map | 1 + 3 files changed, 12 insertions(+), 2 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 550b4cbb6c99..418b259166f8 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -754,10 +754,10 @@ int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type, .flags = flags, ); - return bpf_prog_attach_xattr(prog_fd, target_fd, type, &opts); + return bpf_prog_attach_opts(prog_fd, target_fd, type, &opts); } -int bpf_prog_attach_xattr(int prog_fd, int target_fd, +int bpf_prog_attach_opts(int prog_fd, int target_fd, enum bpf_attach_type type, const struct bpf_prog_attach_opts *opts) { @@ -778,6 +778,11 @@ int bpf_prog_attach_xattr(int prog_fd, int target_fd, return libbpf_err_errno(ret); } +__attribute__((alias("bpf_prog_attach_opts"))) +int bpf_prog_attach_xattr(int prog_fd, int target_fd, + enum bpf_attach_type type, + const struct bpf_prog_attach_opts *opts); + int bpf_prog_detach(int target_fd, enum bpf_attach_type type) { union bpf_attr attr; diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 14e0d97ad2cf..c2e8327010f9 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -391,6 +391,10 @@ struct bpf_prog_attach_opts { LIBBPF_API int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type, unsigned int flags); +LIBBPF_API int bpf_prog_attach_opts(int prog_fd, int attachable_fd, + enum bpf_attach_type type, + const struct bpf_prog_attach_opts *opts); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_prog_attach_opts() instead") LIBBPF_API int bpf_prog_attach_xattr(int prog_fd, int attachable_fd, enum bpf_attach_type type, const struct bpf_prog_attach_opts *opts); diff --git 
a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 529783967793..8262cfca2240 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -247,6 +247,7 @@ LIBBPF_0.0.8 { bpf_link_create; bpf_link_update; bpf_map__set_initial_value; + bpf_prog_attach_opts; bpf_program__attach_cgroup; bpf_program__attach_lsm; bpf_program__is_lsm; -- cgit v1.2.3 From a32ea51a3f17ce6524c9fc19d311e708331c8b5f Mon Sep 17 00:00:00 2001 From: Yafang Shao Date: Sat, 8 Jan 2022 13:47:39 +0000 Subject: libbpf: Fix possible NULL pointer dereference when destroying skeleton When I checked the code in the skeleton header file generated from my own bpf prog, I found a possible NULL pointer dereference when destroying the skeleton. Then I checked the in-tree bpf progs and found that this is a common issue. Let's take the generated samples/bpf/xdp_redirect_cpu.skel.h for example. Below is the generated code in xdp_redirect_cpu__create_skeleton(): xdp_redirect_cpu__create_skeleton struct bpf_object_skeleton *s; s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s)); if (!s) goto error; ... error: bpf_object__destroy_skeleton(s); return -ENOMEM; After goto error, the NULL 's' will be dereferenced in bpf_object__destroy_skeleton(). We can fix this issue by adding a NULL check in bpf_object__destroy_skeleton(). Fixes: d66562fba1ce ("libbpf: Add BPF object skeleton support") Signed-off-by: Yafang Shao Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220108134739.32541-1-laoar.shao@gmail.com --- tools/lib/bpf/libbpf.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 7f10dd501a52..fdb3536afa7d 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -11795,6 +11795,9 @@ void bpf_object__detach_skeleton(struct bpf_object_skeleton *s) void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s) { + if (!s) + return; + if (s->progs) bpf_object__detach_skeleton(s); if (s->obj) -- cgit v1.2.3 From 063fa26aab7d4b987b2c797d12dc457a475011e5 Mon Sep 17 00:00:00 2001 From: Christy Lee Date: Fri, 7 Jan 2022 16:42:18 -0800 Subject: libbpf: Deprecate bpf_map__def() API All fields accessed via bpf_map_def can now be accessed via appropriate getters and setters. Mark the bpf_map__def() API as deprecated.
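For illustration, a migration away from bpf_map__def() could look like the sketch below ("map" is assumed to be a valid struct bpf_map * obtained from an opened object):

	/* before (deprecated): read fields through struct bpf_map_def */
	const struct bpf_map_def *def = bpf_map__def(map);
	__u32 n = def->max_entries;

	/* after: use the dedicated getters */
	__u32 max_entries = bpf_map__max_entries(map);
	enum bpf_map_type type = bpf_map__type(map);
	__u32 key_size = bpf_map__key_size(map);
	__u32 value_size = bpf_map__value_size(map);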
Signed-off-by: Christy Lee Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220108004218.355761-6-christylee@fb.com --- tools/lib/bpf/libbpf.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 8b9bc5e90c2b..9728551501ae 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -706,7 +706,8 @@ bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map); LIBBPF_API int bpf_map__fd(const struct bpf_map *map); LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd); /* get map definition */ -LIBBPF_API const struct bpf_map_def *bpf_map__def(const struct bpf_map *map); +LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 8, "use appropriate getters or setters instead") +const struct bpf_map_def *bpf_map__def(const struct bpf_map *map); /* get map name */ LIBBPF_API const char *bpf_map__name(const struct bpf_map *map); /* get/set map type */ -- cgit v1.2.3 From eaa266d83a3730a15de2ceebcc89e8f6290e8cf6 Mon Sep 17 00:00:00 2001 From: Toke Høiland-Jørgensen Date: Tue, 18 Jan 2022 15:13:27 +0100 Subject: libbpf: Define BTF_KIND_* constants in btf.h to avoid compilation errors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The btf.h header included with libbpf contains inline helper functions to check for various BTF kinds. These helpers directly reference the BTF_KIND_* constants defined in the kernel header, and because the header file is included in user applications, this happens in the user application compile units. This presents a problem if a user application is compiled on a system with older kernel headers because the constants are not available. To avoid this, add #defines of the constants directly in btf.h before using them. Since the kernel header moved to an enum for BTF_KIND_*, the #defines can shadow the enum values without any errors, so we only need #ifndef guards for the constants that predate the conversion to enum. We group these so there's only one guard for groups of values that were added together. [0] Closes: https://github.com/libbpf/libbpf/issues/436 Fixes: 223f903e9c83 ("bpf: Rename BTF_KIND_TAG to BTF_KIND_DECL_TAG") Fixes: 5b84bd10363e ("libbpf: Add support for BTF_KIND_TAG") Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Andrii Nakryiko Acked-by: Arnaldo Carvalho de Melo Link: https://lore.kernel.org/bpf/20220118141327.34231-1-toke@redhat.com --- tools/lib/bpf/btf.h | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 061839f04525..51862fdee850 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -375,8 +375,28 @@ btf_dump__dump_type_data(struct btf_dump *d, __u32 id, const struct btf_dump_type_data_opts *opts); /* - * A set of helpers for easier BTF types handling + * A set of helpers for easier BTF types handling. + * + * The inline functions below rely on constants from the kernel headers which + * may not be available for applications including this header file. To avoid + * compilation errors, we define all the constants here that were added after + * the initial introduction of the BTF_KIND* constants.
*/ +#ifndef BTF_KIND_FUNC +#define BTF_KIND_FUNC 12 /* Function */ +#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */ +#endif +#ifndef BTF_KIND_VAR +#define BTF_KIND_VAR 14 /* Variable */ +#define BTF_KIND_DATASEC 15 /* Section */ +#endif +#ifndef BTF_KIND_FLOAT +#define BTF_KIND_FLOAT 16 /* Floating point */ +#endif +/* The kernel header switched to enums, so these two were never #defined */ +#define BTF_KIND_DECL_TAG 17 /* Decl Tag */ +#define BTF_KIND_TYPE_TAG 18 /* Type Tag */ + static inline __u16 btf_kind(const struct btf_type *t) { return BTF_INFO_KIND(t->info); -- cgit v1.2.3 From d81283d272661094ecc564709f25c7b7543308e0 Mon Sep 17 00:00:00 2001 From: Kui-Feng Lee Date: Wed, 19 Jan 2022 10:02:14 -0800 Subject: libbpf: Improve btf__add_btf() with an additional hashmap for strings. Add a hashmap to map the string offsets from a source btf to the string offsets from a target btf to reduce overheads. btf__add_btf() calls btf__add_str() to add strings from a source to a target btf. It causes many string comparisons, and it is a major hotspot when adding a big btf. btf__add_str() uses strcmp() to check if a hash entry is the right one. The extra hashmap here compares offsets of strings, which are much cheaper. It remembers the results of btf__add_str() for later use to reduce the cost. We are parallelizing BTF encoding for pahole by creating separate btf instances for worker threads. These per-thread btf instances will be added to the btf instance of the main thread by calling btf__add_btf() to deduplicate and write out. With this patch and -j4, the running time of pahole drops to about 6.0s from 6.6s. The following lines are the summary of 'perf stat' w/o the change. 6.668126396 seconds time elapsed 13.451054000 seconds user 0.715520000 seconds sys The following lines are the summary w/ the change. 5.986973919 seconds time elapsed 12.939903000 seconds user 0.724152000 seconds sys V4 fixes a bug in the error check of the pointer returned by hashmap__new(). [v3] https://lore.kernel.org/bpf/20220118232053.2113139-1-kuifeng@fb.com/ [v2] https://lore.kernel.org/bpf/20220114193713.461349-1-kuifeng@fb.com/ Signed-off-by: Kui-Feng Lee Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220119180214.255634-1-kuifeng@fb.com --- tools/lib/bpf/btf.c | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 9aa19c89f758..1383e26c5d1f 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -1620,20 +1620,37 @@ static int btf_commit_type(struct btf *btf, int data_sz) struct btf_pipe { const struct btf *src; struct btf *dst; + struct hashmap *str_off_map; /* map string offsets from src to dst */ }; static int btf_rewrite_str(__u32 *str_off, void *ctx) { struct btf_pipe *p = ctx; - int off; + void *mapped_off; + int off, err; if (!*str_off) /* nothing to do for empty strings */ return 0; + if (p->str_off_map && + hashmap__find(p->str_off_map, (void *)(long)*str_off, &mapped_off)) { + *str_off = (__u32)(long)mapped_off; + return 0; + } + off = btf__add_str(p->dst, btf__str_by_offset(p->src, *str_off)); if (off < 0) return off; + /* Remember string mapping from src to dst. It avoids + * performing expensive string comparisons.
+ */ + if (p->str_off_map) { + err = hashmap__append(p->str_off_map, (void *)(long)*str_off, (void *)(long)off); + if (err) + return err; + } + *str_off = off; return 0; } @@ -1680,6 +1697,9 @@ static int btf_rewrite_type_ids(__u32 *type_id, void *ctx) return 0; } +static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx); +static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx); + int btf__add_btf(struct btf *btf, const struct btf *src_btf) { struct btf_pipe p = { .src = src_btf, .dst = btf }; @@ -1713,6 +1733,11 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf) if (!off) return libbpf_err(-ENOMEM); + /* Map the string offsets from src_btf to the offsets from btf to improve performance */ + p.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL); + if (IS_ERR(p.str_off_map)) + return libbpf_err(-ENOMEM); + /* bulk copy types data for all types from src_btf */ memcpy(t, src_btf->types_data, data_sz); @@ -1754,6 +1779,8 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf) btf->hdr->str_off += data_sz; btf->nr_types += cnt; + hashmap__free(p.str_off_map); + /* return type ID of the first added BTF type */ return btf->start_id + btf->nr_types - cnt; err_out: @@ -1767,6 +1794,8 @@ err_out: * wasn't modified, so doesn't need restoring, see big comment above */ btf->hdr->str_len = old_strs_len; + hashmap__free(p.str_off_map); + return libbpf_err(err); } -- cgit v1.2.3 From 93b8952d223af03c51fba0c6258173d2ffbd2cb7 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 19 Jan 2022 22:05:28 -0800 Subject: libbpf: deprecate legacy BPF map definitions Enact deprecation of legacy BPF map definition in SEC("maps") ([0]). For the definitions themselves, introduce the LIBBPF_STRICT_MAP_DEFINITIONS flag for libbpf strict mode. If it is set, error out on any struct bpf_map_def-based map definition. If not set, libbpf will print out a warning for each legacy BPF map to raise awareness that it goes away. For any use of the BPF_ANNOTATE_KV_PAIR() macro providing a legacy way to associate BTF key/value type information with a legacy BPF map definition, warn through libbpf's pr_warn() error message (but don't fail BPF object open). BPF-side struct bpf_map_def is marked as deprecated. User-space struct bpf_map_def has to be used internally in libbpf, so it is left untouched. It should be enough for bpf_map__def() to be marked deprecated to raise awareness that it goes away. bpftool is an interesting case that utilizes libbpf to open a BPF ELF object to generate a skeleton. As such, even though bpftool itself uses full-on strict libbpf mode (LIBBPF_STRICT_ALL), it has to relax it a bit for BPF map definition handling to minimize unnecessary disruptions. So opt out of LIBBPF_STRICT_MAP_DEFINITIONS for bpftool. Users' code that will later use the generated skeleton will make its own decision whether to enforce LIBBPF_STRICT_MAP_DEFINITIONS or not. There are a few tests in selftests/bpf that are consciously using legacy BPF map definitions to test libbpf functionality. For those, temporarily opt out of LIBBPF_STRICT_MAP_DEFINITIONS mode for the duration of those tests.
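For reference, a minimal sketch of the BPF-side migration (map name, key/value types, and size are illustrative):

	/* legacy definition in SEC("maps"), now deprecated: */
	struct bpf_map_def SEC("maps") counters = {
		.type = BPF_MAP_TYPE_HASH,
		.key_size = sizeof(__u32),
		.value_size = sizeof(__u64),
		.max_entries = 128,
	};

	/* BTF-defined equivalent in SEC(".maps"): */
	struct {
		__uint(type, BPF_MAP_TYPE_HASH);
		__uint(max_entries, 128);
		__type(key, __u32);
		__type(value, __u64);
	} counters SEC(".maps");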
[0] Closes: https://github.com/libbpf/libbpf/issues/272 Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20220120060529.1890907-4-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/bpf/bpftool/main.c | 9 ++++++++- tools/lib/bpf/bpf_helpers.h | 2 +- tools/lib/bpf/libbpf.c | 8 ++++++++ tools/lib/bpf/libbpf_legacy.h | 5 +++++ tools/testing/selftests/bpf/prog_tests/btf.c | 4 ++++ 5 files changed, 26 insertions(+), 2 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c index 020e91a542d5..9d01fa9de033 100644 --- a/tools/bpf/bpftool/main.c +++ b/tools/bpf/bpftool/main.c @@ -478,7 +478,14 @@ int main(int argc, char **argv) } if (!legacy_libbpf) { - ret = libbpf_set_strict_mode(LIBBPF_STRICT_ALL); + enum libbpf_strict_mode mode; + + /* Allow legacy map definitions for skeleton generation. + * It will still be rejected if users use LIBBPF_STRICT_ALL + * mode for loading generated skeleton. + */ + mode = (__LIBBPF_STRICT_LAST - 1) & ~LIBBPF_STRICT_MAP_DEFINITIONS; + ret = libbpf_set_strict_mode(mode); if (ret) p_err("failed to enable libbpf strict mode: %d", ret); } diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h index 963b1060d944..44df982d2a5c 100644 --- a/tools/lib/bpf/bpf_helpers.h +++ b/tools/lib/bpf/bpf_helpers.h @@ -133,7 +133,7 @@ struct bpf_map_def { unsigned int value_size; unsigned int max_entries; unsigned int map_flags; -}; +} __attribute__((deprecated("use BTF-defined maps in .maps section"))); enum libbpf_pin_type { LIBBPF_PIN_NONE, diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index fdb3536afa7d..fc6d530e20db 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1937,6 +1937,11 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict) if (obj->efile.maps_shndx < 0) return 0; + if (libbpf_mode & LIBBPF_STRICT_MAP_DEFINITIONS) { + pr_warn("legacy map definitions in SEC(\"maps\") are not supported\n"); + return -EOPNOTSUPP; + } + if (!symbols) return -EINVAL; @@ -1999,6 +2004,8 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict) return -LIBBPF_ERRNO__FORMAT; } + pr_warn("map '%s' (legacy): legacy map definitions are deprecated, use BTF-defined maps instead\n", map_name); + if (ELF64_ST_BIND(sym->st_info) == STB_LOCAL) { pr_warn("map '%s' (legacy): static maps are not supported\n", map_name); return -ENOTSUP; @@ -4190,6 +4197,7 @@ static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map) return 0; if (!bpf_map__is_internal(map)) { + pr_warn("Use of BPF_ANNOTATE_KV_PAIR is deprecated, use BTF-defined maps in .maps section instead\n"); ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size, def->value_size, &key_type_id, &value_type_id); diff --git a/tools/lib/bpf/libbpf_legacy.h b/tools/lib/bpf/libbpf_legacy.h index 79131f761a27..3c2b281c2bc3 100644 --- a/tools/lib/bpf/libbpf_legacy.h +++ b/tools/lib/bpf/libbpf_legacy.h @@ -73,6 +73,11 @@ enum libbpf_strict_mode { * operation. */ LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK = 0x10, + /* + * Error out on any SEC("maps") map definition, which are deprecated + * in favor of BTF-defined map definitions in SEC(".maps"). 
+ */ + LIBBPF_STRICT_MAP_DEFINITIONS = 0x20, __LIBBPF_STRICT_LAST, }; diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index 8ba53acf9eb4..14f9b6136794 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -4560,6 +4560,8 @@ static void do_test_file(unsigned int test_num) has_btf_ext = btf_ext != NULL; btf_ext__free(btf_ext); + /* temporary disable LIBBPF_STRICT_MAP_DEFINITIONS to test legacy maps */ + libbpf_set_strict_mode((__LIBBPF_STRICT_LAST - 1) & ~LIBBPF_STRICT_MAP_DEFINITIONS); obj = bpf_object__open(test->file); err = libbpf_get_error(obj); if (CHECK(err, "obj: %d", err)) @@ -4684,6 +4686,8 @@ skip: fprintf(stderr, "OK"); done: + libbpf_set_strict_mode(LIBBPF_STRICT_ALL); + btf__free(btf); free(func_info); bpf_object__close(obj); -- cgit v1.2.3 From c359821ac65b7cda0e24f75f6ffd465c3f771204 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 19 Jan 2022 22:14:19 -0800 Subject: libbpf: streamline low-level XDP APIs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduce 4 new netlink-based XDP APIs for attaching, detaching, and querying XDP programs: - bpf_xdp_attach; - bpf_xdp_detach; - bpf_xdp_query; - bpf_xdp_query_id. These APIs replace bpf_set_link_xdp_fd, bpf_set_link_xdp_fd_opts, bpf_get_link_xdp_id, and bpf_get_link_xdp_info APIs ([0]). The latter don't follow a consistent naming pattern and some of them use non-extensible approaches (e.g., struct xdp_link_info which can't be modified without breaking libbpf ABI). The approach I took with these low-level XDP APIs is similar to what we did with low-level TC APIs. There is a nice duality of bpf_tc_attach vs bpf_xdp_attach, and so on. I left bpf_xdp_attach() to support detaching when -1 is specified for prog_fd for generality and convenience, but bpf_xdp_detach() is preferred due to clearer naming and associated semantics. Both bpf_xdp_attach() and bpf_xdp_detach() accept the same opts struct, allowing the expected old_prog_fd to be specified. While doing the refactoring, I noticed that old APIs require users to specify opts with old_fd == -1 to declare a "don't care about already attached XDP prog fd" condition. Otherwise, FD 0 is assumed, which is essentially never an intended behavior. So I made this behavior consistent with other kernel and libbpf APIs, in which zero FD means "no FD". This seems to be more in line with the latest thinking in BPF land and should cause less user confusion, hopefully. For querying, I left two APIs: the more generic bpf_xdp_query(), which allows querying multiple IDs and the attach mode, and a specialization of it, bpf_xdp_query_id(), which returns only the requested prog_id. Uses of prog_id returning bpf_get_link_xdp_id() were so prevalent across selftests and samples that it seemed a very common use case, and using bpf_xdp_query() for doing it felt very cumbersome with a heavily branched if/else chain based on flags and attach mode. Old APIs are scheduled for deprecation in the libbpf 0.8 release.
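A rough usage sketch of the new calls (ifindex and prog_fd are assumed to be valid; error handling elided):

	__u32 prog_id = 0;
	int err;

	err = bpf_xdp_attach(ifindex, prog_fd, XDP_FLAGS_DRV_MODE, NULL);
	if (!err)
		err = bpf_xdp_query_id(ifindex, XDP_FLAGS_DRV_MODE, &prog_id);
	...
	err = bpf_xdp_detach(ifindex, XDP_FLAGS_DRV_MODE, NULL);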
[0] Closes: https://github.com/libbpf/libbpf/issues/309 Signed-off-by: Andrii Nakryiko Acked-by: Toke Høiland-Jørgensen Link: https://lore.kernel.org/r/20220120061422.2710637-2-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/libbpf.h | 29 ++++++++++++ tools/lib/bpf/libbpf.map | 4 ++ tools/lib/bpf/netlink.c | 117 ++++++++++++++++++++++++++++++++++------------- 3 files changed, 117 insertions(+), 33 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 9728551501ae..94670066de62 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -833,13 +833,42 @@ struct bpf_xdp_set_link_opts { }; #define bpf_xdp_set_link_opts__last_field old_fd +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_xdp_attach() instead") LIBBPF_API int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_xdp_attach() instead") LIBBPF_API int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags, const struct bpf_xdp_set_link_opts *opts); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_xdp_query_id() instead") LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_xdp_query() instead") LIBBPF_API int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info, size_t info_size, __u32 flags); +struct bpf_xdp_attach_opts { + size_t sz; + int old_prog_fd; + size_t :0; +}; +#define bpf_xdp_attach_opts__last_field old_prog_fd + +struct bpf_xdp_query_opts { + size_t sz; + __u32 prog_id; /* output */ + __u32 drv_prog_id; /* output */ + __u32 hw_prog_id; /* output */ + __u32 skb_prog_id; /* output */ + __u8 attach_mode; /* output */ + size_t :0; +}; +#define bpf_xdp_query_opts__last_field attach_mode + +LIBBPF_API int bpf_xdp_attach(int ifindex, int prog_fd, __u32 flags, + const struct bpf_xdp_attach_opts *opts); +LIBBPF_API int bpf_xdp_detach(int ifindex, __u32 flags, + const struct bpf_xdp_attach_opts *opts); +LIBBPF_API int bpf_xdp_query(int ifindex, int flags, struct bpf_xdp_query_opts *opts); +LIBBPF_API int bpf_xdp_query_id(int ifindex, int flags, __u32 *prog_id); + /* TC related API */ enum bpf_tc_attach_point { BPF_TC_INGRESS = 1 << 0, diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 8262cfca2240..e10f0822845a 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -428,6 +428,10 @@ LIBBPF_0.7.0 { bpf_program__log_level; bpf_program__set_log_buf; bpf_program__set_log_level; + bpf_xdp_attach; + bpf_xdp_detach; + bpf_xdp_query; + bpf_xdp_query_id; libbpf_probe_bpf_helper; libbpf_probe_bpf_map_type; libbpf_probe_bpf_prog_type; diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c index 39f25e09b51e..c39c37f99d5c 100644 --- a/tools/lib/bpf/netlink.c +++ b/tools/lib/bpf/netlink.c @@ -217,6 +217,28 @@ static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd, return libbpf_netlink_send_recv(&req, NULL, NULL, NULL); } +int bpf_xdp_attach(int ifindex, int prog_fd, __u32 flags, const struct bpf_xdp_attach_opts *opts) +{ + int old_prog_fd, err; + + if (!OPTS_VALID(opts, bpf_xdp_attach_opts)) + return libbpf_err(-EINVAL); + + old_prog_fd = OPTS_GET(opts, old_prog_fd, 0); + if (old_prog_fd) + flags |= XDP_FLAGS_REPLACE; + else + old_prog_fd = -1; + + err = __bpf_set_link_xdp_fd_replace(ifindex, prog_fd, old_prog_fd, flags); + return libbpf_err(err); +} + +int bpf_xdp_detach(int ifindex, __u32 flags, const struct bpf_xdp_attach_opts *opts) +{ + return bpf_xdp_attach(ifindex, -1, flags, opts); +} + 
int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags, const struct bpf_xdp_set_link_opts *opts) { @@ -303,69 +325,98 @@ static int get_xdp_info(void *cookie, void *msg, struct nlattr **tb) return 0; } -int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info, - size_t info_size, __u32 flags) +int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts) { - struct xdp_id_md xdp_id = {}; - __u32 mask; - int ret; struct libbpf_nla_req req = { .nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)), .nh.nlmsg_type = RTM_GETLINK, .nh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST, .ifinfo.ifi_family = AF_PACKET, }; + struct xdp_id_md xdp_id = {}; + int err; - if (flags & ~XDP_FLAGS_MASK || !info_size) + if (!OPTS_VALID(opts, bpf_xdp_query_opts)) + return libbpf_err(-EINVAL); + + if (xdp_flags & ~XDP_FLAGS_MASK) return libbpf_err(-EINVAL); /* Check whether the single {HW,DRV,SKB} mode is set */ - flags &= (XDP_FLAGS_SKB_MODE | XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE); - mask = flags - 1; - if (flags && flags & mask) + xdp_flags &= XDP_FLAGS_SKB_MODE | XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE; + if (xdp_flags & (xdp_flags - 1)) return libbpf_err(-EINVAL); xdp_id.ifindex = ifindex; - xdp_id.flags = flags; + xdp_id.flags = xdp_flags; - ret = libbpf_netlink_send_recv(&req, __dump_link_nlmsg, + err = libbpf_netlink_send_recv(&req, __dump_link_nlmsg, get_xdp_info, &xdp_id); - if (!ret) { - size_t sz = min(info_size, sizeof(xdp_id.info)); + if (err) + return libbpf_err(err); - memcpy(info, &xdp_id.info, sz); - memset((void *) info + sz, 0, info_size - sz); - } + OPTS_SET(opts, prog_id, xdp_id.info.prog_id); + OPTS_SET(opts, drv_prog_id, xdp_id.info.drv_prog_id); + OPTS_SET(opts, hw_prog_id, xdp_id.info.hw_prog_id); + OPTS_SET(opts, skb_prog_id, xdp_id.info.skb_prog_id); + OPTS_SET(opts, attach_mode, xdp_id.info.attach_mode); - return libbpf_err(ret); + return 0; } -static __u32 get_xdp_id(struct xdp_link_info *info, __u32 flags) +int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info, + size_t info_size, __u32 flags) { - flags &= XDP_FLAGS_MODES; + LIBBPF_OPTS(bpf_xdp_query_opts, opts); + size_t sz; + int err; + + if (!info_size) + return libbpf_err(-EINVAL); - if (info->attach_mode != XDP_ATTACHED_MULTI && !flags) - return info->prog_id; - if (flags & XDP_FLAGS_DRV_MODE) - return info->drv_prog_id; - if (flags & XDP_FLAGS_HW_MODE) - return info->hw_prog_id; - if (flags & XDP_FLAGS_SKB_MODE) - return info->skb_prog_id; + err = bpf_xdp_query(ifindex, flags, &opts); + if (err) + return libbpf_err(err); + + /* struct xdp_link_info field layout matches struct bpf_xdp_query_opts + * layout after sz field + */ + sz = min(info_size, offsetofend(struct xdp_link_info, attach_mode)); + memcpy(info, &opts.prog_id, sz); + memset((void *)info + sz, 0, info_size - sz); return 0; } -int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags) +int bpf_xdp_query_id(int ifindex, int flags, __u32 *prog_id) { - struct xdp_link_info info; + LIBBPF_OPTS(bpf_xdp_query_opts, opts); int ret; - ret = bpf_get_link_xdp_info(ifindex, &info, sizeof(info), flags); - if (!ret) - *prog_id = get_xdp_id(&info, flags); + ret = bpf_xdp_query(ifindex, flags, &opts); + if (ret) + return libbpf_err(ret); + + flags &= XDP_FLAGS_MODES; - return libbpf_err(ret); + if (opts.attach_mode != XDP_ATTACHED_MULTI && !flags) + *prog_id = opts.prog_id; + else if (flags & XDP_FLAGS_DRV_MODE) + *prog_id = opts.drv_prog_id; + else if (flags & XDP_FLAGS_HW_MODE) + *prog_id = opts.hw_prog_id; + else if (flags & 
XDP_FLAGS_SKB_MODE) + *prog_id = opts.skb_prog_id; + else + *prog_id = 0; + + return 0; +} + + +int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags) +{ + return bpf_xdp_query_id(ifindex, flags, prog_id); } typedef int (*qdisc_config_t)(struct libbpf_nla_req *req); -- cgit v1.2.3 From 082c4bfba4f77d6c65b451d7ef23093a75cc50e7 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Fri, 21 Jan 2022 11:10:01 +0100 Subject: libbpf: Add SEC name for xdp frags programs Introduce support for the following SEC entries for XDP frags property: - SEC("xdp.frags") - SEC("xdp.frags/devmap") - SEC("xdp.frags/cpumap") Acked-by: Toke Hoiland-Jorgensen Acked-by: John Fastabend Signed-off-by: Lorenzo Bianconi Link: https://lore.kernel.org/r/af23b6e4841c171ad1af01917839b77847a4bc27.1642758637.git.lorenzo@kernel.org Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/libbpf.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index fc6d530e20db..a8c750373ad5 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -235,6 +235,8 @@ enum sec_def_flags { SEC_SLEEPABLE = 8, /* allow non-strict prefix matching */ SEC_SLOPPY_PFX = 16, + /* BPF program support non-linear XDP buffer */ + SEC_XDP_FRAGS = 32, }; struct bpf_sec_def { @@ -6570,6 +6572,9 @@ static int libbpf_preload_prog(struct bpf_program *prog, if (def & SEC_SLEEPABLE) opts->prog_flags |= BPF_F_SLEEPABLE; + if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS)) + opts->prog_flags |= BPF_F_XDP_HAS_FRAGS; + if ((prog->type == BPF_PROG_TYPE_TRACING || prog->type == BPF_PROG_TYPE_LSM || prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) { @@ -8608,8 +8613,11 @@ static const struct bpf_sec_def section_defs[] = { SEC_DEF("lsm.s/", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm), SEC_DEF("iter/", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter), SEC_DEF("syscall", SYSCALL, 0, SEC_SLEEPABLE), + SEC_DEF("xdp.frags/devmap", XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS), SEC_DEF("xdp_devmap/", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE), + SEC_DEF("xdp.frags/cpumap", XDP, BPF_XDP_CPUMAP, SEC_XDP_FRAGS), SEC_DEF("xdp_cpumap/", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE), + SEC_DEF("xdp.frags", XDP, BPF_XDP, SEC_XDP_FRAGS), SEC_DEF("xdp", XDP, BPF_XDP, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), SEC_DEF("perf_event", PERF_EVENT, 0, SEC_NONE | SEC_SLOPPY_PFX), SEC_DEF("lwt_in", LWT_IN, 0, SEC_NONE | SEC_SLOPPY_PFX), -- cgit v1.2.3 From a8b77f7463a500584a69fed60bb4da57ac435138 Mon Sep 17 00:00:00 2001 From: Kenny Yu Date: Mon, 24 Jan 2022 10:54:02 -0800 Subject: libbpf: Add "iter.s" section for sleepable bpf iterator programs This adds a new bpf section "iter.s" to allow bpf iterator programs to be sleepable. 
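A sleepable task iterator could then be declared as in the sketch below (the program body is illustrative; the bpf_iter__task context struct comes from vmlinux.h):

	SEC("iter.s/task")
	int dump_task(struct bpf_iter__task *ctx)
	{
		struct task_struct *task = ctx->task;

		if (!task)
			return 0;
		/* sleepable-only helpers such as bpf_copy_from_user()
		 * become usable from here
		 */
		return 0;
	}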
Signed-off-by: Kenny Yu Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20220124185403.468466-4-kennyyu@fb.com Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/libbpf.c | 1 + 1 file changed, 1 insertion(+) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index a8c750373ad5..57894f125df3 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -8612,6 +8612,7 @@ static const struct bpf_sec_def section_defs[] = { SEC_DEF("lsm/", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm), SEC_DEF("lsm.s/", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm), SEC_DEF("iter/", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter), + SEC_DEF("iter.s/", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_iter), SEC_DEF("syscall", SYSCALL, 0, SEC_SLEEPABLE), SEC_DEF("xdp.frags/devmap", XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS), SEC_DEF("xdp_devmap/", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE), -- cgit v1.2.3 From 9f45f70ab21eb2d3f4fcb41e4785abe380d406c5 Mon Sep 17 00:00:00 2001 From: Christy Lee Date: Mon, 24 Jan 2022 16:59:22 -0800 Subject: libbpf: Mark bpf_object__open_buffer() API deprecated Deprecate the bpf_object__open_buffer() API in favor of the unified opts-based bpf_object__open_mem() API. Signed-off-by: Christy Lee Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220125005923.418339-2-christylee@fb.com --- tools/lib/bpf/libbpf.h | 1 + 1 file changed, 1 insertion(+) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 94670066de62..281cb19591e5 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -180,6 +180,7 @@ bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz, const struct bpf_object_open_opts *opts); /* deprecated bpf_object__open variants */ +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__open_mem() instead") LIBBPF_API struct bpf_object * bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz, const char *name); -- cgit v1.2.3 From fc76387003d6907e298fd6b87f13847c4edddab1 Mon Sep 17 00:00:00 2001 From: Christy Lee Date: Mon, 24 Jan 2022 17:09:17 -0800 Subject: libbpf: Mark bpf_object__open_xattr() deprecated Mark bpf_object__open_xattr() as deprecated; use bpf_object__open_file() instead.
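A possible replacement pattern (file path and object name are illustrative):

	LIBBPF_OPTS(bpf_object_open_opts, opts,
		.object_name = "my_obj",
	);
	struct bpf_object *obj;

	obj = bpf_object__open_file("prog.bpf.o", &opts);
	if (libbpf_get_error(obj))
		/* handle error */;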
[0] Closes: https://github.com/libbpf/libbpf/issues/287 Signed-off-by: Christy Lee Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220125010917.679975-1-christylee@fb.com --- tools/lib/bpf/libbpf.c | 2 +- tools/lib/bpf/libbpf.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 57894f125df3..9e248ab28fdc 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -9460,7 +9460,7 @@ static int bpf_prog_load_xattr2(const struct bpf_prog_load_attr *attr, open_attr.file = attr->file; open_attr.prog_type = attr->prog_type; - obj = bpf_object__open_xattr(&open_attr); + obj = __bpf_object__open_xattr(&open_attr, 0); err = libbpf_get_error(obj); if (err) return libbpf_err(-ENOENT); diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 281cb19591e5..1bd021560f9c 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -184,6 +184,7 @@ LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_object__open_mem() instead") LIBBPF_API struct bpf_object * bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz, const char *name); +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__open_file() instead") LIBBPF_API struct bpf_object * bpf_object__open_xattr(struct bpf_object_open_attr *attr); -- cgit v1.2.3 From d084df3b7a4c49fb2abec55f8d512c51d643c408 Mon Sep 17 00:00:00 2001 From: Kenta Tada Date: Mon, 24 Jan 2022 23:16:21 +0900 Subject: libbpf: Fix the incorrect register read for syscalls on x86_64 Currently, rcx is read as the fourth parameter of a syscall on x86_64, but the x86_64 Linux system call convention actually uses r10. This commit adds wrappers for users who want to access syscall params to analyze user space. Signed-off-by: Kenta Tada Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220124141622.4378-3-Kenta.Tada@sony.com --- tools/lib/bpf/bpf_tracing.h | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h index 90f56b0f585f..032ba809f3e5 100644 --- a/tools/lib/bpf/bpf_tracing.h +++ b/tools/lib/bpf/bpf_tracing.h @@ -70,6 +70,7 @@ #define __PT_PARM2_REG si #define __PT_PARM3_REG dx #define __PT_PARM4_REG cx +#define __PT_PARM4_REG_SYSCALL r10 /* syscall uses r10 */ #define __PT_PARM5_REG r8 #define __PT_RET_REG sp #define __PT_FP_REG bp @@ -99,6 +100,7 @@ #define __PT_PARM2_REG rsi #define __PT_PARM3_REG rdx #define __PT_PARM4_REG rcx +#define __PT_PARM4_REG_SYSCALL r10 /* syscall uses r10 */ #define __PT_PARM5_REG r8 #define __PT_RET_REG rsp #define __PT_FP_REG rbp @@ -263,6 +265,26 @@ struct pt_regs; #endif +#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1(x) +#define PT_REGS_PARM2_SYSCALL(x) PT_REGS_PARM2(x) +#define PT_REGS_PARM3_SYSCALL(x) PT_REGS_PARM3(x) +#ifdef __PT_PARM4_REG_SYSCALL +#define PT_REGS_PARM4_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM4_REG_SYSCALL) +#else /* __PT_PARM4_REG_SYSCALL */ +#define PT_REGS_PARM4_SYSCALL(x) PT_REGS_PARM4(x) +#endif +#define PT_REGS_PARM5_SYSCALL(x) PT_REGS_PARM5(x) + +#define PT_REGS_PARM1_CORE_SYSCALL(x) PT_REGS_PARM1_CORE(x) +#define PT_REGS_PARM2_CORE_SYSCALL(x) PT_REGS_PARM2_CORE(x) +#define PT_REGS_PARM3_CORE_SYSCALL(x) PT_REGS_PARM3_CORE(x) +#ifdef __PT_PARM4_REG_SYSCALL +#define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM4_REG_SYSCALL) +#else /* __PT_PARM4_REG_SYSCALL */ +#define PT_REGS_PARM4_CORE_SYSCALL(x) PT_REGS_PARM4_CORE(x) +#endif
+#define PT_REGS_PARM5_CORE_SYSCALL(x) PT_REGS_PARM5_CORE(x) + #else /* defined(bpf_target_defined) */ #define PT_REGS_PARM1(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) @@ -290,6 +312,18 @@ struct pt_regs; #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) #define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM2_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM3_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM4_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM5_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) + +#define PT_REGS_PARM1_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM2_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM3_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM4_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) +#define PT_REGS_PARM5_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; }) + #endif /* defined(bpf_target_defined) */ #ifndef ___bpf_concat -- cgit v1.2.3 From 20eccf29e2979a18411517061998bac7d12c8543 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 24 Jan 2022 11:42:48 -0800 Subject: libbpf: hide and discourage inconsistently named getters Move a bunch of "getters" into libbpf_legacy.h to keep them there in libbpf 1.0. See [0] for discussion of "Discouraged APIs". These getters don't add any maintenance burden and are simple aliases, but they are inconsistent in naming. So keep them in libbpf_legacy.h instead of libbpf.h to "hide" them in favor of preferred getters ([1]). Also add two missing getters: bpf_program__type() and bpf_program__expected_attach_type().
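Sketch of the preferred spellings (prog, map, and btf are assumed to be valid pointers):

	__u32 size;

	enum bpf_prog_type prog_type = bpf_program__type(prog);
	enum bpf_attach_type attach_type = bpf_program__expected_attach_type(prog);
	const char *pin_path = bpf_map__pin_path(map);
	const void *raw_data = btf__raw_data(btf, &size);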
[0] https://github.com/libbpf/libbpf/wiki/Libbpf:-the-road-to-v1.0#handling-deprecation-of-apis-and-functionality [1] Closes: https://github.com/libbpf/libbpf/issues/307 Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20220124194254.2051434-2-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/btf.h | 5 +---- tools/lib/bpf/libbpf.c | 21 ++++++++++++--------- tools/lib/bpf/libbpf.h | 5 ++--- tools/lib/bpf/libbpf.map | 2 ++ tools/lib/bpf/libbpf_internal.h | 3 +++ tools/lib/bpf/libbpf_legacy.h | 17 +++++++++++++++++ 6 files changed, 37 insertions(+), 16 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 51862fdee850..96b44d55db6e 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -147,8 +147,6 @@ LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id); LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id); LIBBPF_API int btf__fd(const struct btf *btf); LIBBPF_API void btf__set_fd(struct btf *btf, int fd); -LIBBPF_DEPRECATED_SINCE(0, 7, "use btf__raw_data() instead") -LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size); LIBBPF_API const void *btf__raw_data(const struct btf *btf, __u32 *size); LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset); LIBBPF_API const char *btf__str_by_offset(const struct btf *btf, __u32 offset); @@ -159,8 +157,7 @@ LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name, LIBBPF_API struct btf_ext *btf_ext__new(const __u8 *data, __u32 size); LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext); -LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, - __u32 *size); +LIBBPF_API const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size); LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_func_info was never meant as a public API and has wrong assumptions embedded in it; it will be removed in the future libbpf versions") int btf_ext__reloc_func_info(const struct btf *btf, const struct btf_ext *btf_ext, diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 9e248ab28fdc..4ce94f4ed34a 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -7896,10 +7896,8 @@ int bpf_map__set_pin_path(struct bpf_map *map, const char *path) return 0; } -const char *bpf_map__get_pin_path(const struct bpf_map *map) -{ - return map->pin_path; -} +__alias(bpf_map__pin_path) +const char *bpf_map__get_pin_path(const struct bpf_map *map); const char *bpf_map__pin_path(const struct bpf_map *map) { @@ -8464,7 +8462,10 @@ static int bpf_program_nth_fd(const struct bpf_program *prog, int n) return fd; } -enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog) +__alias(bpf_program__type) +enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog); + +enum bpf_prog_type bpf_program__type(const struct bpf_program *prog) { return prog->type; } @@ -8508,8 +8509,10 @@ BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS); BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT); BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP); -enum bpf_attach_type -bpf_program__get_expected_attach_type(const struct bpf_program *prog) +__alias(bpf_program__expected_attach_type) +enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog); + +enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program *prog) { return prog->expected_attach_type; } @@ -9477,7 +9480,7 @@ static int 
bpf_prog_load_xattr2(const struct bpf_prog_load_attr *attr, bpf_program__set_expected_attach_type(prog, attach_type); } - if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) { + if (bpf_program__type(prog) == BPF_PROG_TYPE_UNSPEC) { /* * we haven't guessed from section name and user * didn't provide a fallback type, too bad... @@ -10528,7 +10531,7 @@ bpf_program__attach_fd(const struct bpf_program *prog, int target_fd, int btf_id return libbpf_err_ptr(-ENOMEM); link->detach = &bpf_link__detach_fd; - attach_type = bpf_program__get_expected_attach_type(prog); + attach_type = bpf_program__expected_attach_type(prog); link_fd = bpf_link_create(prog_fd, target_fd, attach_type, &opts); if (link_fd < 0) { link_fd = -errno; diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 1bd021560f9c..56bddb1553d3 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -607,12 +607,12 @@ LIBBPF_API int bpf_program__set_struct_ops(struct bpf_program *prog); LIBBPF_API int bpf_program__set_extension(struct bpf_program *prog); LIBBPF_API int bpf_program__set_sk_lookup(struct bpf_program *prog); -LIBBPF_API enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog); +LIBBPF_API enum bpf_prog_type bpf_program__type(const struct bpf_program *prog); LIBBPF_API void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type); LIBBPF_API enum bpf_attach_type -bpf_program__get_expected_attach_type(const struct bpf_program *prog); +bpf_program__expected_attach_type(const struct bpf_program *prog); LIBBPF_API void bpf_program__set_expected_attach_type(struct bpf_program *prog, enum bpf_attach_type type); @@ -760,7 +760,6 @@ LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map); */ LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map); LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path); -LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map); LIBBPF_API const char *bpf_map__pin_path(const struct bpf_map *map); LIBBPF_API bool bpf_map__is_pinned(const struct bpf_map *map); LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path); diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index e10f0822845a..aef6253a90c8 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -424,10 +424,12 @@ LIBBPF_0.6.0 { LIBBPF_0.7.0 { global: bpf_btf_load; + bpf_program__expected_attach_type; bpf_program__log_buf; bpf_program__log_level; bpf_program__set_log_buf; bpf_program__set_log_level; + bpf_program__type; bpf_xdp_attach; bpf_xdp_detach; bpf_xdp_query; diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index 1565679eb432..bc86b82e90d1 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -92,6 +92,9 @@ # define offsetofend(TYPE, FIELD) \ (offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD)) #endif +#ifndef __alias +#define __alias(symbol) __attribute__((alias(#symbol))) +#endif /* Check whether a string `str` has prefix `pfx`, regardless if `pfx` is * a string literal known at compilation time or char * pointer known only at diff --git a/tools/lib/bpf/libbpf_legacy.h b/tools/lib/bpf/libbpf_legacy.h index 3c2b281c2bc3..a283cf031665 100644 --- a/tools/lib/bpf/libbpf_legacy.h +++ b/tools/lib/bpf/libbpf_legacy.h @@ -86,6 +86,23 @@ LIBBPF_API int libbpf_set_strict_mode(enum libbpf_strict_mode mode); #define DECLARE_LIBBPF_OPTS LIBBPF_OPTS +/* "Discouraged" APIs which don't follow consistent libbpf naming patterns. 
+ * They are normally a trivial aliases or wrappers for proper APIs and are + * left to minimize unnecessary disruption for users of libbpf. But they + * shouldn't be used going forward. + */ + +struct bpf_program; +struct bpf_map; +struct btf; +struct btf_ext; + +LIBBPF_API enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog); +LIBBPF_API enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog); +LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map); +LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size); +LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size); + #ifdef __cplusplus } /* extern "C" */ #endif -- cgit v1.2.3 From c5023b8f2693035d7ce2cf8153298002182049ff Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 24 Jan 2022 11:42:49 -0800 Subject: libbpf: deprecate bpf_map__resize() Deprecate bpf_map__resize() in favor of the bpf_map__set_max_entries() setter. In addition to having a surprising name (users often don't realize that they need to use bpf_map__resize()), the name also implies some magic way of resizing a BPF map after it is created, which is clearly not the case. Another minor annoyance is that bpf_map__resize() disallows a 0 value for max_entries, which in some cases is totally acceptable (e.g., for the BPF perf buffer case, to let libbpf auto-create one buffer per available CPU core). [0] Closes: https://github.com/libbpf/libbpf/issues/304 Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20220124194254.2051434-3-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/libbpf.h | 1 + 1 file changed, 1 insertion(+) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 56bddb1553d3..5aaf17f3608e 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -718,6 +718,7 @@ LIBBPF_API int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type); /* get/set map size (max_entries) */ LIBBPF_API __u32 bpf_map__max_entries(const struct bpf_map *map); LIBBPF_API int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_map__set_max_entries() instead") LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries); /* get/set map flags */ LIBBPF_API __u32 bpf_map__map_flags(const struct bpf_map *map); -- cgit v1.2.3 From 5d98fce86e12a4d4c23f82ffbd320f853f1f3f1f Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 24 Jan 2022 11:42:50 -0800 Subject: libbpf: deprecate bpf_program__is_<type>() and bpf_program__set_<type>() APIs Not sure why these APIs were added in the first place instead of a completely generic (and not requiring constantly adding new APIs with each new BPF program type) bpf_program__type() and bpf_program__set_type() APIs. But as it is right now, there are 13 such specialized is_type/set_type APIs, while the latest kernel is already at 30+ BPF program types. Instead of completing the set of APIs and continuing to chase the kernel's bpf_prog_type enum, deprecate the existing subset and recommend the generic bpf_program__type() and bpf_program__set_type() APIs.
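Illustrative migration (prog is assumed to be a valid struct bpf_program *):

	/* instead of bpf_program__set_xdp(prog): */
	bpf_program__set_type(prog, BPF_PROG_TYPE_XDP);

	/* instead of bpf_program__is_xdp(prog): */
	if (bpf_program__type(prog) == BPF_PROG_TYPE_XDP)
		/* ... */;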
Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20220124194254.2051434-4-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- tools/lib/bpf/libbpf.h | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 5aaf17f3608e..5762b57aecfc 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -593,18 +593,31 @@ LIBBPF_API int bpf_program__nth_fd(const struct bpf_program *prog, int n); /* * Adjust type of BPF program. Default is kprobe. */ +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") LIBBPF_API int bpf_program__set_socket_filter(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") LIBBPF_API int bpf_program__set_tracepoint(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") LIBBPF_API int bpf_program__set_raw_tracepoint(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") LIBBPF_API int bpf_program__set_kprobe(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") LIBBPF_API int bpf_program__set_lsm(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") LIBBPF_API int bpf_program__set_sched_cls(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") LIBBPF_API int bpf_program__set_tracing(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") LIBBPF_API int bpf_program__set_struct_ops(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") LIBBPF_API int bpf_program__set_extension(struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead") LIBBPF_API int bpf_program__set_sk_lookup(struct bpf_program *prog); LIBBPF_API enum bpf_prog_type bpf_program__type(const struct bpf_program *prog); @@ -633,18 +646,31 @@ LIBBPF_API int bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd, const char *attach_func_name); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") LIBBPF_API bool bpf_program__is_socket_filter(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") LIBBPF_API bool bpf_program__is_tracepoint(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") LIBBPF_API bool bpf_program__is_raw_tracepoint(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") LIBBPF_API bool bpf_program__is_kprobe(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") LIBBPF_API bool bpf_program__is_lsm(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") LIBBPF_API bool bpf_program__is_sched_cls(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") LIBBPF_API bool 
bpf_program__is_sched_act(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") LIBBPF_API bool bpf_program__is_xdp(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") LIBBPF_API bool bpf_program__is_perf_event(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") LIBBPF_API bool bpf_program__is_tracing(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") LIBBPF_API bool bpf_program__is_struct_ops(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") LIBBPF_API bool bpf_program__is_extension(const struct bpf_program *prog); +LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__type() instead") LIBBPF_API bool bpf_program__is_sk_lookup(const struct bpf_program *prog); /* -- cgit v1.2.3 From 5ee32ea24ce7075736e03b4b0b5954a8a323cd73 Mon Sep 17 00:00:00 2001 From: Dave Marchevsky Date: Mon, 31 Jan 2022 17:46:10 -0800 Subject: libbpf: Deprecate btf_ext rec_size APIs btf_ext__{func,line}_info_rec_size functions are used in conjunction with already-deprecated btf_ext__reloc_{func,line}_info functions. Since struct btf_ext is opaque to the user it was necessary to expose rec_size getters in the past. btf_ext__reloc_{func,line}_info were deprecated in commit 8505e8709b5ee ("libbpf: Implement generalized .BTF.ext func/line info adjustment") as they're not compatible with support for multiple programs per section. It was decided[0] that users of these APIs should implement their own .btf.ext parsing to access this data, in which case the rec_size getters are unnecessary. So deprecate them from libbpf 0.7.0 onwards. [0] Closes: https://github.com/libbpf/libbpf/issues/277 Signed-off-by: Dave Marchevsky Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220201014610.3522985-1-davemarchevsky@fb.com --- tools/lib/bpf/btf.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 96b44d55db6e..b10729fd830c 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -168,8 +168,10 @@ int btf_ext__reloc_line_info(const struct btf *btf, const struct btf_ext *btf_ext, const char *sec_name, __u32 insns_cnt, void **line_info, __u32 *cnt); -LIBBPF_API __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext); -LIBBPF_API __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext); +LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_func_info is deprecated; write custom func_info parsing to fetch rec_size") +__u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext); +LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_line_info is deprecated; write custom line_info parsing to fetch rec_size") +__u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext); LIBBPF_API int btf__find_str(struct btf *btf, const char *s); LIBBPF_API int btf__add_str(struct btf *btf, const char *s); -- cgit v1.2.3 From 4a4d4cee48e284a2993d417c2f19439c4e1489be Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Tue, 1 Feb 2022 15:58:08 +0100 Subject: libbpf: Deprecate xdp_cpumap, xdp_devmap and classifier sec definitions Deprecate xdp_cpumap, xdp_devmap and classifier sec definitions. 
Introduce xdp/devmap and xdp/cpumap definitions according to the standard for SEC("") in libbpf: - prog_type.prog_flags/attach_place Signed-off-by: Lorenzo Bianconi Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/5c7bd9426b3ce6a31d9a4b1f97eb299e1467fc52.1643727185.git.lorenzo@kernel.org --- tools/lib/bpf/libbpf.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 4ce94f4ed34a..1b0936b016d9 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -237,6 +237,8 @@ enum sec_def_flags { SEC_SLOPPY_PFX = 16, /* BPF program support non-linear XDP buffer */ SEC_XDP_FRAGS = 32, + /* deprecated sec definitions not supposed to be used */ + SEC_DEPRECATED = 64, }; struct bpf_sec_def { @@ -6575,6 +6577,10 @@ static int libbpf_preload_prog(struct bpf_program *prog, if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS)) opts->prog_flags |= BPF_F_XDP_HAS_FRAGS; + if (def & SEC_DEPRECATED) + pr_warn("SEC(\"%s\") is deprecated, please see https://github.com/libbpf/libbpf/wiki/Libbpf-1.0-migration-guide#bpf-program-sec-annotation-deprecations for details\n", + prog->sec_name); + if ((prog->type == BPF_PROG_TYPE_TRACING || prog->type == BPF_PROG_TYPE_LSM || prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) { @@ -8596,7 +8602,7 @@ static const struct bpf_sec_def section_defs[] = { SEC_DEF("kretprobe/", KPROBE, 0, SEC_NONE, attach_kprobe), SEC_DEF("uretprobe/", KPROBE, 0, SEC_NONE), SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE), - SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE | SEC_SLOPPY_PFX), + SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE | SEC_SLOPPY_PFX | SEC_DEPRECATED), SEC_DEF("action", SCHED_ACT, 0, SEC_NONE | SEC_SLOPPY_PFX), SEC_DEF("tracepoint/", TRACEPOINT, 0, SEC_NONE, attach_tp), SEC_DEF("tp/", TRACEPOINT, 0, SEC_NONE, attach_tp), @@ -8618,9 +8624,11 @@ static const struct bpf_sec_def section_defs[] = { SEC_DEF("iter.s/", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_iter), SEC_DEF("syscall", SYSCALL, 0, SEC_SLEEPABLE), SEC_DEF("xdp.frags/devmap", XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS), - SEC_DEF("xdp_devmap/", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE), + SEC_DEF("xdp/devmap", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE), + SEC_DEF("xdp_devmap/", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE | SEC_DEPRECATED), SEC_DEF("xdp.frags/cpumap", XDP, BPF_XDP_CPUMAP, SEC_XDP_FRAGS), - SEC_DEF("xdp_cpumap/", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE), + SEC_DEF("xdp/cpumap", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE), + SEC_DEF("xdp_cpumap/", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE | SEC_DEPRECATED), SEC_DEF("xdp.frags", XDP, BPF_XDP, SEC_XDP_FRAGS), SEC_DEF("xdp", XDP, BPF_XDP, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX), SEC_DEF("perf_event", PERF_EVENT, 0, SEC_NONE | SEC_SLOPPY_PFX), -- cgit v1.2.3 From e981f41fd029d37b3e1c8aad2d72d3fe57a104d6 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 31 Jan 2022 14:05:23 -0800 Subject: libbpf: Open code low level bpf commands. Open code low level bpf commands used by light skeleton to be able to avoid full libbpf eventually. 
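The skel_*() helpers boil down to direct bpf(2) syscalls; roughly, the underlying wrapper that skel_internal.h provides looks like this sketch:

	static inline int skel_sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
				       unsigned int size)
	{
		return syscall(__NR_bpf, cmd, attr, size);
	}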
Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20220131220528.98088-3-alexei.starovoitov@gmail.com --- tools/lib/bpf/skel_internal.h | 44 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 42 insertions(+), 2 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/skel_internal.h b/tools/lib/bpf/skel_internal.h index 0b84d8e6b72a..57507f1c1934 100644 --- a/tools/lib/bpf/skel_internal.h +++ b/tools/lib/bpf/skel_internal.h @@ -70,19 +70,59 @@ static inline int skel_closenz(int fd) return -EINVAL; } +#ifndef offsetofend +#define offsetofend(TYPE, MEMBER) \ + (offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER))) +#endif + +static inline int skel_map_create(enum bpf_map_type map_type, + const char *map_name, + __u32 key_size, + __u32 value_size, + __u32 max_entries) +{ + const size_t attr_sz = offsetofend(union bpf_attr, map_extra); + union bpf_attr attr; + + memset(&attr, 0, attr_sz); + + attr.map_type = map_type; + strncpy(attr.map_name, map_name, sizeof(attr.map_name)); + attr.key_size = key_size; + attr.value_size = value_size; + attr.max_entries = max_entries; + + return skel_sys_bpf(BPF_MAP_CREATE, &attr, attr_sz); +} + +static inline int skel_map_update_elem(int fd, const void *key, + const void *value, __u64 flags) +{ + const size_t attr_sz = offsetofend(union bpf_attr, flags); + union bpf_attr attr; + + memset(&attr, 0, attr_sz); + attr.map_fd = fd; + attr.key = (long) key; + attr.value = (long) value; + attr.flags = flags; + + return skel_sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz); +} + static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts) { int map_fd = -1, prog_fd = -1, key = 0, err; union bpf_attr attr; - map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1, NULL); + map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1); if (map_fd < 0) { opts->errstr = "failed to create loader map"; err = -errno; goto out; } - err = bpf_map_update_elem(map_fd, &key, opts->data, 0); + err = skel_map_update_elem(map_fd, &key, opts->data, 0); if (err < 0) { opts->errstr = "failed to update loader map"; err = -errno; -- cgit v1.2.3 From c69f94a33d12a9c49f1800c54838ee19447ac176 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 31 Jan 2022 14:05:24 -0800 Subject: libbpf: Open code raw_tp_open and link_create commands. Open code raw_tracepoint_open and link_create used by light skeleton to be able to avoid full libbpf eventually. 
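For illustration, the attach stubs that bpftool now emits boil down to the following (tp_name and prog_fd are placeholders):

    int fd;

    /* raw tracepoint programs */
    fd = skel_raw_tracepoint_open(tp_name, prog_fd);

    /* BPF iterator programs */
    fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);

Both helpers fill only a partial union bpf_attr and call skel_sys_bpf() directly, so the generated skeleton no longer pulls in bpf_raw_tracepoint_open()/bpf_link_create() from libbpf.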
Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20220131220528.98088-4-alexei.starovoitov@gmail.com --- tools/bpf/bpftool/gen.c | 6 +++--- tools/lib/bpf/skel_internal.h | 26 ++++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 3 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c index 8eacfc79ec43..eacfc6a2060d 100644 --- a/tools/bpf/bpftool/gen.c +++ b/tools/bpf/bpftool/gen.c @@ -381,13 +381,13 @@ static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name) switch (bpf_program__type(prog)) { case BPF_PROG_TYPE_RAW_TRACEPOINT: tp_name = strchr(bpf_program__section_name(prog), '/') + 1; - printf("\tint fd = bpf_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name); + printf("\tint fd = skel_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name); break; case BPF_PROG_TYPE_TRACING: if (bpf_program__expected_attach_type(prog) == BPF_TRACE_ITER) - printf("\tint fd = bpf_link_create(prog_fd, 0, BPF_TRACE_ITER, NULL);\n"); + printf("\tint fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);\n"); else - printf("\tint fd = bpf_raw_tracepoint_open(NULL, prog_fd);\n"); + printf("\tint fd = skel_raw_tracepoint_open(NULL, prog_fd);\n"); break; default: printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n"); diff --git a/tools/lib/bpf/skel_internal.h b/tools/lib/bpf/skel_internal.h index 57507f1c1934..dcd3336512d4 100644 --- a/tools/lib/bpf/skel_internal.h +++ b/tools/lib/bpf/skel_internal.h @@ -110,6 +110,32 @@ static inline int skel_map_update_elem(int fd, const void *key, return skel_sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz); } +static inline int skel_raw_tracepoint_open(const char *name, int prog_fd) +{ + const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint.prog_fd); + union bpf_attr attr; + + memset(&attr, 0, attr_sz); + attr.raw_tracepoint.name = (long) name; + attr.raw_tracepoint.prog_fd = prog_fd; + + return skel_sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz); +} + +static inline int skel_link_create(int prog_fd, int target_fd, + enum bpf_attach_type attach_type) +{ + const size_t attr_sz = offsetofend(union bpf_attr, link_create.iter_info_len); + union bpf_attr attr; + + memset(&attr, 0, attr_sz); + attr.link_create.prog_fd = prog_fd; + attr.link_create.target_fd = target_fd; + attr.link_create.attach_type = attach_type; + + return skel_sys_bpf(BPF_LINK_CREATE, &attr, attr_sz); +} + static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts) { int map_fd = -1, prog_fd = -1, key = 0, err; -- cgit v1.2.3 From 3e1ab843d2d4d2d4e67f9488c1497aedc014328a Mon Sep 17 00:00:00 2001 From: Delyan Kratunov Date: Wed, 2 Feb 2022 15:54:23 -0800 Subject: libbpf: Deprecate bpf_prog_test_run_xattr and bpf_prog_test_run Deprecate non-extendable bpf_prog_test_run{,_xattr} in favor of OPTS-based bpf_prog_test_run_opts ([0]). 
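A hedged migration sketch (prog_fd, pkt and out are placeholders):

    int err;

    LIBBPF_OPTS(bpf_test_run_opts, topts,
            .data_in = &pkt,
            .data_size_in = sizeof(pkt),
            .data_out = out,
            .data_size_out = sizeof(out),
            .repeat = 1,
    );

    err = bpf_prog_test_run_opts(prog_fd, &topts);
    /* on success, topts.retval, topts.duration and topts.data_size_out
     * are filled in by the kernel
     */

Unlike the fixed-layout attr/xattr structs, new fields can be appended to bpf_test_run_opts without breaking ABI.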
[0] Closes: https://github.com/libbpf/libbpf/issues/286 Signed-off-by: Delyan Kratunov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220202235423.1097270-5-delyank@fb.com --- tools/lib/bpf/bpf.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index c2e8327010f9..16b21757b8bf 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -453,12 +453,14 @@ struct bpf_prog_test_run_attr { * out: length of cxt_out */ }; +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_test_run_opts() instead") LIBBPF_API int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr); /* * bpf_prog_test_run does not check that data_out is large enough. Consider - * using bpf_prog_test_run_xattr instead. + * using bpf_prog_test_run_opts instead. */ +LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_test_run_opts() instead") LIBBPF_API int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size, void *data_out, __u32 *size_out, __u32 *retval, __u32 *duration); -- cgit v1.2.3 From a5dd9589f0ababa9ca645d96cfaa8161d45dcb74 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 2 Feb 2022 14:59:11 -0800 Subject: libbpf: Stop using deprecated bpf_map__is_offload_neutral() Open-code bpf_map__is_offload_neutral() logic in one place in to-be-deprecated bpf_prog_load_xattr2. Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Reviewed-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20220202225916.3313522-2-andrii@kernel.org --- tools/lib/bpf/libbpf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 1b0936b016d9..81605de8654e 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -9505,7 +9505,7 @@ static int bpf_prog_load_xattr2(const struct bpf_prog_load_attr *attr, } bpf_object__for_each_map(map, obj) { - if (!bpf_map__is_offload_neutral(map)) + if (map->def.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) map->map_ifindex = attr->ifindex; } -- cgit v1.2.3 From ca33aa4ec5cbae7ddb995e565bc3b67ee4532b7a Mon Sep 17 00:00:00 2001 From: Delyan Kratunov Date: Thu, 3 Feb 2022 10:00:32 -0800 Subject: libbpf: Deprecate priv/set_priv storage Arbitrary storage via bpf_*__set_priv/__priv is being deprecated without a replacement ([1]). perf uses this capability, but most of that is going away with the removal of prologue generation ([2]). perf is already suppressing deprecation warnings, so the remaining cleanup will happen separately. 
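The pattern being removed looks like this; one possible substitute (an assumption, not something this patch mandates) is for the application to keep the association itself, e.g. in its own hashmap keyed by the bpf_program pointer:

    /* deprecated: arbitrary per-program storage inside libbpf */
    bpf_program__set_priv(prog, my_state, my_state_free_fn);
    struct my_state *st = bpf_program__priv(prog);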
[1]: Closes: https://github.com/libbpf/libbpf/issues/294 [2]: https://lore.kernel.org/bpf/20220123221932.537060-1-jolsa@kernel.org/ Signed-off-by: Delyan Kratunov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220203180032.1921580-1-delyank@fb.com --- tools/lib/bpf/libbpf.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 5762b57aecfc..c8d8daad212e 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -246,8 +246,10 @@ struct bpf_object *bpf_object__next(struct bpf_object *prev); (pos) = (tmp), (tmp) = bpf_object__next(tmp)) typedef void (*bpf_object_clear_priv_t)(struct bpf_object *, void *); +LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated") LIBBPF_API int bpf_object__set_priv(struct bpf_object *obj, void *priv, bpf_object_clear_priv_t clear_priv); +LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated") LIBBPF_API void *bpf_object__priv(const struct bpf_object *prog); LIBBPF_API int @@ -279,9 +281,10 @@ bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *prog) typedef void (*bpf_program_clear_priv_t)(struct bpf_program *, void *); +LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated") LIBBPF_API int bpf_program__set_priv(struct bpf_program *prog, void *priv, bpf_program_clear_priv_t clear_priv); - +LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated") LIBBPF_API void *bpf_program__priv(const struct bpf_program *prog); LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex); @@ -769,8 +772,10 @@ LIBBPF_API __u64 bpf_map__map_extra(const struct bpf_map *map); LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra); typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *); +LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated") LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv, bpf_map_clear_priv_t clear_priv); +LIBBPF_DEPRECATED_SINCE(0, 7, "storage via set_priv/priv is deprecated") LIBBPF_API void *bpf_map__priv(const struct bpf_map *map); LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map, const void *data, size_t size); -- cgit v1.2.3 From 227a0713b319e7a8605312dee1c97c97a719a9fc Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 3 Feb 2022 14:50:17 -0800 Subject: libbpf: Deprecate forgotten btf__get_map_kv_tids() btf__get_map_kv_tids() is in the same group of APIs as btf_ext__reloc_func_info()/btf_ext__reloc_line_info() which were only used by BCC. It was missed to be marked as deprecated in [0]. Fixing that to complete [1]. 
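For context, the deprecation note points users at BTF-defined maps, where key/value types are part of the map definition itself and libbpf resolves their BTF type IDs automatically, e.g. (struct my_value is illustrative):

    struct {
            __uint(type, BPF_MAP_TYPE_HASH);
            __uint(max_entries, 1024);
            __type(key, __u32);
            __type(value, struct my_value);
    } my_map SEC(".maps");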
[0] https://patchwork.kernel.org/project/netdevbpf/patch/20220201014610.3522985-1-davemarchevsky@fb.com/ [1] Closes: https://github.com/libbpf/libbpf/issues/277 Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20220203225017.1795946-1-andrii@kernel.org --- tools/lib/bpf/btf.h | 1 + tools/lib/bpf/libbpf.c | 3 +++ 2 files changed, 4 insertions(+) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index b10729fd830c..951ac7475794 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -150,6 +150,7 @@ LIBBPF_API void btf__set_fd(struct btf *btf, int fd); LIBBPF_API const void *btf__raw_data(const struct btf *btf, __u32 *size); LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset); LIBBPF_API const char *btf__str_by_offset(const struct btf *btf, __u32 offset); +LIBBPF_DEPRECATED_SINCE(0, 7, "this API is not necessary when BTF-defined maps are used") LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name, __u32 expected_key_size, __u32 expected_value_size, diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 81605de8654e..904cdf83002b 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -4202,9 +4202,12 @@ static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map) if (!bpf_map__is_internal(map)) { pr_warn("Use of BPF_ANNOTATE_KV_PAIR is deprecated, use BTF-defined maps in .maps section instead\n"); +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size, def->value_size, &key_type_id, &value_type_id); +#pragma GCC diagnostic pop } else { /* * LLVM annotates global data differently in BTF, that is, -- cgit v1.2.3 From 0908a66ad1124c1634c33847ac662106f7f2c198 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Fri, 4 Feb 2022 13:43:55 -0800 Subject: libbpf: Fix build issue with llvm-readelf There are cases where clang compiler is packaged in a way readelf is a symbolic link to llvm-readelf. In such cases, llvm-readelf will be used instead of default binutils readelf, and the following error will appear during libbpf build: Warning: Num of global symbols in /home/yhs/work/bpf-next/tools/testing/selftests/bpf/tools/build/libbpf/sharedobjs/libbpf-in.o (367) does NOT match with num of versioned symbols in /home/yhs/work/bpf-next/tools/testing/selftests/bpf/tools/build/libbpf/libbpf.so libbpf.map (383). Please make sure all LIBBPF_API symbols are versioned in libbpf.map. --- /home/yhs/work/bpf-next/tools/testing/selftests/bpf/tools/build/libbpf/libbpf_global_syms.tmp ... +++ /home/yhs/work/bpf-next/tools/testing/selftests/bpf/tools/build/libbpf/libbpf_versioned_syms.tmp ... @@ -324,6 +324,22 @@ btf__str_by_offset btf__type_by_id btf__type_cnt +LIBBPF_0.0.1 +LIBBPF_0.0.2 +LIBBPF_0.0.3 +LIBBPF_0.0.4 +LIBBPF_0.0.5 +LIBBPF_0.0.6 +LIBBPF_0.0.7 +LIBBPF_0.0.8 +LIBBPF_0.0.9 +LIBBPF_0.1.0 +LIBBPF_0.2.0 +LIBBPF_0.3.0 +LIBBPF_0.4.0 +LIBBPF_0.5.0 +LIBBPF_0.6.0 +LIBBPF_0.7.0 libbpf_attach_type_by_name libbpf_find_kernel_btf libbpf_find_vmlinux_btf_id make[2]: *** [Makefile:184: check_abi] Error 1 make[1]: *** [Makefile:140: all] Error 2 The above failure is due to different printouts for some ABS versioned symbols. 
For example, with the same libbpf.so, $ /bin/readelf --dyn-syms --wide tools/lib/bpf/libbpf.so | grep "LIBBPF" | grep ABS 134: 0000000000000000 0 OBJECT GLOBAL DEFAULT ABS LIBBPF_0.5.0 202: 0000000000000000 0 OBJECT GLOBAL DEFAULT ABS LIBBPF_0.6.0 ... $ /opt/llvm/bin/readelf --dyn-syms --wide tools/lib/bpf/libbpf.so | grep "LIBBPF" | grep ABS 134: 0000000000000000 0 OBJECT GLOBAL DEFAULT ABS LIBBPF_0.5.0@@LIBBPF_0.5.0 202: 0000000000000000 0 OBJECT GLOBAL DEFAULT ABS LIBBPF_0.6.0@@LIBBPF_0.6.0 ... The binutils readelf doesn't print out the symbol LIBBPF_* version and llvm-readelf does. Such a difference caused libbpf build failure with llvm-readelf. The proposed fix filters out all ABS symbols as they are not part of the comparison. This works for both binutils readelf and llvm-readelf. Reported-by: Delyan Kratunov Signed-off-by: Yonghong Song Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220204214355.502108-1-yhs@fb.com --- tools/lib/bpf/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index f947b61b2107..b8b37fe76006 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile @@ -131,7 +131,7 @@ GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \ sort -u | wc -l) VERSIONED_SYM_COUNT = $(shell readelf --dyn-syms --wide $(OUTPUT)libbpf.so | \ sed 's/\[.*\]//' | \ - awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}' | \ + awk '/GLOBAL/ && /DEFAULT/ && !/UND|ABS/ {print $$NF}' | \ grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l) CMD_TARGETS = $(LIB_TARGET) $(PC_FILE) @@ -194,7 +194,7 @@ check_abi: $(OUTPUT)libbpf.so $(VERSION_SCRIPT) sort -u > $(OUTPUT)libbpf_global_syms.tmp; \ readelf --dyn-syms --wide $(OUTPUT)libbpf.so | \ sed 's/\[.*\]//' | \ - awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}'| \ + awk '/GLOBAL/ && /DEFAULT/ && !/UND|ABS/ {print $$NF}'| \ grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | \ sort -u > $(OUTPUT)libbpf_versioned_syms.tmp; \ diff -u $(OUTPUT)libbpf_global_syms.tmp \ -- cgit v1.2.3 From e4e835c87bb5ef7e7923e72da57c923bfcade418 Mon Sep 17 00:00:00 2001 From: Mauricio Vásquez Date: Mon, 7 Feb 2022 09:50:50 -0500 Subject: libbpf: Remove mode check in libbpf_set_strict_mode() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit libbpf_set_strict_mode() checks that the passed mode doesn't contain extra bits for LIBBPF_STRICT_* flags that don't exist yet. It makes it difficult for applications to disable some strict flags as something like "LIBBPF_STRICT_ALL & ~LIBBPF_STRICT_MAP_DEFINITIONS" is rejected by this check and they have to use a rather complicated formula to calculate it.[0] One possibility is to change LIBBPF_STRICT_ALL to only contain the bits of all existing LIBBPF_STRICT_* flags instead of 0xffffffff. However it's not possible because the idea is that applications compiled against older libbpf_legacy.h would still be opting into latest LIBBPF_STRICT_ALL features.[1] The other possibility is to remove that check so something like "LIBBPF_STRICT_ALL & ~LIBBPF_STRICT_MAP_DEFINITIONS" is allowed. It's what this commit does. 
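With the check gone, the natural spelling just works:

    /* everything strict except legacy map definition handling */
    libbpf_set_strict_mode(LIBBPF_STRICT_ALL & ~LIBBPF_STRICT_MAP_DEFINITIONS);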
[0]: https://lore.kernel.org/bpf/20220204220435.301896-1-mauricio@kinvolk.io/ [1]: https://lore.kernel.org/bpf/CAEf4BzaTWa9fELJLh+bxnOb0P1EMQmaRbJVG0L+nXZdy0b8G3Q@mail.gmail.com/ Fixes: 93b8952d223a ("libbpf: deprecate legacy BPF map definitions") Signed-off-by: Mauricio Vásquez Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220207145052.124421-2-mauricio@kinvolk.io --- tools/lib/bpf/libbpf.c | 8 -------- 1 file changed, 8 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 904cdf83002b..2262bcdfee92 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -156,14 +156,6 @@ enum libbpf_strict_mode libbpf_mode = LIBBPF_STRICT_NONE; int libbpf_set_strict_mode(enum libbpf_strict_mode mode) { - /* __LIBBPF_STRICT_LAST is the last power-of-2 value used + 1, so to - * get all possible values we compensate last +1, and then (2*x - 1) - * to get the bit mask - */ - if (mode != LIBBPF_STRICT_ALL - && (mode & ~((__LIBBPF_STRICT_LAST - 1) * 2 - 1))) - return errno = EINVAL, -EINVAL; - libbpf_mode = mode; return 0; } -- cgit v1.2.3 From 4172843ed4a38f97084032f74f07b2037b5da3a6 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 8 Feb 2022 10:15:52 +0300 Subject: libbpf: Fix signedness bug in btf_dump_array_data() The btf__resolve_size() function returns negative error codes so "elem_size" must be signed for the error handling to work. Fixes: 920d16af9b42 ("libbpf: BTF dumper support for typed data") Signed-off-by: Dan Carpenter Signed-off-by: Daniel Borkmann Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20220208071552.GB10495@kili --- tools/lib/bpf/btf_dump.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index b9a3260c83cb..55aed9e398c3 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -1861,14 +1861,15 @@ static int btf_dump_array_data(struct btf_dump *d, { const struct btf_array *array = btf_array(t); const struct btf_type *elem_type; - __u32 i, elem_size = 0, elem_type_id; + __u32 i, elem_type_id; + __s64 elem_size; bool is_array_member; elem_type_id = array->type; elem_type = skip_mods_and_typedefs(d->btf, elem_type_id, NULL); elem_size = btf__resolve_size(d->btf, elem_type_id); if (elem_size <= 0) { - pr_warn("unexpected elem size %d for array type [%u]\n", elem_size, id); + pr_warn("unexpected elem size %lld for array type [%u]\n", elem_size, id); return -EINVAL; } -- cgit v1.2.3 From c5a1ffa0da76da02afc20a0946b594830488b324 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Wed, 9 Feb 2022 03:17:37 +0100 Subject: libbpf: Add PT_REGS_SYSCALL_REGS macro Architectures that select ARCH_HAS_SYSCALL_WRAPPER pass a pointer to struct pt_regs to syscall handlers, others unpack it into individual function parameters. Introduce a macro to describe what a particular arch does. 
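A minimal usage sketch (the __x64_sys_* entry point is x86-64-specific and used here only for illustration):

    SEC("kprobe/__x64_sys_openat")
    int trace_openat(struct pt_regs *ctx)
    {
            struct pt_regs *regs = PT_REGS_SYSCALL_REGS(ctx);
            long dfd = PT_REGS_PARM1_CORE_SYSCALL(regs);

            /* ... use dfd ... */
            return 0;
    }

On ARCH_HAS_SYSCALL_WRAPPER architectures the macro dereferences PARM1 to reach the inner struct pt_regs; architectures without the wrapper are expected to override it to pass ctx through unchanged.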
Signed-off-by: Ilya Leoshkevich Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220209021745.2215452-3-iii@linux.ibm.com --- tools/lib/bpf/bpf_tracing.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h index 032ba809f3e5..a5e92656bfba 100644 --- a/tools/lib/bpf/bpf_tracing.h +++ b/tools/lib/bpf/bpf_tracing.h @@ -326,6 +326,16 @@ struct pt_regs; #endif /* defined(bpf_target_defined) */ +/* + * When invoked from a syscall handler kprobe, returns a pointer to a + * struct pt_regs containing syscall arguments and suitable for passing to + * PT_REGS_PARMn_SYSCALL() and PT_REGS_PARMn_CORE_SYSCALL(). + */ +#ifndef PT_REGS_SYSCALL_REGS +/* By default, assume that the arch selects ARCH_HAS_SYSCALL_WRAPPER. */ +#define PT_REGS_SYSCALL_REGS(ctx) ((struct pt_regs *)PT_REGS_PARM1(ctx)) +#endif + #ifndef ___bpf_concat #define ___bpf_concat(a, b) a ## b #endif -- cgit v1.2.3 From f07f1503469b11b739892d50c836992ffbe026ee Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Wed, 9 Feb 2022 03:17:39 +0100 Subject: libbpf: Fix accessing syscall arguments on powerpc powerpc does not select ARCH_HAS_SYSCALL_WRAPPER, so its syscall handlers take "unpacked" syscall arguments. Indicate this to libbpf using PT_REGS_SYSCALL_REGS macro. Reported-by: Heiko Carstens Signed-off-by: Ilya Leoshkevich Signed-off-by: Andrii Nakryiko Tested-by: Naveen N. Rao Link: https://lore.kernel.org/bpf/20220209021745.2215452-5-iii@linux.ibm.com --- tools/lib/bpf/bpf_tracing.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h index a5e92656bfba..20bc63770c9f 100644 --- a/tools/lib/bpf/bpf_tracing.h +++ b/tools/lib/bpf/bpf_tracing.h @@ -180,6 +180,8 @@ #define __PT_RC_REG gpr[3] #define __PT_SP_REG sp #define __PT_IP_REG nip +/* powerpc does not select ARCH_HAS_SYSCALL_WRAPPER. */ +#define PT_REGS_SYSCALL_REGS(ctx) ctx #elif defined(bpf_target_sparc) -- cgit v1.2.3 From 5c101153bfd67387ba159b7864176217a40757da Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Wed, 9 Feb 2022 03:17:40 +0100 Subject: libbpf: Fix riscv register names riscv registers are accessed via struct user_regs_struct, not struct pt_regs. The program counter member in this struct is called pc, not epc. The frame pointer is called s0, not fp. Fixes: 3cc31d794097 ("libbpf: Normalize PT_REGS_xxx() macro definitions") Signed-off-by: Ilya Leoshkevich Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220209021745.2215452-6-iii@linux.ibm.com --- tools/lib/bpf/bpf_tracing.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h index 20bc63770c9f..03e501ac8f60 100644 --- a/tools/lib/bpf/bpf_tracing.h +++ b/tools/lib/bpf/bpf_tracing.h @@ -210,10 +210,10 @@ #define __PT_PARM4_REG a3 #define __PT_PARM5_REG a4 #define __PT_RET_REG ra -#define __PT_FP_REG fp +#define __PT_FP_REG s0 #define __PT_RC_REG a5 #define __PT_SP_REG sp -#define __PT_IP_REG epc +#define __PT_IP_REG pc #endif -- cgit v1.2.3 From cf0b5b2769233b026cfa41206109dea77b0d17e3 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Wed, 9 Feb 2022 03:17:41 +0100 Subject: libbpf: Fix accessing syscall arguments on riscv riscv does not select ARCH_HAS_SYSCALL_WRAPPER, so its syscall handlers take "unpacked" syscall arguments. Indicate this to libbpf using PT_REGS_SYSCALL_REGS macro. 
Reported-by: Heiko Carstens Signed-off-by: Ilya Leoshkevich Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220209021745.2215452-7-iii@linux.ibm.com --- tools/lib/bpf/bpf_tracing.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h index 03e501ac8f60..41a015ee6bfb 100644 --- a/tools/lib/bpf/bpf_tracing.h +++ b/tools/lib/bpf/bpf_tracing.h @@ -214,6 +214,8 @@ #define __PT_RC_REG a5 #define __PT_SP_REG sp #define __PT_IP_REG pc +/* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. */ +#define PT_REGS_SYSCALL_REGS(ctx) ctx #endif -- cgit v1.2.3 From 60d16c5ccb811c9817bb0a71644b9ba14115f68e Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Wed, 9 Feb 2022 03:17:43 +0100 Subject: libbpf: Allow overriding PT_REGS_PARM1{_CORE}_SYSCALL arm64 and s390 need a special way to access the first syscall argument. Signed-off-by: Ilya Leoshkevich Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220209021745.2215452-9-iii@linux.ibm.com --- tools/lib/bpf/bpf_tracing.h | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h index 41a015ee6bfb..276130d811ab 100644 --- a/tools/lib/bpf/bpf_tracing.h +++ b/tools/lib/bpf/bpf_tracing.h @@ -70,13 +70,15 @@ #define __PT_PARM2_REG si #define __PT_PARM3_REG dx #define __PT_PARM4_REG cx -#define __PT_PARM4_REG_SYSCALL r10 /* syscall uses r10 */ #define __PT_PARM5_REG r8 #define __PT_RET_REG sp #define __PT_FP_REG bp #define __PT_RC_REG ax #define __PT_SP_REG sp #define __PT_IP_REG ip +/* syscall uses r10 for PARM4 */ +#define PT_REGS_PARM4_SYSCALL(x) ((x)->r10) +#define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(x, r10) #else @@ -100,13 +102,15 @@ #define __PT_PARM2_REG rsi #define __PT_PARM3_REG rdx #define __PT_PARM4_REG rcx -#define __PT_PARM4_REG_SYSCALL r10 /* syscall uses r10 */ #define __PT_PARM5_REG r8 #define __PT_RET_REG rsp #define __PT_FP_REG rbp #define __PT_RC_REG rax #define __PT_SP_REG rsp #define __PT_IP_REG rip +/* syscall uses r10 for PARM4 */ +#define PT_REGS_PARM4_SYSCALL(x) ((x)->r10) +#define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(x, r10) #endif /* __i386__ */ @@ -269,22 +273,22 @@ struct pt_regs; #endif +#ifndef PT_REGS_PARM1_SYSCALL #define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1(x) +#endif #define PT_REGS_PARM2_SYSCALL(x) PT_REGS_PARM2(x) #define PT_REGS_PARM3_SYSCALL(x) PT_REGS_PARM3(x) -#ifdef __PT_PARM4_REG_SYSCALL -#define PT_REGS_PARM4_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM4_REG_SYSCALL) -#else /* __PT_PARM4_REG_SYSCALL */ +#ifndef PT_REGS_PARM4_SYSCALL #define PT_REGS_PARM4_SYSCALL(x) PT_REGS_PARM4(x) #endif #define PT_REGS_PARM5_SYSCALL(x) PT_REGS_PARM5(x) +#ifndef PT_REGS_PARM1_CORE_SYSCALL #define PT_REGS_PARM1_CORE_SYSCALL(x) PT_REGS_PARM1_CORE(x) +#endif #define PT_REGS_PARM2_CORE_SYSCALL(x) PT_REGS_PARM2_CORE(x) #define PT_REGS_PARM3_CORE_SYSCALL(x) PT_REGS_PARM3_CORE(x) -#ifdef __PT_PARM4_REG_SYSCALL -#define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM4_REG_SYSCALL) -#else /* __PT_PARM4_REG_SYSCALL */ +#ifndef PT_REGS_PARM4_CORE_SYSCALL #define PT_REGS_PARM4_CORE_SYSCALL(x) PT_REGS_PARM4_CORE(x) #endif #define PT_REGS_PARM5_CORE_SYSCALL(x) PT_REGS_PARM5_CORE(x) -- cgit v1.2.3 From fbca4a2f649730b67488a8b36140ce4d2cf13c63 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Wed, 9 Feb 2022 03:17:44 +0100 Subject: libbpf: Fix accessing the first syscall 
argument on arm64 On arm64, the first syscall argument should be accessed via orig_x0 (see arch/arm64/include/asm/syscall.h). Currently regs[0] is used instead, leading to bpf_syscall_macro test failure. orig_x0 cannot be added to struct user_pt_regs, since its layout is a part of the ABI. Therefore provide access to it only through PT_REGS_PARM1_CORE_SYSCALL() by using a struct pt_regs flavor. Reported-by: Heiko Carstens Signed-off-by: Ilya Leoshkevich Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220209021745.2215452-10-iii@linux.ibm.com --- tools/lib/bpf/bpf_tracing.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h index 276130d811ab..b6453107f75c 100644 --- a/tools/lib/bpf/bpf_tracing.h +++ b/tools/lib/bpf/bpf_tracing.h @@ -146,6 +146,10 @@ #elif defined(bpf_target_arm64) +struct pt_regs___arm64 { + unsigned long orig_x0; +}; + /* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */ #define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x)) #define __PT_PARM1_REG regs[0] @@ -158,6 +162,8 @@ #define __PT_RC_REG regs[0] #define __PT_SP_REG sp #define __PT_IP_REG pc +#define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma("GCC error \"use PT_REGS_PARM1_CORE_SYSCALL() instead\""); 0l; }) +#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___arm64 *)(x), orig_x0) #elif defined(bpf_target_mips) -- cgit v1.2.3 From 1f22a6f9f9a0f50218a11a0554709fd34a821fa3 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Wed, 9 Feb 2022 03:17:45 +0100 Subject: libbpf: Fix accessing the first syscall argument on s390 On s390, the first syscall argument should be accessed via orig_gpr2 (see arch/s390/include/asm/syscall.h). Currently gpr[2] is used instead, leading to bpf_syscall_macro test failure. orig_gpr2 cannot be added to user_pt_regs, since its layout is a part of the ABI. Therefore provide access to it only through PT_REGS_PARM1_CORE_SYSCALL() by using a struct pt_regs flavor. Reported-by: Andrii Nakryiko Signed-off-by: Ilya Leoshkevich Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220209021745.2215452-11-iii@linux.ibm.com --- tools/lib/bpf/bpf_tracing.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h index b6453107f75c..eb6eb3b28063 100644 --- a/tools/lib/bpf/bpf_tracing.h +++ b/tools/lib/bpf/bpf_tracing.h @@ -118,6 +118,10 @@ #elif defined(bpf_target_s390) +struct pt_regs___s390 { + unsigned long orig_gpr2; +}; + /* s390 provides user_pt_regs instead of struct pt_regs to userspace */ #define __PT_REGS_CAST(x) ((const user_pt_regs *)(x)) #define __PT_PARM1_REG gprs[2] @@ -130,6 +134,8 @@ #define __PT_RC_REG gprs[2] #define __PT_SP_REG gprs[15] #define __PT_IP_REG psw.addr +#define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma("GCC error \"use PT_REGS_PARM1_CORE_SYSCALL() instead\""); 0l; }) +#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ((const struct pt_regs___s390 *)(x), orig_gpr2) #elif defined(bpf_target_arm) -- cgit v1.2.3 From 816ae109554756ce5e22e3aabde10161c4d0a4f7 Mon Sep 17 00:00:00 2001 From: Hengqi Chen Date: Mon, 7 Feb 2022 22:31:33 +0800 Subject: libbpf: Add BPF_KPROBE_SYSCALL macro Add syscall-specific variant of BPF_KPROBE named BPF_KPROBE_SYSCALL ([0]). The new macro hides the underlying way of getting syscall input arguments. 
With the new macro, the following code: SEC("kprobe/__x64_sys_close") int BPF_KPROBE(do_sys_close, struct pt_regs *regs) { int fd; fd = PT_REGS_PARM1_CORE(regs); /* do something with fd */ } can be written as: SEC("kprobe/__x64_sys_close") int BPF_KPROBE_SYSCALL(do_sys_close, int fd) { /* do something with fd */ } [0] Closes: https://github.com/libbpf/libbpf/issues/425 Signed-off-by: Hengqi Chen Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220207143134.2977852-2-hengqi.chen@gmail.com --- tools/lib/bpf/bpf_tracing.h | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h index eb6eb3b28063..e3a8c947e89f 100644 --- a/tools/lib/bpf/bpf_tracing.h +++ b/tools/lib/bpf/bpf_tracing.h @@ -470,4 +470,39 @@ typeof(name(0)) name(struct pt_regs *ctx) \ } \ static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args) +#define ___bpf_syscall_args0() ctx +#define ___bpf_syscall_args1(x) ___bpf_syscall_args0(), (void *)PT_REGS_PARM1_CORE_SYSCALL(regs) +#define ___bpf_syscall_args2(x, args...) ___bpf_syscall_args1(args), (void *)PT_REGS_PARM2_CORE_SYSCALL(regs) +#define ___bpf_syscall_args3(x, args...) ___bpf_syscall_args2(args), (void *)PT_REGS_PARM3_CORE_SYSCALL(regs) +#define ___bpf_syscall_args4(x, args...) ___bpf_syscall_args3(args), (void *)PT_REGS_PARM4_CORE_SYSCALL(regs) +#define ___bpf_syscall_args5(x, args...) ___bpf_syscall_args4(args), (void *)PT_REGS_PARM5_CORE_SYSCALL(regs) +#define ___bpf_syscall_args(args...) ___bpf_apply(___bpf_syscall_args, ___bpf_narg(args))(args) + +/* + * BPF_KPROBE_SYSCALL is a variant of BPF_KPROBE, which is intended for + * tracing syscall functions, like __x64_sys_close. It hides the underlying + * platform-specific low-level way of getting syscall input arguments from + * struct pt_regs, and provides a familiar typed and named function arguments + * syntax and semantics of accessing syscall input parameters. + * + * Original struct pt_regs* context is preserved as 'ctx' argument. This might + * be necessary when using BPF helpers like bpf_perf_event_output(). + * + * This macro relies on BPF CO-RE support. + */ +#define BPF_KPROBE_SYSCALL(name, args...) \ +name(struct pt_regs *ctx); \ +static __attribute__((always_inline)) typeof(name(0)) \ +____##name(struct pt_regs *ctx, ##args); \ +typeof(name(0)) name(struct pt_regs *ctx) \ +{ \ + struct pt_regs *regs = PT_REGS_SYSCALL_REGS(ctx); \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ + return ____##name(___bpf_syscall_args(args)); \ + _Pragma("GCC diagnostic pop") \ +} \ +static __attribute__((always_inline)) typeof(name(0)) \ +____##name(struct pt_regs *ctx, ##args) + #endif -- cgit v1.2.3 From dc37dc617fabfb1c3a16d49f5d8cc20e9e3608ca Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 8 Feb 2022 22:39:09 -0800 Subject: libbpf: Fix compilation warning due to mismatched printf format On ppc64le architecture __s64 is long int and requires %ld. Cast to ssize_t and use %zd to avoid architecture-specific specifiers. 
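The portable idiom, for reference:

    __s64 sz = btf__resolve_size(btf, type_id);

    /* __s64 is 'long' on some 64-bit targets and 'long long' on others,
     * so neither %ld nor %lld is portable; cast to a type with a fixed
     * conversion specifier instead:
     */
    pr_warn("unexpected elem size %zd\n", (ssize_t)sz);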
Fixes: 4172843ed4a3 ("libbpf: Fix signedness bug in btf_dump_array_data()") Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220209063909.1268319-1-andrii@kernel.org --- tools/lib/bpf/btf_dump.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index 55aed9e398c3..07ebe70d3a30 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -1869,7 +1869,8 @@ static int btf_dump_array_data(struct btf_dump *d, elem_type = skip_mods_and_typedefs(d->btf, elem_type_id, NULL); elem_size = btf__resolve_size(d->btf, elem_type_id); if (elem_size <= 0) { - pr_warn("unexpected elem size %lld for array type [%u]\n", elem_size, id); + pr_warn("unexpected elem size %zd for array type [%u]\n", + (ssize_t)elem_size, id); return -EINVAL; } -- cgit v1.2.3 From 6fe65f1b4db3fff305896e997c2804b7b42236ce Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 9 Feb 2022 15:19:58 -0800 Subject: libbpf: Prepare light skeleton for the kernel. Prepare light skeleton to be used in the kernel module and in the user space. The look and feel of lskel.h is mostly the same with the difference that for user space the skel->rodata is the same pointer before and after skel_load operation, while in the kernel the skel->rodata after skel_open and the skel->rodata after skel_load are different pointers. Typical usage of skeleton remains the same for kernel and user space: skel = my_bpf__open(); skel->rodata->my_global_var = init_val; err = my_bpf__load(skel); err = my_bpf__attach(skel); // access skel->rodata->my_global_var; // access skel->bss->another_var; Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Acked-by: Yonghong Song Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220209232001.27490-3-alexei.starovoitov@gmail.com --- tools/lib/bpf/gen_loader.c | 15 +++- tools/lib/bpf/skel_internal.h | 185 ++++++++++++++++++++++++++++++++++++++---- 2 files changed, 179 insertions(+), 21 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c index 8ecef1088ba2..927745b08014 100644 --- a/tools/lib/bpf/gen_loader.c +++ b/tools/lib/bpf/gen_loader.c @@ -1043,18 +1043,27 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue, value = add_data(gen, pvalue, value_size); key = add_data(gen, &zero, sizeof(zero)); - /* if (map_desc[map_idx].initial_value) - * copy_from_user(value, initial_value, value_size); + /* if (map_desc[map_idx].initial_value) { + * if (ctx->flags & BPF_SKEL_KERNEL) + * bpf_probe_read_kernel(value, value_size, initial_value); + * else + * bpf_copy_from_user(value, value_size, initial_value); + * } */ emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6, sizeof(struct bpf_loader_ctx) + sizeof(struct bpf_map_desc) * map_idx + offsetof(struct bpf_map_desc, initial_value))); - emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 4)); + emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 8)); emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, value)); emit(gen, BPF_MOV64_IMM(BPF_REG_2, value_size)); + emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, + offsetof(struct bpf_loader_ctx, flags))); + emit(gen, BPF_JMP_IMM(BPF_JSET, BPF_REG_0, BPF_SKEL_KERNEL, 2)); emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user)); + emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 1)); + emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel)); map_update_attr = add_data(gen, &attr, 
attr_size); move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4, diff --git a/tools/lib/bpf/skel_internal.h b/tools/lib/bpf/skel_internal.h index dcd3336512d4..bd6f4505e7b1 100644 --- a/tools/lib/bpf/skel_internal.h +++ b/tools/lib/bpf/skel_internal.h @@ -3,9 +3,19 @@ #ifndef __SKEL_INTERNAL_H #define __SKEL_INTERNAL_H +#ifdef __KERNEL__ +#include +#include +#include +#include +#include +#else #include #include #include +#include +#include "bpf.h" +#endif #ifndef __NR_bpf # if defined(__mips__) && defined(_ABIO32) @@ -25,24 +35,23 @@ * requested during loader program generation. */ struct bpf_map_desc { - union { - /* input for the loader prog */ - struct { - __aligned_u64 initial_value; - __u32 max_entries; - }; - /* output of the loader prog */ - struct { - int map_fd; - }; - }; + /* output of the loader prog */ + int map_fd; + /* input for the loader prog */ + __u32 max_entries; + __aligned_u64 initial_value; }; struct bpf_prog_desc { int prog_fd; }; +enum { + BPF_SKEL_KERNEL = (1ULL << 0), +}; + struct bpf_loader_ctx { - size_t sz; + __u32 sz; + __u32 flags; __u32 log_level; __u32 log_size; __u64 log_buf; @@ -57,12 +66,144 @@ struct bpf_load_and_run_opts { const char *errstr; }; +long bpf_sys_bpf(__u32 cmd, void *attr, __u32 attr_size); + static inline int skel_sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, unsigned int size) { +#ifdef __KERNEL__ + return bpf_sys_bpf(cmd, attr, size); +#else return syscall(__NR_bpf, cmd, attr, size); +#endif +} + +#ifdef __KERNEL__ +static inline int close(int fd) +{ + return close_fd(fd); +} + +static inline void *skel_alloc(size_t size) +{ + struct bpf_loader_ctx *ctx = kzalloc(size, GFP_KERNEL); + + if (!ctx) + return NULL; + ctx->flags |= BPF_SKEL_KERNEL; + return ctx; +} + +static inline void skel_free(const void *p) +{ + kfree(p); +} + +/* skel->bss/rodata maps are populated the following way: + * + * For kernel use: + * skel_prep_map_data() allocates kernel memory that kernel module can directly access. + * Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value. + * The loader program will perform probe_read_kernel() from maps.rodata.initial_value. + * skel_finalize_map_data() sets skel->rodata to point to actual value in a bpf map and + * does maps.rodata.initial_value = ~0ULL to signal skel_free_map_data() that kvfree + * is not nessary. + * + * For user space: + * skel_prep_map_data() mmaps anon memory into skel->rodata that can be accessed directly. + * Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value. + * The loader program will perform copy_from_user() from maps.rodata.initial_value. + * skel_finalize_map_data() remaps bpf array map value from the kernel memory into + * skel->rodata address. + * + * The "bpftool gen skeleton -L" command generates lskel.h that is suitable for + * both kernel and user space. The generated loader program does + * either bpf_probe_read_kernel() or bpf_copy_from_user() from initial_value + * depending on bpf_loader_ctx->flags. + */ +static inline void skel_free_map_data(void *p, __u64 addr, size_t sz) +{ + if (addr != ~0ULL) + kvfree(p); + /* When addr == ~0ULL the 'p' points to + * ((struct bpf_array *)map)->value. See skel_finalize_map_data. 
+ */ } +static inline void *skel_prep_map_data(const void *val, size_t mmap_sz, size_t val_sz) +{ + void *addr; + + addr = kvmalloc(val_sz, GFP_KERNEL); + if (!addr) + return NULL; + memcpy(addr, val, val_sz); + return addr; +} + +static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int flags, int fd) +{ + struct bpf_map *map; + void *addr = NULL; + + kvfree((void *) (long) *init_val); + *init_val = ~0ULL; + + /* At this point bpf_load_and_run() finished without error and + * 'fd' is a valid bpf map FD. All sanity checks below should succeed. + */ + map = bpf_map_get(fd); + if (IS_ERR(map)) + return NULL; + if (map->map_type != BPF_MAP_TYPE_ARRAY) + goto out; + addr = ((struct bpf_array *)map)->value; + /* the addr stays valid, since FD is not closed */ +out: + bpf_map_put(map); + return addr; +} + +#else + +static inline void *skel_alloc(size_t size) +{ + return calloc(1, size); +} + +static inline void skel_free(void *p) +{ + free(p); +} + +static inline void skel_free_map_data(void *p, __u64 addr, size_t sz) +{ + munmap(p, sz); +} + +static inline void *skel_prep_map_data(const void *val, size_t mmap_sz, size_t val_sz) +{ + void *addr; + + addr = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + if (addr == (void *) -1) + return NULL; + memcpy(addr, val, val_sz); + return addr; +} + +static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int flags, int fd) +{ + void *addr; + + addr = mmap((void *) (long) *init_val, mmap_sz, flags, MAP_SHARED | MAP_FIXED, fd, 0); + if (addr == (void *) -1) + return NULL; + return addr; +} +#endif + static inline int skel_closenz(int fd) { if (fd > 0) @@ -136,22 +277,28 @@ static inline int skel_link_create(int prog_fd, int target_fd, return skel_sys_bpf(BPF_LINK_CREATE, &attr, attr_sz); } +#ifdef __KERNEL__ +#define set_err +#else +#define set_err err = -errno +#endif + static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts) { int map_fd = -1, prog_fd = -1, key = 0, err; union bpf_attr attr; - map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1); + err = map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1); if (map_fd < 0) { opts->errstr = "failed to create loader map"; - err = -errno; + set_err; goto out; } err = skel_map_update_elem(map_fd, &key, opts->data, 0); if (err < 0) { opts->errstr = "failed to update loader map"; - err = -errno; + set_err; goto out; } @@ -166,10 +313,10 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts) attr.log_size = opts->ctx->log_size; attr.log_buf = opts->ctx->log_buf; attr.prog_flags = BPF_F_SLEEPABLE; - prog_fd = skel_sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); + err = prog_fd = skel_sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); if (prog_fd < 0) { opts->errstr = "failed to load loader prog"; - err = -errno; + set_err; goto out; } @@ -181,10 +328,12 @@ static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts) if (err < 0 || (int)attr.test.retval < 0) { opts->errstr = "failed to execute loader prog"; if (err < 0) { - err = -errno; + set_err; } else { err = (int)attr.test.retval; +#ifndef __KERNEL__ errno = -err; +#endif } goto out; } -- cgit v1.2.3 From d130e954a002b901391037c33b9ae11bae5aaa91 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 11 Feb 2022 12:52:35 -0800 Subject: libbpf: Fix libbpf.map inheritance chain for LIBBPF_0.7.0 Ensure that LIBBPF_0.7.0 inherits everything from LIBBPF_0.6.0. 
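In a GNU ld version script, a node only inherits its predecessor's symbols if it names that predecessor after the closing brace, roughly:

    LIBBPF_0.7.0 {
            global:
                    bpf_program__log_level;   /* example 0.7.0 addition */
    } LIBBPF_0.6.0;                           /* <- inheritance link */

Without the trailing LIBBPF_0.6.0, the 0.7.0 version node is disconnected from the older nodes in the version graph.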
Fixes: dbdd2c7f8cec ("libbpf: Add API to get/set log_level at per-program level") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220211205235.2089104-1-andrii@kernel.org --- tools/lib/bpf/libbpf.map | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index aef6253a90c8..47e70c9058d9 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -438,4 +438,4 @@ LIBBPF_0.7.0 { libbpf_probe_bpf_map_type; libbpf_probe_bpf_prog_type; libbpf_set_memlock_rlim_max; -}; +} LIBBPF_0.6.0; -- cgit v1.2.3 From 9c3de619e13ee6693ec5ac74f50b7aa89056a70e Mon Sep 17 00:00:00 2001 From: Toke Høiland-Jørgensen Date: Sat, 12 Feb 2022 00:48:19 +0100 Subject: libbpf: Use dynamically allocated buffer when receiving netlink messages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When receiving netlink messages, libbpf was using a statically allocated stack buffer of 4k bytes. This happened to work fine on systems with a 4k page size, but on systems with larger page sizes it can lead to truncated messages. The user-visible impact of this was that libbpf would insist no XDP program was attached to some interfaces because that bit of the netlink message got chopped off. Fix this by switching to a dynamically allocated buffer; we borrow the approach from iproute2 of using recvmsg() with MSG_PEEK|MSG_TRUNC to get the actual size of the pending message before receiving it, adjusting the buffer as necessary. While we're at it, also add retries on interrupted system calls around the recvmsg() call. v2: - Move peek logic to libbpf_netlink_recv(), don't double free on ENOMEM. Fixes: 8bbb77b7c7a2 ("libbpf: Add various netlink helpers") Reported-by: Zhiqian Guan Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Andrii Nakryiko Acked-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/bpf/20220211234819.612288-1-toke@redhat.com --- tools/lib/bpf/netlink.c | 55 +++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 51 insertions(+), 4 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c index c39c37f99d5c..a598061f6fea 100644 --- a/tools/lib/bpf/netlink.c +++ b/tools/lib/bpf/netlink.c @@ -87,29 +87,75 @@ enum { NL_DONE, }; +static int netlink_recvmsg(int sock, struct msghdr *mhdr, int flags) +{ + int len; + + do { + len = recvmsg(sock, mhdr, flags); + } while (len < 0 && (errno == EINTR || errno == EAGAIN)); + + if (len < 0) + return -errno; + return len; +} + +static int alloc_iov(struct iovec *iov, int len) +{ + void *nbuf; + + nbuf = realloc(iov->iov_base, len); + if (!nbuf) + return -ENOMEM; + + iov->iov_base = nbuf; + iov->iov_len = len; + return 0; +} + static int libbpf_netlink_recv(int sock, __u32 nl_pid, int seq, __dump_nlmsg_t _fn, libbpf_dump_nlmsg_t fn, void *cookie) { + struct iovec iov = {}; + struct msghdr mhdr = { + .msg_iov = &iov, + .msg_iovlen = 1, + }; bool multipart = true; struct nlmsgerr *err; struct nlmsghdr *nh; - char buf[4096]; int len, ret; + ret = alloc_iov(&iov, 4096); + if (ret) + goto done; + while (multipart) { start: multipart = false; - len = recv(sock, buf, sizeof(buf), 0); + len = netlink_recvmsg(sock, &mhdr, MSG_PEEK | MSG_TRUNC); + if (len < 0) { + ret = len; + goto done; + } + + if (len > iov.iov_len) { + ret = alloc_iov(&iov, len); + if (ret) + goto done; + } + + len = netlink_recvmsg(sock, &mhdr, 0); if (len < 0) { - 
ret = -errno; + ret = len; goto done; } if (len == 0) break; - for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len); + for (nh = (struct nlmsghdr *)iov.iov_base; NLMSG_OK(nh, len); nh = NLMSG_NEXT(nh, len)) { if (nh->nlmsg_pid != nl_pid) { ret = -LIBBPF_ERRNO__WRNGPID; @@ -151,6 +197,7 @@ start: } ret = 0; done: + free(iov.iov_base); return ret; } -- cgit v1.2.3 From adb8fa195efdfaac5852aaac24810b456ce43b04 Mon Sep 17 00:00:00 2001 From: Mauricio Vásquez Date: Tue, 15 Feb 2022 17:58:50 -0500 Subject: libbpf: Split bpf_core_apply_relo() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit BTFGen needs to run the core relocation logic in order to understand what are the types involved in a given relocation. Currently bpf_core_apply_relo() calculates and **applies** a relocation to an instruction. Having both operations in the same function makes it difficult to only calculate the relocation without patching the instruction. This commit splits that logic in two different phases: (1) calculate the relocation and (2) patch the instruction. For the first phase bpf_core_apply_relo() is renamed to bpf_core_calc_relo_insn() who is now only on charge of calculating the relocation, the second phase uses the already existing bpf_core_patch_insn(). bpf_object__relocate_core() uses both of them and the BTFGen will use only bpf_core_calc_relo_insn(). Signed-off-by: Mauricio Vásquez Signed-off-by: Rafael David Tinoco Signed-off-by: Lorenzo Fontana Signed-off-by: Leonardo Di Donato Signed-off-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Acked-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220215225856.671072-2-mauricio@kinvolk.io --- kernel/bpf/btf.c | 13 ++++++-- tools/lib/bpf/libbpf.c | 71 +++++++++++++++++++++++------------------- tools/lib/bpf/relo_core.c | 79 ++++++++++++++--------------------------------- tools/lib/bpf/relo_core.h | 42 +++++++++++++++++++++---- 4 files changed, 109 insertions(+), 96 deletions(-) (limited to 'tools/lib/bpf') diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 11740b300de9..f1d3d2a2f5f6 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -7225,6 +7225,7 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, { bool need_cands = relo->kind != BPF_CORE_TYPE_ID_LOCAL; struct bpf_core_cand_list cands = {}; + struct bpf_core_relo_res targ_res; struct bpf_core_spec *specs; int err; @@ -7264,13 +7265,19 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, cands.len = cc->cnt; /* cand_cache_mutex needs to span the cache lookup and * copy of btf pointer into bpf_core_cand_list, - * since module can be unloaded while bpf_core_apply_relo_insn + * since module can be unloaded while bpf_core_calc_relo_insn * is working with module's btf. 
*/ } - err = bpf_core_apply_relo_insn((void *)ctx->log, insn, relo->insn_off / 8, - relo, relo_idx, ctx->btf, &cands, specs); + err = bpf_core_calc_relo_insn((void *)ctx->log, relo, relo_idx, ctx->btf, &cands, specs, + &targ_res); + if (err) + goto out; + + err = bpf_core_patch_insn((void *)ctx->log, insn, relo->insn_off / 8, relo, relo_idx, + &targ_res); + out: kfree(specs); if (need_cands) { diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 2262bcdfee92..d3c457fb045e 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -5530,11 +5530,12 @@ static int record_relo_core(struct bpf_program *prog, return 0; } -static int bpf_core_apply_relo(struct bpf_program *prog, - const struct bpf_core_relo *relo, - int relo_idx, - const struct btf *local_btf, - struct hashmap *cand_cache) +static int bpf_core_resolve_relo(struct bpf_program *prog, + const struct bpf_core_relo *relo, + int relo_idx, + const struct btf *local_btf, + struct hashmap *cand_cache, + struct bpf_core_relo_res *targ_res) { struct bpf_core_spec specs_scratch[3] = {}; const void *type_key = u32_as_hash_key(relo->type_id); @@ -5543,20 +5544,7 @@ static int bpf_core_apply_relo(struct bpf_program *prog, const struct btf_type *local_type; const char *local_name; __u32 local_id = relo->type_id; - struct bpf_insn *insn; - int insn_idx, err; - - if (relo->insn_off % BPF_INSN_SZ) - return -EINVAL; - insn_idx = relo->insn_off / BPF_INSN_SZ; - /* adjust insn_idx from section frame of reference to the local - * program's frame of reference; (sub-)program code is not yet - * relocated, so it's enough to just subtract in-section offset - */ - insn_idx = insn_idx - prog->sec_insn_off; - if (insn_idx >= prog->insns_cnt) - return -EINVAL; - insn = &prog->insns[insn_idx]; + int err; local_type = btf__type_by_id(local_btf, local_id); if (!local_type) @@ -5566,15 +5554,6 @@ static int bpf_core_apply_relo(struct bpf_program *prog, if (!local_name) return -EINVAL; - if (prog->obj->gen_loader) { - const char *spec_str = btf__name_by_offset(local_btf, relo->access_str_off); - - pr_debug("record_relo_core: prog %td insn[%d] %s %s %s final insn_idx %d\n", - prog - prog->obj->programs, relo->insn_off / 8, - btf_kind_str(local_type), local_name, spec_str, insn_idx); - return record_relo_core(prog, relo, insn_idx); - } - if (relo->kind != BPF_CORE_TYPE_ID_LOCAL && !hashmap__find(cand_cache, type_key, (void **)&cands)) { cands = bpf_core_find_cands(prog->obj, local_btf, local_id); @@ -5591,19 +5570,21 @@ static int bpf_core_apply_relo(struct bpf_program *prog, } } - return bpf_core_apply_relo_insn(prog_name, insn, insn_idx, relo, - relo_idx, local_btf, cands, specs_scratch); + return bpf_core_calc_relo_insn(prog_name, relo, relo_idx, local_btf, cands, specs_scratch, + targ_res); } static int bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path) { const struct btf_ext_info_sec *sec; + struct bpf_core_relo_res targ_res; const struct bpf_core_relo *rec; const struct btf_ext_info *seg; struct hashmap_entry *entry; struct hashmap *cand_cache = NULL; struct bpf_program *prog; + struct bpf_insn *insn; const char *sec_name; int i, err = 0, insn_idx, sec_idx; @@ -5654,6 +5635,8 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path) sec_name, sec->num_info); for_each_btf_ext_rec(seg, sec, i, rec) { + if (rec->insn_off % BPF_INSN_SZ) + return -EINVAL; insn_idx = rec->insn_off / BPF_INSN_SZ; prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx); if (!prog) { @@ -5668,12 +5651,38 @@ 
bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path) if (!prog->load) continue; - err = bpf_core_apply_relo(prog, rec, i, obj->btf, cand_cache); + /* adjust insn_idx from section frame of reference to the local + * program's frame of reference; (sub-)program code is not yet + * relocated, so it's enough to just subtract in-section offset + */ + insn_idx = insn_idx - prog->sec_insn_off; + if (insn_idx >= prog->insns_cnt) + return -EINVAL; + insn = &prog->insns[insn_idx]; + + if (prog->obj->gen_loader) { + err = record_relo_core(prog, rec, insn_idx); + if (err) { + pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n", + prog->name, i, err); + goto out; + } + continue; + } + + err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res); if (err) { pr_warn("prog '%s': relo #%d: failed to relocate: %d\n", prog->name, i, err); goto out; } + + err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res); + if (err) { + pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n", + prog->name, i, insn_idx, err); + goto out; + } } } diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c index 910865e29edc..f946f23eab20 100644 --- a/tools/lib/bpf/relo_core.c +++ b/tools/lib/bpf/relo_core.c @@ -775,31 +775,6 @@ static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo, return 0; } -struct bpf_core_relo_res -{ - /* expected value in the instruction, unless validate == false */ - __u32 orig_val; - /* new value that needs to be patched up to */ - __u32 new_val; - /* relocation unsuccessful, poison instruction, but don't fail load */ - bool poison; - /* some relocations can't be validated against orig_val */ - bool validate; - /* for field byte offset relocations or the forms: - * *(T *)(rX + ) = rY - * rX = *(T *)(rY + ), - * we remember original and resolved field size to adjust direct - * memory loads of pointers and integers; this is necessary for 32-bit - * host kernel architectures, but also allows to automatically - * relocate fields that were resized from, e.g., u32 to u64, etc. - */ - bool fail_memsz_adjust; - __u32 orig_sz; - __u32 orig_type_id; - __u32 new_sz; - __u32 new_type_id; -}; - /* Calculate original and target relocation values, given local and target * specs and relocation kind. These values are calculated for each candidate. * If there are multiple candidates, resulting values should all be consistent @@ -951,9 +926,9 @@ static int insn_bytes_to_bpf_size(__u32 sz) * 5. *(T *)(rX + ) = rY, where T is one of {u8, u16, u32, u64}; * 6. *(T *)(rX + ) = , where T is one of {u8, u16, u32, u64}. */ -static int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn, - int insn_idx, const struct bpf_core_relo *relo, - int relo_idx, const struct bpf_core_relo_res *res) +int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn, + int insn_idx, const struct bpf_core_relo *relo, + int relo_idx, const struct bpf_core_relo_res *res) { __u32 orig_val, new_val; __u8 class; @@ -1128,7 +1103,7 @@ static void bpf_core_dump_spec(const char *prog_name, int level, const struct bp } /* - * CO-RE relocate single instruction. + * Calculate CO-RE relocation target result. * * The outline and important points of the algorithm: * 1. For given local type, find corresponding candidate target types. 
@@ -1177,18 +1152,18 @@ static void bpf_core_dump_spec(const char *prog_name, int level, const struct bp * between multiple relocations for the same type ID and is updated as some * of the candidates are pruned due to structural incompatibility. */ -int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn, - int insn_idx, - const struct bpf_core_relo *relo, - int relo_idx, - const struct btf *local_btf, - struct bpf_core_cand_list *cands, - struct bpf_core_spec *specs_scratch) +int bpf_core_calc_relo_insn(const char *prog_name, + const struct bpf_core_relo *relo, + int relo_idx, + const struct btf *local_btf, + struct bpf_core_cand_list *cands, + struct bpf_core_spec *specs_scratch, + struct bpf_core_relo_res *targ_res) { struct bpf_core_spec *local_spec = &specs_scratch[0]; struct bpf_core_spec *cand_spec = &specs_scratch[1]; struct bpf_core_spec *targ_spec = &specs_scratch[2]; - struct bpf_core_relo_res cand_res, targ_res; + struct bpf_core_relo_res cand_res; const struct btf_type *local_type; const char *local_name; __u32 local_id; @@ -1223,12 +1198,12 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn, /* TYPE_ID_LOCAL relo is special and doesn't need candidate search */ if (relo->kind == BPF_CORE_TYPE_ID_LOCAL) { /* bpf_insn's imm value could get out of sync during linking */ - memset(&targ_res, 0, sizeof(targ_res)); - targ_res.validate = false; - targ_res.poison = false; - targ_res.orig_val = local_spec->root_type_id; - targ_res.new_val = local_spec->root_type_id; - goto patch_insn; + memset(targ_res, 0, sizeof(*targ_res)); + targ_res->validate = false; + targ_res->poison = false; + targ_res->orig_val = local_spec->root_type_id; + targ_res->new_val = local_spec->root_type_id; + return 0; } /* libbpf doesn't support candidate search for anonymous types */ @@ -1262,7 +1237,7 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn, return err; if (j == 0) { - targ_res = cand_res; + *targ_res = cand_res; *targ_spec = *cand_spec; } else if (cand_spec->bit_offset != targ_spec->bit_offset) { /* if there are many field relo candidates, they @@ -1272,7 +1247,8 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn, prog_name, relo_idx, cand_spec->bit_offset, targ_spec->bit_offset); return -EINVAL; - } else if (cand_res.poison != targ_res.poison || cand_res.new_val != targ_res.new_val) { + } else if (cand_res.poison != targ_res->poison || + cand_res.new_val != targ_res->new_val) { /* all candidates should result in the same relocation * decision and value, otherwise it's dangerous to * proceed due to ambiguity @@ -1280,7 +1256,7 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn, pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n", prog_name, relo_idx, cand_res.poison ? "failure" : "success", cand_res.new_val, - targ_res.poison ? "failure" : "success", targ_res.new_val); + targ_res->poison ? 
"failure" : "success", targ_res->new_val); return -EINVAL; } @@ -1314,19 +1290,10 @@ int bpf_core_apply_relo_insn(const char *prog_name, struct bpf_insn *insn, prog_name, relo_idx); /* calculate single target relo result explicitly */ - err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, NULL, &targ_res); + err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, NULL, targ_res); if (err) return err; } -patch_insn: - /* bpf_core_patch_insn() should know how to handle missing targ_spec */ - err = bpf_core_patch_insn(prog_name, insn, insn_idx, relo, relo_idx, &targ_res); - if (err) { - pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n", - prog_name, relo_idx, relo->insn_off / 8, err); - return -EINVAL; - } - return 0; } diff --git a/tools/lib/bpf/relo_core.h b/tools/lib/bpf/relo_core.h index 17799819ad7c..a28bf3711ce2 100644 --- a/tools/lib/bpf/relo_core.h +++ b/tools/lib/bpf/relo_core.h @@ -44,14 +44,44 @@ struct bpf_core_spec { __u32 bit_offset; }; -int bpf_core_apply_relo_insn(const char *prog_name, - struct bpf_insn *insn, int insn_idx, - const struct bpf_core_relo *relo, int relo_idx, - const struct btf *local_btf, - struct bpf_core_cand_list *cands, - struct bpf_core_spec *specs_scratch); +struct bpf_core_relo_res { + /* expected value in the instruction, unless validate == false */ + __u32 orig_val; + /* new value that needs to be patched up to */ + __u32 new_val; + /* relocation unsuccessful, poison instruction, but don't fail load */ + bool poison; + /* some relocations can't be validated against orig_val */ + bool validate; + /* for field byte offset relocations or the forms: + * *(T *)(rX + ) = rY + * rX = *(T *)(rY + ), + * we remember original and resolved field size to adjust direct + * memory loads of pointers and integers; this is necessary for 32-bit + * host kernel architectures, but also allows to automatically + * relocate fields that were resized from, e.g., u32 to u64, etc. + */ + bool fail_memsz_adjust; + __u32 orig_sz; + __u32 orig_type_id; + __u32 new_sz; + __u32 new_type_id; +}; + int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf, __u32 targ_id); size_t bpf_core_essential_name_len(const char *name); + +int bpf_core_calc_relo_insn(const char *prog_name, + const struct bpf_core_relo *relo, int relo_idx, + const struct btf *local_btf, + struct bpf_core_cand_list *cands, + struct bpf_core_spec *specs_scratch, + struct bpf_core_relo_res *targ_res); + +int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn, + int insn_idx, const struct bpf_core_relo *relo, + int relo_idx, const struct bpf_core_relo_res *res); + #endif -- cgit v1.2.3 From 8de6cae40bce6e19f39de60056cad39a7274169d Mon Sep 17 00:00:00 2001 From: Mauricio Vásquez Date: Tue, 15 Feb 2022 17:58:51 -0500 Subject: libbpf: Expose bpf_core_{add,free}_cands() to bpftool MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Expose bpf_core_add_cands() and bpf_core_free_cands() to handle candidates list. 
Signed-off-by: Mauricio Vásquez Signed-off-by: Rafael David Tinoco Signed-off-by: Lorenzo Fontana Signed-off-by: Leonardo Di Donato Signed-off-by: Andrii Nakryiko Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220215225856.671072-3-mauricio@kinvolk.io --- tools/lib/bpf/libbpf.c | 17 ++++++++++------- tools/lib/bpf/libbpf_internal.h | 9 +++++++++ 2 files changed, 19 insertions(+), 7 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index d3c457fb045e..ad43b6ce825e 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -5192,18 +5192,21 @@ size_t bpf_core_essential_name_len(const char *name) return n; } -static void bpf_core_free_cands(struct bpf_core_cand_list *cands) +void bpf_core_free_cands(struct bpf_core_cand_list *cands) { + if (!cands) + return; + free(cands->cands); free(cands); } -static int bpf_core_add_cands(struct bpf_core_cand *local_cand, - size_t local_essent_len, - const struct btf *targ_btf, - const char *targ_btf_name, - int targ_start_id, - struct bpf_core_cand_list *cands) +int bpf_core_add_cands(struct bpf_core_cand *local_cand, + size_t local_essent_len, + const struct btf *targ_btf, + const char *targ_btf_name, + int targ_start_id, + struct bpf_core_cand_list *cands) { struct bpf_core_cand *new_cands, *cand; const struct btf_type *t, *local_t; diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index bc86b82e90d1..4fda8bdf0a0d 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -529,4 +529,13 @@ static inline int ensure_good_fd(int fd) return fd; } +/* The following two functions are exposed to bpftool */ +int bpf_core_add_cands(struct bpf_core_cand *local_cand, + size_t local_essent_len, + const struct btf *targ_btf, + const char *targ_btf_name, + int targ_start_id, + struct bpf_core_cand_list *cands); +void bpf_core_free_cands(struct bpf_core_cand_list *cands); + #endif /* __LIBBPF_LIBBPF_INTERNAL_H */ -- cgit v1.2.3 From 1b8c924a05934d2e758ec7da7bd217ef8ebd80ce Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 16 Feb 2022 23:39:58 -0800 Subject: libbpf: Fix memleak in libbpf_netlink_recv() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ensure that libbpf_netlink_recv() frees dynamically allocated buffer in all code paths. 
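The shape of the fix is the classic single-exit cleanup idiom, visible in the netlink.c hunks below. A generic sketch of the pattern, not the actual netlink.c code (recv_chunk() is a made-up stand-in for the real receive machinery):

        #include <errno.h>
        #include <stdlib.h>

        /* hypothetical transport helper, for illustration only */
        static int recv_chunk(char *buf, int len) { (void)buf; (void)len; return 0; }

        static int recv_all(int len)
        {
                char *buf;
                int ret;

                buf = malloc(len);
                if (!buf)
                        return -ENOMEM;

                for (;;) {
                        ret = recv_chunk(buf, len);
                        if (ret < 0)
                                goto done;      /* error path frees buf too */
                        if (ret == 0) {
                                /* a bare "return 0" here would skip the
                                 * free() below and leak buf
                                 */
                                goto done;
                        }
                        /* ... process buf ... */
                }
        done:
                free(buf);
                return ret;
        }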
Fixes: 9c3de619e13e ("libbpf: Use dynamically allocated buffer when receiving netlink messages") Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Toke Høiland-Jørgensen Link: https://lore.kernel.org/bpf/20220217073958.276959-1-andrii@kernel.org --- tools/lib/bpf/netlink.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c index a598061f6fea..cbc8967d5402 100644 --- a/tools/lib/bpf/netlink.c +++ b/tools/lib/bpf/netlink.c @@ -176,7 +176,8 @@ start: libbpf_nla_dump_errormsg(nh); goto done; case NLMSG_DONE: - return 0; + ret = 0; + goto done; default: break; } @@ -188,9 +189,10 @@ start: case NL_NEXT: goto start; case NL_DONE: - return 0; + ret = 0; + goto done; default: - return ret; + goto done; } } } -- cgit v1.2.3 From 6966d4c4425b6796b1da13a6f86d09825df3d323 Mon Sep 17 00:00:00 2001 From: Yuntao Wang Date: Sun, 20 Feb 2022 15:27:50 +0800 Subject: libbpf: Remove redundant check in btf_fixup_datasec() The check 't->size && t->size != size' is redundant because if t->size compares unequal to 0, we will just skip straight to sorting variables. Signed-off-by: Yuntao Wang Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220220072750.209215-1-ytcoode@gmail.com --- tools/lib/bpf/libbpf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index ad43b6ce825e..7e978feaf822 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2795,7 +2795,7 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf, goto sort_vars; ret = find_elf_sec_sz(obj, name, &size); - if (ret || !size || (t->size && t->size != size)) { + if (ret || !size) { pr_debug("Invalid size for section %s: %u bytes\n", name, size); return -ENOENT; } -- cgit v1.2.3 From 08894d9c647aad08ddd19398e03a0aa1a70b7dc8 Mon Sep 17 00:00:00 2001 From: Yuntao Wang Date: Wed, 23 Feb 2022 16:52:44 +0800 Subject: libbpf: Simplify the find_elf_sec_sz() function The check in the last return statement is unnecessary; we can just return the ret variable. But we can simplify the function further by returning 0 immediately if we find the section size and -ENOENT otherwise. Thus we can also remove the ret variable. Signed-off-by: Yuntao Wang Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220223085244.3058118-1-ytcoode@gmail.com --- tools/lib/bpf/libbpf.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 7e978feaf822..776b8e034d62 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1374,22 +1374,20 @@ static bool bpf_map_type__is_map_in_map(enum bpf_map_type type) static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size) { - int ret = -ENOENT; Elf_Data *data; Elf_Scn *scn; - *size = 0; if (!name) return -EINVAL; scn = elf_sec_by_name(obj, name); data = elf_sec_data(obj, scn); if (data) { - ret = 0; /* found it */ *size = data->d_size; + return 0; /* found it */ } - return *size ?
0 : ret; + return -ENOENT; } static int find_elf_var_offset(const struct bpf_object *obj, const char *name, __u32 *off) -- cgit v1.2.3 From a4fbfdd7a160eccaafc093eb5b34f838b1ca0bf0 Mon Sep 17 00:00:00 2001 From: Stijn Tintel Date: Fri, 25 Feb 2022 17:23:55 +0200 Subject: libbpf: Fix BPF_MAP_TYPE_PERF_EVENT_ARRAY auto-pinning When a BPF map of type BPF_MAP_TYPE_PERF_EVENT_ARRAY doesn't have the max_entries parameter set, the map will be created with max_entries set to the number of available CPUs. When we try to reuse such a pinned map, map_is_reuse_compat will return false, as max_entries in the map definition differs from max_entries of the existing map, causing the following error: libbpf: couldn't reuse pinned map at '/sys/fs/bpf/m_logging': parameter mismatch Fix this by overwriting max_entries in the map definition. For this to work, we need to do this in bpf_object__create_maps, before calling bpf_object__reuse_map. Fixes: 57a00f41644f ("libbpf: Add auto-pinning of maps when loading BPF objects") Signed-off-by: Stijn Tintel Signed-off-by: Daniel Borkmann Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20220225152355.315204-1-stijn@linux-ipv6.be --- tools/lib/bpf/libbpf.c | 44 +++++++++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 19 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 776b8e034d62..be6480e260c4 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -4859,7 +4859,6 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b LIBBPF_OPTS(bpf_map_create_opts, create_attr); struct bpf_map_def *def = &map->def; const char *map_name = NULL; - __u32 max_entries; int err = 0; if (kernel_supports(obj, FEAT_PROG_NAME)) @@ -4869,21 +4868,6 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b create_attr.numa_node = map->numa_node; create_attr.map_extra = map->map_extra; - if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) { - int nr_cpus; - - nr_cpus = libbpf_num_possible_cpus(); - if (nr_cpus < 0) { - pr_warn("map '%s': failed to determine number of system CPUs: %d\n", - map->name, nr_cpus); - return nr_cpus; - } - pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus); - max_entries = nr_cpus; - } else { - max_entries = def->max_entries; - } - if (bpf_map__is_struct_ops(map)) create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; @@ -4933,7 +4917,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b if (obj->gen_loader) { bpf_gen__map_create(obj->gen_loader, def->type, map_name, - def->key_size, def->value_size, max_entries, + def->key_size, def->value_size, def->max_entries, &create_attr, is_inner ? -1 : map - obj->maps); /* Pretend to have valid FD to pass various fd >= 0 checks. * This fd == 0 will not be used with any syscall and will be reset to -1 eventually. 
@@ -4942,7 +4926,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b } else { map->fd = bpf_map_create(def->type, map_name, def->key_size, def->value_size, - max_entries, &create_attr); + def->max_entries, &create_attr); } if (map->fd < 0 && (create_attr.btf_key_type_id || create_attr.btf_value_type_id)) { @@ -4959,7 +4943,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b map->btf_value_type_id = 0; map->fd = bpf_map_create(def->type, map_name, def->key_size, def->value_size, - max_entries, &create_attr); + def->max_entries, &create_attr); } err = map->fd < 0 ? -errno : 0; @@ -5063,6 +5047,24 @@ static int bpf_object_init_prog_arrays(struct bpf_object *obj) return 0; } +static int map_set_def_max_entries(struct bpf_map *map) +{ + if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) { + int nr_cpus; + + nr_cpus = libbpf_num_possible_cpus(); + if (nr_cpus < 0) { + pr_warn("map '%s': failed to determine number of system CPUs: %d\n", + map->name, nr_cpus); + return nr_cpus; + } + pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus); + map->def.max_entries = nr_cpus; + } + + return 0; +} + static int bpf_object__create_maps(struct bpf_object *obj) { @@ -5095,6 +5097,10 @@ bpf_object__create_maps(struct bpf_object *obj) continue; } + err = map_set_def_max_entries(map); + if (err) + goto err_out; + retried = false; retry: if (map->pin_path) { -- cgit v1.2.3 From 4226961b0019b2e1612029e8950a9e911affc995 Mon Sep 17 00:00:00 2001 From: Xu Kuohai Date: Tue, 1 Mar 2022 00:32:49 -0500 Subject: libbpf: Skip forward declaration when counting duplicated type names Currently if a declaration appears in the BTF before the definition, the definition is dumped as a conflicting name, e.g.: $ bpftool btf dump file vmlinux format raw | grep "'unix_sock'" [81287] FWD 'unix_sock' fwd_kind=struct [89336] STRUCT 'unix_sock' size=1024 vlen=14 $ bpftool btf dump file vmlinux format c | grep "struct unix_sock" struct unix_sock; struct unix_sock___2 { <--- conflict, the "___2" is unexpected struct unix_sock___2 *unix_sk; This causes a compilation error if the dump output is used as a header file. Fix it by skipping declarations when counting duplicated type names. Fixes: 351131b51c7a ("libbpf: add btf_dump API for BTF-to-C conversion") Signed-off-by: Xu Kuohai Signed-off-by: Daniel Borkmann Acked-by: Song Liu Link: https://lore.kernel.org/bpf/20220301053250.1464204-2-xukuohai@huawei.com --- tools/lib/bpf/btf_dump.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index 07ebe70d3a30..6b1bc1f43728 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -1505,6 +1505,11 @@ static const char *btf_dump_resolve_name(struct btf_dump *d, __u32 id, if (s->name_resolved) return *cached_name ? *cached_name : orig_name; + if (btf_is_fwd(t) || (btf_is_enum(t) && btf_vlen(t) == 0)) { + s->name_resolved = 1; + return orig_name; + } + dup_cnt = btf_dump_name_dups(d, name_map, orig_name); if (dup_cnt > 1) { const size_t max_len = 256; -- cgit v1.2.3 From 41332d6e3a430adc91e0af115b4261b0d2f116ec Mon Sep 17 00:00:00 2001 From: Yuntao Wang Date: Thu, 3 Mar 2022 08:59:21 +0800 Subject: libbpf: Add a check to ensure that page_cnt is non-zero The page_cnt parameter is used to specify the number of memory pages allocated for each per-CPU buffer; it must be non-zero and a power of 2.
Currently, the __perf_buffer__new() function attempts to validate that the page_cnt is a power of 2 but forgets to check for the case where page_cnt is zero. We can fix it by replacing 'page_cnt & (page_cnt - 1)' with 'page_cnt == 0 || (page_cnt & (page_cnt - 1))'. That way, we also don't need to add a check in perf_buffer__new_v0_6_0() to make sure that page_cnt is non-zero, and the check for zero in perf_buffer__new_raw_v0_6_0() can also be removed. The code will be cleaner and more readable. Signed-off-by: Yuntao Wang Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220303005921.53436-1-ytcoode@gmail.com --- tools/lib/bpf/libbpf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index be6480e260c4..81bf01d67671 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -10951,7 +10951,7 @@ struct perf_buffer *perf_buffer__new_raw_v0_6_0(int map_fd, size_t page_cnt, { struct perf_buffer_params p = {}; - if (page_cnt == 0 || !attr) + if (!attr) return libbpf_err_ptr(-EINVAL); if (!OPTS_VALID(opts, perf_buffer_raw_opts)) @@ -10992,7 +10992,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, __u32 map_info_len; int err, i, j, n; - if (page_cnt & (page_cnt - 1)) { + if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) { pr_warn("page count should be power of two, but is %zu\n", page_cnt); return ERR_PTR(-EINVAL); -- cgit v1.2.3 From 4fa5bcfe07f7e97d9a140c465e1056c91651c41d Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 4 Mar 2022 17:01:27 -0800 Subject: libbpf: Allow BPF program auto-attach handlers to bail out Allow some BPF program types to support auto-attach only in a subset of cases. Currently, if some BPF program type specifies an attach callback, it is assumed that during skeleton attach operation all such programs either successfully attach or entire skeleton attachment fails. If some program doesn't support auto-attachment from skeleton, such BPF program types shouldn't have an attach callback specified. This is limiting for cases when, depending on how full the SEC("") definition is, there could either be enough details to support auto-attach or there might not be and user has to use some specific API to provide more details at runtime. One specific example of such desired behavior might be SEC("uprobe"). If it's specified as just SEC("uprobe"), auto-attach isn't possible. But if it's SEC("uprobe/<path>:<function>"), then there are enough details to support auto-attach. Note that there is a somewhat subtle difference between auto-attach behavior of BPF skeleton and using "generic" bpf_program__attach(prog) (which uses the same attach handlers under the covers). Skeleton allows some programs within bpf_object to not have auto-attach implemented and doesn't treat that as an error. Instead, such BPF programs are just skipped during skeleton's (optional) attach step. bpf_program__attach(), on the other hand, is called when the user *expects* auto-attach to work, so if the specified program doesn't implement or doesn't support auto-attach functionality, that will be treated as an error. Another improvement to the way libbpf is handling SEC()s would be to not require providing a dummy kernel function name for kprobes. Currently, SEC("kprobe/whatever") is necessary even if the actual kernel function is determined by the user at runtime and bpf_program__attach_kprobe() is used to specify it.
With changes in this patch, it's possible to support both SEC("kprobe") and SEC("kprobe/<kernel_function_name>"). Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Tested-by: Alan Maguire Reviewed-by: Alan Maguire Link: https://lore.kernel.org/bpf/20220305010129.1549719-2-andrii@kernel.org --- tools/lib/bpf/libbpf.c | 140 ++++++++++++++++++++++++++++++------------------- 1 file changed, 85 insertions(+), 55 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 81bf01d67671..1da4a438ba00 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -201,11 +201,12 @@ struct reloc_desc { }; }; -struct bpf_sec_def; - -typedef int (*init_fn_t)(struct bpf_program *prog, long cookie); -typedef int (*preload_fn_t)(struct bpf_program *prog, struct bpf_prog_load_opts *opts, long cookie); -typedef struct bpf_link *(*attach_fn_t)(const struct bpf_program *prog, long cookie); +typedef int (*libbpf_prog_setup_fn_t)(struct bpf_program *prog, long cookie); +typedef int (*libbpf_prog_prepare_load_fn_t)(struct bpf_program *prog, + struct bpf_prog_load_opts *opts, long cookie); +/* If auto-attach is not supported, callback should return 0 and set link to NULL */ +typedef int (*libbpf_prog_attach_fn_t)(const struct bpf_program *prog, long cookie, + struct bpf_link **link); /* stored as sec_def->cookie for all libbpf-supported SEC()s */ enum sec_def_flags { @@ -239,9 +240,9 @@ struct bpf_sec_def { enum bpf_attach_type expected_attach_type; long cookie; - init_fn_t init_fn; - preload_fn_t preload_fn; - attach_fn_t attach_fn; + libbpf_prog_setup_fn_t prog_setup_fn; + libbpf_prog_prepare_load_fn_t prog_prepare_load_fn; + libbpf_prog_attach_fn_t prog_attach_fn; }; /* @@ -6572,9 +6573,9 @@ static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name, int *btf_obj_fd, int *btf_type_id); -/* this is called as prog->sec_def->preload_fn for libbpf-supported sec_defs */ -static int libbpf_preload_prog(struct bpf_program *prog, - struct bpf_prog_load_opts *opts, long cookie) +/* this is called as prog->sec_def->prog_prepare_load_fn for libbpf-supported sec_defs */ +static int libbpf_prepare_prog_load(struct bpf_program *prog, + struct bpf_prog_load_opts *opts, long cookie) { enum sec_def_flags def = cookie; @@ -6670,8 +6671,8 @@ static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_prog load_attr.fd_array = obj->fd_array; /* adjust load_attr if sec_def provides custom preload callback */ - if (prog->sec_def && prog->sec_def->preload_fn) { - err = prog->sec_def->preload_fn(prog, &load_attr, prog->sec_def->cookie); + if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) { + err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie); if (err < 0) { pr_warn("prog '%s': failed to prepare load attributes: %d\n", prog->name, err); @@ -6971,8 +6972,8 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object /* sec_def can have custom callback which should be called * after bpf_program is initialized to adjust its properties */ - if (prog->sec_def->init_fn) { - err = prog->sec_def->init_fn(prog, prog->sec_def->cookie); + if (prog->sec_def->prog_setup_fn) { + err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie); if (err < 0) { pr_warn("prog '%s': failed to initialize: %d\n", prog->name, err); @@ -8593,16 +8594,16 @@ int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log .prog_type =
BPF_PROG_TYPE_##ptype, \ .expected_attach_type = atype, \ .cookie = (long)(flags), \ - .preload_fn = libbpf_preload_prog, \ + .prog_prepare_load_fn = libbpf_prepare_prog_load, \ __VA_ARGS__ \ } -static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cookie); -static struct bpf_link *attach_tp(const struct bpf_program *prog, long cookie); -static struct bpf_link *attach_raw_tp(const struct bpf_program *prog, long cookie); -static struct bpf_link *attach_trace(const struct bpf_program *prog, long cookie); -static struct bpf_link *attach_lsm(const struct bpf_program *prog, long cookie); -static struct bpf_link *attach_iter(const struct bpf_program *prog, long cookie); +static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link); +static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link); +static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link); +static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link); +static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link); +static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link); static const struct bpf_sec_def section_defs[] = { SEC_DEF("socket", SOCKET_FILTER, 0, SEC_NONE | SEC_SLOPPY_PFX), @@ -8752,7 +8753,7 @@ static char *libbpf_get_type_names(bool attach_type) const struct bpf_sec_def *sec_def = §ion_defs[i]; if (attach_type) { - if (sec_def->preload_fn != libbpf_preload_prog) + if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load) continue; if (!(sec_def->cookie & SEC_ATTACHABLE)) @@ -9135,7 +9136,7 @@ int libbpf_attach_type_by_name(const char *name, return libbpf_err(-EINVAL); } - if (sec_def->preload_fn != libbpf_preload_prog) + if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load) return libbpf_err(-EINVAL); if (!(sec_def->cookie & SEC_ATTACHABLE)) return libbpf_err(-EINVAL); @@ -10109,14 +10110,13 @@ struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog, return bpf_program__attach_kprobe_opts(prog, func_name, &opts); } -static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cookie) +static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link) { DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts); unsigned long offset = 0; - struct bpf_link *link; const char *func_name; char *func; - int n, err; + int n; opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/"); if (opts.retprobe) @@ -10126,21 +10126,19 @@ static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cooki n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset); if (n < 1) { - err = -EINVAL; pr_warn("kprobe name is invalid: %s\n", func_name); - return libbpf_err_ptr(err); + return -EINVAL; } if (opts.retprobe && offset != 0) { free(func); - err = -EINVAL; pr_warn("kretprobes do not support offset specification\n"); - return libbpf_err_ptr(err); + return -EINVAL; } opts.offset = offset; - link = bpf_program__attach_kprobe_opts(prog, func, &opts); + *link = bpf_program__attach_kprobe_opts(prog, func, &opts); free(func); - return link; + return libbpf_get_error(*link); } static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz, @@ -10395,14 +10393,13 @@ struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog, return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL); } -static struct bpf_link 
*attach_tp(const struct bpf_program *prog, long cookie) +static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link) { char *sec_name, *tp_cat, *tp_name; - struct bpf_link *link; sec_name = strdup(prog->sec_name); if (!sec_name) - return libbpf_err_ptr(-ENOMEM); + return -ENOMEM; /* extract "tp//" or "tracepoint//" */ if (str_has_pfx(prog->sec_name, "tp/")) @@ -10412,14 +10409,14 @@ static struct bpf_link *attach_tp(const struct bpf_program *prog, long cookie) tp_name = strchr(tp_cat, '/'); if (!tp_name) { free(sec_name); - return libbpf_err_ptr(-EINVAL); + return -EINVAL; } *tp_name = '\0'; tp_name++; - link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name); + *link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name); free(sec_name); - return link; + return libbpf_get_error(*link); } struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog, @@ -10452,7 +10449,7 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *pr return link; } -static struct bpf_link *attach_raw_tp(const struct bpf_program *prog, long cookie) +static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link) { static const char *const prefixes[] = { "raw_tp/", @@ -10472,10 +10469,11 @@ static struct bpf_link *attach_raw_tp(const struct bpf_program *prog, long cooki if (!tp_name) { pr_warn("prog '%s': invalid section name '%s'\n", prog->name, prog->sec_name); - return libbpf_err_ptr(-EINVAL); + return -EINVAL; } - return bpf_program__attach_raw_tracepoint(prog, tp_name); + *link = bpf_program__attach_raw_tracepoint(prog, tp_name); + return libbpf_get_error(link); } /* Common logic for all BPF program types that attach to a btf_id */ @@ -10518,14 +10516,16 @@ struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog) return bpf_program__attach_btf_id(prog); } -static struct bpf_link *attach_trace(const struct bpf_program *prog, long cookie) +static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link) { - return bpf_program__attach_trace(prog); + *link = bpf_program__attach_trace(prog); + return libbpf_get_error(*link); } -static struct bpf_link *attach_lsm(const struct bpf_program *prog, long cookie) +static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link) { - return bpf_program__attach_lsm(prog); + *link = bpf_program__attach_lsm(prog); + return libbpf_get_error(*link); } static struct bpf_link * @@ -10654,17 +10654,33 @@ bpf_program__attach_iter(const struct bpf_program *prog, return link; } -static struct bpf_link *attach_iter(const struct bpf_program *prog, long cookie) +static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link) { - return bpf_program__attach_iter(prog, NULL); + *link = bpf_program__attach_iter(prog, NULL); + return libbpf_get_error(*link); } struct bpf_link *bpf_program__attach(const struct bpf_program *prog) { - if (!prog->sec_def || !prog->sec_def->attach_fn) - return libbpf_err_ptr(-ESRCH); + struct bpf_link *link = NULL; + int err; + + if (!prog->sec_def || !prog->sec_def->prog_attach_fn) + return libbpf_err_ptr(-EOPNOTSUPP); - return prog->sec_def->attach_fn(prog, prog->sec_def->cookie); + err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link); + if (err) + return libbpf_err_ptr(err); + + /* When calling bpf_program__attach() explicitly, auto-attach support + * is expected to work, so NULL returned link is considered an error. 
+ * This is different for skeleton's attach, see comment in + * bpf_object__attach_skeleton(). + */ + if (!link) + return libbpf_err_ptr(-EOPNOTSUPP); + + return link; } static int bpf_link__detach_struct_ops(struct bpf_link *link) @@ -11805,16 +11821,30 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s) continue; /* auto-attaching not supported for this program */ - if (!prog->sec_def || !prog->sec_def->attach_fn) + if (!prog->sec_def || !prog->sec_def->prog_attach_fn) continue; - *link = bpf_program__attach(prog); - err = libbpf_get_error(*link); + /* if user already set the link manually, don't attempt auto-attach */ + if (*link) + continue; + + err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, link); if (err) { - pr_warn("failed to auto-attach program '%s': %d\n", + pr_warn("prog '%s': failed to auto-attach: %d\n", bpf_program__name(prog), err); return libbpf_err(err); } + + /* It's possible that for some SEC() definitions auto-attach + * is supported in some cases (e.g., if definition completely + * specifies target information), but is not in other cases. + * SEC("uprobe") is one such case. If user specified target + * binary and function name, such BPF program can be + * auto-attached. But if not, it shouldn't trigger skeleton's + * attach to fail. It should just be skipped. + * attach_fn signals such case with returning 0 (no error) and + * setting link to NULL. + */ } return 0; -- cgit v1.2.3 From 697f104db8a6177daa5e1ffadda09c2e9285ad18 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 4 Mar 2022 17:01:28 -0800 Subject: libbpf: Support custom SEC() handlers Allow registering and unregistering custom handlers for BPF programs. This allows user applications and libraries to plug into libbpf's declarative SEC() definition handling logic. This allows offloading complex and intricate custom logic into external libraries, but still provide a great user experience. One such example is a USDT handling library, which has a lot of code and complexity which doesn't make sense to put into libbpf directly, but it would be really great for users to be able to specify BPF programs with something like SEC("usdt/<path>:<provider>:<name>") and have the correct BPF program type set (BPF_PROG_TYPE_KPROBE, as it is a uprobe) and even support BPF skeleton's auto-attach logic. In some cases, it might even be a good idea to override libbpf's default handling, like for SEC("perf_event") programs. With a custom library, it's possible to extend the logic to support specifying a perf event specification right there in the SEC() definition without burdening libbpf with lots of custom logic or extra library dependencies (e.g., libpfm4). With the current patch it's possible to override libbpf's SEC("perf_event") handling and specify a completely custom one. Further, it's possible to specify a generic fallback handling for any SEC() that doesn't match any other custom or standard libbpf handlers. This allows to accommodate whatever legacy use cases there might be, if necessary. See doc comments for libbpf_register_prog_handler() and libbpf_unregister_prog_handler() for detailed semantics. This patch also bumps libbpf development version to v0.8 and adds new APIs there.
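As a hedged sketch of what a consumer could build on this API (using the libbpf_register_prog_handler() prototype and callback signatures from the libbpf.h hunks below; the "myusdt" prefix, the attach policy, and parsing a kernel function name out of the section suffix are all invented for illustration):

        #include <string.h>
        #include <bpf/libbpf.h>

        static int myusdt_setup(struct bpf_program *prog, long cookie)
        {
                /* called during bpf_object__open(); could validate the SEC() suffix here */
                return 0;
        }

        static int myusdt_attach(const struct bpf_program *prog, long cookie,
                                 struct bpf_link **link)
        {
                const char *sec = bpf_program__section_name(prog);

                if (!strchr(sec, '/')) {
                        /* bare SEC("myusdt"): not enough details, skip auto-attach
                         * without failing skeleton attach
                         */
                        *link = NULL;
                        return 0;
                }
                /* treat the suffix as a kernel function name, purely for the demo */
                *link = bpf_program__attach_kprobe(prog, false, strchr(sec, '/') + 1);
                return libbpf_get_error(*link);
        }

        int register_myusdt_handler(void)
        {
                LIBBPF_OPTS(libbpf_prog_handler_opts, opts,
                        .prog_setup_fn = myusdt_setup,
                        .prog_attach_fn = myusdt_attach,
                );

                /* "myusdt+" matches both SEC("myusdt") and SEC("myusdt/...") */
                return libbpf_register_prog_handler("myusdt+", BPF_PROG_TYPE_KPROBE, 0, &opts);
        }

On success the returned handler ID can later be passed to libbpf_unregister_prog_handler() to undo the registration.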
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Tested-by: Alan Maguire Reviewed-by: Alan Maguire Link: https://lore.kernel.org/bpf/20220305010129.1549719-3-andrii@kernel.org --- tools/lib/bpf/libbpf.c | 204 ++++++++++++++++++++++++++++++----------- tools/lib/bpf/libbpf.h | 109 ++++++++++++++++++++++ tools/lib/bpf/libbpf.map | 6 ++ tools/lib/bpf/libbpf_version.h | 2 +- 4 files changed, 268 insertions(+), 53 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 1da4a438ba00..43161fdd44bb 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -201,13 +201,6 @@ struct reloc_desc { }; }; -typedef int (*libbpf_prog_setup_fn_t)(struct bpf_program *prog, long cookie); -typedef int (*libbpf_prog_prepare_load_fn_t)(struct bpf_program *prog, - struct bpf_prog_load_opts *opts, long cookie); -/* If auto-attach is not supported, callback should return 0 and set link to NULL */ -typedef int (*libbpf_prog_attach_fn_t)(const struct bpf_program *prog, long cookie, - struct bpf_link **link); - /* stored as sec_def->cookie for all libbpf-supported SEC()s */ enum sec_def_flags { SEC_NONE = 0, @@ -235,10 +228,11 @@ enum sec_def_flags { }; struct bpf_sec_def { - const char *sec; + char *sec; enum bpf_prog_type prog_type; enum bpf_attach_type expected_attach_type; long cookie; + int handler_id; libbpf_prog_setup_fn_t prog_setup_fn; libbpf_prog_prepare_load_fn_t prog_prepare_load_fn; @@ -8590,7 +8584,7 @@ int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log } #define SEC_DEF(sec_pfx, ptype, atype, flags, ...) { \ - .sec = sec_pfx, \ + .sec = (char *)sec_pfx, \ .prog_type = BPF_PROG_TYPE_##ptype, \ .expected_attach_type = atype, \ .cookie = (long)(flags), \ @@ -8683,61 +8677,167 @@ static const struct bpf_sec_def section_defs[] = { SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE | SEC_SLOPPY_PFX), }; -#define MAX_TYPE_NAME_SIZE 32 +static size_t custom_sec_def_cnt; +static struct bpf_sec_def *custom_sec_defs; +static struct bpf_sec_def custom_fallback_def; +static bool has_custom_fallback_def; -static const struct bpf_sec_def *find_sec_def(const char *sec_name) +static int last_custom_sec_def_handler_id; + +int libbpf_register_prog_handler(const char *sec, + enum bpf_prog_type prog_type, + enum bpf_attach_type exp_attach_type, + const struct libbpf_prog_handler_opts *opts) { - const struct bpf_sec_def *sec_def; - enum sec_def_flags sec_flags; - int i, n = ARRAY_SIZE(section_defs), len; - bool strict = libbpf_mode & LIBBPF_STRICT_SEC_NAME; + struct bpf_sec_def *sec_def; - for (i = 0; i < n; i++) { - sec_def = §ion_defs[i]; - sec_flags = sec_def->cookie; - len = strlen(sec_def->sec); + if (!OPTS_VALID(opts, libbpf_prog_handler_opts)) + return libbpf_err(-EINVAL); - /* "type/" always has to have proper SEC("type/extras") form */ - if (sec_def->sec[len - 1] == '/') { - if (str_has_pfx(sec_name, sec_def->sec)) - return sec_def; - continue; - } + if (last_custom_sec_def_handler_id == INT_MAX) /* prevent overflow */ + return libbpf_err(-E2BIG); - /* "type+" means it can be either exact SEC("type") or - * well-formed SEC("type/extras") with proper '/' separator - */ - if (sec_def->sec[len - 1] == '+') { - len--; - /* not even a prefix */ - if (strncmp(sec_name, sec_def->sec, len) != 0) - continue; - /* exact match or has '/' separator */ - if (sec_name[len] == '\0' || sec_name[len] == '/') - return sec_def; - continue; - } + if (sec) { + sec_def = libbpf_reallocarray(custom_sec_defs, 
custom_sec_def_cnt + 1, + sizeof(*sec_def)); + if (!sec_def) + return libbpf_err(-ENOMEM); - /* SEC_SLOPPY_PFX definitions are allowed to be just prefix - * matches, unless strict section name mode - * (LIBBPF_STRICT_SEC_NAME) is enabled, in which case the - * match has to be exact. - */ - if ((sec_flags & SEC_SLOPPY_PFX) && !strict) { - if (str_has_pfx(sec_name, sec_def->sec)) - return sec_def; - continue; - } + custom_sec_defs = sec_def; + sec_def = &custom_sec_defs[custom_sec_def_cnt]; + } else { + if (has_custom_fallback_def) + return libbpf_err(-EBUSY); - /* Definitions not marked SEC_SLOPPY_PFX (e.g., - * SEC("syscall")) are exact matches in both modes. - */ - if (strcmp(sec_name, sec_def->sec) == 0) + sec_def = &custom_fallback_def; + } + + sec_def->sec = sec ? strdup(sec) : NULL; + if (sec && !sec_def->sec) + return libbpf_err(-ENOMEM); + + sec_def->prog_type = prog_type; + sec_def->expected_attach_type = exp_attach_type; + sec_def->cookie = OPTS_GET(opts, cookie, 0); + + sec_def->prog_setup_fn = OPTS_GET(opts, prog_setup_fn, NULL); + sec_def->prog_prepare_load_fn = OPTS_GET(opts, prog_prepare_load_fn, NULL); + sec_def->prog_attach_fn = OPTS_GET(opts, prog_attach_fn, NULL); + + sec_def->handler_id = ++last_custom_sec_def_handler_id; + + if (sec) + custom_sec_def_cnt++; + else + has_custom_fallback_def = true; + + return sec_def->handler_id; +} + +int libbpf_unregister_prog_handler(int handler_id) +{ + struct bpf_sec_def *sec_defs; + int i; + + if (handler_id <= 0) + return libbpf_err(-EINVAL); + + if (has_custom_fallback_def && custom_fallback_def.handler_id == handler_id) { + memset(&custom_fallback_def, 0, sizeof(custom_fallback_def)); + has_custom_fallback_def = false; + return 0; + } + + for (i = 0; i < custom_sec_def_cnt; i++) { + if (custom_sec_defs[i].handler_id == handler_id) + break; + } + + if (i == custom_sec_def_cnt) + return libbpf_err(-ENOENT); + + free(custom_sec_defs[i].sec); + for (i = i + 1; i < custom_sec_def_cnt; i++) + custom_sec_defs[i - 1] = custom_sec_defs[i]; + custom_sec_def_cnt--; + + /* try to shrink the array, but it's ok if we couldn't */ + sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs)); + if (sec_defs) + custom_sec_defs = sec_defs; + + return 0; +} + +static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_name, + bool allow_sloppy) +{ + size_t len = strlen(sec_def->sec); + + /* "type/" always has to have proper SEC("type/extras") form */ + if (sec_def->sec[len - 1] == '/') { + if (str_has_pfx(sec_name, sec_def->sec)) + return true; + return false; + } + + /* "type+" means it can be either exact SEC("type") or + * well-formed SEC("type/extras") with proper '/' separator + */ + if (sec_def->sec[len - 1] == '+') { + len--; + /* not even a prefix */ + if (strncmp(sec_name, sec_def->sec, len) != 0) + return false; + /* exact match or has '/' separator */ + if (sec_name[len] == '\0' || sec_name[len] == '/') + return true; + return false; + } + + /* SEC_SLOPPY_PFX definitions are allowed to be just prefix + * matches, unless strict section name mode + * (LIBBPF_STRICT_SEC_NAME) is enabled, in which case the + * match has to be exact. + */ + if (allow_sloppy && str_has_pfx(sec_name, sec_def->sec)) + return true; + + /* Definitions not marked SEC_SLOPPY_PFX (e.g., + * SEC("syscall")) are exact matches in both modes. 
+ */ + return strcmp(sec_name, sec_def->sec) == 0; +} + +static const struct bpf_sec_def *find_sec_def(const char *sec_name) +{ + const struct bpf_sec_def *sec_def; + int i, n; + bool strict = libbpf_mode & LIBBPF_STRICT_SEC_NAME, allow_sloppy; + + n = custom_sec_def_cnt; + for (i = 0; i < n; i++) { + sec_def = &custom_sec_defs[i]; + if (sec_def_matches(sec_def, sec_name, false)) + return sec_def; + } + + n = ARRAY_SIZE(section_defs); + for (i = 0; i < n; i++) { + sec_def = §ion_defs[i]; + allow_sloppy = (sec_def->cookie & SEC_SLOPPY_PFX) && !strict; + if (sec_def_matches(sec_def, sec_name, allow_sloppy)) return sec_def; } + + if (has_custom_fallback_def) + return &custom_fallback_def; + return NULL; } +#define MAX_TYPE_NAME_SIZE 32 + static char *libbpf_get_type_names(bool attach_type) { int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE; diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index c8d8daad212e..c1b0c2ef14d8 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -1328,6 +1328,115 @@ LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker, LIBBPF_API int bpf_linker__finalize(struct bpf_linker *linker); LIBBPF_API void bpf_linker__free(struct bpf_linker *linker); +/* + * Custom handling of BPF program's SEC() definitions + */ + +struct bpf_prog_load_opts; /* defined in bpf.h */ + +/* Called during bpf_object__open() for each recognized BPF program. Callback + * can use various bpf_program__set_*() setters to adjust whatever properties + * are necessary. + */ +typedef int (*libbpf_prog_setup_fn_t)(struct bpf_program *prog, long cookie); + +/* Called right before libbpf performs bpf_prog_load() to load BPF program + * into the kernel. Callback can adjust opts as necessary. + */ +typedef int (*libbpf_prog_prepare_load_fn_t)(struct bpf_program *prog, + struct bpf_prog_load_opts *opts, long cookie); + +/* Called during skeleton attach or through bpf_program__attach(). If + * auto-attach is not supported, callback should return 0 and set link to + * NULL (it's not considered an error during skeleton attach, but it will be + * an error for bpf_program__attach() calls). On error, error should be + * returned directly and link set to NULL. On success, return 0 and set link + * to a valid struct bpf_link. + */ +typedef int (*libbpf_prog_attach_fn_t)(const struct bpf_program *prog, long cookie, + struct bpf_link **link); + +struct libbpf_prog_handler_opts { + /* size of this struct, for forward/backward compatiblity */ + size_t sz; + /* User-provided value that is passed to prog_setup_fn, + * prog_prepare_load_fn, and prog_attach_fn callbacks. Allows user to + * register one set of callbacks for multiple SEC() definitions and + * still be able to distinguish them, if necessary. For example, + * libbpf itself is using this to pass necessary flags (e.g., + * sleepable flag) to a common internal SEC() handler. + */ + long cookie; + /* BPF program initialization callback (see libbpf_prog_setup_fn_t). + * Callback is optional, pass NULL if it's not necessary. + */ + libbpf_prog_setup_fn_t prog_setup_fn; + /* BPF program loading callback (see libbpf_prog_prepare_load_fn_t). + * Callback is optional, pass NULL if it's not necessary. + */ + libbpf_prog_prepare_load_fn_t prog_prepare_load_fn; + /* BPF program attach callback (see libbpf_prog_attach_fn_t). + * Callback is optional, pass NULL if it's not necessary. 
+ */ + libbpf_prog_attach_fn_t prog_attach_fn; +}; +#define libbpf_prog_handler_opts__last_field prog_attach_fn + +/** + * @brief **libbpf_register_prog_handler()** registers a custom BPF program + * SEC() handler. + * @param sec section prefix for which custom handler is registered + * @param prog_type BPF program type associated with specified section + * @param exp_attach_type Expected BPF attach type associated with specified section + * @param opts optional cookie, callbacks, and other extra options + * @return Non-negative handler ID is returned on success. This handler ID has + * to be passed to *libbpf_unregister_prog_handler()* to unregister such + * custom handler. Negative error code is returned on error. + * + * *sec* defines which SEC() definitions are handled by this custom handler + * registration. *sec* can have few different forms: + * - if *sec* is just a plain string (e.g., "abc"), it will match only + * SEC("abc"). If BPF program specifies SEC("abc/whatever") it will result + * in an error; + * - if *sec* is of the form "abc/", proper SEC() form is + * SEC("abc/something"), where acceptable "something" should be checked by + * *prog_init_fn* callback, if there are additional restrictions; + * - if *sec* is of the form "abc+", it will successfully match both + * SEC("abc") and SEC("abc/whatever") forms; + * - if *sec* is NULL, custom handler is registered for any BPF program that + * doesn't match any of the registered (custom or libbpf's own) SEC() + * handlers. There could be only one such generic custom handler registered + * at any given time. + * + * All custom handlers (except the one with *sec* == NULL) are processed + * before libbpf's own SEC() handlers. It is allowed to "override" libbpf's + * SEC() handlers by registering custom ones for the same section prefix + * (i.e., it's possible to have custom SEC("perf_event/LLC-load-misses") + * handler). + * + * Note, like much of global libbpf APIs (e.g., libbpf_set_print(), + * libbpf_set_strict_mode(), etc)) these APIs are not thread-safe. User needs + * to ensure synchronization if there is a risk of running this API from + * multiple threads simultaneously. + */ +LIBBPF_API int libbpf_register_prog_handler(const char *sec, + enum bpf_prog_type prog_type, + enum bpf_attach_type exp_attach_type, + const struct libbpf_prog_handler_opts *opts); +/** + * @brief *libbpf_unregister_prog_handler()* unregisters previously registered + * custom BPF program SEC() handler. + * @param handler_id handler ID returned by *libbpf_register_prog_handler()* + * after successful registration + * @return 0 on success, negative error code if handler isn't found + * + * Note, like much of global libbpf APIs (e.g., libbpf_set_print(), + * libbpf_set_strict_mode(), etc)) these APIs are not thread-safe. User needs + * to ensure synchronization if there is a risk of running this API from + * multiple threads simultaneously. 
+ */ +LIBBPF_API int libbpf_unregister_prog_handler(int handler_id); + #ifdef __cplusplus } /* extern "C" */ #endif diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 47e70c9058d9..df1b947792c8 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -439,3 +439,9 @@ LIBBPF_0.7.0 { libbpf_probe_bpf_prog_type; libbpf_set_memlock_rlim_max; } LIBBPF_0.6.0; + +LIBBPF_0.8.0 { + global: + libbpf_register_prog_handler; + libbpf_unregister_prog_handler; +} LIBBPF_0.7.0; diff --git a/tools/lib/bpf/libbpf_version.h b/tools/lib/bpf/libbpf_version.h index 0fefefc3500b..61f2039404b6 100644 --- a/tools/lib/bpf/libbpf_version.h +++ b/tools/lib/bpf/libbpf_version.h @@ -4,6 +4,6 @@ #define __LIBBPF_VERSION_H #define LIBBPF_MAJOR_VERSION 0 -#define LIBBPF_MINOR_VERSION 7 +#define LIBBPF_MINOR_VERSION 8 #endif /* __LIBBPF_VERSION_H */ -- cgit v1.2.3 From 9c6e6a80ee741adf6cb3cfd8eef7d1554f91fceb Mon Sep 17 00:00:00 2001 From: lic121 Date: Tue, 1 Mar 2022 13:26:23 +0000 Subject: libbpf: Unmap rings when umem deleted xsk_umem__create() does mmap for fill/comp rings, but xsk_umem__delete() doesn't do the unmap. This works fine for regular cases, because xsk_socket__delete() does unmap for the rings. But for the case that xsk_socket__create_shared() fails, umem rings are not unmapped. fill_save/comp_save are checked to determine if rings have already been unmapped by xsk. If fill_save and comp_save are NULL, it means that the rings have already been used by xsk. Then they are supposed to be unmapped by xsk_socket__delete(). Otherwise, xsk_umem__delete() does the unmap. Fixes: 2f6324a3937f ("libbpf: Support shared umems between queues and devices") Signed-off-by: Cheng Li Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220301132623.GA19995@vscode.7~ --- tools/lib/bpf/xsk.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c index edafe56664f3..32a2f5749c71 100644 --- a/tools/lib/bpf/xsk.c +++ b/tools/lib/bpf/xsk.c @@ -1193,12 +1193,23 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, int xsk_umem__delete(struct xsk_umem *umem) { + struct xdp_mmap_offsets off; + int err; + if (!umem) return 0; if (umem->refcount) return -EBUSY; + err = xsk_get_mmap_offsets(umem->fd, &off); + if (!err && umem->fill_save && umem->comp_save) { + munmap(umem->fill_save->ring - off.fr.desc, + off.fr.desc + umem->config.fill_size * sizeof(__u64)); + munmap(umem->comp_save->ring - off.cr.desc, + off.cr.desc + umem->config.comp_size * sizeof(__u64)); + } + close(umem->fd); free(umem); -- cgit v1.2.3 From 04b6de649e124f2ea9693a25a744e646c1203ff9 Mon Sep 17 00:00:00 2001 From: Guo Zhengkui Date: Sun, 6 Mar 2022 10:34:26 +0800 Subject: libbpf: Fix array_size.cocci warning Fix the following coccicheck warning: tools/lib/bpf/bpf.c:114:31-32: WARNING: Use ARRAY_SIZE tools/lib/bpf/xsk.c:484:34-35: WARNING: Use ARRAY_SIZE tools/lib/bpf/xsk.c:485:35-36: WARNING: Use ARRAY_SIZE It has been tested with gcc (Debian 8.3.0-6) 8.3.0 on x86_64.
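The warning simply asks for the ARRAY_SIZE() helper in place of the open-coded division; in the tools/ build it comes from tools/include/linux/kernel.h. A stand-alone illustration of the equivalence, with a simplified macro that omits the kernel's extra must-be-array type check:

        #include <stdio.h>

        #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

        int main(void)
        {
                int insns[] = { 1, 2, 3, 4 };

                /* open-coded form flagged by coccicheck */
                size_t n1 = sizeof(insns) / sizeof(insns[0]);
                /* equivalent, more readable form */
                size_t n2 = ARRAY_SIZE(insns);

                printf("%zu == %zu\n", n1, n2);
                return 0;
        }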
Signed-off-by: Guo Zhengkui Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220306023426.19324-1-guozhengkui@vivo.com --- tools/lib/bpf/bpf.c | 3 ++- tools/lib/bpf/xsk.c | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 418b259166f8..3c7c180294fa 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include "bpf.h" @@ -111,7 +112,7 @@ int probe_memcg_account(void) BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns), BPF_EXIT_INSN(), }; - size_t insn_cnt = sizeof(insns) / sizeof(insns[0]); + size_t insn_cnt = ARRAY_SIZE(insns); union bpf_attr attr; int prog_fd; diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c index 32a2f5749c71..af136f73b09d 100644 --- a/tools/lib/bpf/xsk.c +++ b/tools/lib/bpf/xsk.c @@ -481,8 +481,8 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk) BPF_EMIT_CALL(BPF_FUNC_redirect_map), BPF_EXIT_INSN(), }; - size_t insns_cnt[] = {sizeof(prog) / sizeof(struct bpf_insn), - sizeof(prog_redirect_flags) / sizeof(struct bpf_insn), + size_t insns_cnt[] = {ARRAY_SIZE(prog), + ARRAY_SIZE(prog_redirect_flags), }; struct bpf_insn *progs[] = {prog, prog_redirect_flags}; enum xsk_prog option = get_xsk_prog(); -- cgit v1.2.3 From 24592ad1ab18416a850f03d46d07ae483f808895 Mon Sep 17 00:00:00 2001 From: Toke Høiland-Jørgensen Date: Wed, 9 Mar 2022 11:53:44 +0100 Subject: libbpf: Support batch_size option to bpf_prog_test_run MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add support for setting the new batch_size parameter to BPF_PROG_TEST_RUN to libbpf; just add it as an option and pass it through to the kernel. Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20220309105346.100053-4-toke@redhat.com --- tools/lib/bpf/bpf.c | 1 + tools/lib/bpf/bpf.h | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 3c7c180294fa..f69ce3a01385 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -995,6 +995,7 @@ int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts) memset(&attr, 0, sizeof(attr)); attr.test.prog_fd = prog_fd; + attr.test.batch_size = OPTS_GET(opts, batch_size, 0); attr.test.cpu = OPTS_GET(opts, cpu, 0); attr.test.flags = OPTS_GET(opts, flags, 0); attr.test.repeat = OPTS_GET(opts, repeat, 0); diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 16b21757b8bf..5253cb4a4c0a 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -512,8 +512,9 @@ struct bpf_test_run_opts { __u32 duration; /* out: average per repetition in ns */ __u32 flags; __u32 cpu; + __u32 batch_size; }; -#define bpf_test_run_opts__last_field cpu +#define bpf_test_run_opts__last_field batch_size LIBBPF_API int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts); -- cgit v1.2.3 From 85153ac06283408e6ccaf002b02fd85f0bdab94b Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 16 Mar 2022 13:24:13 +0100 Subject: libbpf: Add libbpf_kallsyms_parse function Move the kallsyms parsing into an internal libbpf_kallsyms_parse() function, so it can be used from other places. It will be used in the following changes.
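Given the kallsyms_cb_t callback signature this introduces (see the libbpf_internal.h hunk below), a caller inside libbpf could resolve a single symbol roughly like this. This is a sketch, not code from the patch; the "ksys_read" name is arbitrary and the internal header is assumed to be on the include path:

        #include <string.h>
        #include "libbpf_internal.h"

        struct find_sym_ctx {
                const char *name;
                unsigned long long addr;
        };

        static int find_sym_cb(unsigned long long sym_addr, char sym_type,
                               const char *sym_name, void *ctx)
        {
                struct find_sym_ctx *c = ctx;

                if (strcmp(sym_name, c->name) != 0)
                        return 0;       /* 0 means: keep scanning /proc/kallsyms */
                c->addr = sym_addr;
                return 1;               /* non-zero stops the scan and is propagated */
        }

        static unsigned long long resolve_ksym(const char *name)
        {
                struct find_sym_ctx ctx = { .name = name };

                /* returns 1 (the callback's value) if found, 0 if not, <0 on error */
                if (libbpf_kallsyms_parse(find_sym_cb, &ctx) == 1)
                        return ctx.addr;
                return 0;
        }

A usage such as resolve_ksym("ksys_read") then yields the symbol's address, or 0 when it isn't present.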
Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20220316122419.933957-8-jolsa@kernel.org --- tools/lib/bpf/libbpf.c | 62 +++++++++++++++++++++++++---------------- tools/lib/bpf/libbpf_internal.h | 5 ++++ 2 files changed, 43 insertions(+), 24 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 43161fdd44bb..1ca520a29fdb 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -7171,12 +7171,10 @@ static int bpf_object__sanitize_maps(struct bpf_object *obj) return 0; } -static int bpf_object__read_kallsyms_file(struct bpf_object *obj) +int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx) { char sym_type, sym_name[500]; unsigned long long sym_addr; - const struct btf_type *t; - struct extern_desc *ext; int ret, err = 0; FILE *f; @@ -7195,35 +7193,51 @@ static int bpf_object__read_kallsyms_file(struct bpf_object *obj) if (ret != 3) { pr_warn("failed to read kallsyms entry: %d\n", ret); err = -EINVAL; - goto out; + break; } - ext = find_extern_by_name(obj, sym_name); - if (!ext || ext->type != EXT_KSYM) - continue; - - t = btf__type_by_id(obj->btf, ext->btf_id); - if (!btf_is_var(t)) - continue; - - if (ext->is_set && ext->ksym.addr != sym_addr) { - pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n", - sym_name, ext->ksym.addr, sym_addr); - err = -EINVAL; - goto out; - } - if (!ext->is_set) { - ext->is_set = true; - ext->ksym.addr = sym_addr; - pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr); - } + err = cb(sym_addr, sym_type, sym_name, ctx); + if (err) + break; } -out: fclose(f); return err; } +static int kallsyms_cb(unsigned long long sym_addr, char sym_type, + const char *sym_name, void *ctx) +{ + struct bpf_object *obj = ctx; + const struct btf_type *t; + struct extern_desc *ext; + + ext = find_extern_by_name(obj, sym_name); + if (!ext || ext->type != EXT_KSYM) + return 0; + + t = btf__type_by_id(obj->btf, ext->btf_id); + if (!btf_is_var(t)) + return 0; + + if (ext->is_set && ext->ksym.addr != sym_addr) { + pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n", + sym_name, ext->ksym.addr, sym_addr); + return -EINVAL; + } + if (!ext->is_set) { + ext->is_set = true; + ext->ksym.addr = sym_addr; + pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr); + } + return 0; +} + +static int bpf_object__read_kallsyms_file(struct bpf_object *obj) +{ + return libbpf_kallsyms_parse(kallsyms_cb, obj); +} + static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name, __u16 kind, struct btf **res_btf, struct module_btf **res_mod_btf) diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index 4fda8bdf0a0d..b6247dc7f8eb 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -449,6 +449,11 @@ __s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name, extern enum libbpf_strict_mode libbpf_mode; +typedef int (*kallsyms_cb_t)(unsigned long long sym_addr, char sym_type, + const char *sym_name, void *ctx); + +int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *arg); + /* handle direct returned errors */ static inline int libbpf_err(int ret) { -- cgit v1.2.3 From 5117c26e877352bcabd4871e6bcdebed4857a88d Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 16 Mar 2022 13:24:14 +0100 Subject: libbpf: Add bpf_link_create support for multi kprobes Adding new kprobe_multi struct to bpf_link_create_opts object to pass multiple kprobe data to 
link_create attr uapi. Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220316122419.933957-9-jolsa@kernel.org --- tools/lib/bpf/bpf.c | 9 +++++++++ tools/lib/bpf/bpf.h | 9 ++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index f69ce3a01385..cf27251adb92 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -854,6 +854,15 @@ int bpf_link_create(int prog_fd, int target_fd, if (!OPTS_ZEROED(opts, perf_event)) return libbpf_err(-EINVAL); break; + case BPF_TRACE_KPROBE_MULTI: + attr.link_create.kprobe_multi.flags = OPTS_GET(opts, kprobe_multi.flags, 0); + attr.link_create.kprobe_multi.cnt = OPTS_GET(opts, kprobe_multi.cnt, 0); + attr.link_create.kprobe_multi.syms = ptr_to_u64(OPTS_GET(opts, kprobe_multi.syms, 0)); + attr.link_create.kprobe_multi.addrs = ptr_to_u64(OPTS_GET(opts, kprobe_multi.addrs, 0)); + attr.link_create.kprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, kprobe_multi.cookies, 0)); + if (!OPTS_ZEROED(opts, kprobe_multi)) + return libbpf_err(-EINVAL); + break; default: if (!OPTS_ZEROED(opts, flags)) return libbpf_err(-EINVAL); diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 5253cb4a4c0a..f4b4afb6d4ba 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -413,10 +413,17 @@ struct bpf_link_create_opts { struct { __u64 bpf_cookie; } perf_event; + struct { + __u32 flags; + __u32 cnt; + const char **syms; + const unsigned long *addrs; + const __u64 *cookies; + } kprobe_multi; }; size_t :0; }; -#define bpf_link_create_opts__last_field perf_event +#define bpf_link_create_opts__last_field kprobe_multi.cookies LIBBPF_API int bpf_link_create(int prog_fd, int target_fd, enum bpf_attach_type attach_type, -- cgit v1.2.3 From ddc6b04989eb099368d3e6b6eaf5a6b181a36f91 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 16 Mar 2022 13:24:15 +0100 Subject: libbpf: Add bpf_program__attach_kprobe_multi_opts function Adding bpf_program__attach_kprobe_multi_opts function for attaching a kprobe program to multiple functions. struct bpf_link * bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, const char *pattern, const struct bpf_kprobe_multi_opts *opts); The user can specify functions to attach with the 'pattern' argument, which allows wildcards ('*' and '?' supported), or provide symbols or addresses directly through the opts argument. These three options are mutually exclusive. When using symbols or addresses, the user can also provide a cookie value for each symbol/address that can be retrieved later in the BPF program with the bpf_get_attach_cookie helper. struct bpf_kprobe_multi_opts { size_t sz; const char **syms; const unsigned long *addrs; const __u64 *cookies; size_t cnt; bool retprobe; size_t :0; }; Symbols, addresses and cookies are provided through the opts object (syms/addrs/cookies) as array pointers with a specified count (cnt). Each cookie value is paired with the function address or symbol at the same array index. The program can also be attached as a return probe if 'retprobe' is set. For quick usage with a NULL opts argument, like: bpf_program__attach_kprobe_multi_opts(prog, "ksys_*", NULL) 'prog' will be attached as a kprobe to all 'ksys_*' functions. Also adding new program sections for automatic attachment: kprobe.multi/<symbol_pattern> kretprobe.multi/<symbol_pattern> The symbol_pattern is used as the 'pattern' argument in the bpf_program__attach_kprobe_multi_opts function.
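A minimal usage sketch (program handle, symbol names and cookie values are illustrative; error handling is trimmed):

    /* pattern-based attach, no opts needed */
    struct bpf_link *link;

    link = bpf_program__attach_kprobe_multi_opts(prog, "ksys_*", NULL);

    /* explicit symbols, each paired with the cookie at the same index */
    const char *syms[] = { "ksys_read", "ksys_write" };
    __u64 cookies[] = { 1, 2 };
    LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
            .syms = syms,
            .cookies = cookies,
            .cnt = 2,
    );

    link = bpf_program__attach_kprobe_multi_opts(prog, NULL, &opts);
    if (libbpf_get_error(link))
            link = NULL;    /* attach failed */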
Signed-off-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20220316122419.933957-10-jolsa@kernel.org --- tools/lib/bpf/libbpf.c | 160 +++++++++++++++++++++++++++++++++++++++++++++++ tools/lib/bpf/libbpf.h | 23 +++++++ tools/lib/bpf/libbpf.map | 1 + 3 files changed, 184 insertions(+) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 1ca520a29fdb..f3a31478e23b 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -8610,6 +8610,7 @@ static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link); +static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link); static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link); @@ -8621,6 +8622,8 @@ static const struct bpf_sec_def section_defs[] = { SEC_DEF("uprobe/", KPROBE, 0, SEC_NONE), SEC_DEF("kretprobe/", KPROBE, 0, SEC_NONE, attach_kprobe), SEC_DEF("uretprobe/", KPROBE, 0, SEC_NONE), + SEC_DEF("kprobe.multi/", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi), + SEC_DEF("kretprobe.multi/", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi), SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE), SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE | SEC_SLOPPY_PFX | SEC_DEPRECATED), SEC_DEF("action", SCHED_ACT, 0, SEC_NONE | SEC_SLOPPY_PFX), @@ -10224,6 +10227,139 @@ struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog, return bpf_program__attach_kprobe_opts(prog, func_name, &opts); } +/* Adapted from perf/util/string.c */ +static bool glob_match(const char *str, const char *pat) +{ + while (*str && *pat && *pat != '*') { + if (*pat == '?') { /* Matches any single character */ + str++; + pat++; + continue; + } + if (*str != *pat) + return false; + str++; + pat++; + } + /* Check wild card */ + if (*pat == '*') { + while (*pat == '*') + pat++; + if (!*pat) /* Tail wild card matches all */ + return true; + while (*str) + if (glob_match(str++, pat)) + return true; + } + return !*str && !*pat; +} + +struct kprobe_multi_resolve { + const char *pattern; + unsigned long *addrs; + size_t cap; + size_t cnt; +}; + +static int +resolve_kprobe_multi_cb(unsigned long long sym_addr, char sym_type, + const char *sym_name, void *ctx) +{ + struct kprobe_multi_resolve *res = ctx; + int err; + + if (!glob_match(sym_name, res->pattern)) + return 0; + + err = libbpf_ensure_mem((void **) &res->addrs, &res->cap, sizeof(unsigned long), + res->cnt + 1); + if (err) + return err; + + res->addrs[res->cnt++] = (unsigned long) sym_addr; + return 0; +} + +struct bpf_link * +bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, + const char *pattern, + const struct bpf_kprobe_multi_opts *opts) +{ + LIBBPF_OPTS(bpf_link_create_opts, lopts); + struct kprobe_multi_resolve res = { + .pattern = pattern, + }; + struct bpf_link *link = NULL; + char errmsg[STRERR_BUFSIZE]; + const unsigned long *addrs; + int err, link_fd, prog_fd; + const __u64 *cookies; + const char **syms; + bool retprobe; + size_t cnt; + + if (!OPTS_VALID(opts, bpf_kprobe_multi_opts)) + return libbpf_err_ptr(-EINVAL); + + syms = 
OPTS_GET(opts, syms, false); + addrs = OPTS_GET(opts, addrs, false); + cnt = OPTS_GET(opts, cnt, false); + cookies = OPTS_GET(opts, cookies, false); + + if (!pattern && !addrs && !syms) + return libbpf_err_ptr(-EINVAL); + if (pattern && (addrs || syms || cookies || cnt)) + return libbpf_err_ptr(-EINVAL); + if (!pattern && !cnt) + return libbpf_err_ptr(-EINVAL); + if (addrs && syms) + return libbpf_err_ptr(-EINVAL); + + if (pattern) { + err = libbpf_kallsyms_parse(resolve_kprobe_multi_cb, &res); + if (err) + goto error; + if (!res.cnt) { + err = -ENOENT; + goto error; + } + addrs = res.addrs; + cnt = res.cnt; + } + + retprobe = OPTS_GET(opts, retprobe, false); + + lopts.kprobe_multi.syms = syms; + lopts.kprobe_multi.addrs = addrs; + lopts.kprobe_multi.cookies = cookies; + lopts.kprobe_multi.cnt = cnt; + lopts.kprobe_multi.flags = retprobe ? BPF_F_KPROBE_MULTI_RETURN : 0; + + link = calloc(1, sizeof(*link)); + if (!link) { + err = -ENOMEM; + goto error; + } + link->detach = &bpf_link__detach_fd; + + prog_fd = bpf_program__fd(prog); + link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &lopts); + if (link_fd < 0) { + err = -errno; + pr_warn("prog '%s': failed to attach: %s\n", + prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); + goto error; + } + link->fd = link_fd; + free(res.addrs); + return link; + +error: + free(link); + free(res.addrs); + return libbpf_err_ptr(err); +} + static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link) { DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts); @@ -10255,6 +10391,30 @@ static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf return libbpf_get_error(*link); } +static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link) +{ + LIBBPF_OPTS(bpf_kprobe_multi_opts, opts); + const char *spec; + char *pattern; + int n; + + opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/"); + if (opts.retprobe) + spec = prog->sec_name + sizeof("kretprobe.multi/") - 1; + else + spec = prog->sec_name + sizeof("kprobe.multi/") - 1; + + n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern); + if (n < 1) { + pr_warn("kprobe multi pattern is invalid: %s\n", pattern); + return -EINVAL; + } + + *link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts); + free(pattern); + return libbpf_get_error(*link); +} + static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz, const char *binary_path, uint64_t offset) { diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index c1b0c2ef14d8..d5239fb4abdc 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -425,6 +425,29 @@ bpf_program__attach_kprobe_opts(const struct bpf_program *prog, const char *func_name, const struct bpf_kprobe_opts *opts); +struct bpf_kprobe_multi_opts { + /* size of this struct, for forward/backward compatibility */ + size_t sz; + /* array of function symbols to attach */ + const char **syms; + /* array of function addresses to attach */ + const unsigned long *addrs; + /* array of user-provided values fetchable through bpf_get_attach_cookie */ + const __u64 *cookies; + /* number of elements in syms/addrs/cookies arrays */ + size_t cnt; + /* create return kprobes */ + bool retprobe; + size_t :0; +}; + +#define bpf_kprobe_multi_opts__last_field retprobe + +LIBBPF_API struct bpf_link * +bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog, + const char *pattern, + const struct bpf_kprobe_multi_opts *opts); + struct bpf_uprobe_opts { /* size of this 
struct, for forward/backward compatiblity */ size_t sz; diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index df1b947792c8..554c56e6e5d3 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -444,4 +444,5 @@ LIBBPF_0.8.0 { global: libbpf_register_prog_handler; libbpf_unregister_prog_handler; + bpf_program__attach_kprobe_multi_opts; } LIBBPF_0.7.0; -- cgit v1.2.3 From bc380eb9d04812eda23fd1d2904389012b50d946 Mon Sep 17 00:00:00 2001 From: Delyan Kratunov Date: Wed, 16 Mar 2022 23:37:24 +0000 Subject: libbpf: .text routines are subprograms in strict mode Currently, libbpf considers a single routine in .text to be a program. This is particularly confusing when it comes to library objects - a single routine meant to be used as an extern will instead be considered a bpf_program. This patch hides this compatibility behavior behind the pre-existing SEC_NAME strict mode flag. Signed-off-by: Delyan Kratunov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/018de8d0d67c04bf436055270d35d394ba393505.1647473511.git.delyank@fb.com --- tools/lib/bpf/libbpf.c | 7 +++++++ tools/lib/bpf/libbpf_legacy.h | 4 ++++ 2 files changed, 11 insertions(+) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index f3a31478e23b..3b7eb6dcc2f4 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -3832,7 +3832,14 @@ static bool prog_is_subprog(const struct bpf_object *obj, * .text programs are subprograms (even if they are not called from * other programs), because libbpf never explicitly supported mixing * SEC()-designated BPF programs and .text entry-point BPF programs. + * + * In libbpf 1.0 strict mode, we always consider .text + * programs to be subprograms. */ + + if (libbpf_mode & LIBBPF_STRICT_SEC_NAME) + return prog->sec_idx == obj->efile.text_shndx; + return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1; } diff --git a/tools/lib/bpf/libbpf_legacy.h b/tools/lib/bpf/libbpf_legacy.h index a283cf031665..d7bcbd01f66f 100644 --- a/tools/lib/bpf/libbpf_legacy.h +++ b/tools/lib/bpf/libbpf_legacy.h @@ -54,6 +54,10 @@ enum libbpf_strict_mode { * * Note, in this mode the program pin path will be based on the * function name instead of section name. + * + * Additionally, routines in the .text section are always considered + * sub-programs. Legacy behavior allows for a single routine in .text + * to be a program. */ LIBBPF_STRICT_SEC_NAME = 0x04, /* -- cgit v1.2.3 From 262cfb74ffdaed06e65b8b20e10241768a9a2e18 Mon Sep 17 00:00:00 2001 From: Delyan Kratunov Date: Wed, 16 Mar 2022 23:37:30 +0000 Subject: libbpf: Init btf_{key,value}_type_id on internal map open For internal and user maps, look up the key and value btf types on open() and not load(), so that `bpf_map__btf_value_type_id` is usable in `bpftool gen`.
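Assuming it behaves as described, a hypothetical caller can now do the following (object file and map names are made up):

    struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
    struct bpf_map *map = bpf_object__find_map_by_name(obj, ".rodata");

    /* valid right after open, no bpf_object__load() required */
    __u32 value_type_id = bpf_map__btf_value_type_id(map);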
Signed-off-by: Delyan Kratunov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/78dbe4e457b4a05e098fc6c8f50014b680c86e4e.1647473511.git.delyank@fb.com --- tools/lib/bpf/libbpf.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 3b7eb6dcc2f4..9de42d1556ee 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1517,6 +1517,9 @@ static char *internal_map_name(struct bpf_object *obj, const char *real_name) return strdup(map_name); } +static int +bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map); + static int bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type, const char *real_name, int sec_idx, void *data, size_t data_sz) @@ -1564,6 +1567,9 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type, return err; } + /* failures are fine because of maps like .rodata.str1.1 */ + (void) bpf_map_find_btf_info(obj, map); + if (data) memcpy(map->mmaped, data, data_sz); @@ -2046,6 +2052,9 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict) } memcpy(&map->def, def, sizeof(struct bpf_map_def)); } + + /* btf info may not exist but fill it in if it does exist */ + (void) bpf_map_find_btf_info(obj, map); } return 0; } @@ -2534,6 +2543,10 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj, fill_map_from_def(map->inner_map, &inner_def); } + err = bpf_map_find_btf_info(obj, map); + if (err) + return err; + return 0; } @@ -4873,7 +4886,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b if (bpf_map__is_struct_ops(map)) create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id; - if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) { + if (obj->btf && btf__fd(obj->btf) >= 0) { create_attr.btf_fd = btf__fd(obj->btf); create_attr.btf_key_type_id = map->btf_key_type_id; create_attr.btf_value_type_id = map->btf_value_type_id; -- cgit v1.2.3 From 430025e5dca5ad8902e7c3092b7c975acae154c5 Mon Sep 17 00:00:00 2001 From: Delyan Kratunov Date: Wed, 16 Mar 2022 23:37:33 +0000 Subject: libbpf: Add subskeleton scaffolding In symmetry with bpf_object__open_skeleton(), bpf_object__open_subskeleton() performs the actual walking and linking of maps, progs, and globals described by bpf_*_skeleton objects. 
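Roughly, a generated subskeleton is expected to drive this API as follows (a hand-written sketch, not actual bpftool gen output; 'obj' is assumed to be an already-opened object):

    struct bpf_object_subskeleton *s;
    int err;

    s = calloc(1, sizeof(*s));
    if (!s)
            return -ENOMEM;
    s->sz = sizeof(*s);
    s->obj = obj;   /* assumption: an already-opened struct bpf_object */
    /* generated code allocates and fills s->maps, s->progs, s->vars here */

    err = bpf_object__open_subskeleton(s);  /* resolves maps, progs, vars */
    ...
    bpf_object__destroy_subskeleton(s);     /* frees the arrays and s */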
Signed-off-by: Delyan Kratunov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/6942a46fbe20e7ebf970affcca307ba616985b15.1647473511.git.delyank@fb.com --- tools/lib/bpf/libbpf.c | 139 ++++++++++++++++++++++++++++++++++++++++------- tools/lib/bpf/libbpf.h | 29 ++++++++++ tools/lib/bpf/libbpf.map | 2 + 3 files changed, 149 insertions(+), 21 deletions(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 9de42d1556ee..7526419e59e0 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -11989,6 +11989,49 @@ int libbpf_num_possible_cpus(void) return tmp_cpus; } +static int populate_skeleton_maps(const struct bpf_object *obj, + struct bpf_map_skeleton *maps, + size_t map_cnt) +{ + int i; + + for (i = 0; i < map_cnt; i++) { + struct bpf_map **map = maps[i].map; + const char *name = maps[i].name; + void **mmaped = maps[i].mmaped; + + *map = bpf_object__find_map_by_name(obj, name); + if (!*map) { + pr_warn("failed to find skeleton map '%s'\n", name); + return -ESRCH; + } + + /* externs shouldn't be pre-setup from user code */ + if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG) + *mmaped = (*map)->mmaped; + } + return 0; +} + +static int populate_skeleton_progs(const struct bpf_object *obj, + struct bpf_prog_skeleton *progs, + size_t prog_cnt) +{ + int i; + + for (i = 0; i < prog_cnt; i++) { + struct bpf_program **prog = progs[i].prog; + const char *name = progs[i].name; + + *prog = bpf_object__find_program_by_name(obj, name); + if (!*prog) { + pr_warn("failed to find skeleton program '%s'\n", name); + return -ESRCH; + } + } + return 0; +} + int bpf_object__open_skeleton(struct bpf_object_skeleton *s, const struct bpf_object_open_opts *opts) { @@ -11996,7 +12039,7 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s, .object_name = s->name, ); struct bpf_object *obj; - int i, err; + int err; /* Attempt to preserve opts->object_name, unless overriden by user * explicitly. 
Overwriting object name for skeletons is discouraged, @@ -12019,37 +12062,91 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s, } *s->obj = obj; + err = populate_skeleton_maps(obj, s->maps, s->map_cnt); + if (err) { + pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err); + return libbpf_err(err); + } - for (i = 0; i < s->map_cnt; i++) { - struct bpf_map **map = s->maps[i].map; - const char *name = s->maps[i].name; - void **mmaped = s->maps[i].mmaped; + err = populate_skeleton_progs(obj, s->progs, s->prog_cnt); + if (err) { + pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err); + return libbpf_err(err); + } - *map = bpf_object__find_map_by_name(obj, name); - if (!*map) { - pr_warn("failed to find skeleton map '%s'\n", name); - return libbpf_err(-ESRCH); - } + return 0; +} - /* externs shouldn't be pre-setup from user code */ - if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG) - *mmaped = (*map)->mmaped; +int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s) +{ + int err, len, var_idx, i; + const char *var_name; + const struct bpf_map *map; + struct btf *btf; + __u32 map_type_id; + const struct btf_type *map_type, *var_type; + const struct bpf_var_skeleton *var_skel; + struct btf_var_secinfo *var; + + if (!s->obj) + return libbpf_err(-EINVAL); + + btf = bpf_object__btf(s->obj); + if (!btf) { + pr_warn("subskeletons require BTF at runtime (object %s)\n", + bpf_object__name(s->obj)); + return libbpf_err(-errno); } - for (i = 0; i < s->prog_cnt; i++) { - struct bpf_program **prog = s->progs[i].prog; - const char *name = s->progs[i].name; + err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt); + if (err) { + pr_warn("failed to populate subskeleton maps: %d\n", err); + return libbpf_err(err); + } - *prog = bpf_object__find_program_by_name(obj, name); - if (!*prog) { - pr_warn("failed to find skeleton program '%s'\n", name); - return libbpf_err(-ESRCH); - } + err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt); + if (err) { + pr_warn("failed to populate subskeleton maps: %d\n", err); + return libbpf_err(err); } + for (var_idx = 0; var_idx < s->var_cnt; var_idx++) { + var_skel = &s->vars[var_idx]; + map = *var_skel->map; + map_type_id = bpf_map__btf_value_type_id(map); + map_type = btf__type_by_id(btf, map_type_id); + + if (!btf_is_datasec(map_type)) { + pr_warn("type for map '%1$s' is not a datasec: %2$s", + bpf_map__name(map), + __btf_kind_str(btf_kind(map_type))); + return libbpf_err(-EINVAL); + } + + len = btf_vlen(map_type); + var = btf_var_secinfos(map_type); + for (i = 0; i < len; i++, var++) { + var_type = btf__type_by_id(btf, var->type); + var_name = btf__name_by_offset(btf, var_type->name_off); + if (strcmp(var_name, var_skel->name) == 0) { + *var_skel->addr = map->mmaped + var->offset; + break; + } + } + } return 0; } +void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s) +{ + if (!s) + return; + free(s->maps); + free(s->progs); + free(s->vars); + free(s); +} + int bpf_object__load_skeleton(struct bpf_object_skeleton *s) { int i, err; diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index d5239fb4abdc..05dde85e19a6 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -1312,6 +1312,35 @@ LIBBPF_API int bpf_object__attach_skeleton(struct bpf_object_skeleton *s); LIBBPF_API void bpf_object__detach_skeleton(struct bpf_object_skeleton *s); LIBBPF_API void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s); +struct bpf_var_skeleton { + const char *name; 
+ struct bpf_map **map; + void **addr; +}; + +struct bpf_object_subskeleton { + size_t sz; /* size of this struct, for forward/backward compatibility */ + + const struct bpf_object *obj; + + int map_cnt; + int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */ + struct bpf_map_skeleton *maps; + + int prog_cnt; + int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */ + struct bpf_prog_skeleton *progs; + + int var_cnt; + int var_skel_sz; /* sizeof(struct bpf_var_skeleton) */ + struct bpf_var_skeleton *vars; +}; + +LIBBPF_API int +bpf_object__open_subskeleton(struct bpf_object_subskeleton *s); +LIBBPF_API void +bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s); + struct gen_loader_opts { size_t sz; /* size of this struct, for forward/backward compatiblity */ const char *data; diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 554c56e6e5d3..dd35ee58bfaa 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -442,6 +442,8 @@ LIBBPF_0.7.0 { LIBBPF_0.8.0 { global: + bpf_object__destroy_subskeleton; + bpf_object__open_subskeleton; libbpf_register_prog_handler; libbpf_unregister_prog_handler; bpf_program__attach_kprobe_multi_opts; } LIBBPF_0.7.0; -- cgit v1.2.3 From a8fee96202e279441d0e52d83eb100bd4a6d6272 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Sat, 19 Mar 2022 17:19:11 -0700 Subject: libbpf: Avoid NULL deref when initializing map BTF info If a BPF object doesn't have BTF info, don't attempt to search for BTF types describing the BPF map key or value layout. Fixes: 262cfb74ffda ("libbpf: Init btf_{key,value}_type_id on internal map open") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Yonghong Song Link: https://lore.kernel.org/bpf/20220320001911.3640917-1-andrii@kernel.org --- tools/lib/bpf/libbpf.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 7526419e59e0..2efe9431c1ba 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -4197,6 +4197,9 @@ static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map) __u32 key_type_id = 0, value_type_id = 0; int ret; + if (!obj->btf) + return -ENOENT; + /* if it's BTF-defined map, we don't need to search for type IDs. * For struct_ops map, it does not need btf_key_type_id and * btf_value_type_id. -- cgit v1.2.3 From d0f325c34c2fbe15f6774f2b628224280b571ae9 Mon Sep 17 00:00:00 2001 From: Hengqi Chen Date: Sat, 19 Mar 2022 11:05:33 +0800 Subject: libbpf: Close fd in bpf_object__reuse_map pin_fd is dup-ed and assigned in bpf_map__reuse_fd. Close it in bpf_object__reuse_map after reuse. Signed-off-by: Hengqi Chen Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20220319030533.3132250-1-hengqi.chen@gmail.com --- tools/lib/bpf/libbpf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/lib/bpf') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 2efe9431c1ba..809fe209cdcc 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -4823,8 +4823,8 @@ bpf_object__reuse_map(struct bpf_map *map) } err = bpf_map__reuse_fd(map, pin_fd); + close(pin_fd); if (err) { - close(pin_fd); return err; } map->pinned = true; -- cgit v1.2.3