author     David S. Miller <davem@davemloft.net>  2020-02-21 15:22:45 -0800
committer  David S. Miller <davem@davemloft.net>  2020-02-21 15:22:45 -0800
commit     b105e8e281ac2dbea4229982ad57fbefab05963d (patch)
tree       58ea551205dd646d809c789fccb2381b60b00d9e /tools/lib
parent     e65ee2fb54d4745d7b7d9061d7fe33c5c5bf3b06 (diff)
parent     eb1e1478b6f4e70d99fee3f49bb7f7143c8c871d (diff)
download   linux-b105e8e281ac2dbea4229982ad57fbefab05963d.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2020-02-21

The following pull-request contains BPF updates for your *net-next* tree.

We've added 25 non-merge commits during the last 4 day(s) which contain
a total of 33 files changed, 2433 insertions(+), 161 deletions(-).

The main changes are:

1) Allow for adding TCP listen sockets into sock_map/hash so they can be
   used with reuseport BPF programs, from Jakub Sitnicki.

2) Add a new bpf_program__set_attach_target() helper for adding libbpf
   support to specify the tracepoint/function dynamically, from Eelco
   Chaudron.

3) Add bpf_read_branch_records() BPF helper which helps use cases like
   profile guided optimizations, from Daniel Xu.

4) Enable bpf_perf_event_read_value() in all tracing programs, from Song Liu.

5) Relax BTF mandatory check if only used for libbpf itself, e.g. to process
   BTF defined maps, from Andrii Nakryiko.

6) Move BPF selftests -mcpu compilation attribute from 'probe' to 'v3' as it
   has been observed that the former fails in envs with low memlock,
   from Yonghong Song.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
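To illustrate item 2, here is a minimal usage sketch; it is not part of this
series. The object path, program name and kernel function name are
placeholders, and the sketch assumes the existing libbpf 0.0.x API
(bpf_object__open(), bpf_object__find_program_by_name(), libbpf_get_error()).

/* Hedged sketch, not from this series: pick the tracing attach target at
 * runtime instead of hard-coding it in the SEC() name. obj_path, prog_name
 * and kernel_func are illustrative placeholders.
 */
#include <errno.h>
#include <bpf/libbpf.h>

static int load_with_dynamic_target(const char *obj_path,
                                    const char *prog_name,
                                    const char *kernel_func)
{
        struct bpf_object *obj;
        struct bpf_program *prog;
        int err;

        obj = bpf_object__open(obj_path);
        if (libbpf_get_error(obj))
                return -EINVAL;

        prog = bpf_object__find_program_by_name(obj, prog_name);
        if (!prog) {
                err = -ENOENT;
                goto out;
        }

        /* attach_prog_fd == 0: resolve kernel_func in vmlinux BTF using the
         * program's expected_attach_type (see the libbpf.c hunk below)
         */
        err = bpf_program__set_attach_target(prog, 0, kernel_func);
        if (err)
                goto out;

        err = bpf_object__load(obj);
        if (err)
                goto out;

        return 0;       /* object stays open so the program can be attached */
out:
        bpf_object__close(obj);
        return err;
}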
Diffstat (limited to 'tools/lib')
-rw-r--r--  tools/lib/bpf/libbpf.c    38
-rw-r--r--  tools/lib/bpf/libbpf.h     4
-rw-r--r--  tools/lib/bpf/libbpf.map   5
3 files changed, 40 insertions, 7 deletions
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 7469c7dcc15e..996162801f7a 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -2286,9 +2286,7 @@ static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
 
 static bool bpf_object__is_btf_mandatory(const struct bpf_object *obj)
 {
-        return obj->efile.btf_maps_shndx >= 0 ||
-               obj->efile.st_ops_shndx >= 0 ||
-               obj->nr_extern > 0;
+        return obj->efile.st_ops_shndx >= 0 || obj->nr_extern > 0;
 }
 
 static int bpf_object__init_btf(struct bpf_object *obj,
@@ -4945,8 +4943,8 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
 {
         int err = 0, fd, i, btf_id;
 
-        if (prog->type == BPF_PROG_TYPE_TRACING ||
-            prog->type == BPF_PROG_TYPE_EXT) {
+        if ((prog->type == BPF_PROG_TYPE_TRACING ||
+             prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
                 btf_id = libbpf_find_attach_btf_id(prog);
                 if (btf_id <= 0)
                         return btf_id;
@@ -6589,6 +6587,9 @@ static inline int __find_vmlinux_btf_id(struct btf *btf, const char *name,
         else
                 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
 
+        if (err <= 0)
+                pr_warn("%s is not found in vmlinux BTF\n", name);
+
         return err;
 }
 
@@ -6661,8 +6662,6 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog)
                         err = __find_vmlinux_btf_id(prog->obj->btf_vmlinux,
                                                     name + section_defs[i].len,
                                                     attach_type);
-                if (err <= 0)
-                        pr_warn("%s is not found in vmlinux BTF\n", name);
                 return err;
         }
         pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name);
@@ -8138,6 +8137,31 @@ void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
         }
 }
 
+int bpf_program__set_attach_target(struct bpf_program *prog,
+                                   int attach_prog_fd,
+                                   const char *attach_func_name)
+{
+        int btf_id;
+
+        if (!prog || attach_prog_fd < 0 || !attach_func_name)
+                return -EINVAL;
+
+        if (attach_prog_fd)
+                btf_id = libbpf_find_prog_btf_id(attach_func_name,
+                                                 attach_prog_fd);
+        else
+                btf_id = __find_vmlinux_btf_id(prog->obj->btf_vmlinux,
+                                               attach_func_name,
+                                               prog->expected_attach_type);
+
+        if (btf_id < 0)
+                return btf_id;
+
+        prog->attach_btf_id = btf_id;
+        prog->attach_prog_fd = attach_prog_fd;
+        return 0;
+}
+
 int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
 {
         int err = 0, n, len, start, end = -1;
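The attach_prog_fd != 0 branch of the new helper resolves attach_func_name in
another BPF program's BTF via libbpf_find_prog_btf_id() rather than in vmlinux
BTF. A hedged sketch of that path follows; it is not part of this series,
target_prog_id and target_func are placeholders, and bpf_prog_get_fd_by_id()
from <bpf/bpf.h> is assumed for obtaining the target fd.

/* Hedged sketch, not from this series: target a function inside an
 * already-loaded BPF program (e.g. for a SEC("freplace/...") program).
 * target_prog_id and target_func are illustrative placeholders.
 */
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int set_prog_target(struct bpf_program *prog, __u32 target_prog_id,
                           const char *target_func)
{
        int target_fd, err;

        target_fd = bpf_prog_get_fd_by_id(target_prog_id);
        if (target_fd < 0)
                return target_fd;

        /* non-zero fd: btf_id is looked up in the target program's BTF */
        err = bpf_program__set_attach_target(prog, target_fd, target_func);
        if (err) {
                close(target_fd);
                return err;
        }

        /* the fd is recorded as prog->attach_prog_fd and must stay open
         * until the program has been loaded and attached
         */
        return 0;
}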
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 3fe12c9d1f92..02fc58a21a7f 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -334,6 +334,10 @@ LIBBPF_API void
 bpf_program__set_expected_attach_type(struct bpf_program *prog,
                                       enum bpf_attach_type type);
 
+LIBBPF_API int
+bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd,
+                               const char *attach_func_name);
+
 LIBBPF_API bool bpf_program__is_socket_filter(const struct bpf_program *prog);
 LIBBPF_API bool bpf_program__is_tracepoint(const struct bpf_program *prog);
 LIBBPF_API bool bpf_program__is_raw_tracepoint(const struct bpf_program *prog);
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index b035122142bb..7b014c8cdece 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -235,3 +235,8 @@ LIBBPF_0.0.7 {
                 btf__align_of;
                 libbpf_find_kernel_btf;
 } LIBBPF_0.0.6;
+
+LIBBPF_0.0.8 {
+        global:
+                bpf_program__set_attach_target;
+} LIBBPF_0.0.7;