Diffstat (limited to 'tools/lib/bpf/bpf.c')
-rw-r--r--  tools/lib/bpf/bpf.c  136
1 file changed, 54 insertions(+), 82 deletions(-)
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index cf27251adb92..240186aac8e6 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -208,86 +208,6 @@ int bpf_map_create(enum bpf_map_type map_type,
return libbpf_err_errno(fd);
}
-int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
-{
- LIBBPF_OPTS(bpf_map_create_opts, p);
-
- p.map_flags = create_attr->map_flags;
- p.numa_node = create_attr->numa_node;
- p.btf_fd = create_attr->btf_fd;
- p.btf_key_type_id = create_attr->btf_key_type_id;
- p.btf_value_type_id = create_attr->btf_value_type_id;
- p.map_ifindex = create_attr->map_ifindex;
- if (create_attr->map_type == BPF_MAP_TYPE_STRUCT_OPS)
- p.btf_vmlinux_value_type_id = create_attr->btf_vmlinux_value_type_id;
- else
- p.inner_map_fd = create_attr->inner_map_fd;
-
- return bpf_map_create(create_attr->map_type, create_attr->name,
- create_attr->key_size, create_attr->value_size,
- create_attr->max_entries, &p);
-}
-
-int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
- int key_size, int value_size, int max_entries,
- __u32 map_flags, int node)
-{
- LIBBPF_OPTS(bpf_map_create_opts, opts);
-
- opts.map_flags = map_flags;
- if (node >= 0) {
- opts.numa_node = node;
- opts.map_flags |= BPF_F_NUMA_NODE;
- }
-
- return bpf_map_create(map_type, name, key_size, value_size, max_entries, &opts);
-}
-
-int bpf_create_map(enum bpf_map_type map_type, int key_size,
- int value_size, int max_entries, __u32 map_flags)
-{
- LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = map_flags);
-
- return bpf_map_create(map_type, NULL, key_size, value_size, max_entries, &opts);
-}
-
-int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
- int key_size, int value_size, int max_entries,
- __u32 map_flags)
-{
- LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = map_flags);
-
- return bpf_map_create(map_type, name, key_size, value_size, max_entries, &opts);
-}
-
-int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
- int key_size, int inner_map_fd, int max_entries,
- __u32 map_flags, int node)
-{
- LIBBPF_OPTS(bpf_map_create_opts, opts);
-
- opts.inner_map_fd = inner_map_fd;
- opts.map_flags = map_flags;
- if (node >= 0) {
- opts.map_flags |= BPF_F_NUMA_NODE;
- opts.numa_node = node;
- }
-
- return bpf_map_create(map_type, name, key_size, 4, max_entries, &opts);
-}
-
-int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
- int key_size, int inner_map_fd, int max_entries,
- __u32 map_flags)
-{
- LIBBPF_OPTS(bpf_map_create_opts, opts,
- .inner_map_fd = inner_map_fd,
- .map_flags = map_flags,
- );
-
- return bpf_map_create(map_type, name, key_size, 4, max_entries, &opts);
-}
-
static void *
alloc_zero_tailing_info(const void *orecord, __u32 cnt,
__u32 actual_rec_size, __u32 expected_rec_size)
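The wrappers removed above are superseded by bpf_map_create() plus struct bpf_map_create_opts. A minimal migration sketch, assuming a caller that previously used bpf_create_map_node(); the map name, key/value sizes, max_entries and NUMA node below are illustrative, not part of the patch:

#include <bpf/bpf.h>

/* was: bpf_create_map_node(BPF_MAP_TYPE_HASH, "example_map",
 *                          sizeof(__u32), sizeof(__u64), 1024, 0, 1);
 */
static int create_example_map(void)
{
        LIBBPF_OPTS(bpf_map_create_opts, opts,
                .map_flags = BPF_F_NUMA_NODE,   /* only set together with numa_node */
                .numa_node = 1,                 /* illustrative NUMA node */
        );

        return bpf_map_create(BPF_MAP_TYPE_HASH, "example_map",
                              sizeof(__u32), sizeof(__u64), 1024, &opts);
}

For the map-in-map wrappers, the deleted code shows the equivalent: set opts.inner_map_fd and pass a value size of 4 to bpf_map_create().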
@@ -639,6 +559,20 @@ int bpf_map_delete_elem(int fd, const void *key)
return libbpf_err_errno(ret);
}
+int bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags)
+{
+ union bpf_attr attr;
+ int ret;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.map_fd = fd;
+ attr.key = ptr_to_u64(key);
+ attr.flags = flags;
+
+ ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
+ return libbpf_err_errno(ret);
+}
+
int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
union bpf_attr attr;
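A hedged usage sketch for the new bpf_map_delete_elem_flags(): map_fd and the key are assumptions for the example, and whether a particular flag (BPF_F_LOCK is shown here) is honoured for deletes depends on the map type and kernel version:

#include <bpf/bpf.h>

/* map_fd: fd of an existing map (assumed); key must match its key_size */
static int delete_elem_locked(int map_fd, const void *key)
{
        /* flags == 0 behaves like bpf_map_delete_elem(); BPF_F_LOCK is an
         * example flag and may not be accepted for every map type
         */
        return bpf_map_delete_elem_flags(map_fd, key, BPF_F_LOCK);
}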
@@ -817,7 +751,7 @@ int bpf_link_create(int prog_fd, int target_fd,
{
__u32 target_btf_id, iter_info_len;
union bpf_attr attr;
- int fd;
+ int fd, err;
if (!OPTS_VALID(opts, bpf_link_create_opts))
return libbpf_err(-EINVAL);
@@ -863,6 +797,14 @@ int bpf_link_create(int prog_fd, int target_fd,
if (!OPTS_ZEROED(opts, kprobe_multi))
return libbpf_err(-EINVAL);
break;
+ case BPF_TRACE_FENTRY:
+ case BPF_TRACE_FEXIT:
+ case BPF_MODIFY_RETURN:
+ case BPF_LSM_MAC:
+ attr.link_create.tracing.cookie = OPTS_GET(opts, tracing.cookie, 0);
+ if (!OPTS_ZEROED(opts, tracing))
+ return libbpf_err(-EINVAL);
+ break;
default:
if (!OPTS_ZEROED(opts, flags))
return libbpf_err(-EINVAL);
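With the new BPF_TRACE_FENTRY/FEXIT/MODIFY_RETURN/LSM_MAC cases, callers can pass a BPF cookie through bpf_link_create_opts. A minimal sketch, assuming prog_fd refers to an already-loaded fentry program; the cookie value is illustrative:

#include <bpf/bpf.h>

/* prog_fd: fd of a loaded fentry/fexit/fmod_ret/LSM program (assumed) */
static int attach_with_cookie(int prog_fd)
{
        LIBBPF_OPTS(bpf_link_create_opts, opts,
                .tracing.cookie = 0x1234,       /* illustrative cookie value */
        );

        return bpf_link_create(prog_fd, 0, BPF_TRACE_FENTRY, &opts);
}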
@@ -870,7 +812,37 @@ int bpf_link_create(int prog_fd, int target_fd,
}
proceed:
fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, sizeof(attr));
- return libbpf_err_errno(fd);
+ if (fd >= 0)
+ return fd;
+ /* we'll get EINVAL if LINK_CREATE doesn't support attaching fentry
+ * and other similar programs
+ */
+ err = -errno;
+ if (err != -EINVAL)
+ return libbpf_err(err);
+
+ /* if user used features not supported by
+ * BPF_RAW_TRACEPOINT_OPEN command, then just give up immediately
+ */
+ if (attr.link_create.target_fd || attr.link_create.target_btf_id)
+ return libbpf_err(err);
+ if (!OPTS_ZEROED(opts, sz))
+ return libbpf_err(err);
+
+ /* otherwise, for few select kinds of programs that can be
+ * attached using BPF_RAW_TRACEPOINT_OPEN command, try that as
+ * a fallback for older kernels
+ */
+ switch (attach_type) {
+ case BPF_TRACE_RAW_TP:
+ case BPF_LSM_MAC:
+ case BPF_TRACE_FENTRY:
+ case BPF_TRACE_FEXIT:
+ case BPF_MODIFY_RETURN:
+ return bpf_raw_tracepoint_open(NULL, prog_fd);
+ default:
+ return libbpf_err(err);
+ }
}
int bpf_link_detach(int link_fd)
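The fallback added above is transparent to callers: when the kernel's LINK_CREATE rejects these attach types with EINVAL and no target or extra options were requested, the same call is retried via BPF_RAW_TRACEPOINT_OPEN. A caller-side sketch, again assuming prog_fd is a loaded fentry program:

#include <unistd.h>
#include <bpf/bpf.h>

static void attach_and_detach(int prog_fd)
{
        /* no target and no opts: eligible for the BPF_RAW_TRACEPOINT_OPEN
         * fallback on kernels whose LINK_CREATE rejects BPF_TRACE_FENTRY
         */
        int fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FENTRY, NULL);

        if (fd >= 0)
                close(fd);      /* closing the fd detaches in both cases */
}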