commit ee5a489fd9645104925e5cdf8f8e455d833730b9
Merge: e2193c9334291ecdc437cdbd9fe9ac35c14fffa8 196e8ca74886c433dcfc64a809707074b936aaf5
Author:    David S. Miller <davem@davemloft.net>  2019-11-20 18:11:23 -0800
Committer: David S. Miller <davem@davemloft.net>  2019-11-20 18:11:23 -0800
Tree:      1e46a8c460e1d51d465fe472e42cf1c16f92f9c7 /tools
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:
====================
pull-request: bpf-next 2019-11-20
The following pull-request contains BPF updates for your *net-next* tree.
We've added 81 non-merge commits during the last 17 day(s) which contain
a total of 120 files changed, 4958 insertions(+), 1081 deletions(-).
There are 3 trivial conflicts; resolve them by always taking the chunk from
196e8ca74886c433:
<<<<<<< HEAD
=======
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
>>>>>>> 196e8ca74886c433dcfc64a809707074b936aaf5
<<<<<<< HEAD
void *bpf_map_area_alloc(u64 size, int numa_node)
=======
static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
>>>>>>> 196e8ca74886c433dcfc64a809707074b936aaf5
<<<<<<< HEAD
if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
=======
/* kmalloc()'ed memory can't be mmap()'ed */
if (!mmapable && size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
>>>>>>> 196e8ca74886c433dcfc64a809707074b936aaf5
The main changes are:
1) Addition of BPF trampoline, which works as a bridge between kernel functions,
BPF programs and other BPF programs, along with two new use cases: i) fentry/fexit
BPF programs for tracing with practically zero overhead to call into BPF (as
opposed to k[ret]probes), and ii) attachment of the former to networking related
programs to see the input/output of networking programs (covering the xdpdump use
case), from Alexei Starovoitov. (A minimal fentry sketch follows the sign-off below.)
2) BPF array map mmap support and its use in libbpf for global data maps (a
user-space sketch follows the diffstat below); also a big batch of libbpf
improvements, among others, support for reading bitfields in a relocatable
manner (via libbpf's CO-RE helper API), from Andrii Nakryiko.
3) Extend s390x JIT with usage of relative long jumps and loads in order to lift
the current 64/512k size limits on JITed BPF programs there, from Ilya Leoshkevich.
4) Add BPF audit support and emit messages upon successful prog load and unload in
order to have a timeline of events, from Daniel Borkmann and Jiri Olsa.
5) Extension to libbpf and xdpsock sample programs to demo the shared umem mode
(XDP_SHARED_UMEM) as well as RX-only and TX-only sockets, from Magnus Karlsson.
6) Several follow-up bug fixes for libbpf's auto-pinning code and a new API
call named bpf_get_link_xdp_info() for retrieving the full set of prog
IDs attached to XDP, from Toke Høiland-Jørgensen.
7) Add BTF support for array of int, array of struct and multidimensional arrays
and enable it for skb->cb[] access in kfree_skb test, from Martin KaFai Lau.
8) Fix AF_XDP by using the correct number of channels from ethtool, from Luigi Rizzo.
9) Two fixes for BPF selftests: get rid of a hang in test_tc_tunnel, and avoid
running xdping as a standalone test, from Jiri Benc.
10) Various BPF selftest fixes when run with latest LLVM trunk, from Yonghong Song.
11) Fix a memory leak in BPF fentry test run data, from Colin Ian King.
12) Various smaller misc cleanups and improvements, mostly across BPF selftests and
samples, from Daniel T. Lee, Andre Guedes, Anders Roxell, Mao Wenan and Yue Haibing.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
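To make the fentry/fexit programs from item (1) concrete, here is a minimal sketch in the style of the selftests this series adds. It is illustrative only: bpf_fentry_test1() is a kernel self-test function, and the 8-byte-aligned ks32 typedef comes from the bpf_helpers.h change in the diff below (the BPF trampoline passes all arguments widened to u64).

```c
#include <linux/bpf.h>
#include "bpf_helpers.h"

/* arguments of the traced kernel function, each widened to 8 bytes */
struct test1_args {
	ks32 a;
};

static volatile __u64 test1_result;

SEC("fentry/bpf_fentry_test1")
int test1(struct test1_args *ctx)
{
	/* runs right before bpf_fentry_test1(), with near-zero call overhead */
	test1_result = ctx->a == 1;
	return 0;
}

char _license[] SEC("license") = "GPL";
```

Such a program loads as BPF_PROG_TYPE_TRACING and is attached with the new bpf_program__attach_trace() API added further down in libbpf.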
Diffstat (limited to 'tools')
58 files changed, 2225 insertions(+), 340 deletions(-)
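As a hedged illustration of the new BPF_F_MMAPABLE flag from item (2): a user-space sketch that creates an mmap-able array map and pokes its values directly, with no bpf_map_update_elem() round trips. The map name and sizes are arbitrary, and error handling is minimal.

```c
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

int main(void)
{
	struct bpf_create_map_attr attr = {
		.name = "mmapable",
		.map_type = BPF_MAP_TYPE_ARRAY,
		.map_flags = BPF_F_MMAPABLE,
		.key_size = sizeof(int),
		.value_size = sizeof(long),
		.max_entries = 512,
	};
	size_t sz = 512 * sizeof(long);
	long *vals;
	int fd;

	fd = bpf_create_map_xattr(&attr);
	if (fd < 0)
		return 1;
	/* value memory is exposed directly to user space */
	vals = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (vals == MAP_FAILED)
		return 1;
	vals[0] = 42;	/* immediately visible to BPF programs using the map */
	printf("vals[0] = %ld\n", vals[0]);
	munmap(vals, sz);
	close(fd);
	return 0;
}
```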
diff --git a/tools/bpf/bpf_exp.y b/tools/bpf/bpf_exp.y
index 56ba1de50784..8d48e896be50 100644
--- a/tools/bpf/bpf_exp.y
+++ b/tools/bpf/bpf_exp.y
@@ -545,6 +545,16 @@ static void bpf_reduce_k_jumps(void)
 	}
 }
 
+static uint8_t bpf_encode_jt_jf_offset(int off, int i)
+{
+	int delta = off - i - 1;
+
+	if (delta < 0 || delta > 255)
+		fprintf(stderr, "warning: insn #%d jumps to insn #%d, "
+				"which is out of range\n", i, off);
+	return (uint8_t) delta;
+}
+
 static void bpf_reduce_jt_jumps(void)
 {
 	int i;
@@ -552,7 +562,7 @@ static void bpf_reduce_jt_jumps(void)
 	for (i = 0; i < curr_instr; i++) {
 		if (labels_jt[i]) {
 			int off = bpf_find_insns_offset(labels_jt[i]);
-			out[i].jt = (uint8_t) (off - i -1);
+			out[i].jt = bpf_encode_jt_jf_offset(off, i);
 		}
 	}
 }
@@ -564,7 +574,7 @@ static void bpf_reduce_jf_jumps(void)
 	for (i = 0; i < curr_instr; i++) {
 		if (labels_jf[i]) {
 			int off = bpf_find_insns_offset(labels_jf[i]);
-			out[i].jf = (uint8_t) (off - i - 1);
+			out[i].jf = bpf_encode_jt_jf_offset(off, i);
 		}
 	}
 }
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index df6809a76404..dbbcf0b02970 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -201,6 +201,8 @@ enum bpf_attach_type {
 	BPF_CGROUP_GETSOCKOPT,
 	BPF_CGROUP_SETSOCKOPT,
 	BPF_TRACE_RAW_TP,
+	BPF_TRACE_FENTRY,
+	BPF_TRACE_FEXIT,
 	__MAX_BPF_ATTACH_TYPE
 };
 
@@ -346,6 +348,9 @@ enum bpf_attach_type {
 /* Clone map from listener for newly accepted socket */
 #define BPF_F_CLONE		(1U << 9)
 
+/* Enable memory-mapping BPF map */
+#define BPF_F_MMAPABLE		(1U << 10)
+
 /* flags for BPF_PROG_QUERY */
 #define BPF_F_QUERY_EFFECTIVE	(1U << 0)
 
@@ -423,6 +428,7 @@ union bpf_attr {
 		__aligned_u64	line_info;	/* line info */
 		__u32		line_info_cnt;	/* number of bpf_line_info records */
 		__u32		attach_btf_id;	/* in-kernel BTF type id to attach to */
+		__u32		attach_prog_fd; /* 0 to attach to vmlinux */
 	};
 
 	struct { /* anonymous struct used by BPF_OBJ_* commands */
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index ca0d635b1d5e..98596e15390f 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -189,7 +189,7 @@ static void *
 alloc_zero_tailing_info(const void *orecord, __u32 cnt,
 			__u32 actual_rec_size, __u32 expected_rec_size)
 {
-	__u64 info_len = actual_rec_size * cnt;
+	__u64 info_len = (__u64)actual_rec_size * cnt;
 	void *info, *nrecord;
 	int i;
 
@@ -228,10 +228,13 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
 	memset(&attr, 0, sizeof(attr));
 	attr.prog_type = load_attr->prog_type;
 	attr.expected_attach_type = load_attr->expected_attach_type;
-	if (attr.prog_type == BPF_PROG_TYPE_TRACING)
+	if (attr.prog_type == BPF_PROG_TYPE_TRACING) {
 		attr.attach_btf_id = load_attr->attach_btf_id;
-	else
+		attr.attach_prog_fd = load_attr->attach_prog_fd;
+	} else {
 		attr.prog_ifindex = load_attr->prog_ifindex;
+		attr.kern_version = load_attr->kern_version;
+	}
 	attr.insn_cnt = (__u32)load_attr->insns_cnt;
 	attr.insns = ptr_to_u64(load_attr->insns);
 	attr.license = ptr_to_u64(load_attr->license);
@@ -245,7 +248,6 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
 		attr.log_size = 0;
 	}
 
-	attr.kern_version = load_attr->kern_version;
 	attr.prog_btf_fd = load_attr->prog_btf_fd;
 	attr.func_info_rec_size = load_attr->func_info_rec_size;
 	attr.func_info_cnt = load_attr->func_info_cnt;
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 1c53bc5b4b3c..3c791fa8e68e 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -77,7 +77,10 @@ struct bpf_load_program_attr {
 	const struct bpf_insn *insns;
 	size_t insns_cnt;
 	const char *license;
-	__u32 kern_version;
+	union {
+		__u32 kern_version;
+		__u32 attach_prog_fd;
+	};
 	union {
 		__u32 prog_ifindex;
 		__u32 attach_btf_id;
diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
index a273df3784f4..7009dc90e012 100644
--- a/tools/lib/bpf/bpf_core_read.h
+++ b/tools/lib/bpf/bpf_core_read.h
@@ -12,9 +12,76 @@
  */
 enum bpf_field_info_kind {
 	BPF_FIELD_BYTE_OFFSET = 0,	/* field byte offset */
+	BPF_FIELD_BYTE_SIZE = 1,
 	BPF_FIELD_EXISTS = 2,		/* field existence in target kernel */
+	BPF_FIELD_SIGNED = 3,
+	BPF_FIELD_LSHIFT_U64 = 4,
+	BPF_FIELD_RSHIFT_U64 = 5,
 };
 
+#define __CORE_RELO(src, field, info)					      \
+	__builtin_preserve_field_info((src)->field, BPF_FIELD_##info)
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define __CORE_BITFIELD_PROBE_READ(dst, src, fld)			      \
+	bpf_probe_read((void *)dst,					      \
+		       __CORE_RELO(src, fld, BYTE_SIZE),		      \
+		       (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
+#else
+/* semantics of LSHIFT_64 assumes loading values into low-ordered bytes, so
+ * for big-endian we need to adjust destination pointer accordingly, based on
+ * field byte size
+ */
+#define __CORE_BITFIELD_PROBE_READ(dst, src, fld)			      \
+	bpf_probe_read((void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)),  \
+		       __CORE_RELO(src, fld, BYTE_SIZE),		      \
+		       (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
+#endif
+
+/*
+ * Extract bitfield, identified by s->field, and return its value as u64.
+ * All this is done in relocatable manner, so bitfield changes such as
+ * signedness, bit size, offset changes, this will be handled automatically.
+ * This version of macro is using bpf_probe_read() to read underlying integer
+ * storage. Macro functions as an expression and its return type is
+ * bpf_probe_read()'s return value: 0, on success, <0 on error.
+ */
+#define BPF_CORE_READ_BITFIELD_PROBED(s, field) ({			      \
+	unsigned long long val = 0;					      \
+									      \
+	__CORE_BITFIELD_PROBE_READ(&val, s, field);			      \
+	val <<= __CORE_RELO(s, field, LSHIFT_U64);			      \
+	if (__CORE_RELO(s, field, SIGNED))				      \
+		val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64);  \
+	else								      \
+		val = val >> __CORE_RELO(s, field, RSHIFT_U64);		      \
+	val;								      \
+})
+
+/*
+ * Extract bitfield, identified by s->field, and return its value as u64.
+ * This version of macro is using direct memory reads and should be used from
+ * BPF program types that support such functionality (e.g., typed raw
+ * tracepoints).
+ */
+#define BPF_CORE_READ_BITFIELD(s, field) ({				      \
+	const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
+	unsigned long long val;						      \
+									      \
+	switch (__CORE_RELO(s, field, BYTE_SIZE)) {			      \
+	case 1: val = *(const unsigned char *)p;			      \
+	case 2: val = *(const unsigned short *)p;			      \
+	case 4: val = *(const unsigned int *)p;				      \
+	case 8: val = *(const unsigned long long *)p;			      \
+	}								      \
+	val <<= __CORE_RELO(s, field, LSHIFT_U64);			      \
+	if (__CORE_RELO(s, field, SIGNED))				      \
+		val = ((long long)val) >> __CORE_RELO(s, field, RSHIFT_U64);  \
+	else								      \
+		val = val >> __CORE_RELO(s, field, RSHIFT_U64);		      \
+	val;								      \
+})
+
 /*
  * Convenience macro to check that field actually exists in target kernel's.
  * Returns:
@@ -25,6 +92,13 @@ enum bpf_field_info_kind {
 	__builtin_preserve_field_info(field, BPF_FIELD_EXISTS)
 
 /*
+ * Convenience macro to get byte size of a field. Works for integers,
+ * struct/unions, pointers, arrays, and enums.
+ */
+#define bpf_core_field_size(field)					      \
+	__builtin_preserve_field_info(field, BPF_FIELD_BYTE_SIZE)
+
+/*
  * bpf_core_read() abstracts away bpf_probe_read() call and captures offset
  * relocation for source address using __builtin_preserve_access_index()
  * built-in, provided by Clang.
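The bitfield macros above are easiest to see in use. The following fragment paraphrases the bitfield selftests added later in this series; the struct is a local "view" whose offsets, sizes and shifts CO-RE relocates against the target's BTF at load time, and all names are illustrative.

```c
#include <stdint.h>
#include "bpf_helpers.h"
#include "bpf_core_read.h"

struct pkt___local {
	uint8_t flags: 3;
	uint32_t len: 20;
} __attribute__((preserve_access_index));

static volatile char in[256];
static volatile struct { uint64_t flags, len; } out;

SEC("tp_btf/sys_enter")
int read_bitfields(void *ctx)
{
	struct pkt___local *p = (void *)in;

	/* direct variant, for program types that may read memory directly */
	out.flags = BPF_CORE_READ_BITFIELD(p, flags);
	/* probed variant, goes through bpf_probe_read() */
	out.len = BPF_CORE_READ_BITFIELD_PROBED(p, len);
	return 0;
}

char _license[] SEC("license") = "GPL";
```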
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index 0c7d28292898..c63ab1add126 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -44,4 +44,17 @@ enum libbpf_pin_type {
 	LIBBPF_PIN_BY_NAME,
 };
 
+/* The following types should be used by BPF_PROG_TYPE_TRACING program to
+ * access kernel function arguments. BPF trampoline and raw tracepoints
+ * typecast arguments to 'unsigned long long'.
+ */
+typedef int __attribute__((aligned(8))) ks32;
+typedef char __attribute__((aligned(8))) ks8;
+typedef short __attribute__((aligned(8))) ks16;
+typedef long long __attribute__((aligned(8))) ks64;
+typedef unsigned int __attribute__((aligned(8))) ku32;
+typedef unsigned char __attribute__((aligned(8))) ku8;
+typedef unsigned short __attribute__((aligned(8))) ku16;
+typedef unsigned long long __attribute__((aligned(8))) ku64;
+
 #endif
diff --git a/tools/lib/bpf/bpf_prog_linfo.c b/tools/lib/bpf/bpf_prog_linfo.c
index 8c67561c93b0..3ed1a27b5f7c 100644
--- a/tools/lib/bpf/bpf_prog_linfo.c
+++ b/tools/lib/bpf/bpf_prog_linfo.c
@@ -101,6 +101,7 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info)
 {
 	struct bpf_prog_linfo *prog_linfo;
 	__u32 nr_linfo, nr_jited_func;
+	__u64 data_sz;
 
 	nr_linfo = info->nr_line_info;
 
@@ -122,11 +123,11 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info)
 	/* Copy xlated line_info */
 	prog_linfo->nr_linfo = nr_linfo;
 	prog_linfo->rec_size = info->line_info_rec_size;
-	prog_linfo->raw_linfo = malloc(nr_linfo * prog_linfo->rec_size);
+	data_sz = (__u64)nr_linfo * prog_linfo->rec_size;
+	prog_linfo->raw_linfo = malloc(data_sz);
 	if (!prog_linfo->raw_linfo)
 		goto err_free;
-	memcpy(prog_linfo->raw_linfo, (void *)(long)info->line_info,
-	       nr_linfo * prog_linfo->rec_size);
+	memcpy(prog_linfo->raw_linfo, (void *)(long)info->line_info, data_sz);
 
 	nr_jited_func = info->nr_jited_ksyms;
 	if (!nr_jited_func ||
@@ -142,13 +143,12 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info)
 	/* Copy jited_line_info */
 	prog_linfo->nr_jited_func = nr_jited_func;
 	prog_linfo->jited_rec_size = info->jited_line_info_rec_size;
-	prog_linfo->raw_jited_linfo = malloc(nr_linfo *
-					     prog_linfo->jited_rec_size);
+	data_sz = (__u64)nr_linfo * prog_linfo->jited_rec_size;
+	prog_linfo->raw_jited_linfo = malloc(data_sz);
 	if (!prog_linfo->raw_jited_linfo)
 		goto err_free;
 	memcpy(prog_linfo->raw_jited_linfo,
-	       (void *)(long)info->jited_line_info,
-	       nr_linfo * prog_linfo->jited_rec_size);
+	       (void *)(long)info->jited_line_info, data_sz);
 
 	/* Number of jited_line_info per jited func */
 	prog_linfo->nr_jited_linfo_per_func = malloc(nr_jited_func *
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index d72e9a79dce1..88efa2bb7137 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -269,10 +269,9 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
 		t = btf__type_by_id(btf, type_id);
 	}
 
+done:
 	if (size < 0)
 		return -EINVAL;
-
-done:
 	if (nelems && size > UINT32_MAX / nelems)
 		return -E2BIG;
 
@@ -317,6 +316,28 @@ __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
 	return -ENOENT;
 }
 
+__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
+			     __u32 kind)
+{
+	__u32 i;
+
+	if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
+		return 0;
+
+	for (i = 1; i <= btf->nr_types; i++) {
+		const struct btf_type *t = btf->types[i];
+		const char *name;
+
+		if (btf_kind(t) != kind)
+			continue;
+		name = btf__name_by_offset(btf, t->name_off);
+		if (name && !strcmp(type_name, name))
+			return i;
+	}
+
+	return -ENOENT;
+}
+
 void btf__free(struct btf *btf)
 {
 	if (!btf)
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index b18994116a44..d9ac73a02cde 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -72,6 +72,8 @@ LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf);
 LIBBPF_API int btf__load(struct btf *btf);
 LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
 				   const char *type_name);
+LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf,
+					const char *type_name, __u32 kind);
 LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
 LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
 						  __u32 id);
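A quick sketch of the new lookup in practice: resolving a kernel function's BTF id the same way libbpf now does for fentry/fexit sections, via the freshly exported libbpf_find_vmlinux_btf_id() (declared in libbpf.h further down). The function name is illustrative.

```c
#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/libbpf.h>

int main(void)
{
	int id;

	/* same lookup libbpf performs for a SEC("fentry/tcp_v4_connect") prog */
	id = libbpf_find_vmlinux_btf_id("tcp_v4_connect", BPF_TRACE_FENTRY);
	if (id <= 0) {
		fprintf(stderr, "BTF id not found: %d\n", id);
		return 1;
	}
	printf("attach_btf_id = %d\n", id);
	return 0;
}
```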
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 7aa2a2a22cef..a7d183f7ac72 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -142,6 +142,8 @@ struct bpf_capabilities {
 	__u32 btf_func:1;
 	/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
 	__u32 btf_datasec:1;
+	/* BPF_F_MMAPABLE is supported for arrays */
+	__u32 array_mmap:1;
 };
 
 /*
@@ -189,6 +191,7 @@ struct bpf_program {
 	enum bpf_attach_type expected_attach_type;
 	__u32 attach_btf_id;
+	__u32 attach_prog_fd;
 	void *func_info;
 	__u32 func_info_rec_size;
 	__u32 func_info_cnt;
@@ -229,6 +232,7 @@ struct bpf_map {
 	enum libbpf_map_type libbpf_type;
 	char *pin_path;
 	bool pinned;
+	bool reused;
 };
 
 struct bpf_secdata {
@@ -855,8 +859,6 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
 		pr_warn("failed to alloc map name\n");
 		return -ENOMEM;
 	}
-	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu.\n",
-		 map_name, map->sec_idx, map->sec_offset);
 
 	def = &map->def;
 	def->type = BPF_MAP_TYPE_ARRAY;
@@ -864,6 +866,12 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
 	def->value_size = data->d_size;
 	def->max_entries = 1;
 	def->map_flags = type == LIBBPF_MAP_RODATA ? BPF_F_RDONLY_PROG : 0;
+	if (obj->caps.array_mmap)
+		def->map_flags |= BPF_F_MMAPABLE;
+
+	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
+		 map_name, map->sec_idx, map->sec_offset, def->map_flags);
+
 	if (data_buff) {
 		*data_buff = malloc(data->d_size);
 		if (!*data_buff) {
@@ -956,13 +964,13 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
 	pr_debug("maps in %s: %d maps in %zd bytes\n",
 		 obj->path, nr_maps, data->d_size);
 
-	map_def_sz = data->d_size / nr_maps;
-	if (!data->d_size || (data->d_size % nr_maps) != 0) {
+	if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
 		pr_warn("unable to determine map definition size "
 			"section %s, %d maps in %zd bytes\n",
 			obj->path, nr_maps, data->d_size);
 		return -EINVAL;
 	}
+	map_def_sz = data->d_size / nr_maps;
 
 	/* Fill obj->maps using data in "maps" section.  */
 	for (i = 0; i < nr_syms; i++) {
@@ -1862,9 +1870,13 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
 				pr_warn("incorrect bpf_call opcode\n");
 				return -LIBBPF_ERRNO__RELOC;
 			}
+			if (sym.st_value % 8) {
+				pr_warn("bad call relo offset: %lu\n", sym.st_value);
+				return -LIBBPF_ERRNO__RELOC;
+			}
 			prog->reloc_desc[i].type = RELO_CALL;
 			prog->reloc_desc[i].insn_idx = insn_idx;
-			prog->reloc_desc[i].text_off = sym.st_value;
+			prog->reloc_desc[i].text_off = sym.st_value / 8;
 			obj->has_pseudo_calls = true;
 			continue;
 		}
@@ -1995,6 +2007,7 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
 	map->def.map_flags = info.map_flags;
 	map->btf_key_type_id = info.btf_key_type_id;
 	map->btf_value_type_id = info.btf_value_type_id;
+	map->reused = true;
 
 	return 0;
 
@@ -2158,6 +2171,27 @@ static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
 	return 0;
 }
 
+static int bpf_object__probe_array_mmap(struct bpf_object *obj)
+{
+	struct bpf_create_map_attr attr = {
+		.map_type = BPF_MAP_TYPE_ARRAY,
+		.map_flags = BPF_F_MMAPABLE,
+		.key_size = sizeof(int),
+		.value_size = sizeof(int),
+		.max_entries = 1,
+	};
+	int fd;
+
+	fd = bpf_create_map_xattr(&attr);
+	if (fd >= 0) {
+		obj->caps.array_mmap = 1;
+		close(fd);
+		return 1;
+	}
+
+	return 0;
+}
+
 static int
 bpf_object__probe_caps(struct bpf_object *obj)
 {
@@ -2166,6 +2200,7 @@ bpf_object__probe_caps(struct bpf_object *obj)
 		bpf_object__probe_global_data,
 		bpf_object__probe_btf_func,
 		bpf_object__probe_btf_datasec,
+		bpf_object__probe_array_mmap,
 	};
 	int i, ret;
 
@@ -2470,8 +2505,8 @@ struct bpf_core_spec {
 	int raw_spec[BPF_CORE_SPEC_MAX_LEN];
 	/* raw spec length */
 	int raw_len;
-	/* field byte offset represented by spec */
-	__u32 offset;
+	/* field bit offset represented by spec */
+	__u32 bit_offset;
 };
 
 static bool str_is_empty(const char *s)
@@ -2482,8 +2517,8 @@ static bool str_is_empty(const char *s)
 /*
  * Turn bpf_field_reloc into a low- and high-level spec representation,
  * validating correctness along the way, as well as calculating resulting
- * field offset (in bytes), specified by accessor string. Low-level spec
- * captures every single level of nestedness, including traversing anonymous
+ * field bit offset, specified by accessor string. Low-level spec captures
+ * every single level of nestedness, including traversing anonymous
  * struct/union members. High-level one only captures semantically meaningful
  * "turning points": named fields and array indicies.
  * E.g., for this case:
@@ -2555,7 +2590,7 @@ static int bpf_core_spec_parse(const struct btf *btf,
 	sz = btf__resolve_size(btf, id);
 	if (sz < 0)
 		return sz;
-	spec->offset = access_idx * sz;
+	spec->bit_offset = access_idx * sz * 8;
 
 	for (i = 1; i < spec->raw_len; i++) {
 		t = skip_mods_and_typedefs(btf, id, &id);
@@ -2566,17 +2601,13 @@ static int bpf_core_spec_parse(const struct btf *btf,
 
 		if (btf_is_composite(t)) {
 			const struct btf_member *m;
-			__u32 offset;
+			__u32 bit_offset;
 
 			if (access_idx >= btf_vlen(t))
 				return -EINVAL;
-			if (btf_member_bitfield_size(t, access_idx))
-				return -EINVAL;
 
-			offset = btf_member_bit_offset(t, access_idx);
-			if (offset % 8)
-				return -EINVAL;
-			spec->offset += offset / 8;
+			bit_offset = btf_member_bit_offset(t, access_idx);
+			spec->bit_offset += bit_offset;
 
 			m = btf_members(t) + access_idx;
 			if (m->name_off) {
@@ -2605,7 +2636,7 @@ static int bpf_core_spec_parse(const struct btf *btf,
 			sz = btf__resolve_size(btf, id);
 			if (sz < 0)
 				return sz;
-			spec->offset += access_idx * sz;
+			spec->bit_offset += access_idx * sz * 8;
 		} else {
 			pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %d\n",
 				type_id, spec_str, i, id, btf_kind(t));
@@ -2706,12 +2737,14 @@ err_out:
 }
 
 /* Check two types for compatibility, skipping const/volatile/restrict and
- * typedefs, to ensure we are relocating offset to the compatible entities:
+ * typedefs, to ensure we are relocating compatible entities:
  *   - any two STRUCTs/UNIONs are compatible and can be mixed;
- *   - any two FWDs are compatible;
+ *   - any two FWDs are compatible, if their names match (modulo flavor suffix);
  *   - any two PTRs are always compatible;
+ *   - for ENUMs, names should be the same (ignoring flavor suffix) or at
+ *     least one of enums should be anonymous;
  *   - for ENUMs, check sizes, names are ignored;
- *   - for INT, size and bitness should match, signedness is ignored;
+ *   - for INT, size and signedness are ignored;
 *   - for ARRAY, dimensionality is ignored, element types are checked for
 *     compatibility recursively;
 *   - everything else shouldn't be ever a target of relocation.
@@ -2737,16 +2770,29 @@ recur:
 		return 0;
 
 	switch (btf_kind(local_type)) {
-	case BTF_KIND_FWD:
 	case BTF_KIND_PTR:
 		return 1;
-	case BTF_KIND_ENUM:
-		return local_type->size == targ_type->size;
+	case BTF_KIND_FWD:
+	case BTF_KIND_ENUM: {
+		const char *local_name, *targ_name;
+		size_t local_len, targ_len;
+
+		local_name = btf__name_by_offset(local_btf,
+						 local_type->name_off);
+		targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
+		local_len = bpf_core_essential_name_len(local_name);
+		targ_len = bpf_core_essential_name_len(targ_name);
+		/* one of them is anonymous or both w/ same flavor-less names */
+		return local_len == 0 || targ_len == 0 ||
+		       (local_len == targ_len &&
+			strncmp(local_name, targ_name, local_len) == 0);
+	}
 	case BTF_KIND_INT:
+		/* just reject deprecated bitfield-like integers; all other
+		 * integers are by default compatible between each other
+		 */
 		return btf_int_offset(local_type) == 0 &&
-		       btf_int_offset(targ_type) == 0 &&
-		       local_type->size == targ_type->size &&
-		       btf_int_bits(local_type) == btf_int_bits(targ_type);
+		       btf_int_offset(targ_type) == 0;
 	case BTF_KIND_ARRAY:
 		local_id = btf_array(local_type)->type;
 		targ_id = btf_array(targ_type)->type;
@@ -2762,7 +2808,7 @@ recur:
 * Given single high-level named field accessor in local type, find
 * corresponding high-level accessor for a target type. Along the way,
 * maintain low-level spec for target as well. Also keep updating target
- * offset.
+ * bit offset.
 *
 * Searching is performed through recursive exhaustive enumeration of all
 * fields of a struct/union. If there are any anonymous (embedded)
@@ -2801,21 +2847,16 @@ static int bpf_core_match_member(const struct btf *local_btf,
 	n = btf_vlen(targ_type);
 	m = btf_members(targ_type);
 	for (i = 0; i < n; i++, m++) {
-		__u32 offset;
+		__u32 bit_offset;
 
-		/* bitfield relocations not supported */
-		if (btf_member_bitfield_size(targ_type, i))
-			continue;
-		offset = btf_member_bit_offset(targ_type, i);
-		if (offset % 8)
-			continue;
+		bit_offset = btf_member_bit_offset(targ_type, i);
 
 		/* too deep struct/union/array nesting */
 		if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
 			return -E2BIG;
 
 		/* speculate this member will be the good one */
-		spec->offset += offset / 8;
+		spec->bit_offset += bit_offset;
 		spec->raw_spec[spec->raw_len++] = i;
 
 		targ_name = btf__name_by_offset(targ_btf, m->name_off);
@@ -2844,7 +2885,7 @@ static int bpf_core_match_member(const struct btf *local_btf,
 				return found;
 		}
 		/* member turned out not to be what we looked for */
-		spec->offset -= offset / 8;
+		spec->bit_offset -= bit_offset;
 		spec->raw_len--;
 	}
 
@@ -2853,7 +2894,7 @@ static int bpf_core_match_member(const struct btf *local_btf,
 
 /*
  * Try to match local spec to a target type and, if successful, produce full
- * target spec (high-level, low-level + offset).
+ * target spec (high-level, low-level + bit offset).
  */
 static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
 			       const struct btf *targ_btf, __u32 targ_id,
@@ -2916,13 +2957,120 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
 			sz = btf__resolve_size(targ_btf, targ_id);
 			if (sz < 0)
 				return sz;
-			targ_spec->offset += local_acc->idx * sz;
+			targ_spec->bit_offset += local_acc->idx * sz * 8;
 		}
 	}
 
 	return 1;
 }
 
+static int bpf_core_calc_field_relo(const struct bpf_program *prog,
+				    const struct bpf_field_reloc *relo,
+				    const struct bpf_core_spec *spec,
+				    __u32 *val, bool *validate)
+{
+	const struct bpf_core_accessor *acc = &spec->spec[spec->len - 1];
+	const struct btf_type *t = btf__type_by_id(spec->btf, acc->type_id);
+	__u32 byte_off, byte_sz, bit_off, bit_sz;
+	const struct btf_member *m;
+	const struct btf_type *mt;
+	bool bitfield;
+	__s64 sz;
+
+	/* a[n] accessor needs special handling */
+	if (!acc->name) {
+		if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
+			*val = spec->bit_offset / 8;
+		} else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
+			sz = btf__resolve_size(spec->btf, acc->type_id);
+			if (sz < 0)
+				return -EINVAL;
+			*val = sz;
+		} else {
+			pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
+				bpf_program__title(prog, false),
+				relo->kind, relo->insn_off / 8);
+			return -EINVAL;
+		}
+		if (validate)
+			*validate = true;
+		return 0;
+	}
+
+	m = btf_members(t) + acc->idx;
+	mt = skip_mods_and_typedefs(spec->btf, m->type, NULL);
+	bit_off = spec->bit_offset;
+	bit_sz = btf_member_bitfield_size(t, acc->idx);
+
+	bitfield = bit_sz > 0;
+	if (bitfield) {
+		byte_sz = mt->size;
+		byte_off = bit_off / 8 / byte_sz * byte_sz;
+		/* figure out smallest int size necessary for bitfield load */
+		while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
+			if (byte_sz >= 8) {
+				/* bitfield can't be read with 64-bit read */
+				pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
+					bpf_program__title(prog, false),
+					relo->kind, relo->insn_off / 8);
+				return -E2BIG;
+			}
+			byte_sz *= 2;
+			byte_off = bit_off / 8 / byte_sz * byte_sz;
+		}
+	} else {
+		sz = btf__resolve_size(spec->btf, m->type);
+		if (sz < 0)
+			return -EINVAL;
+		byte_sz = sz;
+		byte_off = spec->bit_offset / 8;
+		bit_sz = byte_sz * 8;
+	}
+
+	/* for bitfields, all the relocatable aspects are ambiguous and we
+	 * might disagree with compiler, so turn off validation of expected
+	 * value, except for signedness
+	 */
+	if (validate)
+		*validate = !bitfield;
+
+	switch (relo->kind) {
+	case BPF_FIELD_BYTE_OFFSET:
+		*val = byte_off;
+		break;
+	case BPF_FIELD_BYTE_SIZE:
+		*val = byte_sz;
+		break;
+	case BPF_FIELD_SIGNED:
+		/* enums will be assumed unsigned */
+		*val = btf_is_enum(mt) ||
+		       (btf_int_encoding(mt) & BTF_INT_SIGNED);
+		if (validate)
+			*validate = true; /* signedness is never ambiguous */
+		break;
+	case BPF_FIELD_LSHIFT_U64:
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+		*val = 64 - (bit_off + bit_sz - byte_off * 8);
+#else
+		*val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
+#endif
+		break;
+	case BPF_FIELD_RSHIFT_U64:
+		*val = 64 - bit_sz;
+		if (validate)
+			*validate = true; /* right shift is never ambiguous */
+		break;
+	case BPF_FIELD_EXISTS:
+	default:
+		pr_warn("prog '%s': unknown relo %d at insn #%d\n",
+			bpf_program__title(prog, false),
+			relo->kind, relo->insn_off / 8);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 /*
  * Patch relocatable BPF instruction.
  *
@@ -2942,36 +3090,31 @@ static int bpf_core_reloc_insn(struct bpf_program *prog,
 			       const struct bpf_core_spec *local_spec,
 			       const struct bpf_core_spec *targ_spec)
 {
+	bool failed = false, validate = true;
 	__u32 orig_val, new_val;
 	struct bpf_insn *insn;
-	int insn_idx;
+	int insn_idx, err;
 	__u8 class;
 
 	if (relo->insn_off % sizeof(struct bpf_insn))
 		return -EINVAL;
 	insn_idx = relo->insn_off / sizeof(struct bpf_insn);
 
-	switch (relo->kind) {
-	case BPF_FIELD_BYTE_OFFSET:
-		orig_val = local_spec->offset;
-		if (targ_spec) {
-			new_val = targ_spec->offset;
-		} else {
-			pr_warn("prog '%s': patching insn #%d w/ failed reloc, imm %d -> %d\n",
-				bpf_program__title(prog, false), insn_idx,
-				orig_val, -1);
-			new_val = (__u32)-1;
-		}
-		break;
-	case BPF_FIELD_EXISTS:
+	if (relo->kind == BPF_FIELD_EXISTS) {
 		orig_val = 1; /* can't generate EXISTS relo w/o local field */
 		new_val = targ_spec ? 1 : 0;
-		break;
-	default:
-		pr_warn("prog '%s': unknown relo %d at insn #%d'\n",
-			bpf_program__title(prog, false),
-			relo->kind, insn_idx);
-		return -EINVAL;
+	} else if (!targ_spec) {
+		failed = true;
+		new_val = (__u32)-1;
+	} else {
+		err = bpf_core_calc_field_relo(prog, relo, local_spec,
+					       &orig_val, &validate);
+		if (err)
+			return err;
+		err = bpf_core_calc_field_relo(prog, relo, targ_spec,
+					       &new_val, NULL);
+		if (err)
+			return err;
 	}
 
 	insn = &prog->insns[insn_idx];
@@ -2980,12 +3123,17 @@ static int bpf_core_reloc_insn(struct bpf_program *prog,
 	if (class == BPF_ALU || class == BPF_ALU64) {
 		if (BPF_SRC(insn->code) != BPF_K)
 			return -EINVAL;
-		if (insn->imm != orig_val)
+		if (!failed && validate && insn->imm != orig_val) {
+			pr_warn("prog '%s': unexpected insn #%d value: got %u, exp %u -> %u\n",
+				bpf_program__title(prog, false), insn_idx,
+				insn->imm, orig_val, new_val);
 			return -EINVAL;
+		}
+		orig_val = insn->imm;
 		insn->imm = new_val;
-		pr_debug("prog '%s': patched insn #%d (ALU/ALU64) imm %d -> %d\n",
-			 bpf_program__title(prog, false),
-			 insn_idx, orig_val, new_val);
+		pr_debug("prog '%s': patched insn #%d (ALU/ALU64)%s imm %u -> %u\n",
+			 bpf_program__title(prog, false), insn_idx,
+			 failed ? " w/ failed reloc" : "", orig_val, new_val);
 	} else {
 		pr_warn("prog '%s': trying to relocate unrecognized insn #%d, code:%x, src:%x, dst:%x, off:%x, imm:%x\n",
 			bpf_program__title(prog, false),
@@ -3103,7 +3251,8 @@ static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
 		libbpf_print(level, "%d%s", spec->raw_spec[i],
 			     i == spec->raw_len - 1 ? " => " : ":");
 
-	libbpf_print(level, "%u @ &x", spec->offset);
+	libbpf_print(level, "%u.%u @ &x",
+		     spec->bit_offset / 8, spec->bit_offset % 8);
 
 	for (i = 0; i < spec->len; i++) {
 		if (spec->spec[i].name)
@@ -3217,7 +3366,8 @@ static int bpf_core_reloc_field(struct bpf_program *prog,
 		return -EINVAL;
 	}
 
-	pr_debug("prog '%s': relo #%d: spec is ", prog_name, relo_idx);
+	pr_debug("prog '%s': relo #%d: kind %d, spec is ", prog_name, relo_idx,
+		 relo->kind);
 	bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
 	libbpf_print(LIBBPF_DEBUG, "\n");
 
@@ -3257,13 +3407,13 @@ static int bpf_core_reloc_field(struct bpf_program *prog,
 
 		if (j == 0) {
 			targ_spec = cand_spec;
-		} else if (cand_spec.offset != targ_spec.offset) {
+		} else if (cand_spec.bit_offset != targ_spec.bit_offset) {
 			/* if there are many candidates, they should all
-			 * resolve to the same offset
+			 * resolve to the same bit offset
 			 */
 			pr_warn("prog '%s': relo #%d: offset ambiguity: %u != %u\n",
-				prog_name, relo_idx, cand_spec.offset,
-				targ_spec.offset);
+				prog_name, relo_idx, cand_spec.bit_offset,
+				targ_spec.bit_offset);
 			return -EINVAL;
 		}
 
@@ -3408,6 +3558,7 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
 			pr_warn("oom in prog realloc\n");
 			return -ENOMEM;
 		}
+		prog->insns = new_insn;
 
 		if (obj->btf_ext) {
 			err = bpf_program_reloc_btf_ext(prog, obj,
@@ -3419,7 +3570,6 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
 
 		memcpy(new_insn + prog->insns_cnt, text->insns,
 		       text->insns_cnt * sizeof(*insn));
-		prog->insns = new_insn;
 		prog->main_prog_cnt = prog->insns_cnt;
 		prog->insns_cnt = new_cnt;
 		pr_debug("added %zd insn from %s to prog %s\n",
@@ -3427,7 +3577,7 @@ bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
 			 prog->section_name);
 	}
 	insn = &prog->insns[relo->insn_idx];
-	insn->imm += prog->main_prog_cnt - relo->insn_idx;
+	insn->imm += relo->text_off + prog->main_prog_cnt - relo->insn_idx;
 	return 0;
 }
 
@@ -3566,8 +3716,13 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
 	load_attr.insns = insns;
 	load_attr.insns_cnt = insns_cnt;
 	load_attr.license = license;
-	load_attr.kern_version = kern_version;
-	load_attr.prog_ifindex = prog->prog_ifindex;
+	if (prog->type == BPF_PROG_TYPE_TRACING) {
+		load_attr.attach_prog_fd = prog->attach_prog_fd;
+		load_attr.attach_btf_id = prog->attach_btf_id;
+	} else {
+		load_attr.kern_version = kern_version;
+		load_attr.prog_ifindex = prog->prog_ifindex;
+	}
 	/* if .BTF.ext was loaded, kernel supports associated BTF for prog */
 	if (prog->obj->btf_ext)
 		btf_fd = bpf_object__btf_fd(prog->obj);
@@ -3582,7 +3737,6 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
 	load_attr.line_info_cnt = prog->line_info_cnt;
 	load_attr.log_level = prog->log_level;
 	load_attr.prog_flags = prog->prog_flags;
-	load_attr.attach_btf_id = prog->attach_btf_id;
 
 retry_load:
 	log_buf = malloc(log_buf_size);
@@ -3604,7 +3758,7 @@ retry_load:
 		free(log_buf);
 		goto retry_load;
 	}
-	ret = -LIBBPF_ERRNO__LOAD;
+	ret = -errno;
 	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
 	pr_warn("load bpf program failed: %s\n", cp);
 
@@ -3617,23 +3771,18 @@ retry_load:
 		pr_warn("Program too large (%zu insns), at most %d insns\n",
 			load_attr.insns_cnt, BPF_MAXINSNS);
 		ret = -LIBBPF_ERRNO__PROG2BIG;
-	} else {
+	} else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
 		/* Wrong program type? */
-		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
-			int fd;
-
-			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
-			load_attr.expected_attach_type = 0;
-			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
-			if (fd >= 0) {
-				close(fd);
-				ret = -LIBBPF_ERRNO__PROGTYPE;
-				goto out;
-			}
-		}
+		int fd;
 
-		if (log_buf)
-			ret = -LIBBPF_ERRNO__KVER;
+		load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
+		load_attr.expected_attach_type = 0;
+		fd = bpf_load_program_xattr(&load_attr, NULL, 0);
+		if (fd >= 0) {
+			close(fd);
+			ret = -LIBBPF_ERRNO__PROGTYPE;
+			goto out;
+		}
 	}
 
 out:
@@ -3744,8 +3893,9 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
 	return 0;
 }
 
-static int libbpf_attach_btf_id_by_name(const char *name, __u32 *btf_id);
-
+static int libbpf_find_attach_btf_id(const char *name,
+				     enum bpf_attach_type attach_type,
+				     __u32 attach_prog_fd);
 static struct bpf_object *
 __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
 		   struct bpf_object_open_opts *opts)
@@ -3756,6 +3906,7 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
 	const char *obj_name;
 	char tmp_name[64];
 	bool relaxed_maps;
+	__u32 attach_prog_fd;
 	int err;
 
 	if (elf_version(EV_CURRENT) == EV_NONE) {
@@ -3786,6 +3937,7 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
 	obj->relaxed_core_relocs = OPTS_GET(opts, relaxed_core_relocs, false);
 	relaxed_maps = OPTS_GET(opts, relaxed_maps, false);
 	pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
+	attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
 
 	CHECK_ERR(bpf_object__elf_init(obj), err, out);
 	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
@@ -3798,7 +3950,6 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
 	bpf_object__for_each_program(prog, obj) {
 		enum bpf_prog_type prog_type;
 		enum bpf_attach_type attach_type;
-		__u32 btf_id;
 
 		err = libbpf_prog_type_by_name(prog->section_name, &prog_type,
 					       &attach_type);
@@ -3811,10 +3962,13 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
 		bpf_program__set_type(prog, prog_type);
 		bpf_program__set_expected_attach_type(prog, attach_type);
 		if (prog_type == BPF_PROG_TYPE_TRACING) {
-			err = libbpf_attach_btf_id_by_name(prog->section_name,
-							   &btf_id);
-			if (err)
+			err = libbpf_find_attach_btf_id(prog->section_name,
+							attach_type,
+							attach_prog_fd);
+			if (err <= 0)
 				goto out;
-			prog->attach_btf_id = btf_id;
+			prog->attach_btf_id = err;
+			prog->attach_prog_fd = attach_prog_fd;
 		}
 	}
 
@@ -3911,7 +4065,7 @@ int bpf_object__unload(struct bpf_object *obj)
 int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
 {
 	struct bpf_object *obj;
-	int err;
+	int err, i;
 
 	if (!attr)
 		return -EINVAL;
@@ -3932,6 +4086,11 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
 
 	return 0;
 out:
+	/* unpin any maps that were auto-pinned during load */
+	for (i = 0; i < obj->nr_maps; i++)
+		if (obj->maps[i].pinned && !obj->maps[i].reused)
+			bpf_map__unpin(&obj->maps[i], NULL);
+
 	bpf_object__unload(obj);
 	pr_warn("failed to load object '%s'\n", obj->path);
 	return err;
@@ -4665,6 +4824,11 @@ int bpf_program__fd(const struct bpf_program *prog)
 	return bpf_program__nth_fd(prog, 0);
 }
 
+size_t bpf_program__size(const struct bpf_program *prog)
+{
+	return prog->insns_cnt * sizeof(struct bpf_insn);
+}
+
 int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
 			  bpf_program_prep_t prep)
 {
@@ -4813,6 +4977,10 @@ static const struct {
 	BPF_PROG_SEC("raw_tp/",			BPF_PROG_TYPE_RAW_TRACEPOINT),
 	BPF_PROG_BTF("tp_btf/",			BPF_PROG_TYPE_TRACING,
 						BPF_TRACE_RAW_TP),
+	BPF_PROG_BTF("fentry/",			BPF_PROG_TYPE_TRACING,
+						BPF_TRACE_FENTRY),
+	BPF_PROG_BTF("fexit/",			BPF_PROG_TYPE_TRACING,
+						BPF_TRACE_FEXIT),
 	BPF_PROG_SEC("xdp",			BPF_PROG_TYPE_XDP),
 	BPF_PROG_SEC("perf_event",		BPF_PROG_TYPE_PERF_EVENT),
 	BPF_PROG_SEC("lwt_in",			BPF_PROG_TYPE_LWT_IN),
@@ -4930,43 +5098,94 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
 }
 
 #define BTF_PREFIX "btf_trace_"
-static int libbpf_attach_btf_id_by_name(const char *name, __u32 *btf_id)
+int libbpf_find_vmlinux_btf_id(const char *name,
+			       enum bpf_attach_type attach_type)
 {
 	struct btf *btf = bpf_core_find_kernel_btf();
-	char raw_tp_btf_name[128] = BTF_PREFIX;
-	char *dst = raw_tp_btf_name + sizeof(BTF_PREFIX) - 1;
-	int ret, i, err = -EINVAL;
+	char raw_tp_btf[128] = BTF_PREFIX;
+	char *dst = raw_tp_btf + sizeof(BTF_PREFIX) - 1;
+	const char *btf_name;
+	int err = -EINVAL;
+	u32 kind;
 
 	if (IS_ERR(btf)) {
 		pr_warn("vmlinux BTF is not found\n");
 		return -EINVAL;
 	}
 
-	if (!name)
+	if (attach_type == BPF_TRACE_RAW_TP) {
+		/* prepend "btf_trace_" prefix per kernel convention */
+		strncat(dst, name, sizeof(raw_tp_btf) - sizeof(BTF_PREFIX));
+		btf_name = raw_tp_btf;
+		kind = BTF_KIND_TYPEDEF;
+	} else {
+		btf_name = name;
+		kind = BTF_KIND_FUNC;
+	}
+	err = btf__find_by_name_kind(btf, btf_name, kind);
+	btf__free(btf);
+	return err;
+}
+
+static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
+{
+	struct bpf_prog_info_linear *info_linear;
+	struct bpf_prog_info *info;
+	struct btf *btf = NULL;
+	int err = -EINVAL;
+
+	info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
+	if (IS_ERR_OR_NULL(info_linear)) {
+		pr_warn("failed get_prog_info_linear for FD %d\n",
+			attach_prog_fd);
+		return -EINVAL;
+	}
+	info = &info_linear->info;
+	if (!info->btf_id) {
+		pr_warn("The target program doesn't have BTF\n");
 		goto out;
+	}
+	if (btf__get_from_id(info->btf_id, &btf)) {
+		pr_warn("Failed to get BTF of the program\n");
+		goto out;
+	}
+	err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
+	btf__free(btf);
+	if (err <= 0) {
+		pr_warn("%s is not found in prog's BTF\n", name);
+		goto out;
+	}
+out:
+	free(info_linear);
+	return err;
+}
+
+static int libbpf_find_attach_btf_id(const char *name,
+				     enum bpf_attach_type attach_type,
+				     __u32 attach_prog_fd)
+{
+	int i, err;
+
+	if (!name)
+		return -EINVAL;
 
 	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
 		if (!section_names[i].is_attach_btf)
 			continue;
 		if (strncmp(name, section_names[i].sec, section_names[i].len))
 			continue;
-		/* prepend "btf_trace_" prefix per kernel convention */
-		strncat(dst, name + section_names[i].len,
-			sizeof(raw_tp_btf_name) - sizeof(BTF_PREFIX));
-		ret = btf__find_by_name(btf, raw_tp_btf_name);
-		if (ret <= 0) {
-			pr_warn("%s is not found in vmlinux BTF\n", dst);
-			goto out;
-		}
-		*btf_id = ret;
-		err = 0;
-		goto out;
+		if (attach_prog_fd)
+			err = libbpf_find_prog_btf_id(name + section_names[i].len,
+						      attach_prog_fd);
+		else
+			err = libbpf_find_vmlinux_btf_id(name + section_names[i].len,
+							 attach_type);
+		if (err <= 0)
+			pr_warn("%s is not found in vmlinux BTF\n", name);
+		return err;
 	}
 	pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name);
-	err = -ESRCH;
-out:
-	btf__free(btf);
-	return err;
+	return -ESRCH;
 }
 
 int libbpf_attach_type_by_name(const char *name,
@@ -5594,6 +5813,37 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
 	return (struct bpf_link *)link;
 }
 
+struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
+{
+	char errmsg[STRERR_BUFSIZE];
+	struct bpf_link_fd *link;
+	int prog_fd, pfd;
+
+	prog_fd = bpf_program__fd(prog);
+	if (prog_fd < 0) {
+		pr_warn("program '%s': can't attach before loaded\n",
+			bpf_program__title(prog, false));
+		return ERR_PTR(-EINVAL);
+	}
+
+	link = malloc(sizeof(*link));
+	if (!link)
+		return ERR_PTR(-ENOMEM);
+	link->link.destroy = &bpf_link__destroy_fd;
+
+	pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
+	if (pfd < 0) {
+		pfd = -errno;
+		free(link);
+		pr_warn("program '%s': failed to attach to trace: %s\n",
+			bpf_program__title(prog, false),
+			libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
+		return ERR_PTR(pfd);
+	}
+	link->fd = pfd;
+	return (struct bpf_link *)link;
+}
+
 enum bpf_perf_event_ret
 bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
 			   void **copy_mem, size_t *copy_size,
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 6ddc0419337b..0dbf4bfba0c4 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -108,8 +108,9 @@ struct bpf_object_open_opts {
 	 * auto-pinned to that path on load; defaults to "/sys/fs/bpf".
 	 */
 	const char *pin_root_path;
+	__u32 attach_prog_fd;
 };
-#define bpf_object_open_opts__last_field pin_root_path
+#define bpf_object_open_opts__last_field attach_prog_fd
 
 LIBBPF_API struct bpf_object *bpf_object__open(const char *path);
 LIBBPF_API struct bpf_object *
@@ -188,6 +189,8 @@ libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
 			 enum bpf_attach_type *expected_attach_type);
 LIBBPF_API int libbpf_attach_type_by_name(const char *name,
 					  enum bpf_attach_type *attach_type);
+LIBBPF_API int libbpf_find_vmlinux_btf_id(const char *name,
+					  enum bpf_attach_type attach_type);
 
 /* Accessors of bpf_program */
 struct bpf_program;
@@ -214,6 +217,9 @@ LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog,
 LIBBPF_API const char *bpf_program__title(const struct bpf_program *prog,
 					  bool needs_copy);
 
+/* returns program size in bytes */
+LIBBPF_API size_t bpf_program__size(const struct bpf_program *prog);
+
 LIBBPF_API int bpf_program__load(struct bpf_program *prog, char *license,
 				 __u32 kern_version);
 LIBBPF_API int bpf_program__fd(const struct bpf_program *prog);
@@ -248,6 +254,8 @@ LIBBPF_API struct bpf_link *
 bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
 				   const char *tp_name);
 
+LIBBPF_API struct bpf_link *
+bpf_program__attach_trace(struct bpf_program *prog);
 struct bpf_insn;
 
 /*
@@ -427,8 +435,18 @@ LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
 LIBBPF_API int bpf_prog_load(const char *file, enum bpf_prog_type type,
 			     struct bpf_object **pobj, int *prog_fd);
 
+struct xdp_link_info {
+	__u32 prog_id;
+	__u32 drv_prog_id;
+	__u32 hw_prog_id;
+	__u32 skb_prog_id;
+	__u8 attach_mode;
+};
+
 LIBBPF_API int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags);
 LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags);
+LIBBPF_API int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
+				     size_t info_size, __u32 flags);
 
 struct perf_buffer;
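With struct xdp_link_info and bpf_get_link_xdp_info() declared above, retrieving the full set of XDP program IDs attached to a device (item 6 of the pull request) looks roughly like this; "eth0" is a placeholder interface name:

```c
#include <stdio.h>
#include <net/if.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct xdp_link_info info = {};
	int ifindex = if_nametoindex("eth0");

	if (!ifindex ||
	    bpf_get_link_xdp_info(ifindex, &info, sizeof(info), 0))
		return 1;
	printf("mode %u: drv id %u, hw id %u, skb id %u\n",
	       info.attach_mode, info.drv_prog_id, info.hw_prog_id,
	       info.skb_prog_id);
	return 0;
}
```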
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 86173cbb159d..8ddc2c40e482 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -193,13 +193,18 @@ LIBBPF_0.0.5 {
 
 LIBBPF_0.0.6 {
 	global:
+		bpf_get_link_xdp_info;
 		bpf_map__get_pin_path;
 		bpf_map__is_pinned;
 		bpf_map__set_pin_path;
 		bpf_object__open_file;
 		bpf_object__open_mem;
+		bpf_program__attach_trace;
 		bpf_program__get_expected_attach_type;
 		bpf_program__get_type;
 		bpf_program__is_tracing;
 		bpf_program__set_tracing;
+		bpf_program__size;
+		btf__find_by_name_kind;
+		libbpf_find_vmlinux_btf_id;
 } LIBBPF_0.0.5;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index bd6f48ea407b..97ac17a64a58 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -158,7 +158,11 @@ struct bpf_line_info_min {
  */
 enum bpf_field_info_kind {
 	BPF_FIELD_BYTE_OFFSET = 0,	/* field byte offset */
+	BPF_FIELD_BYTE_SIZE = 1,
 	BPF_FIELD_EXISTS = 2,		/* field existence in target kernel */
+	BPF_FIELD_SIGNED = 3,
+	BPF_FIELD_LSHIFT_U64 = 4,
+	BPF_FIELD_RSHIFT_U64 = 5,
 };
 
 /* The minimum bpf_field_reloc checked by the loader
diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
index ce3ec81b71c0..5065c1aa1061 100644
--- a/tools/lib/bpf/netlink.c
+++ b/tools/lib/bpf/netlink.c
@@ -12,6 +12,7 @@
 
 #include "bpf.h"
 #include "libbpf.h"
+#include "libbpf_internal.h"
 #include "nlattr.h"
 
 #ifndef SOL_NETLINK
@@ -24,7 +25,7 @@ typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, libbpf_dump_nlmsg_t,
 struct xdp_id_md {
 	int ifindex;
 	__u32 flags;
-	__u32 id;
+	struct xdp_link_info info;
 };
 
 int libbpf_netlink_open(__u32 *nl_pid)
@@ -43,7 +44,7 @@ int libbpf_netlink_open(__u32 *nl_pid)
 
 	if (setsockopt(sock, SOL_NETLINK, NETLINK_EXT_ACK,
 		       &one, sizeof(one)) < 0) {
-		fprintf(stderr, "Netlink error reporting not supported\n");
+		pr_warn("Netlink error reporting not supported\n");
 	}
 
 	if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
@@ -202,26 +203,11 @@ static int __dump_link_nlmsg(struct nlmsghdr *nlh,
 	return dump_link_nlmsg(cookie, ifi, tb);
 }
 
-static unsigned char get_xdp_id_attr(unsigned char mode, __u32 flags)
-{
-	if (mode != XDP_ATTACHED_MULTI)
-		return IFLA_XDP_PROG_ID;
-	if (flags & XDP_FLAGS_DRV_MODE)
-		return IFLA_XDP_DRV_PROG_ID;
-	if (flags & XDP_FLAGS_HW_MODE)
-		return IFLA_XDP_HW_PROG_ID;
-	if (flags & XDP_FLAGS_SKB_MODE)
-		return IFLA_XDP_SKB_PROG_ID;
-
-	return IFLA_XDP_UNSPEC;
-}
-
-static int get_xdp_id(void *cookie, void *msg, struct nlattr **tb)
+static int get_xdp_info(void *cookie, void *msg, struct nlattr **tb)
 {
 	struct nlattr *xdp_tb[IFLA_XDP_MAX + 1];
 	struct xdp_id_md *xdp_id = cookie;
 	struct ifinfomsg *ifinfo = msg;
-	unsigned char mode, xdp_attr;
 	int ret;
 
 	if (xdp_id->ifindex && xdp_id->ifindex != ifinfo->ifi_index)
@@ -237,27 +223,40 @@ static int get_xdp_id(void *cookie, void *msg, struct nlattr **tb)
 	if (!xdp_tb[IFLA_XDP_ATTACHED])
 		return 0;
 
-	mode = libbpf_nla_getattr_u8(xdp_tb[IFLA_XDP_ATTACHED]);
-	if (mode == XDP_ATTACHED_NONE)
-		return 0;
+	xdp_id->info.attach_mode = libbpf_nla_getattr_u8(
+		xdp_tb[IFLA_XDP_ATTACHED]);
 
-	xdp_attr = get_xdp_id_attr(mode, xdp_id->flags);
-	if (!xdp_attr || !xdp_tb[xdp_attr])
+	if (xdp_id->info.attach_mode == XDP_ATTACHED_NONE)
 		return 0;
 
-	xdp_id->id = libbpf_nla_getattr_u32(xdp_tb[xdp_attr]);
+	if (xdp_tb[IFLA_XDP_PROG_ID])
+		xdp_id->info.prog_id = libbpf_nla_getattr_u32(
+			xdp_tb[IFLA_XDP_PROG_ID]);
+
+	if (xdp_tb[IFLA_XDP_SKB_PROG_ID])
+		xdp_id->info.skb_prog_id = libbpf_nla_getattr_u32(
+			xdp_tb[IFLA_XDP_SKB_PROG_ID]);
+
+	if (xdp_tb[IFLA_XDP_DRV_PROG_ID])
+		xdp_id->info.drv_prog_id = libbpf_nla_getattr_u32(
+			xdp_tb[IFLA_XDP_DRV_PROG_ID]);
+
+	if (xdp_tb[IFLA_XDP_HW_PROG_ID])
+		xdp_id->info.hw_prog_id = libbpf_nla_getattr_u32(
+			xdp_tb[IFLA_XDP_HW_PROG_ID]);
 
 	return 0;
 }
 
-int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags)
+int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
+			  size_t info_size, __u32 flags)
 {
 	struct xdp_id_md xdp_id = {};
 	int sock, ret;
 	__u32 nl_pid;
 	__u32 mask;
 
-	if (flags & ~XDP_FLAGS_MASK)
+	if (flags & ~XDP_FLAGS_MASK || !info_size)
 		return -EINVAL;
 
 	/* Check whether the single {HW,DRV,SKB} mode is set */
@@ -273,14 +272,44 @@ int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags)
 	xdp_id.ifindex = ifindex;
 	xdp_id.flags = flags;
 
-	ret = libbpf_nl_get_link(sock, nl_pid, get_xdp_id, &xdp_id);
-	if (!ret)
-		*prog_id = xdp_id.id;
+	ret = libbpf_nl_get_link(sock, nl_pid, get_xdp_info, &xdp_id);
+	if (!ret) {
+		size_t sz = min(info_size, sizeof(xdp_id.info));
+
+		memcpy(info, &xdp_id.info, sz);
+		memset((void *) info + sz, 0, info_size - sz);
+	}
 
 	close(sock);
 	return ret;
 }
 
+static __u32 get_xdp_id(struct xdp_link_info *info, __u32 flags)
+{
+	if (info->attach_mode != XDP_ATTACHED_MULTI)
+		return info->prog_id;
+	if (flags & XDP_FLAGS_DRV_MODE)
+		return info->drv_prog_id;
+	if (flags & XDP_FLAGS_HW_MODE)
+		return info->hw_prog_id;
+	if (flags & XDP_FLAGS_SKB_MODE)
+		return info->skb_prog_id;
+
+	return 0;
+}
+
+int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags)
+{
+	struct xdp_link_info info;
+	int ret;
+
+	ret = bpf_get_link_xdp_info(ifindex, &info, sizeof(info), flags);
+	if (!ret)
+		*prog_id = get_xdp_id(&info, flags);
+
+	return ret;
+}
+
 int libbpf_nl_get_link(int sock, unsigned int nl_pid,
 		       libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie)
 {
diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c
index 1e69c0c8d413..8db44bbfc66d 100644
--- a/tools/lib/bpf/nlattr.c
+++ b/tools/lib/bpf/nlattr.c
@@ -8,6 +8,7 @@
 
 #include <errno.h>
 #include "nlattr.h"
+#include "libbpf_internal.h"
 #include <linux/rtnetlink.h>
 #include <string.h>
 #include <stdio.h>
@@ -121,8 +122,8 @@ int libbpf_nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head,
 		}
 
 		if (tb[type])
-			fprintf(stderr, "Attribute of type %#x found multiple times in message, "
-				"previous attribute is being ignored.\n", type);
+			pr_warn("Attribute of type %#x found multiple times in message, "
+				"previous attribute is being ignored.\n", type);
 		tb[type] = nla;
 	}
 
@@ -181,15 +182,14 @@ int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh)
 
 	if (libbpf_nla_parse(tb, NLMSGERR_ATTR_MAX, attr, alen,
 			     extack_policy) != 0) {
-		fprintf(stderr,
-			"Failed to parse extended error attributes\n");
+		pr_warn("Failed to parse extended error attributes\n");
 		return 0;
 	}
 
 	if (tb[NLMSGERR_ATTR_MSG])
 		errmsg = (char *) libbpf_nla_data(tb[NLMSGERR_ATTR_MSG]);
 
-	fprintf(stderr, "Kernel error message: %s\n", errmsg);
+	pr_warn("Kernel error message: %s\n", errmsg);
 
 	return 0;
 }
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index 74d84f36a5b2..8e0ffa800a71 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -431,13 +431,18 @@ static int xsk_get_max_queues(struct xsk_socket *xsk)
 		goto out;
 	}
 
-	if (err || channels.max_combined == 0)
+	if (err) {
 		/* If the device says it has no channels, then all traffic
 		 * is sent to a single stream, so max queues = 1.
 		 */
 		ret = 1;
-	else
-		ret = channels.max_combined;
+	} else {
+		/* Take the max of rx, tx, combined. Drivers return
+		 * the number of channels in different ways.
+		 */
+		ret = max(channels.max_rx, channels.max_tx);
+		ret = max(ret, (int)channels.max_combined);
+	}
 
 out:
 	close(fd);
@@ -553,6 +558,8 @@ static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
 		}
 	} else {
 		xsk->prog_fd = bpf_prog_get_fd_by_id(prog_id);
+		if (xsk->prog_fd < 0)
+			return -errno;
 		err = xsk_lookup_bpf_maps(xsk);
 		if (err) {
 			close(xsk->prog_fd);
@@ -560,7 +567,8 @@ static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
 		}
 	}
 
-	err = xsk_set_bpf_maps(xsk);
+	if (xsk->rx)
+		err = xsk_set_bpf_maps(xsk);
 	if (err) {
 		xsk_delete_bpf_maps(xsk);
 		close(xsk->prog_fd);
@@ -581,18 +589,24 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
 	struct xsk_socket *xsk;
 	int err;
 
-	if (!umem || !xsk_ptr || !rx || !tx)
+	if (!umem || !xsk_ptr || !(rx || tx))
 		return -EFAULT;
 
-	if (umem->refcount) {
-		pr_warn("Error: shared umems not supported by libbpf.\n");
-		return -EBUSY;
-	}
-
 	xsk = calloc(1, sizeof(*xsk));
 	if (!xsk)
 		return -ENOMEM;
 
+	err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
+	if (err)
+		goto out_xsk_alloc;
+
+	if (umem->refcount &&
+	    !(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
+		pr_warn("Error: shared umems not supported by libbpf supplied XDP program.\n");
+		err = -EBUSY;
+		goto out_xsk_alloc;
+	}
+
 	if (umem->refcount++ > 0) {
 		xsk->fd = socket(AF_XDP, SOCK_RAW, 0);
 		if (xsk->fd < 0) {
@@ -614,10 +628,6 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
 	memcpy(xsk->ifname, ifname, IFNAMSIZ - 1);
 	xsk->ifname[IFNAMSIZ - 1] = '\0';
 
-	err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
-	if (err)
-		goto out_socket;
-
 	if (rx) {
 		err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
 				 &xsk->config.rx_size,
@@ -685,7 +695,12 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
 	sxdp.sxdp_family = PF_XDP;
 	sxdp.sxdp_ifindex = xsk->ifindex;
 	sxdp.sxdp_queue_id = xsk->queue_id;
-	sxdp.sxdp_flags = xsk->config.bind_flags;
+	if (umem->refcount > 1) {
+		sxdp.sxdp_flags = XDP_SHARED_UMEM;
+		sxdp.sxdp_shared_umem_fd = umem->fd;
+	} else {
+		sxdp.sxdp_flags = xsk->config.bind_flags;
+	}
 
 	err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 	if (err) {
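A hedged sketch of the shared umem mode the xsk.c changes above enable (item 5 of the pull request): one RX-only and one TX-only socket bound to the same umem. Since libbpf's built-in XDP program does not support sharing, the config sets XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD and the caller supplies its own program; the helper name, queue id and error handling are illustrative.

```c
#include <bpf/xsk.h>

static struct xsk_ring_cons rx_ring;
static struct xsk_ring_prod tx_ring;

/* 'umem' comes from a prior xsk_umem__create(); queue 0 is arbitrary */
static int create_shared_socks(struct xsk_umem *umem, const char *ifname,
			       struct xsk_socket **rx_xsk,
			       struct xsk_socket **tx_xsk)
{
	struct xsk_socket_config cfg = {
		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD,
	};
	int err;

	/* RX-only socket: a NULL TX ring is now accepted */
	err = xsk_socket__create(rx_xsk, ifname, 0, umem, &rx_ring, NULL, &cfg);
	if (err)
		return err;

	/* TX-only socket: umem->refcount > 0 here, so libbpf binds this one
	 * with XDP_SHARED_UMEM pointing at the shared umem's fd.
	 */
	return xsk_socket__create(tx_xsk, ifname, 0, umem, NULL, &tx_ring, &cfg);
}
```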
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index b334a6db15c1..4fe4aec0367c 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -30,7 +30,7 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
 	test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \
 	test_cgroup_storage test_select_reuseport \
 	test_netcnt test_tcpnotify_user test_sock_fields test_sysctl test_hashmap \
-	test_cgroup_attach xdping test_progs-no_alu32
+	test_cgroup_attach test_progs-no_alu32
 
 # Also test bpf-gcc, if present
 ifneq ($(BPF_GCC),)
@@ -38,7 +38,8 @@ TEST_GEN_PROGS += test_progs-bpf_gcc
 endif
 
 TEST_GEN_FILES =
-TEST_FILES =
+TEST_FILES = test_lwt_ip_encap.o \
+	test_tc_edt.o
 
 # Order correspond to 'make run_tests' order
 TEST_PROGS := test_kmod.sh \
@@ -70,7 +71,7 @@ TEST_PROGS_EXTENDED := with_addr.sh \
 # Compile but not part of 'make run_tests'
 TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \
 	flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \
-	test_lirc_mode2_user
+	test_lirc_mode2_user xdping
 
 TEST_CUSTOM_PROGS = urandom_read
 
@@ -162,6 +163,12 @@ define CLANG_BPF_BUILD_RULE
 		  -c $1 -o - || echo "BPF obj compilation failed") | \
 	$(LLC) -march=bpf -mcpu=probe $4 -filetype=obj -o $2
 endef
+# Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32
+define CLANG_NOALU32_BPF_BUILD_RULE
+	($(CLANG) $3 -O2 -target bpf -emit-llvm \
+		  -c $1 -o - || echo "BPF obj compilation failed") | \
+	$(LLC) -march=bpf -mcpu=v2 $4 -filetype=obj -o $2
+endef
 # Similar to CLANG_BPF_BUILD_RULE, but using native Clang and bpf LLC
 define CLANG_NATIVE_BPF_BUILD_RULE
 	($(CLANG) $3 -O2 -emit-llvm \
@@ -274,6 +281,7 @@ TRUNNER_BPF_LDFLAGS := -mattr=+alu32
 $(eval $(call DEFINE_TEST_RUNNER,test_progs))
 
 # Define test_progs-no_alu32 test runner.
+TRUNNER_BPF_BUILD_RULE := CLANG_NOALU32_BPF_BUILD_RULE
 TRUNNER_BPF_LDFLAGS :=
 $(eval $(call DEFINE_TEST_RUNNER,test_progs,no_alu32))
diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
index 09dfa75fe948..ec9e2fdd6b89 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <test_progs.h>
 #include "progs/core_reloc_types.h"
+#include <sys/mman.h>
 
 #define STRUCT_TO_CHAR_PTR(struct_name) (const char *)&(struct struct_name)
 
@@ -174,21 +175,82 @@
 	.fails = true,							\
 }
 
-#define EXISTENCE_DATA(struct_name) STRUCT_TO_CHAR_PTR(struct_name) {	\
-	.a = 42,							\
-}
-
 #define EXISTENCE_CASE_COMMON(name)					\
 	.case_name = #name,						\
 	.bpf_obj_file = "test_core_reloc_existence.o",			\
 	.btf_src_file = "btf__core_reloc_" #name ".o",			\
-	.relaxed_core_relocs = true					\
+	.relaxed_core_relocs = true
 
 #define EXISTENCE_ERR_CASE(name) {					\
 	EXISTENCE_CASE_COMMON(name),					\
 	.fails = true,							\
 }
 
+#define BITFIELDS_CASE_COMMON(objfile, test_name_prefix, name)		\
+	.case_name = test_name_prefix#name,				\
+	.bpf_obj_file = objfile,					\
+	.btf_src_file = "btf__core_reloc_" #name ".o"
+
+#define BITFIELDS_CASE(name, ...) {					\
+	BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o",	\
+			      "direct:", name),				\
+	.input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__,	\
+	.input_len = sizeof(struct core_reloc_##name),			\
+	.output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output)	\
+		__VA_ARGS__,						\
+	.output_len = sizeof(struct core_reloc_bitfields_output),	\
+}, {									\
+	BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o",	\
+			      "probed:", name),				\
+	.input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__,	\
+	.input_len = sizeof(struct core_reloc_##name),			\
+	.output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output)	\
+		__VA_ARGS__,						\
+	.output_len = sizeof(struct core_reloc_bitfields_output),	\
+	.direct_raw_tp = true,						\
+}
+
+
+#define BITFIELDS_ERR_CASE(name) {					\
+	BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o",	\
+			      "probed:", name),				\
+	.fails = true,							\
+}, {									\
+	BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o",	\
+			      "direct:", name),				\
+	.direct_raw_tp = true,						\
+	.fails = true,							\
+}
+
+#define SIZE_CASE_COMMON(name)						\
+	.case_name = #name,						\
+	.bpf_obj_file = "test_core_reloc_size.o",			\
+	.btf_src_file = "btf__core_reloc_" #name ".o",			\
+	.relaxed_core_relocs = true
+
+#define SIZE_OUTPUT_DATA(type)						\
+	STRUCT_TO_CHAR_PTR(core_reloc_size_output) {			\
+		.int_sz = sizeof(((type *)0)->int_field),		\
+		.struct_sz = sizeof(((type *)0)->struct_field),		\
+		.union_sz = sizeof(((type *)0)->union_field),		\
+		.arr_sz = sizeof(((type *)0)->arr_field),		\
+		.arr_elem_sz = sizeof(((type *)0)->arr_field[0]),	\
+		.ptr_sz = sizeof(((type *)0)->ptr_field),		\
+		.enum_sz = sizeof(((type *)0)->enum_field),		\
+	}
+
+#define SIZE_CASE(name) {						\
+	SIZE_CASE_COMMON(name),						\
+	.input_len = 0,							\
+	.output = SIZE_OUTPUT_DATA(struct core_reloc_##name),		\
+	.output_len = sizeof(struct core_reloc_size_output),		\
+}
+
+#define SIZE_ERR_CASE(name) {						\
+	SIZE_CASE_COMMON(name),						\
+	.fails = true,							\
+}
+
 struct core_reloc_test_case {
 	const char *case_name;
 	const char *bpf_obj_file;
@@ -199,6 +261,7 @@ struct core_reloc_test_case {
 	int output_len;
 	bool fails;
 	bool relaxed_core_relocs;
+	bool direct_raw_tp;
 };
 
 static struct core_reloc_test_case test_cases[] = {
@@ -275,12 +338,6 @@ static struct core_reloc_test_case test_cases[] = {
 	INTS_CASE(ints___bool),
 	INTS_CASE(ints___reverse_sign),
 
-	INTS_ERR_CASE(ints___err_bitfield),
-	INTS_ERR_CASE(ints___err_wrong_sz_8),
-	INTS_ERR_CASE(ints___err_wrong_sz_16),
-	INTS_ERR_CASE(ints___err_wrong_sz_32),
-	INTS_ERR_CASE(ints___err_wrong_sz_64),
-
 	/* validate edge cases of capturing relocations */
 	{
 		.case_name = "misc",
@@ -352,6 +409,44 @@ static struct core_reloc_test_case test_cases[] = {
 	EXISTENCE_ERR_CASE(existence__err_arr_kind),
 	EXISTENCE_ERR_CASE(existence__err_arr_value_type),
 	EXISTENCE_ERR_CASE(existence__err_struct_type),
+
+	/* bitfield relocation checks */
+	BITFIELDS_CASE(bitfields, {
+		.ub1 = 1,
+		.ub2 = 2,
+		.ub7 = 96,
+		.sb4 = -7,
+		.sb20 = -0x76543,
+		.u32 = 0x80000000,
+		.s32 = -0x76543210,
+	}),
+	BITFIELDS_CASE(bitfields___bit_sz_change, {
+		.ub1 = 6,
+		.ub2 = 0xABCDE,
+		.ub7 = 1,
+		.sb4 = -1,
+		.sb20 = -0x17654321,
+		.u32 = 0xBEEF,
+		.s32 = -0x3FEDCBA987654321,
+	}),
+	BITFIELDS_CASE(bitfields___bitfield_vs_int, {
+		.ub1 = 0xFEDCBA9876543210,
+		.ub2 = 0xA6,
+		.ub7 = -0x7EDCBA987654321,
+		.sb4 = -0x6123456789ABCDE,
+		.sb20 = 0xD00D,
+		.u32 = -0x76543,
+		.s32 = 0x0ADEADBEEFBADB0B,
+	}),
+	BITFIELDS_CASE(bitfields___just_big_enough, {
+		.ub1 = 0xF,
+		.ub2 = 0x0812345678FEDCBA,
+	}),
+	BITFIELDS_ERR_CASE(bitfields___err_too_big_bitfield),
+
+	/* size relocation checks */
+	SIZE_CASE(size),
+	SIZE_CASE(size___diff_sz),
 };
 
 struct data {
@@ -359,18 +454,25 @@ struct data {
 	char out[256];
 };
 
+static size_t roundup_page(size_t sz)
+{
+	long page_size = sysconf(_SC_PAGE_SIZE);
+	return (sz + page_size - 1) / page_size * page_size;
+}
+
 void test_core_reloc(void)
 {
-	const char *probe_name = "raw_tracepoint/sys_enter";
+	const size_t mmap_sz = roundup_page(sizeof(struct data));
 	struct bpf_object_load_attr load_attr = {};
 	struct core_reloc_test_case *test_case;
+	const char *tp_name, *probe_name;
 	int err, duration = 0, i, equal;
 	struct bpf_link *link = NULL;
 	struct bpf_map *data_map;
 	struct bpf_program *prog;
 	struct bpf_object *obj;
-	const int zero = 0;
-	struct data data;
+	struct data *data;
+	void *mmap_data = NULL;
 
 	for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
 		test_case = &test_cases[i];
@@ -382,11 +484,19 @@ void test_core_reloc(void)
 		);
 
 		obj = bpf_object__open_file(test_case->bpf_obj_file, &opts);
-		if (CHECK(IS_ERR_OR_NULL(obj), "obj_open",
-			  "failed to open '%s': %ld\n",
+		if (CHECK(IS_ERR(obj), "obj_open", "failed to open '%s': %ld\n",
 			  test_case->bpf_obj_file, PTR_ERR(obj)))
 			continue;
 
+		/* for typed raw tracepoints, NULL should be specified */
+		if (test_case->direct_raw_tp) {
+			probe_name = "tp_btf/sys_enter";
+			tp_name = NULL;
+		} else {
+			probe_name = "raw_tracepoint/sys_enter";
+			tp_name = "sys_enter";
+		}
+
 		prog = bpf_object__find_program_by_title(obj, probe_name);
 		if (CHECK(!prog, "find_probe",
 			  "prog '%s' not found\n", probe_name))
@@ -407,7 +517,7 @@ void test_core_reloc(void)
 			goto cleanup;
 		}
 
-		link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
+		link = bpf_program__attach_raw_tracepoint(prog, tp_name);
 		if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n",
 			  PTR_ERR(link)))
 			goto cleanup;
@@ -416,24 +526,22 @@ void test_core_reloc(void)
 		if (CHECK(!data_map, "find_data_map", "data map not found\n"))
 			goto cleanup;
 
-		memset(&data, 0, sizeof(data));
-		memcpy(data.in, test_case->input, test_case->input_len);
-
-		err = bpf_map_update_elem(bpf_map__fd(data_map),
-					  &zero, &data, 0);
-		if (CHECK(err, "update_data_map",
-			  "failed to update .data map: %d\n", err))
+		mmap_data = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
+				 MAP_SHARED, bpf_map__fd(data_map), 0);
+		if (CHECK(mmap_data == MAP_FAILED, "mmap",
+			  ".bss mmap failed: %d", errno)) {
+			mmap_data = NULL;
 			goto cleanup;
+		}
+		data = mmap_data;
+
+		memset(mmap_data, 0, sizeof(*data));
+		memcpy(data->in, test_case->input, test_case->input_len);
 
 		/* trigger test run */
 		usleep(1);
 
-		err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &data);
-		if (CHECK(err, "get_result",
-			  "failed to get output data: %d\n", err))
-			goto cleanup;
-
-		equal = memcmp(data.out, test_case->output,
+		equal = memcmp(data->out, test_case->output,
 			       test_case->output_len) == 0;
 		if (CHECK(!equal, "check_result",
 			  "input/output data don't match\n")) {
@@ -445,12 +553,16 @@ void test_core_reloc(void)
 			}
 			for (j = 0; j < test_case->output_len; j++) {
 				printf("output byte #%d: EXP 0x%02hhx GOT 0x%02hhx\n",
-				       j, test_case->output[j], data.out[j]);
+				       j, test_case->output[j], data->out[j]);
 			}
 			goto cleanup;
 		}
 
 cleanup:
+		if (mmap_data) {
+			CHECK_FAIL(munmap(mmap_data, mmap_sz));
+			mmap_data = NULL;
+		}
 		if (!IS_ERR_OR_NULL(link)) {
 			bpf_link__destroy(link);
 			link = NULL;
diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
new file mode 100644
index 000000000000..40bcff2cc274
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
@@
-0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <test_progs.h> + +void test_fentry_fexit(void) +{ + struct bpf_prog_load_attr attr_fentry = { + .file = "./fentry_test.o", + }; + struct bpf_prog_load_attr attr_fexit = { + .file = "./fexit_test.o", + }; + + struct bpf_object *obj_fentry = NULL, *obj_fexit = NULL, *pkt_obj; + struct bpf_map *data_map_fentry, *data_map_fexit; + char fentry_name[] = "fentry/bpf_fentry_testX"; + char fexit_name[] = "fexit/bpf_fentry_testX"; + int err, pkt_fd, kfree_skb_fd, i; + struct bpf_link *link[12] = {}; + struct bpf_program *prog[12]; + __u32 duration, retval; + const int zero = 0; + u64 result[12]; + + err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, + &pkt_obj, &pkt_fd); + if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) + return; + err = bpf_prog_load_xattr(&attr_fentry, &obj_fentry, &kfree_skb_fd); + if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno)) + goto close_prog; + err = bpf_prog_load_xattr(&attr_fexit, &obj_fexit, &kfree_skb_fd); + if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno)) + goto close_prog; + + for (i = 0; i < 6; i++) { + fentry_name[sizeof(fentry_name) - 2] = '1' + i; + prog[i] = bpf_object__find_program_by_title(obj_fentry, fentry_name); + if (CHECK(!prog[i], "find_prog", "prog %s not found\n", fentry_name)) + goto close_prog; + link[i] = bpf_program__attach_trace(prog[i]); + if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n")) + goto close_prog; + } + data_map_fentry = bpf_object__find_map_by_name(obj_fentry, "fentry_t.bss"); + if (CHECK(!data_map_fentry, "find_data_map", "data map not found\n")) + goto close_prog; + + for (i = 6; i < 12; i++) { + fexit_name[sizeof(fexit_name) - 2] = '1' + i - 6; + prog[i] = bpf_object__find_program_by_title(obj_fexit, fexit_name); + if (CHECK(!prog[i], "find_prog", "prog %s not found\n", fexit_name)) + goto close_prog; + link[i] = bpf_program__attach_trace(prog[i]); + if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n")) + goto close_prog; + } + data_map_fexit = bpf_object__find_map_by_name(obj_fexit, "fexit_te.bss"); + if (CHECK(!data_map_fexit, "find_data_map", "data map not found\n")) + goto close_prog; + + err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6), + NULL, NULL, &retval, &duration); + CHECK(err || retval, "ipv6", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + + err = bpf_map_lookup_elem(bpf_map__fd(data_map_fentry), &zero, &result); + if (CHECK(err, "get_result", + "failed to get output data: %d\n", err)) + goto close_prog; + + err = bpf_map_lookup_elem(bpf_map__fd(data_map_fexit), &zero, result + 6); + if (CHECK(err, "get_result", + "failed to get output data: %d\n", err)) + goto close_prog; + + for (i = 0; i < 12; i++) + if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n", + i % 6 + 1, result[i])) + goto close_prog; + +close_prog: + for (i = 0; i < 12; i++) + if (!IS_ERR_OR_NULL(link[i])) + bpf_link__destroy(link[i]); + bpf_object__close(obj_fentry); + bpf_object__close(obj_fexit); + bpf_object__close(pkt_obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fentry_test.c b/tools/testing/selftests/bpf/prog_tests/fentry_test.c new file mode 100644 index 000000000000..9fb103193878 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fentry_test.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <test_progs.h> + +void 
test_fentry_test(void) +{ + struct bpf_prog_load_attr attr = { + .file = "./fentry_test.o", + }; + + char prog_name[] = "fentry/bpf_fentry_testX"; + struct bpf_object *obj = NULL, *pkt_obj; + int err, pkt_fd, kfree_skb_fd, i; + struct bpf_link *link[6] = {}; + struct bpf_program *prog[6]; + __u32 duration, retval; + struct bpf_map *data_map; + const int zero = 0; + u64 result[6]; + + err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, + &pkt_obj, &pkt_fd); + if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) + return; + err = bpf_prog_load_xattr(&attr, &obj, &kfree_skb_fd); + if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno)) + goto close_prog; + + for (i = 0; i < 6; i++) { + prog_name[sizeof(prog_name) - 2] = '1' + i; + prog[i] = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name)) + goto close_prog; + link[i] = bpf_program__attach_trace(prog[i]); + if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n")) + goto close_prog; + } + data_map = bpf_object__find_map_by_name(obj, "fentry_t.bss"); + if (CHECK(!data_map, "find_data_map", "data map not found\n")) + goto close_prog; + + err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6), + NULL, NULL, &retval, &duration); + CHECK(err || retval, "ipv6", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + + err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result); + if (CHECK(err, "get_result", + "failed to get output data: %d\n", err)) + goto close_prog; + + for (i = 0; i < 6; i++) + if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n", + i + 1, result[i])) + goto close_prog; + +close_prog: + for (i = 0; i < 6; i++) + if (!IS_ERR_OR_NULL(link[i])) + bpf_link__destroy(link[i]); + bpf_object__close(obj); + bpf_object__close(pkt_obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c new file mode 100644 index 000000000000..15c7378362dd --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <test_progs.h> + +#define PROG_CNT 3 + +void test_fexit_bpf2bpf(void) +{ + const char *prog_name[PROG_CNT] = { + "fexit/test_pkt_access", + "fexit/test_pkt_access_subprog1", + "fexit/test_pkt_access_subprog2", + }; + struct bpf_object *obj = NULL, *pkt_obj; + int err, pkt_fd, i; + struct bpf_link *link[PROG_CNT] = {}; + struct bpf_program *prog[PROG_CNT]; + __u32 duration, retval; + struct bpf_map *data_map; + const int zero = 0; + u64 result[PROG_CNT]; + + err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_UNSPEC, + &pkt_obj, &pkt_fd); + if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) + return; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, + .attach_prog_fd = pkt_fd, + ); + + obj = bpf_object__open_file("./fexit_bpf2bpf.o", &opts); + if (CHECK(IS_ERR_OR_NULL(obj), "obj_open", + "failed to open fexit_bpf2bpf: %ld\n", + PTR_ERR(obj))) + goto close_prog; + + err = bpf_object__load(obj); + if (CHECK(err, "obj_load", "err %d\n", err)) + goto close_prog; + + for (i = 0; i < PROG_CNT; i++) { + prog[i] = bpf_object__find_program_by_title(obj, prog_name[i]); + if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name[i])) + goto close_prog; + link[i] = bpf_program__attach_trace(prog[i]); + if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to 
link\n")) + goto close_prog; + } + data_map = bpf_object__find_map_by_name(obj, "fexit_bp.bss"); + if (CHECK(!data_map, "find_data_map", "data map not found\n")) + goto close_prog; + + err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6), + NULL, NULL, &retval, &duration); + CHECK(err || retval, "ipv6", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + + err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result); + if (CHECK(err, "get_result", + "failed to get output data: %d\n", err)) + goto close_prog; + + for (i = 0; i < PROG_CNT; i++) + if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %ld\n", + result[i])) + goto close_prog; + +close_prog: + for (i = 0; i < PROG_CNT; i++) + if (!IS_ERR_OR_NULL(link[i])) + bpf_link__destroy(link[i]); + if (!IS_ERR_OR_NULL(obj)) + bpf_object__close(obj); + bpf_object__close(pkt_obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c new file mode 100644 index 000000000000..3b9dbf7433f0 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <test_progs.h> + +/* x86-64 fits 55 JITed and 43 interpreted progs into half page */ +#define CNT 40 + +void test_fexit_stress(void) +{ + char test_skb[128] = {}; + int fexit_fd[CNT] = {}; + int link_fd[CNT] = {}; + __u32 duration = 0; + char error[4096]; + __u32 prog_ret; + int err, i, filter_fd; + + const struct bpf_insn trace_program[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + + struct bpf_load_program_attr load_attr = { + .prog_type = BPF_PROG_TYPE_TRACING, + .license = "GPL", + .insns = trace_program, + .insns_cnt = sizeof(trace_program) / sizeof(struct bpf_insn), + .expected_attach_type = BPF_TRACE_FEXIT, + }; + + const struct bpf_insn skb_program[] = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }; + + struct bpf_load_program_attr skb_load_attr = { + .prog_type = BPF_PROG_TYPE_SOCKET_FILTER, + .license = "GPL", + .insns = skb_program, + .insns_cnt = sizeof(skb_program) / sizeof(struct bpf_insn), + }; + + err = libbpf_find_vmlinux_btf_id("bpf_fentry_test1", + load_attr.expected_attach_type); + if (CHECK(err <= 0, "find_vmlinux_btf_id", "failed: %d\n", err)) + goto out; + load_attr.attach_btf_id = err; + + for (i = 0; i < CNT; i++) { + fexit_fd[i] = bpf_load_program_xattr(&load_attr, error, sizeof(error)); + if (CHECK(fexit_fd[i] < 0, "fexit loaded", + "failed: %d errno %d\n", fexit_fd[i], errno)) + goto out; + link_fd[i] = bpf_raw_tracepoint_open(NULL, fexit_fd[i]); + if (CHECK(link_fd[i] < 0, "fexit attach failed", + "prog %d failed: %d err %d\n", i, link_fd[i], errno)) + goto out; + } + + filter_fd = bpf_load_program_xattr(&skb_load_attr, error, sizeof(error)); + if (CHECK(filter_fd < 0, "test_program_loaded", "failed: %d errno %d\n", + filter_fd, errno)) + goto out; + + err = bpf_prog_test_run(filter_fd, 1, test_skb, sizeof(test_skb), 0, + 0, &prog_ret, 0); + close(filter_fd); + CHECK_FAIL(err); +out: + for (i = 0; i < CNT; i++) { + if (link_fd[i]) + close(link_fd[i]); + if (fexit_fd[i]) + close(fexit_fd[i]); + } +} diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_test.c b/tools/testing/selftests/bpf/prog_tests/fexit_test.c new file mode 100644 index 000000000000..f99013222c74 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/fexit_test.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 
Facebook */ +#include <test_progs.h> + +void test_fexit_test(void) +{ + struct bpf_prog_load_attr attr = { + .file = "./fexit_test.o", + }; + + char prog_name[] = "fexit/bpf_fentry_testX"; + struct bpf_object *obj = NULL, *pkt_obj; + int err, pkt_fd, kfree_skb_fd, i; + struct bpf_link *link[6] = {}; + struct bpf_program *prog[6]; + __u32 duration, retval; + struct bpf_map *data_map; + const int zero = 0; + u64 result[6]; + + err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, + &pkt_obj, &pkt_fd); + if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) + return; + err = bpf_prog_load_xattr(&attr, &obj, &kfree_skb_fd); + if (CHECK(err, "prog_load fail", "err %d errno %d\n", err, errno)) + goto close_prog; + + for (i = 0; i < 6; i++) { + prog_name[sizeof(prog_name) - 2] = '1' + i; + prog[i] = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK(!prog[i], "find_prog", "prog %s not found\n", prog_name)) + goto close_prog; + link[i] = bpf_program__attach_trace(prog[i]); + if (CHECK(IS_ERR(link[i]), "attach_trace", "failed to link\n")) + goto close_prog; + } + data_map = bpf_object__find_map_by_name(obj, "fexit_te.bss"); + if (CHECK(!data_map, "find_data_map", "data map not found\n")) + goto close_prog; + + err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6), + NULL, NULL, &retval, &duration); + CHECK(err || retval, "ipv6", + "err %d errno %d retval %d duration %d\n", + err, errno, retval, duration); + + err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, &result); + if (CHECK(err, "get_result", + "failed to get output data: %d\n", err)) + goto close_prog; + + for (i = 0; i < 6; i++) + if (CHECK(result[i] != 1, "result", "bpf_fentry_test%d failed err %ld\n", + i + 1, result[i])) + goto close_prog; + +close_prog: + for (i = 0; i < 6; i++) + if (!IS_ERR_OR_NULL(link[i])) + bpf_link__destroy(link[i]); + bpf_object__close(obj); + bpf_object__close(pkt_obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c index 430b50de1583..7507c8f689bc 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfree_skb.c +++ b/tools/testing/selftests/bpf/prog_tests/kfree_skb.c @@ -1,15 +1,38 @@ // SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> +struct meta { + int ifindex; + __u32 cb32_0; + __u8 cb8_0; +}; + +static union { + __u32 cb32[5]; + __u8 cb8[20]; +} cb = { + .cb32[0] = 0x81828384, +}; + static void on_sample(void *ctx, int cpu, void *data, __u32 size) { - int ifindex = *(int *)data, duration = 0; - struct ipv6_packet *pkt_v6 = data + 4; + struct meta *meta = (struct meta *)data; + struct ipv6_packet *pkt_v6 = data + sizeof(*meta); + int duration = 0; - if (ifindex != 1) + if (CHECK(size != 72 + sizeof(*meta), "check_size", "size %u != %zu\n", + size, 72 + sizeof(*meta))) + return; + if (CHECK(meta->ifindex != 1, "check_meta_ifindex", + "meta->ifindex = %d\n", meta->ifindex)) /* spurious kfree_skb not on loopback device */ return; - if (CHECK(size != 76, "check_size", "size %u != 76\n", size)) + if (CHECK(meta->cb8_0 != cb.cb8[0], "check_cb8_0", "cb8_0 %x != %x\n", + meta->cb8_0, cb.cb8[0])) + return; + if (CHECK(meta->cb32_0 != cb.cb32[0], "check_cb32_0", + "cb32_0 %x != %x\n", + meta->cb32_0, cb.cb32[0])) return; if (CHECK(pkt_v6->eth.h_proto != 0xdd86, "check_eth", "h_proto %x\n", pkt_v6->eth.h_proto)) @@ -26,21 +49,31 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size) void test_kfree_skb(void) { + struct __sk_buff skb = {}; + struct bpf_prog_test_run_attr 
tattr = { + .data_in = &pkt_v6, + .data_size_in = sizeof(pkt_v6), + .ctx_in = &skb, + .ctx_size_in = sizeof(skb), + }; struct bpf_prog_load_attr attr = { .file = "./kfree_skb.o", }; + struct bpf_link *link = NULL, *link_fentry = NULL, *link_fexit = NULL; + struct bpf_map *perf_buf_map, *global_data; + struct bpf_program *prog, *fentry, *fexit; struct bpf_object *obj, *obj2 = NULL; struct perf_buffer_opts pb_opts = {}; struct perf_buffer *pb = NULL; - struct bpf_link *link = NULL; - struct bpf_map *perf_buf_map; - struct bpf_program *prog; - __u32 duration, retval; - int err, pkt_fd, kfree_skb_fd; + int err, kfree_skb_fd; bool passed = false; + __u32 duration = 0; + const int zero = 0; + bool test_ok[2]; - err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, &obj, &pkt_fd); + err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS, + &obj, &tattr.prog_fd); if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno)) return; @@ -51,9 +84,28 @@ void test_kfree_skb(void) prog = bpf_object__find_program_by_title(obj2, "tp_btf/kfree_skb"); if (CHECK(!prog, "find_prog", "prog kfree_skb not found\n")) goto close_prog; + fentry = bpf_object__find_program_by_title(obj2, "fentry/eth_type_trans"); + if (CHECK(!fentry, "find_prog", "prog eth_type_trans not found\n")) + goto close_prog; + fexit = bpf_object__find_program_by_title(obj2, "fexit/eth_type_trans"); + if (CHECK(!fexit, "find_prog", "prog eth_type_trans not found\n")) + goto close_prog; + + global_data = bpf_object__find_map_by_name(obj2, "kfree_sk.bss"); + if (CHECK(!global_data, "find global data", "not found\n")) + goto close_prog; + link = bpf_program__attach_raw_tracepoint(prog, NULL); if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link))) goto close_prog; + link_fentry = bpf_program__attach_trace(fentry); + if (CHECK(IS_ERR(link_fentry), "attach fentry", "err %ld\n", + PTR_ERR(link_fentry))) + goto close_prog; + link_fexit = bpf_program__attach_trace(fexit); + if (CHECK(IS_ERR(link_fexit), "attach fexit", "err %ld\n", + PTR_ERR(link_fexit))) + goto close_prog; perf_buf_map = bpf_object__find_map_by_name(obj2, "perf_buf_map"); if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n")) @@ -66,24 +118,37 @@ void test_kfree_skb(void) if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb))) goto close_prog; - err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6), - NULL, NULL, &retval, &duration); - CHECK(err || retval, "ipv6", + memcpy(skb.cb, &cb, sizeof(cb)); + err = bpf_prog_test_run_xattr(&tattr); + duration = tattr.duration; + CHECK(err || tattr.retval, "ipv6", "err %d errno %d retval %d duration %d\n", - err, errno, retval, duration); + err, errno, tattr.retval, duration); /* read perf buffer */ err = perf_buffer__poll(pb, 100); if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err)) goto close_prog; + /* make sure kfree_skb program was triggered * and it sent expected skb into ring buffer */ CHECK_FAIL(!passed); + + err = bpf_map_lookup_elem(bpf_map__fd(global_data), &zero, test_ok); + if (CHECK(err, "get_result", + "failed to get output data: %d\n", err)) + goto close_prog; + + CHECK_FAIL(!test_ok[0] || !test_ok[1]); close_prog: perf_buffer__free(pb); if (!IS_ERR_OR_NULL(link)) bpf_link__destroy(link); + if (!IS_ERR_OR_NULL(link_fentry)) + bpf_link__destroy(link_fentry); + if (!IS_ERR_OR_NULL(link_fexit)) + bpf_link__destroy(link_fexit); bpf_object__close(obj); bpf_object__close(obj2); } diff --git a/tools/testing/selftests/bpf/prog_tests/mmap.c 
b/tools/testing/selftests/bpf/prog_tests/mmap.c new file mode 100644 index 000000000000..051a6d48762c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/mmap.c @@ -0,0 +1,220 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <sys/mman.h> + +struct map_data { + __u64 val[512 * 4]; +}; + +struct bss_data { + __u64 in_val; + __u64 out_val; +}; + +static size_t roundup_page(size_t sz) +{ + long page_size = sysconf(_SC_PAGE_SIZE); + return (sz + page_size - 1) / page_size * page_size; +} + +void test_mmap(void) +{ + const char *file = "test_mmap.o"; + const char *probe_name = "raw_tracepoint/sys_enter"; + const char *tp_name = "sys_enter"; + const size_t bss_sz = roundup_page(sizeof(struct bss_data)); + const size_t map_sz = roundup_page(sizeof(struct map_data)); + const int zero = 0, one = 1, two = 2, far = 1500; + const long page_size = sysconf(_SC_PAGE_SIZE); + int err, duration = 0, i, data_map_fd; + struct bpf_program *prog; + struct bpf_object *obj; + struct bpf_link *link = NULL; + struct bpf_map *data_map, *bss_map; + void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2; + volatile struct bss_data *bss_data; + volatile struct map_data *map_data; + __u64 val = 0; + + obj = bpf_object__open_file("test_mmap.o", NULL); + if (CHECK(IS_ERR(obj), "obj_open", "failed to open '%s': %ld\n", + file, PTR_ERR(obj))) + return; + prog = bpf_object__find_program_by_title(obj, probe_name); + if (CHECK(!prog, "find_probe", "prog '%s' not found\n", probe_name)) + goto cleanup; + err = bpf_object__load(obj); + if (CHECK(err, "obj_load", "failed to load prog '%s': %d\n", + probe_name, err)) + goto cleanup; + + bss_map = bpf_object__find_map_by_name(obj, "test_mma.bss"); + if (CHECK(!bss_map, "find_bss_map", ".bss map not found\n")) + goto cleanup; + data_map = bpf_object__find_map_by_name(obj, "data_map"); + if (CHECK(!data_map, "find_data_map", "data_map map not found\n")) + goto cleanup; + data_map_fd = bpf_map__fd(data_map); + + bss_mmaped = mmap(NULL, bss_sz, PROT_READ | PROT_WRITE, MAP_SHARED, + bpf_map__fd(bss_map), 0); + if (CHECK(bss_mmaped == MAP_FAILED, "bss_mmap", + ".bss mmap failed: %d\n", errno)) { + bss_mmaped = NULL; + goto cleanup; + } + /* map as R/W first */ + map_mmaped = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED, + data_map_fd, 0); + if (CHECK(map_mmaped == MAP_FAILED, "data_mmap", + "data_map mmap failed: %d\n", errno)) { + map_mmaped = NULL; + goto cleanup; + } + + bss_data = bss_mmaped; + map_data = map_mmaped; + + CHECK_FAIL(bss_data->in_val); + CHECK_FAIL(bss_data->out_val); + CHECK_FAIL(map_data->val[0]); + CHECK_FAIL(map_data->val[1]); + CHECK_FAIL(map_data->val[2]); + CHECK_FAIL(map_data->val[far]); + + link = bpf_program__attach_raw_tracepoint(prog, tp_name); + if (CHECK(IS_ERR(link), "attach_raw_tp", "err %ld\n", PTR_ERR(link))) + goto cleanup; + + bss_data->in_val = 123; + val = 111; + CHECK_FAIL(bpf_map_update_elem(data_map_fd, &zero, &val, 0)); + + usleep(1); + + CHECK_FAIL(bss_data->in_val != 123); + CHECK_FAIL(bss_data->out_val != 123); + CHECK_FAIL(map_data->val[0] != 111); + CHECK_FAIL(map_data->val[1] != 222); + CHECK_FAIL(map_data->val[2] != 123); + CHECK_FAIL(map_data->val[far] != 3 * 123); + + CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &zero, &val)); + CHECK_FAIL(val != 111); + CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &one, &val)); + CHECK_FAIL(val != 222); + CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &two, &val)); + CHECK_FAIL(val != 123); + CHECK_FAIL(bpf_map_lookup_elem(data_map_fd, &far, &val)); + 
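/* the BPF prog stored in_val * 3 = 369 at index "far" */ +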
CHECK_FAIL(val != 3 * 123); + + /* data_map freeze should fail due to R/W mmap() */ + err = bpf_map_freeze(data_map_fd); + if (CHECK(!err || errno != EBUSY, "no_freeze", + "data_map freeze succeeded: err=%d, errno=%d\n", err, errno)) + goto cleanup; + + /* unmap R/W mapping */ + err = munmap(map_mmaped, map_sz); + map_mmaped = NULL; + if (CHECK(err, "data_map_munmap", "data_map munmap failed: %d\n", errno)) + goto cleanup; + + /* re-map as R/O now */ + map_mmaped = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0); + if (CHECK(map_mmaped == MAP_FAILED, "data_mmap", + "data_map R/O mmap failed: %d\n", errno)) { + map_mmaped = NULL; + goto cleanup; + } + map_data = map_mmaped; + + /* map/unmap in a loop to test ref counting */ + for (i = 0; i < 10; i++) { + int flags = i % 2 ? PROT_READ : PROT_WRITE; + void *p; + + p = mmap(NULL, map_sz, flags, MAP_SHARED, data_map_fd, 0); + if (CHECK_FAIL(p == MAP_FAILED)) + goto cleanup; + err = munmap(p, map_sz); + if (CHECK_FAIL(err)) + goto cleanup; + } + + /* data_map freeze should now succeed due to no R/W mapping */ + err = bpf_map_freeze(data_map_fd); + if (CHECK(err, "freeze", "data_map freeze failed: err=%d, errno=%d\n", + err, errno)) + goto cleanup; + + /* mapping as R/W now should fail */ + tmp1 = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED, + data_map_fd, 0); + if (CHECK(tmp1 != MAP_FAILED, "data_mmap", "mmap succeeded\n")) { + munmap(tmp1, map_sz); + goto cleanup; + } + + bss_data->in_val = 321; + usleep(1); + CHECK_FAIL(bss_data->in_val != 321); + CHECK_FAIL(bss_data->out_val != 321); + CHECK_FAIL(map_data->val[0] != 111); + CHECK_FAIL(map_data->val[1] != 222); + CHECK_FAIL(map_data->val[2] != 321); + CHECK_FAIL(map_data->val[far] != 3 * 321); + + /* check some more advanced mmap() manipulations */ + + /* map all but last page: pages 1-3 mapped */ + tmp1 = mmap(NULL, 3 * page_size, PROT_READ, MAP_SHARED, + data_map_fd, 0); + if (CHECK(tmp1 == MAP_FAILED, "adv_mmap1", "errno %d\n", errno)) + goto cleanup; + + /* unmap second page: pages 1, 3 mapped */ + err = munmap(tmp1 + page_size, page_size); + if (CHECK(err, "adv_mmap2", "errno %d\n", errno)) { + munmap(tmp1, map_sz); + goto cleanup; + } + + /* map page 2 back */ + tmp2 = mmap(tmp1 + page_size, page_size, PROT_READ, + MAP_SHARED | MAP_FIXED, data_map_fd, 0); + if (CHECK(tmp2 == MAP_FAILED, "adv_mmap3", "errno %d\n", errno)) { + munmap(tmp1, page_size); + munmap(tmp1 + 2*page_size, page_size); + goto cleanup; + } + CHECK(tmp1 + page_size != tmp2, "adv_mmap4", + "tmp1: %p, tmp2: %p\n", tmp1, tmp2); + + /* re-map all 4 pages */ + tmp2 = mmap(tmp1, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED, + data_map_fd, 0); + if (CHECK(tmp2 == MAP_FAILED, "adv_mmap5", "errno %d\n", errno)) { + munmap(tmp1, 3 * page_size); /* unmap page 1 */ + goto cleanup; + } + CHECK(tmp1 != tmp2, "adv_mmap6", "tmp1: %p, tmp2: %p\n", tmp1, tmp2); + + map_data = tmp2; + CHECK_FAIL(bss_data->in_val != 321); + CHECK_FAIL(bss_data->out_val != 321); + CHECK_FAIL(map_data->val[0] != 111); + CHECK_FAIL(map_data->val[1] != 222); + CHECK_FAIL(map_data->val[2] != 321); + CHECK_FAIL(map_data->val[far] != 3 * 321); + + munmap(tmp2, 4 * page_size); +cleanup: + if (bss_mmaped) + CHECK_FAIL(munmap(bss_mmaped, bss_sz)); + if (map_mmaped) + CHECK_FAIL(munmap(map_mmaped, map_sz)); + if (!IS_ERR_OR_NULL(link)) + bpf_link__destroy(link); + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/prog_tests/pinning.c b/tools/testing/selftests/bpf/prog_tests/pinning.c index 525388971e08..041952524c55 100644 
--- a/tools/testing/selftests/bpf/prog_tests/pinning.c +++ b/tools/testing/selftests/bpf/prog_tests/pinning.c @@ -163,12 +163,15 @@ void test_pinning(void) goto out; } - /* swap pin paths of the two maps */ + /* set pin paths so that nopinmap2 will attempt to reuse the map at + * pinpath (which will fail), but not before pinmap has already been + * reused + */ bpf_object__for_each_map(map, obj) { if (!strcmp(bpf_map__name(map), "nopinmap")) + err = bpf_map__set_pin_path(map, nopinpath2); + else if (!strcmp(bpf_map__name(map), "nopinmap2")) err = bpf_map__set_pin_path(map, pinpath); - else if (!strcmp(bpf_map__name(map), "pinmap")) - err = bpf_map__set_pin_path(map, NULL); else continue; @@ -181,6 +184,17 @@ void test_pinning(void) if (CHECK(err != -EINVAL, "param mismatch load", "err %d errno %d\n", err, errno)) goto out; + /* nopinmap2 should have been pinned and cleaned up again */ + err = stat(nopinpath2, &statbuf); + if (CHECK(!err || errno != ENOENT, "stat nopinpath2", + "err %d errno %d\n", err, errno)) + goto out; + + /* pinmap should still be there */ + err = stat(pinpath, &statbuf); + if (CHECK(err, "stat pinpath", "err %d errno %d\n", err, errno)) + goto out; + bpf_object__close(obj); /* test auto-pinning at custom path with open opt */ diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type.c new file mode 100644 index 000000000000..f5a7c832d0f2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type.c @@ -0,0 +1,3 @@ +#include "core_reloc_types.h" + +void f(struct core_reloc_arrays___err_wrong_val_type x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type1.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type1.c deleted file mode 100644 index 795a5b729176..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type1.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_arrays___err_wrong_val_type1 x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type2.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type2.c deleted file mode 100644 index 3af74b837c4d..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_arrays___err_wrong_val_type2.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_arrays___err_wrong_val_type2 x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields.c new file mode 100644 index 000000000000..cff6f1836cc5 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields.c @@ -0,0 +1,3 @@ +#include "core_reloc_types.h" + +void f(struct core_reloc_bitfields x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bit_sz_change.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bit_sz_change.c new file mode 100644 index 000000000000..a1cd157d5451 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bit_sz_change.c @@ -0,0 +1,3 @@ +#include "core_reloc_types.h" + +void f(struct core_reloc_bitfields___bit_sz_change x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bitfield_vs_int.c 
b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bitfield_vs_int.c new file mode 100644 index 000000000000..3f2c7b07c456 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___bitfield_vs_int.c @@ -0,0 +1,3 @@ +#include "core_reloc_types.h" + +void f(struct core_reloc_bitfields___bitfield_vs_int x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___err_too_big_bitfield.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___err_too_big_bitfield.c new file mode 100644 index 000000000000..f9746d6be399 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___err_too_big_bitfield.c @@ -0,0 +1,3 @@ +#include "core_reloc_types.h" + +void f(struct core_reloc_bitfields___err_too_big_bitfield x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___just_big_enough.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___just_big_enough.c new file mode 100644 index 000000000000..e7c75a6953dd --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_bitfields___just_big_enough.c @@ -0,0 +1,3 @@ +#include "core_reloc_types.h" + +void f(struct core_reloc_bitfields___just_big_enough x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_bitfield.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_bitfield.c deleted file mode 100644 index 50369e8320a0..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_bitfield.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_ints___err_bitfield x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_16.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_16.c deleted file mode 100644 index 823bac13d641..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_16.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_ints___err_wrong_sz_16 x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_32.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_32.c deleted file mode 100644 index b44f3be18535..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_32.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_ints___err_wrong_sz_32 x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_64.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_64.c deleted file mode 100644 index 9a3dd2099c0f..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_64.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_ints___err_wrong_sz_64 x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_8.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_8.c deleted file mode 100644 index 9f11ef5f6e88..000000000000 --- a/tools/testing/selftests/bpf/progs/btf__core_reloc_ints___err_wrong_sz_8.c +++ /dev/null @@ -1,3 +0,0 @@ -#include "core_reloc_types.h" - -void f(struct core_reloc_ints___err_wrong_sz_8 x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_size.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_size.c new file mode 100644 index 000000000000..3c80903da5a4 --- /dev/null +++ 
b/tools/testing/selftests/bpf/progs/btf__core_reloc_size.c @@ -0,0 +1,3 @@ +#include "core_reloc_types.h" + +void f(struct core_reloc_size x) {} diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_sz.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_sz.c new file mode 100644 index 000000000000..6dbd14436b52 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_sz.c @@ -0,0 +1,3 @@ +#include "core_reloc_types.h" + +void f(struct core_reloc_size___diff_sz x) {} diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h index f5939d9d5c61..9311489e14b2 100644 --- a/tools/testing/selftests/bpf/progs/core_reloc_types.h +++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h @@ -386,14 +386,7 @@ struct core_reloc_arrays___err_non_array { struct core_reloc_arrays_substruct d[1][2]; }; -struct core_reloc_arrays___err_wrong_val_type1 { - char a[5]; /* char instead of int */ - char b[2][3][4]; - struct core_reloc_arrays_substruct c[3]; - struct core_reloc_arrays_substruct d[1][2]; -}; - -struct core_reloc_arrays___err_wrong_val_type2 { +struct core_reloc_arrays___err_wrong_val_type { int a[5]; char b[2][3][4]; int c[3]; /* value is not a struct */ @@ -589,67 +582,6 @@ struct core_reloc_ints___bool { int64_t s64_field; }; -struct core_reloc_ints___err_bitfield { - uint8_t u8_field; - int8_t s8_field; - uint16_t u16_field; - int16_t s16_field; - uint32_t u32_field: 32; /* bitfields are not supported */ - int32_t s32_field; - uint64_t u64_field; - int64_t s64_field; -}; - -struct core_reloc_ints___err_wrong_sz_8 { - uint16_t u8_field; /* not 8-bit anymore */ - int16_t s8_field; /* not 8-bit anymore */ - - uint16_t u16_field; - int16_t s16_field; - uint32_t u32_field; - int32_t s32_field; - uint64_t u64_field; - int64_t s64_field; -}; - -struct core_reloc_ints___err_wrong_sz_16 { - uint8_t u8_field; - int8_t s8_field; - - uint32_t u16_field; /* not 16-bit anymore */ - int32_t s16_field; /* not 16-bit anymore */ - - uint32_t u32_field; - int32_t s32_field; - uint64_t u64_field; - int64_t s64_field; -}; - -struct core_reloc_ints___err_wrong_sz_32 { - uint8_t u8_field; - int8_t s8_field; - uint16_t u16_field; - int16_t s16_field; - - uint64_t u32_field; /* not 32-bit anymore */ - int64_t s32_field; /* not 32-bit anymore */ - - uint64_t u64_field; - int64_t s64_field; -}; - -struct core_reloc_ints___err_wrong_sz_64 { - uint8_t u8_field; - int8_t s8_field; - uint16_t u16_field; - int16_t s16_field; - uint32_t u32_field; - int32_t s32_field; - - uint32_t u64_field; /* not 64-bit anymore */ - int32_t s64_field; /* not 64-bit anymore */ -}; - /* * MISC */ @@ -730,3 +662,106 @@ struct core_reloc_existence___err_wrong_arr_value_type { struct core_reloc_existence___err_wrong_struct_type { int s; }; + +/* + * BITFIELDS + */ +/* bitfield read results, all as plain integers */ +struct core_reloc_bitfields_output { + int64_t ub1; + int64_t ub2; + int64_t ub7; + int64_t sb4; + int64_t sb20; + int64_t u32; + int64_t s32; +}; + +struct core_reloc_bitfields { + /* unsigned bitfields */ + uint8_t ub1: 1; + uint8_t ub2: 2; + uint32_t ub7: 7; + /* signed bitfields */ + int8_t sb4: 4; + int32_t sb20: 20; + /* non-bitfields */ + uint32_t u32; + int32_t s32; +}; + +/* different bit sizes (both up and down) */ +struct core_reloc_bitfields___bit_sz_change { + /* unsigned bitfields */ + uint16_t ub1: 3; /* 1 -> 3 */ + uint32_t ub2: 20; /* 2 -> 20 */ + uint8_t ub7: 1; /* 7 -> 1 */ + /* signed bitfields 
*/ + int8_t sb4: 1; /* 4 -> 1 */ + int32_t sb20: 30; /* 20 -> 30 */ + /* non-bitfields */ + uint16_t u32; /* 32 -> 16 */ + int64_t s32; /* 32 -> 64 */ +}; + +/* turn bitfield into non-bitfield and vice versa */ +struct core_reloc_bitfields___bitfield_vs_int { + uint64_t ub1; /* 1 -> 64 non-bitfield */ + uint8_t ub2; /* 2 -> 8 non-bitfield */ + int64_t ub7; /* 7 -> 64 non-bitfield signed */ + int64_t sb4; /* 4 -> 64 non-bitfield signed */ + uint64_t sb20; /* 20 -> 64 non-bitfield unsigned */ + int32_t u32: 20; /* 32 non-bitfield -> 20 bitfield */ + uint64_t s32: 60; /* 32 non-bitfield -> 60 bitfield */ +}; + +struct core_reloc_bitfields___just_big_enough { + uint64_t ub1: 4; + uint64_t ub2: 60; /* packed tightly */ + uint32_t ub7; + uint32_t sb4; + uint32_t sb20; + uint32_t u32; + uint32_t s32; +} __attribute__((packed)); + +struct core_reloc_bitfields___err_too_big_bitfield { + uint64_t ub1: 4; + uint64_t ub2: 61; /* packed tightly */ + uint32_t ub7; + uint32_t sb4; + uint32_t sb20; + uint32_t u32; + uint32_t s32; +} __attribute__((packed)); + +/* + * SIZE + */ +struct core_reloc_size_output { + int int_sz; + int struct_sz; + int union_sz; + int arr_sz; + int arr_elem_sz; + int ptr_sz; + int enum_sz; +}; + +struct core_reloc_size { + int int_field; + struct { int x; } struct_field; + union { int x; } union_field; + int arr_field[4]; + void *ptr_field; + enum { VALUE = 123 } enum_field; +}; + +struct core_reloc_size___diff_sz { + uint64_t int_field; + struct { int x; int y; int z; } struct_field; + union { int x; char bla[123]; } union_field; + char arr_field[10]; + void *ptr_field; + enum { OTHER_VALUE = 0xFFFFFFFFFFFFFFFF } enum_field; +}; diff --git a/tools/testing/selftests/bpf/progs/fentry_test.c b/tools/testing/selftests/bpf/progs/fentry_test.c new file mode 100644 index 000000000000..545788bf8d50 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/fentry_test.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <linux/bpf.h> +#include "bpf_helpers.h" + +char _license[] SEC("license") = "GPL"; + +struct test1 { + ks32 a; +}; +static volatile __u64 test1_result; +SEC("fentry/bpf_fentry_test1") +int test1(struct test1 *ctx) +{ + test1_result = ctx->a == 1; + return 0; +} + +struct test2 { + ks32 a; + ku64 b; +}; +static volatile __u64 test2_result; +SEC("fentry/bpf_fentry_test2") +int test2(struct test2 *ctx) +{ + test2_result = ctx->a == 2 && ctx->b == 3; + return 0; +} + +struct test3 { + ks8 a; + ks32 b; + ku64 c; +}; +static volatile __u64 test3_result; +SEC("fentry/bpf_fentry_test3") +int test3(struct test3 *ctx) +{ + test3_result = ctx->a == 4 && ctx->b == 5 && ctx->c == 6; + return 0; +} + +struct test4 { + void *a; + ks8 b; + ks32 c; + ku64 d; +}; +static volatile __u64 test4_result; +SEC("fentry/bpf_fentry_test4") +int test4(struct test4 *ctx) +{ + test4_result = ctx->a == (void *)7 && ctx->b == 8 && ctx->c == 9 && + ctx->d == 10; + return 0; +} + +struct test5 { + ku64 a; + void *b; + ks16 c; + ks32 d; + ku64 e; +}; +static volatile __u64 test5_result; +SEC("fentry/bpf_fentry_test5") +int test5(struct test5 *ctx) +{ + test5_result = ctx->a == 11 && ctx->b == (void *)12 && ctx->c == 13 && + ctx->d == 14 && ctx->e == 15; + return 0; +} + +struct test6 { + ku64 a; + void *b; + ks16 c; + ks32 d; + void *e; + ks64 f; +}; +static volatile __u64 test6_result; +SEC("fentry/bpf_fentry_test6") +int test6(struct test6 *ctx) +{ + test6_result = ctx->a == 16 && ctx->b == (void *)17 && ctx->c == 18 && + ctx->d == 19 && ctx->e == (void
*)20 && ctx->f == 21; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c new file mode 100644 index 000000000000..981f0474da5a --- /dev/null +++ b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <linux/bpf.h> +#include "bpf_helpers.h" + +struct sk_buff { + unsigned int len; +}; + +struct args { + struct sk_buff *skb; + ks32 ret; +}; +static volatile __u64 test_result; +SEC("fexit/test_pkt_access") +int test_main(struct args *ctx) +{ + struct sk_buff *skb = ctx->skb; + int len; + + __builtin_preserve_access_index(({ + len = skb->len; + })); + if (len != 74 || ctx->ret != 0) + return 0; + test_result = 1; + return 0; +} + +struct args_subprog1 { + struct sk_buff *skb; + ks32 ret; +}; +static volatile __u64 test_result_subprog1; +SEC("fexit/test_pkt_access_subprog1") +int test_subprog1(struct args_subprog1 *ctx) +{ + struct sk_buff *skb = ctx->skb; + int len; + + __builtin_preserve_access_index(({ + len = skb->len; + })); + if (len != 74 || ctx->ret != 148) + return 0; + test_result_subprog1 = 1; + return 0; +} + +/* Though test_pkt_access_subprog2() is defined in C as: + * static __attribute__ ((noinline)) + * int test_pkt_access_subprog2(int val, volatile struct __sk_buff *skb) + * { + * return skb->len * val; + * } + * llvm optimizations remove 'int val' argument and generate BPF assembly: + * r0 = *(u32 *)(r1 + 0) + * w0 <<= 1 + * exit + * In such case the verifier falls back to conservative and + * tracing program can access arguments and return value as u64 + * instead of accurate types. + */ +struct args_subprog2 { + ku64 args[5]; + ku64 ret; +}; +static volatile __u64 test_result_subprog2; +SEC("fexit/test_pkt_access_subprog2") +int test_subprog2(struct args_subprog2 *ctx) +{ + struct sk_buff *skb = (void *)ctx->args[0]; + __u64 ret; + int len; + + bpf_probe_read_kernel(&len, sizeof(len), + __builtin_preserve_access_index(&skb->len)); + + ret = ctx->ret; + /* bpf_prog_load() loads "test_pkt_access.o" with BPF_F_TEST_RND_HI32 + * which randomizes upper 32 bits after BPF_ALU32 insns. + * Hence after 'w0 <<= 1' upper bits of $rax are random. + * That is expected and correct. Trim them. 
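The (__u32) cast below keeps only the low 32 bits, which stay deterministic.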
+ */ + ret = (__u32) ret; + if (len != 74 || ret != 148) + return 0; + test_result_subprog2 = 1; + return 0; +} +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/fexit_test.c b/tools/testing/selftests/bpf/progs/fexit_test.c new file mode 100644 index 000000000000..8b98b1a51784 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/fexit_test.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2019 Facebook */ +#include <linux/bpf.h> +#include "bpf_helpers.h" + +char _license[] SEC("license") = "GPL"; + +struct test1 { + ks32 a; + ks32 ret; +}; +static volatile __u64 test1_result; +SEC("fexit/bpf_fentry_test1") +int test1(struct test1 *ctx) +{ + test1_result = ctx->a == 1 && ctx->ret == 2; + return 0; +} + +struct test2 { + ks32 a; + ku64 b; + ks32 ret; +}; +static volatile __u64 test2_result; +SEC("fexit/bpf_fentry_test2") +int test2(struct test2 *ctx) +{ + test2_result = ctx->a == 2 && ctx->b == 3 && ctx->ret == 5; + return 0; +} + +struct test3 { + ks8 a; + ks32 b; + ku64 c; + ks32 ret; +}; +static volatile __u64 test3_result; +SEC("fexit/bpf_fentry_test3") +int test3(struct test3 *ctx) +{ + test3_result = ctx->a == 4 && ctx->b == 5 && ctx->c == 6 && + ctx->ret == 15; + return 0; +} + +struct test4 { + void *a; + ks8 b; + ks32 c; + ku64 d; + ks32 ret; +}; +static volatile __u64 test4_result; +SEC("fexit/bpf_fentry_test4") +int test4(struct test4 *ctx) +{ + test4_result = ctx->a == (void *)7 && ctx->b == 8 && ctx->c == 9 && + ctx->d == 10 && ctx->ret == 34; + return 0; +} + +struct test5 { + ku64 a; + void *b; + ks16 c; + ks32 d; + ku64 e; + ks32 ret; +}; +static volatile __u64 test5_result; +SEC("fexit/bpf_fentry_test5") +int test5(struct test5 *ctx) +{ + test5_result = ctx->a == 11 && ctx->b == (void *)12 && ctx->c == 13 && + ctx->d == 14 && ctx->e == 15 && ctx->ret == 65; + return 0; +} + +struct test6 { + ku64 a; + void *b; + ks16 c; + ks32 d; + void *e; + ks64 f; + ks32 ret; +}; +static volatile __u64 test6_result; +SEC("fexit/bpf_fentry_test6") +int test6(struct test6 *ctx) +{ + test6_result = ctx->a == 16 && ctx->b == (void *)17 && ctx->c == 18 && + ctx->d == 19 && ctx->e == (void *)20 && ctx->f == 21 && + ctx->ret == 111; + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/kfree_skb.c b/tools/testing/selftests/bpf/progs/kfree_skb.c index 489319ea1d6a..dcc9feac8338 100644 --- a/tools/testing/selftests/bpf/progs/kfree_skb.c +++ b/tools/testing/selftests/bpf/progs/kfree_skb.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2019 Facebook #include <linux/bpf.h> +#include <stdbool.h> #include "bpf_helpers.h" #include "bpf_endian.h" @@ -43,6 +44,7 @@ struct sk_buff { refcount_t users; unsigned char *data; char __pkt_type_offset[0]; + char cb[48]; }; /* copy arguments from @@ -57,28 +59,41 @@ struct trace_kfree_skb { void *location; }; +struct meta { + int ifindex; + __u32 cb32_0; + __u8 cb8_0; +}; + SEC("tp_btf/kfree_skb") int trace_kfree_skb(struct trace_kfree_skb *ctx) { struct sk_buff *skb = ctx->skb; struct net_device *dev; - int ifindex; struct callback_head *ptr; void *func; int users; unsigned char *data; unsigned short pkt_data; + struct meta meta = {}; char pkt_type; + __u32 *cb32; + __u8 *cb8; __builtin_preserve_access_index(({ users = skb->users.refs.counter; data = skb->data; dev = skb->dev; - ifindex = dev->ifindex; ptr = dev->ifalias->rcuhead.next; func = ptr->func; + cb8 = (__u8 *)&skb->cb; + cb32 = (__u32 *)&skb->cb; })); + meta.ifindex = _(dev->ifindex); + meta.cb8_0 = cb8[8]; + 
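/* cb[] passed via ctx_in is stored at qdisc_skb_cb->data, 8 bytes into skb->cb, hence indices 8 (byte) and 2 (word) */ +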
meta.cb32_0 = cb32[2]; + bpf_probe_read_kernel(&pkt_type, sizeof(pkt_type), _(&skb->__pkt_type_offset)); pkt_type &= 7; @@ -90,14 +105,66 @@ int trace_kfree_skb(struct trace_kfree_skb *ctx) _(skb->len), users, pkt_type); bpf_printk("skb->queue_mapping %d\n", _(skb->queue_mapping)); bpf_printk("dev->ifindex %d data %llx pkt_data %x\n", - ifindex, data, pkt_data); + meta.ifindex, data, pkt_data); + bpf_printk("cb8_0:%x cb32_0:%x\n", meta.cb8_0, meta.cb32_0); - if (users != 1 || pkt_data != bpf_htons(0x86dd) || ifindex != 1) + if (users != 1 || pkt_data != bpf_htons(0x86dd) || meta.ifindex != 1) /* raw tp ignores return value */ return 0; /* send first 72 byte of the packet to user space */ bpf_skb_output(skb, &perf_buf_map, (72ull << 32) | BPF_F_CURRENT_CPU, - &ifindex, sizeof(ifindex)); + &meta, sizeof(meta)); + return 0; +} + +static volatile struct { + bool fentry_test_ok; + bool fexit_test_ok; +} result; + +struct eth_type_trans_args { + struct sk_buff *skb; + struct net_device *dev; + unsigned short protocol; /* return value available to fexit progs */ +}; + +SEC("fentry/eth_type_trans") +int fentry_eth_type_trans(struct eth_type_trans_args *ctx) +{ + struct sk_buff *skb = ctx->skb; + struct net_device *dev = ctx->dev; + int len, ifindex; + + __builtin_preserve_access_index(({ + len = skb->len; + ifindex = dev->ifindex; + })); + + /* fentry sees full packet including L2 header */ + if (len != 74 || ifindex != 1) + return 0; + result.fentry_test_ok = true; + return 0; +} + +SEC("fexit/eth_type_trans") +int fexit_eth_type_trans(struct eth_type_trans_args *ctx) +{ + struct sk_buff *skb = ctx->skb; + struct net_device *dev = ctx->dev; + int len, ifindex; + + __builtin_preserve_access_index(({ + len = skb->len; + ifindex = dev->ifindex; + })); + + /* fexit sees packet without L2 header that eth_type_trans should have + * consumed. 
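A 74-byte test frame minus the 14-byte Ethernet header leaves the expected 60 bytes.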
+ */
+	if (len != 60 || ctx->protocol != bpf_htons(0x86dd) || ifindex != 1)
+		return 0;
+	result.fexit_test_ok = true;
 	return 0;
 }
diff --git a/tools/testing/selftests/bpf/progs/test_btf_haskv.c b/tools/testing/selftests/bpf/progs/test_btf_haskv.c
index 763c51447c19..62ad7e22105e 100644
--- a/tools/testing/selftests/bpf/progs/test_btf_haskv.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_haskv.c
@@ -26,7 +26,7 @@ struct dummy_tracepoint_args {
 };
 
 __attribute__((noinline))
-static int test_long_fname_2(struct dummy_tracepoint_args *arg)
+int test_long_fname_2(struct dummy_tracepoint_args *arg)
 {
 	struct ipv_counts *counts;
 	int key = 0;
@@ -44,7 +44,7 @@ static int test_long_fname_2(struct dummy_tracepoint_args *arg)
 }
 
 __attribute__((noinline))
-static int test_long_fname_1(struct dummy_tracepoint_args *arg)
+int test_long_fname_1(struct dummy_tracepoint_args *arg)
 {
 	return test_long_fname_2(arg);
 }
diff --git a/tools/testing/selftests/bpf/progs/test_btf_newkv.c b/tools/testing/selftests/bpf/progs/test_btf_newkv.c
index 96f9e8451029..fb8d91a1dbe0 100644
--- a/tools/testing/selftests/bpf/progs/test_btf_newkv.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_newkv.c
@@ -34,7 +34,7 @@ struct dummy_tracepoint_args {
 };
 
 __attribute__((noinline))
-static int test_long_fname_2(struct dummy_tracepoint_args *arg)
+int test_long_fname_2(struct dummy_tracepoint_args *arg)
 {
 	struct ipv_counts *counts;
 	int key = 0;
@@ -57,7 +57,7 @@ static int test_long_fname_2(struct dummy_tracepoint_args *arg)
 }
 
 __attribute__((noinline))
-static int test_long_fname_1(struct dummy_tracepoint_args *arg)
+int test_long_fname_1(struct dummy_tracepoint_args *arg)
 {
 	return test_long_fname_2(arg);
 }
diff --git a/tools/testing/selftests/bpf/progs/test_btf_nokv.c b/tools/testing/selftests/bpf/progs/test_btf_nokv.c
index 434188c37774..3f4422044759 100644
--- a/tools/testing/selftests/bpf/progs/test_btf_nokv.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_nokv.c
@@ -23,7 +23,7 @@ struct dummy_tracepoint_args {
 };
 
 __attribute__((noinline))
-static int test_long_fname_2(struct dummy_tracepoint_args *arg)
+int test_long_fname_2(struct dummy_tracepoint_args *arg)
 {
 	struct ipv_counts *counts;
 	int key = 0;
@@ -41,7 +41,7 @@ static int test_long_fname_2(struct dummy_tracepoint_args *arg)
 }
 
 __attribute__((noinline))
-static int test_long_fname_1(struct dummy_tracepoint_args *arg)
+int test_long_fname_1(struct dummy_tracepoint_args *arg)
 {
 	return test_long_fname_2(arg);
 }
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c
new file mode 100644
index 000000000000..738b34b72655
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_direct.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+#include <linux/bpf.h>
+#include <stdint.h>
+#include "bpf_helpers.h"
+#include "bpf_core_read.h"
+
+char _license[] SEC("license") = "GPL";
+
+static volatile struct data {
+	char in[256];
+	char out[256];
+} data;
+
+struct core_reloc_bitfields {
+	/* unsigned bitfields */
+	uint8_t ub1: 1;
+	uint8_t ub2: 2;
+	uint32_t ub7: 7;
+	/* signed bitfields */
+	int8_t sb4: 4;
+	int32_t sb20: 20;
+	/* non-bitfields */
+	uint32_t u32;
+	int32_t s32;
+};
+
+/* bitfield read results, all as plain integers */
+struct core_reloc_bitfields_output {
+	int64_t ub1;
+	int64_t ub2;
+	int64_t ub7;
+	int64_t sb4;
+	int64_t sb20;
+	int64_t u32;
+	int64_t s32;
+};
+
+struct pt_regs;
+
+struct trace_sys_enter {
+	struct pt_regs *regs;
+	long id;
+};
+
+SEC("tp_btf/sys_enter")
+int test_core_bitfields_direct(void *ctx)
+{
+	struct core_reloc_bitfields *in = (void *)&data.in;
+	struct core_reloc_bitfields_output *out = (void *)&data.out;
+
+	out->ub1 = BPF_CORE_READ_BITFIELD(in, ub1);
+	out->ub2 = BPF_CORE_READ_BITFIELD(in, ub2);
+	out->ub7 = BPF_CORE_READ_BITFIELD(in, ub7);
+	out->sb4 = BPF_CORE_READ_BITFIELD(in, sb4);
+	out->sb20 = BPF_CORE_READ_BITFIELD(in, sb20);
+	out->u32 = BPF_CORE_READ_BITFIELD(in, u32);
+	out->s32 = BPF_CORE_READ_BITFIELD(in, s32);
+
+	return 0;
+}
+
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c
new file mode 100644
index 000000000000..e466e3ab7de4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+#include <linux/bpf.h>
+#include <stdint.h>
+#include "bpf_helpers.h"
+#include "bpf_core_read.h"
+
+char _license[] SEC("license") = "GPL";
+
+static volatile struct data {
+	char in[256];
+	char out[256];
+} data;
+
+struct core_reloc_bitfields {
+	/* unsigned bitfields */
+	uint8_t ub1: 1;
+	uint8_t ub2: 2;
+	uint32_t ub7: 7;
+	/* signed bitfields */
+	int8_t sb4: 4;
+	int32_t sb20: 20;
+	/* non-bitfields */
+	uint32_t u32;
+	int32_t s32;
+};
+
+/* bitfield read results, all as plain integers */
+struct core_reloc_bitfields_output {
+	int64_t ub1;
+	int64_t ub2;
+	int64_t ub7;
+	int64_t sb4;
+	int64_t sb20;
+	int64_t u32;
+	int64_t s32;
+};
+
+SEC("raw_tracepoint/sys_enter")
+int test_core_bitfields(void *ctx)
+{
+	struct core_reloc_bitfields *in = (void *)&data.in;
+	struct core_reloc_bitfields_output *out = (void *)&data.out;
+	uint64_t res;
+
+	out->ub1 = BPF_CORE_READ_BITFIELD_PROBED(in, ub1);
+	out->ub2 = BPF_CORE_READ_BITFIELD_PROBED(in, ub2);
+	out->ub7 = BPF_CORE_READ_BITFIELD_PROBED(in, ub7);
+	out->sb4 = BPF_CORE_READ_BITFIELD_PROBED(in, sb4);
+	out->sb20 = BPF_CORE_READ_BITFIELD_PROBED(in, sb20);
+	out->u32 = BPF_CORE_READ_BITFIELD_PROBED(in, u32);
+	out->s32 = BPF_CORE_READ_BITFIELD_PROBED(in, s32);
+
+	return 0;
+}
+
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_size.c b/tools/testing/selftests/bpf/progs/test_core_reloc_size.c
new file mode 100644
index 000000000000..9a92998d9107
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_size.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+#include <linux/bpf.h>
+#include <stdint.h>
+#include "bpf_helpers.h"
+#include "bpf_core_read.h"
+
+char _license[] SEC("license") = "GPL";
+
+static volatile struct data {
+	char in[256];
+	char out[256];
+} data;
+
+struct core_reloc_size_output {
+	int int_sz;
+	int struct_sz;
+	int union_sz;
+	int arr_sz;
+	int arr_elem_sz;
+	int ptr_sz;
+	int enum_sz;
+};
+
+struct core_reloc_size {
+	int int_field;
+	struct { int x; } struct_field;
+	union { int x; } union_field;
+	int arr_field[4];
+	void *ptr_field;
+	enum { VALUE = 123 } enum_field;
+};
+
+SEC("raw_tracepoint/sys_enter")
+int test_core_size(void *ctx)
+{
+	struct core_reloc_size *in = (void *)&data.in;
+	struct core_reloc_size_output *out = (void *)&data.out;
+
+	out->int_sz = bpf_core_field_size(in->int_field);
+	out->struct_sz = bpf_core_field_size(in->struct_field);
+	out->union_sz = bpf_core_field_size(in->union_field);
+	out->arr_sz = bpf_core_field_size(in->arr_field);
+	out->arr_elem_sz = bpf_core_field_size(in->arr_field[0]);
+	out->ptr_sz = bpf_core_field_size(in->ptr_field);
+	out->enum_sz = bpf_core_field_size(in->enum_field);
+
+	return 0;
+}
+
diff --git a/tools/testing/selftests/bpf/progs/test_mmap.c b/tools/testing/selftests/bpf/progs/test_mmap.c
new file mode 100644
index 000000000000..0d2ec9fbcf61
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_mmap.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+
+#include <linux/bpf.h>
+#include <stdint.h>
+#include "bpf_helpers.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 512 * 4); /* at least 4 pages of data */
+	__uint(map_flags, BPF_F_MMAPABLE);
+	__type(key, __u32);
+	__type(value, __u64);
+} data_map SEC(".maps");
+
+static volatile __u64 in_val;
+static volatile __u64 out_val;
+
+SEC("raw_tracepoint/sys_enter")
+int test_mmap(void *ctx)
+{
+	int zero = 0, one = 1, two = 2, far = 1500;
+	__u64 val, *p;
+
+	out_val = in_val;
+
+	/* data_map[2] = in_val; */
+	bpf_map_update_elem(&data_map, &two, (const void *)&in_val, 0);
+
+	/* data_map[1] = data_map[0] * 2; */
+	p = bpf_map_lookup_elem(&data_map, &zero);
+	if (p) {
+		val = (*p) * 2;
+		bpf_map_update_elem(&data_map, &one, &val, 0);
+	}
+
+	/* data_map[far] = in_val * 3; */
+	val = in_val * 3;
+	bpf_map_update_elem(&data_map, &far, &val, 0);
+
+	return 0;
+}
+
diff --git a/tools/testing/selftests/bpf/progs/test_pinning.c b/tools/testing/selftests/bpf/progs/test_pinning.c
index f69a4a50d056..f20e7e00373f 100644
--- a/tools/testing/selftests/bpf/progs/test_pinning.c
+++ b/tools/testing/selftests/bpf/progs/test_pinning.c
@@ -21,7 +21,7 @@ struct {
 } nopinmap SEC(".maps");
 
 struct {
-	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(type, BPF_MAP_TYPE_HASH);
 	__uint(max_entries, 1);
 	__type(key, __u32);
 	__type(value, __u64);
diff --git a/tools/testing/selftests/bpf/progs/test_pkt_access.c b/tools/testing/selftests/bpf/progs/test_pkt_access.c
index 7cf42d14103f..3a7b4b607ed3 100644
--- a/tools/testing/selftests/bpf/progs/test_pkt_access.c
+++ b/tools/testing/selftests/bpf/progs/test_pkt_access.c
@@ -17,8 +17,38 @@
 #define barrier() __asm__ __volatile__("": : :"memory")
 int _version SEC("version") = 1;
 
-SEC("test1")
-int process(struct __sk_buff *skb)
+/* llvm will optimize both subprograms into exactly the same BPF assembly
+ *
+ * Disassembly of section .text:
+ *
+ * 0000000000000000 test_pkt_access_subprog1:
+ * ;	return skb->len * 2;
+ *        0:	61 10 00 00 00 00 00 00	r0 = *(u32 *)(r1 + 0)
+ *        1:	64 00 00 00 01 00 00 00	w0 <<= 1
+ *        2:	95 00 00 00 00 00 00 00	exit
+ *
+ * 0000000000000018 test_pkt_access_subprog2:
+ * ;	return skb->len * val;
+ *        3:	61 10 00 00 00 00 00 00	r0 = *(u32 *)(r1 + 0)
+ *        4:	64 00 00 00 01 00 00 00	w0 <<= 1
+ *        5:	95 00 00 00 00 00 00 00	exit
+ *
+ * Which makes it an interesting test for BTF-enabled verifier.
+ */
+static __attribute__ ((noinline))
+int test_pkt_access_subprog1(volatile struct __sk_buff *skb)
+{
+	return skb->len * 2;
+}
+
+static __attribute__ ((noinline))
+int test_pkt_access_subprog2(int val, volatile struct __sk_buff *skb)
+{
+	return skb->len * val;
+}
+
+SEC("classifier/test_pkt_access")
+int test_pkt_access(struct __sk_buff *skb)
 {
 	void *data_end = (void *)(long)skb->data_end;
 	void *data = (void *)(long)skb->data;
@@ -48,6 +78,10 @@ int process(struct __sk_buff *skb)
 		tcp = (struct tcphdr *)((void *)(ip6h) + ihl_len);
 	}
 
+	if (test_pkt_access_subprog1(skb) != skb->len * 2)
+		return TC_ACT_SHOT;
+	if (test_pkt_access_subprog2(2, skb) != skb->len * 2)
+		return TC_ACT_SHOT;
 	if (tcp) {
 		if (((void *)(tcp) + 20) > data_end || proto != 6)
 			return TC_ACT_SHOT;
diff --git a/tools/testing/selftests/bpf/progs/test_seg6_loop.c b/tools/testing/selftests/bpf/progs/test_seg6_loop.c
index c4d104428643..69880c1e7700 100644
--- a/tools/testing/selftests/bpf/progs/test_seg6_loop.c
+++ b/tools/testing/selftests/bpf/progs/test_seg6_loop.c
@@ -132,8 +132,10 @@ static __always_inline int is_valid_tlv_boundary(struct __sk_buff *skb,
 	*pad_off = 0;
 
 	// we can only go as far as ~10 TLVs due to the BPF max stack size
+	// workaround: define induction variable "i" as "long" instead
+	// of "int" to prevent alu32 sub-register spilling.
 #pragma clang loop unroll(disable)
-	for (int i = 0; i < 100; i++) {
+	for (long i = 0; i < 100; i++) {
 		struct sr6_tlv_t tlv;
 
 		if (cur_off == *tlv_off)
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
index 608a06871572..d22e438198cf 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
@@ -44,7 +44,10 @@ int sysctl_tcp_mem(struct bpf_sysctl *ctx)
 	unsigned long tcp_mem[TCP_MEM_LOOPS] = {};
 	char value[MAX_VALUE_STR_LEN];
 	unsigned char i, off = 0;
-	int ret;
+	/* a workaround to prevent compiler from generating
+	 * codes verifier cannot handle yet.
+	 */
+	volatile int ret;
 
 	if (ctx->write)
 		return 0;
diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh
index ff0d31d38061..7c76b841b17b 100755
--- a/tools/testing/selftests/bpf/test_tc_tunnel.sh
+++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh
@@ -62,6 +62,10 @@ cleanup() {
 	if [[ -f "${infile}" ]]; then
 		rm "${infile}"
 	fi
+
+	if [[ -n $server_pid ]]; then
+		kill $server_pid 2> /dev/null
+	fi
}
 
 server_listen() {
@@ -77,6 +81,7 @@ client_connect() {
 
 verify_data() {
 	wait "${server_pid}"
+	server_pid=
 	# sha1sum returns two fields [sha1] [filepath]
 	# convert to bash array and access first elem
 	insum=($(sha1sum ${infile}))
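A note on the two bitfield selftests above: C forbids taking the address of a bitfield, so BPF_CORE_READ_BITFIELD() and BPF_CORE_READ_BITFIELD_PROBED() reconstruct the value from a wider load plus shifts, with the byte/bit offsets and sizes supplied by CO-RE relocations at program load time. The standalone C sketch below illustrates only the underlying shift arithmetic; the hard-coded offsets stand in for what the relocations would supply, it assumes a little-endian host with the usual GCC/Clang bitfield layout, and it is not libbpf's actual macro expansion.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Extract a bitfield by loading the word that covers it, shifting the
 * field's top bit up to bit 63, then shifting back down.  An arithmetic
 * right shift on the signed path performs the sign extension.
 */
static int64_t read_bitfield(const void *base, unsigned int byte_off,
			     unsigned int byte_sz, unsigned int bit_off,
			     unsigned int bit_sz, int is_signed)
{
	uint64_t val = 0;

	memcpy(&val, (const char *)base + byte_off, byte_sz);
	val <<= 64 - bit_off - bit_sz;	/* drop bits above the field */
	if (is_signed)
		return (int64_t)val >> (64 - bit_sz);
	return val >> (64 - bit_sz);
}

int main(void)
{
	/* mirrors ub1/ub2 from struct core_reloc_bitfields above */
	struct { uint8_t ub1: 1; uint8_t ub2: 2; } s = { .ub1 = 1, .ub2 = 3 };

	printf("ub1=%lld ub2=%lld\n",
	       (long long)read_bitfield(&s, 0, 1, 0, 1, 0),
	       (long long)read_bitfield(&s, 0, 1, 1, 2, 0));
	return 0;
}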
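Likewise for test_core_reloc_size.c: bpf_core_field_size() is relocated against the BTF of the running kernel, so it reports the size of the field in the actual target struct rather than the compile-time one. When the target matches the compiled definition it degenerates to plain sizeof; the expected values in the plain-C sketch below follow from the struct above on an LP64 target (8-byte pointers, int-sized enum, the usual GCC/Clang choices).

#include <assert.h>

struct core_reloc_size {
	int int_field;
	struct { int x; } struct_field;
	union { int x; } union_field;
	int arr_field[4];
	void *ptr_field;
	enum { VALUE = 123 } enum_field;
};

int main(void)
{
	struct core_reloc_size s;

	assert(sizeof(s.int_field) == 4);
	assert(sizeof(s.struct_field) == 4);
	assert(sizeof(s.union_field) == 4);
	assert(sizeof(s.arr_field) == 16);
	assert(sizeof(s.arr_field[0]) == 4);
	assert(sizeof(s.ptr_field) == 8);	/* LP64 */
	assert(sizeof(s.enum_field) == 4);	/* int-sized enum */
	return 0;
}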
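Finally, test_mmap.c exercises only the kernel side of the new BPF_F_MMAPABLE flag; the point of the flag is that userspace can mmap() the array map's fd and read or write values directly, with no bpf() syscall per element. A minimal userspace sketch of that flow follows; the object path "test_mmap.o" is a placeholder, and the mapping size assumes the map above (512 * 4 __u64 entries, i.e. 16384 bytes, four 4 KiB pages).

#include <stdio.h>
#include <sys/mman.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int main(void)
{
	const size_t map_sz = 512 * 4 * sizeof(__u64);
	struct bpf_object *obj;
	struct bpf_map *map;
	__u64 *vals;

	obj = bpf_object__open_file("test_mmap.o", NULL);	/* placeholder path */
	if (libbpf_get_error(obj) || bpf_object__load(obj))
		return 1;

	map = bpf_object__find_map_by_name(obj, "data_map");
	if (!map)
		return 1;

	/* a MAP_SHARED mapping aliases the kernel's array memory directly */
	vals = mmap(NULL, map_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
		    bpf_map__fd(map), 0);
	if (vals == MAP_FAILED)
		return 1;

	vals[0] = 21;	/* the BPF program sees this without any syscall */
	printf("data_map[1] = %llu\n", (unsigned long long)vals[1]);

	munmap(vals, map_sz);
	bpf_object__close(obj);
	return 0;
}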