Diffstat (limited to 'tools/testing/selftests/bpf')
23 files changed, 1997 insertions, 77 deletions
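Editor's note on the recurring change below: most of the user-space tests drop their per-test setrlimit(RLIMIT_MEMLOCK, ...) boilerplate and instead include the new shared bpf_rlimit.h, whose constructor lifts the memlock limit before main() runs. The following is only a minimal standalone sketch of that constructor pattern, not part of the patch itself (the function name and the demo main() are illustrative):

#include <stdio.h>
#include <sys/resource.h>

/* Runs before main(): lift RLIMIT_MEMLOCK so BPF map and program loads
 * in the test do not fail because of locked-memory accounting limits.
 */
static __attribute__((constructor)) void lift_memlock_rlimit(void)
{
	struct rlimit rlim = {
		.rlim_cur = RLIM_INFINITY,
		.rlim_max = RLIM_INFINITY,
	};

	if (setrlimit(RLIMIT_MEMLOCK, &rlim))
		perror("setrlimit(RLIMIT_MEMLOCK)");
}

int main(void)
{
	printf("memlock rlimit already lifted here\n");
	return 0;
}

Because the limit is per-process and the test exits anyway, no destructor is needed, which is exactly the reasoning spelled out in the bpf_rlimit.h comment further down.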
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 5c43c187f27c..0a315ddabbf4 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -13,34 +13,49 @@ endif CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include LDLIBS += -lcap -lelf -lrt -lpthread +TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read +all: $(TEST_CUSTOM_PROGS) + +$(TEST_CUSTOM_PROGS): urandom_read + +urandom_read: urandom_read.c + $(CC) -o $(TEST_CUSTOM_PROGS) -static $< + # Order correspond to 'make run_tests' order TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ - test_align test_verifier_log test_dev_cgroup test_tcpbpf_user + test_align test_verifier_log test_dev_cgroup test_tcpbpf_user \ + test_sock test_sock_addr TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \ test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \ sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o test_tracepoint.o \ test_l4lb_noinline.o test_xdp_noinline.o test_stacktrace_map.o \ - sample_map_ret0.o test_tcpbpf_kern.o + sample_map_ret0.o test_tcpbpf_kern.o test_stacktrace_build_id.o \ + sockmap_tcp_msg_prog.o connect4_prog.o connect6_prog.o # Order correspond to 'make run_tests' order TEST_PROGS := test_kmod.sh \ test_libbpf.sh \ test_xdp_redirect.sh \ test_xdp_meta.sh \ - test_offload.py + test_offload.py \ + test_sock_addr.sh # Compile but not part of 'make run_tests' TEST_GEN_PROGS_EXTENDED = test_libbpf_open include ../lib.mk -BPFOBJ := $(OUTPUT)/libbpf.a cgroup_helpers.c +BPFOBJ := $(OUTPUT)/libbpf.a $(TEST_GEN_PROGS): $(BPFOBJ) $(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/libbpf.a +$(OUTPUT)/test_dev_cgroup: cgroup_helpers.c +$(OUTPUT)/test_sock: cgroup_helpers.c +$(OUTPUT)/test_sock_addr: cgroup_helpers.c + .PHONY: force # force a rebuild of BPFOBJ when its dependencies are updated @@ -72,3 +87,5 @@ $(OUTPUT)/%.o: %.c $(CLANG) $(CLANG_FLAGS) \ -O2 -target bpf -emit-llvm -c $< -o - | \ $(LLC) -march=bpf -mcpu=$(CPU) -filetype=obj -o $@ + +EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index dde2c11d7771..d8223d99f96d 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h @@ -86,6 +86,16 @@ static int (*bpf_perf_prog_read_value)(void *ctx, void *buf, (void *) BPF_FUNC_perf_prog_read_value; static int (*bpf_override_return)(void *ctx, unsigned long rc) = (void *) BPF_FUNC_override_return; +static int (*bpf_msg_redirect_map)(void *ctx, void *map, int key, int flags) = + (void *) BPF_FUNC_msg_redirect_map; +static int (*bpf_msg_apply_bytes)(void *ctx, int len) = + (void *) BPF_FUNC_msg_apply_bytes; +static int (*bpf_msg_cork_bytes)(void *ctx, int len) = + (void *) BPF_FUNC_msg_cork_bytes; +static int (*bpf_msg_pull_data)(void *ctx, int start, int end, int flags) = + (void *) BPF_FUNC_msg_pull_data; +static int (*bpf_bind)(void *ctx, void *addr, int addr_len) = + (void *) BPF_FUNC_bind; /* llvm builtin functions that eBPF C program may use to * emit BPF_LD_ABS and BPF_LD_IND instructions @@ -123,6 +133,8 @@ static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) = (void *) BPF_FUNC_skb_under_cgroup; static int (*bpf_skb_change_head)(void *, int len, int flags) = (void *) BPF_FUNC_skb_change_head; +static int (*bpf_skb_pull_data)(void *, int len) = + (void *) BPF_FUNC_skb_pull_data; /* Scan the ARCH 
passed in from ARCH env variable (see Makefile) */ #if defined(__TARGET_ARCH_x86) diff --git a/tools/testing/selftests/bpf/bpf_rlimit.h b/tools/testing/selftests/bpf/bpf_rlimit.h new file mode 100644 index 000000000000..9dac9b30f8ef --- /dev/null +++ b/tools/testing/selftests/bpf/bpf_rlimit.h @@ -0,0 +1,28 @@ +#include <sys/resource.h> +#include <stdio.h> + +static __attribute__((constructor)) void bpf_rlimit_ctor(void) +{ + struct rlimit rlim_old, rlim_new = { + .rlim_cur = RLIM_INFINITY, + .rlim_max = RLIM_INFINITY, + }; + + getrlimit(RLIMIT_MEMLOCK, &rlim_old); + /* For the sake of running the test cases, we temporarily + * set rlimit to infinity in order for kernel to focus on + * errors from actual test cases and not getting noise + * from hitting memlock limits. The limit is on per-process + * basis and not a global one, hence destructor not really + * needed here. + */ + if (setrlimit(RLIMIT_MEMLOCK, &rlim_new) < 0) { + perror("Unable to lift memlock rlimit"); + /* Trying out lower limit, but expect potential test + * case failures from this! + */ + rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20); + rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20); + setrlimit(RLIMIT_MEMLOCK, &rlim_new); + } +} diff --git a/tools/testing/selftests/bpf/connect4_prog.c b/tools/testing/selftests/bpf/connect4_prog.c new file mode 100644 index 000000000000..5a88a681d2ab --- /dev/null +++ b/tools/testing/selftests/bpf/connect4_prog.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018 Facebook + +#include <string.h> + +#include <linux/stddef.h> +#include <linux/bpf.h> +#include <linux/in.h> +#include <linux/in6.h> +#include <sys/socket.h> + +#include "bpf_helpers.h" +#include "bpf_endian.h" + +#define SRC_REWRITE_IP4 0x7f000004U +#define DST_REWRITE_IP4 0x7f000001U +#define DST_REWRITE_PORT4 4444 + +int _version SEC("version") = 1; + +SEC("cgroup/connect4") +int connect_v4_prog(struct bpf_sock_addr *ctx) +{ + struct sockaddr_in sa; + + /* Rewrite destination. */ + ctx->user_ip4 = bpf_htonl(DST_REWRITE_IP4); + ctx->user_port = bpf_htons(DST_REWRITE_PORT4); + + if (ctx->type == SOCK_DGRAM || ctx->type == SOCK_STREAM) { + ///* Rewrite source. */ + memset(&sa, 0, sizeof(sa)); + + sa.sin_family = AF_INET; + sa.sin_port = bpf_htons(0); + sa.sin_addr.s_addr = bpf_htonl(SRC_REWRITE_IP4); + + if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)) != 0) + return 0; + } + + return 1; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/connect6_prog.c b/tools/testing/selftests/bpf/connect6_prog.c new file mode 100644 index 000000000000..8ea3f7d12dee --- /dev/null +++ b/tools/testing/selftests/bpf/connect6_prog.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018 Facebook + +#include <string.h> + +#include <linux/stddef.h> +#include <linux/bpf.h> +#include <linux/in.h> +#include <linux/in6.h> +#include <sys/socket.h> + +#include "bpf_helpers.h" +#include "bpf_endian.h" + +#define SRC_REWRITE_IP6_0 0 +#define SRC_REWRITE_IP6_1 0 +#define SRC_REWRITE_IP6_2 0 +#define SRC_REWRITE_IP6_3 6 + +#define DST_REWRITE_IP6_0 0 +#define DST_REWRITE_IP6_1 0 +#define DST_REWRITE_IP6_2 0 +#define DST_REWRITE_IP6_3 1 + +#define DST_REWRITE_PORT6 6666 + +int _version SEC("version") = 1; + +SEC("cgroup/connect6") +int connect_v6_prog(struct bpf_sock_addr *ctx) +{ + struct sockaddr_in6 sa; + + /* Rewrite destination. 
*/ + ctx->user_ip6[0] = bpf_htonl(DST_REWRITE_IP6_0); + ctx->user_ip6[1] = bpf_htonl(DST_REWRITE_IP6_1); + ctx->user_ip6[2] = bpf_htonl(DST_REWRITE_IP6_2); + ctx->user_ip6[3] = bpf_htonl(DST_REWRITE_IP6_3); + + ctx->user_port = bpf_htons(DST_REWRITE_PORT6); + + if (ctx->type == SOCK_DGRAM || ctx->type == SOCK_STREAM) { + /* Rewrite source. */ + memset(&sa, 0, sizeof(sa)); + + sa.sin6_family = AF_INET6; + sa.sin6_port = bpf_htons(0); + + sa.sin6_addr.s6_addr32[0] = bpf_htonl(SRC_REWRITE_IP6_0); + sa.sin6_addr.s6_addr32[1] = bpf_htonl(SRC_REWRITE_IP6_1); + sa.sin6_addr.s6_addr32[2] = bpf_htonl(SRC_REWRITE_IP6_2); + sa.sin6_addr.s6_addr32[3] = bpf_htonl(SRC_REWRITE_IP6_3); + + if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)) != 0) + return 0; + } + + return 1; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/sockmap_parse_prog.c b/tools/testing/selftests/bpf/sockmap_parse_prog.c index a1dec2b6d9c5..0f92858f6226 100644 --- a/tools/testing/selftests/bpf/sockmap_parse_prog.c +++ b/tools/testing/selftests/bpf/sockmap_parse_prog.c @@ -20,14 +20,25 @@ int bpf_prog1(struct __sk_buff *skb) __u32 lport = skb->local_port; __u32 rport = skb->remote_port; __u8 *d = data; + __u32 len = (__u32) data_end - (__u32) data; + int err; - if (data + 10 > data_end) - return skb->len; + if (data + 10 > data_end) { + err = bpf_skb_pull_data(skb, 10); + if (err) + return SK_DROP; + + data_end = (void *)(long)skb->data_end; + data = (void *)(long)skb->data; + if (data + 10 > data_end) + return SK_DROP; + } /* This write/read is a bit pointless but tests the verifier and * strparser handler for read/write pkt data and access into sk * fields. */ + d = data; d[7] = 1; return skb->len; } diff --git a/tools/testing/selftests/bpf/sockmap_tcp_msg_prog.c b/tools/testing/selftests/bpf/sockmap_tcp_msg_prog.c new file mode 100644 index 000000000000..12a7b5c82ed6 --- /dev/null +++ b/tools/testing/selftests/bpf/sockmap_tcp_msg_prog.c @@ -0,0 +1,33 @@ +#include <linux/bpf.h> +#include "bpf_helpers.h" +#include "bpf_util.h" +#include "bpf_endian.h" + +int _version SEC("version") = 1; + +#define bpf_printk(fmt, ...) 
\ +({ \ + char ____fmt[] = fmt; \ + bpf_trace_printk(____fmt, sizeof(____fmt), \ + ##__VA_ARGS__); \ +}) + +SEC("sk_msg1") +int bpf_prog1(struct sk_msg_md *msg) +{ + void *data_end = (void *)(long) msg->data_end; + void *data = (void *)(long) msg->data; + + char *d; + + if (data + 8 > data_end) + return SK_DROP; + + bpf_printk("data length %i\n", (__u64)msg->data_end - (__u64)msg->data); + d = (char *)data; + bpf_printk("hello sendmsg hook %i %i\n", d[0], d[1]); + + return SK_PASS; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/sockmap_verdict_prog.c index d7bea972cb21..2ce7634a4012 100644 --- a/tools/testing/selftests/bpf/sockmap_verdict_prog.c +++ b/tools/testing/selftests/bpf/sockmap_verdict_prog.c @@ -26,6 +26,13 @@ struct bpf_map_def SEC("maps") sock_map_tx = { .max_entries = 20, }; +struct bpf_map_def SEC("maps") sock_map_msg = { + .type = BPF_MAP_TYPE_SOCKMAP, + .key_size = sizeof(int), + .value_size = sizeof(int), + .max_entries = 20, +}; + struct bpf_map_def SEC("maps") sock_map_break = { .type = BPF_MAP_TYPE_ARRAY, .key_size = sizeof(int), diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c index ff8bd7e3e50c..6b1b302310fe 100644 --- a/tools/testing/selftests/bpf/test_align.c +++ b/tools/testing/selftests/bpf/test_align.c @@ -9,8 +9,6 @@ #include <stddef.h> #include <stdbool.h> -#include <sys/resource.h> - #include <linux/unistd.h> #include <linux/filter.h> #include <linux/bpf_perf_event.h> @@ -19,6 +17,7 @@ #include <bpf/bpf.h> #include "../../../include/linux/filter.h" +#include "bpf_rlimit.h" #ifndef ARRAY_SIZE # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) @@ -702,9 +701,6 @@ static int do_test(unsigned int from, unsigned int to) int main(int argc, char **argv) { unsigned int from = 0, to = ARRAY_SIZE(tests); - struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; - - setrlimit(RLIMIT_MEMLOCK, &rinf); if (argc == 3) { unsigned int l = atoi(argv[argc - 2]); diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c index 3489cc283433..9c8b50bac7e0 100644 --- a/tools/testing/selftests/bpf/test_dev_cgroup.c +++ b/tools/testing/selftests/bpf/test_dev_cgroup.c @@ -11,13 +11,13 @@ #include <errno.h> #include <assert.h> #include <sys/time.h> -#include <sys/resource.h> #include <linux/bpf.h> #include <bpf/bpf.h> #include <bpf/libbpf.h> #include "cgroup_helpers.h" +#include "bpf_rlimit.h" #define DEV_CGROUP_PROG "./dev_cgroup.o" @@ -25,15 +25,11 @@ int main(int argc, char **argv) { - struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY }; struct bpf_object *obj; int error = EXIT_FAILURE; int prog_fd, cgroup_fd; __u32 prog_cnt; - if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0) - perror("Unable to lift memlock rlimit"); - if (bpf_prog_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE, &obj, &prog_fd)) { printf("Failed to load DEV_CGROUP program\n"); diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c index 2be87e9ee28d..147e34cfceb7 100644 --- a/tools/testing/selftests/bpf/test_lpm_map.c +++ b/tools/testing/selftests/bpf/test_lpm_map.c @@ -22,10 +22,11 @@ #include <unistd.h> #include <arpa/inet.h> #include <sys/time.h> -#include <sys/resource.h> #include <bpf/bpf.h> + #include "bpf_util.h" +#include "bpf_rlimit.h" struct tlpm_node { struct tlpm_node *next; @@ -736,17 +737,11 @@ static void test_lpm_multi_thread(void) int main(void) { - struct rlimit limit 
= { RLIM_INFINITY, RLIM_INFINITY }; - int i, ret; + int i; /* we want predictable, pseudo random tests */ srand(0xf00ba1); - /* allow unlimited locked memory */ - ret = setrlimit(RLIMIT_MEMLOCK, &limit); - if (ret < 0) - perror("Unable to lift memlock rlimit"); - test_lpm_basic(); test_lpm_order(); @@ -755,11 +750,8 @@ int main(void) test_lpm_map(i); test_lpm_ipaddr(); - test_lpm_delete(); - test_lpm_get_next_key(); - test_lpm_multi_thread(); printf("test_lpm: OK\n"); diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c index 8c10c9180c1a..781c7de343be 100644 --- a/tools/testing/selftests/bpf/test_lru_map.c +++ b/tools/testing/selftests/bpf/test_lru_map.c @@ -16,10 +16,11 @@ #include <time.h> #include <sys/wait.h> -#include <sys/resource.h> #include <bpf/bpf.h> + #include "bpf_util.h" +#include "bpf_rlimit.h" #define LOCAL_FREE_TARGET (128) #define PERCPU_FREE_TARGET (4) @@ -613,7 +614,6 @@ static void test_lru_sanity6(int map_type, int map_flags, int tgt_free) int main(int argc, char **argv) { - struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; int map_types[] = {BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH}; int map_flags[] = {0, BPF_F_NO_COMMON_LRU}; @@ -621,8 +621,6 @@ int main(int argc, char **argv) setbuf(stdout, NULL); - assert(!setrlimit(RLIMIT_MEMLOCK, &r)); - nr_cpus = bpf_num_possible_cpus(); assert(nr_cpus != -1); printf("nr_cpus:%d\n\n", nr_cpus); diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 9e03a4c356a4..6c253343a6f9 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -17,13 +17,14 @@ #include <stdlib.h> #include <sys/wait.h> -#include <sys/resource.h> #include <linux/bpf.h> #include <bpf/bpf.h> #include <bpf/libbpf.h> + #include "bpf_util.h" +#include "bpf_rlimit.h" static int map_flags; @@ -463,15 +464,17 @@ static void test_devmap(int task, void *data) #include <linux/err.h> #define SOCKMAP_PARSE_PROG "./sockmap_parse_prog.o" #define SOCKMAP_VERDICT_PROG "./sockmap_verdict_prog.o" +#define SOCKMAP_TCP_MSG_PROG "./sockmap_tcp_msg_prog.o" static void test_sockmap(int tasks, void *data) { - int one = 1, map_fd_rx = 0, map_fd_tx = 0, map_fd_break, s, sc, rc; - struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_break; + struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_msg, *bpf_map_break; + int map_fd_msg = 0, map_fd_rx = 0, map_fd_tx = 0, map_fd_break; int ports[] = {50200, 50201, 50202, 50204}; int err, i, fd, udp, sfd[6] = {0xdeadbeef}; u8 buf[20] = {0x0, 0x5, 0x3, 0x2, 0x1, 0x0}; - int parse_prog, verdict_prog; + int parse_prog, verdict_prog, msg_prog; struct sockaddr_in addr; + int one = 1, s, sc, rc; struct bpf_object *obj; struct timeval to; __u32 key, value; @@ -583,6 +586,12 @@ static void test_sockmap(int tasks, void *data) goto out_sockmap; } + err = bpf_prog_attach(-1, fd, BPF_SK_MSG_VERDICT, 0); + if (!err) { + printf("Failed invalid msg verdict prog attach\n"); + goto out_sockmap; + } + err = bpf_prog_attach(-1, fd, __MAX_BPF_ATTACH_TYPE, 0); if (!err) { printf("Failed unknown prog attach\n"); @@ -601,6 +610,12 @@ static void test_sockmap(int tasks, void *data) goto out_sockmap; } + err = bpf_prog_detach(fd, BPF_SK_MSG_VERDICT); + if (err) { + printf("Failed empty msg verdict prog detach\n"); + goto out_sockmap; + } + err = bpf_prog_detach(fd, __MAX_BPF_ATTACH_TYPE); if (!err) { printf("Detach invalid prog successful\n"); @@ -615,6 +630,13 @@ static void test_sockmap(int tasks, void *data) goto 
out_sockmap; } + err = bpf_prog_load(SOCKMAP_TCP_MSG_PROG, + BPF_PROG_TYPE_SK_MSG, &obj, &msg_prog); + if (err) { + printf("Failed to load SK_SKB msg prog\n"); + goto out_sockmap; + } + err = bpf_prog_load(SOCKMAP_VERDICT_PROG, BPF_PROG_TYPE_SK_SKB, &obj, &verdict_prog); if (err) { @@ -630,7 +652,7 @@ static void test_sockmap(int tasks, void *data) map_fd_rx = bpf_map__fd(bpf_map_rx); if (map_fd_rx < 0) { - printf("Failed to get map fd\n"); + printf("Failed to get map rx fd\n"); goto out_sockmap; } @@ -646,6 +668,18 @@ static void test_sockmap(int tasks, void *data) goto out_sockmap; } + bpf_map_msg = bpf_object__find_map_by_name(obj, "sock_map_msg"); + if (IS_ERR(bpf_map_msg)) { + printf("Failed to load map msg from msg_verdict prog\n"); + goto out_sockmap; + } + + map_fd_msg = bpf_map__fd(bpf_map_msg); + if (map_fd_msg < 0) { + printf("Failed to get map msg fd\n"); + goto out_sockmap; + } + bpf_map_break = bpf_object__find_map_by_name(obj, "sock_map_break"); if (IS_ERR(bpf_map_break)) { printf("Failed to load map tx from verdict prog\n"); @@ -679,6 +713,12 @@ static void test_sockmap(int tasks, void *data) goto out_sockmap; } + err = bpf_prog_attach(msg_prog, map_fd_msg, BPF_SK_MSG_VERDICT, 0); + if (err) { + printf("Failed msg verdict bpf prog attach\n"); + goto out_sockmap; + } + err = bpf_prog_attach(verdict_prog, map_fd_rx, __MAX_BPF_ATTACH_TYPE, 0); if (!err) { @@ -718,6 +758,14 @@ static void test_sockmap(int tasks, void *data) } } + /* Put sfd[2] (sending fd below) into msg map to test sendmsg bpf */ + i = 0; + err = bpf_map_update_elem(map_fd_msg, &i, &sfd[2], BPF_ANY); + if (err) { + printf("Failed map_fd_msg update sockmap %i\n", err); + goto out_sockmap; + } + /* Test map send/recv */ for (i = 0; i < 2; i++) { buf[0] = i; @@ -1126,10 +1174,6 @@ static void run_all_tests(void) int main(void) { - struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; - - setrlimit(RLIMIT_MEMLOCK, &rinf); - map_flags = 0; run_all_tests(); diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index b549308abd19..faadbe233966 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -26,7 +26,6 @@ typedef __u16 __sum16; #include <sys/ioctl.h> #include <sys/wait.h> -#include <sys/resource.h> #include <sys/types.h> #include <fcntl.h> @@ -34,9 +33,11 @@ typedef __u16 __sum16; #include <linux/err.h> #include <bpf/bpf.h> #include <bpf/libbpf.h> + #include "test_iptunnel_common.h" #include "bpf_util.h" #include "bpf_endian.h" +#include "bpf_rlimit.h" static int error_cnt, pass_cnt; @@ -840,7 +841,8 @@ static void test_tp_attach_query(void) static int compare_map_keys(int map1_fd, int map2_fd) { __u32 key, next_key; - char val_buf[PERF_MAX_STACK_DEPTH * sizeof(__u64)]; + char val_buf[PERF_MAX_STACK_DEPTH * + sizeof(struct bpf_stack_build_id)]; int err; err = bpf_map_get_next_key(map1_fd, NULL, &key); @@ -875,7 +877,7 @@ static void test_stacktrace_map() err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) - goto out; + return; /* Get the ID for the sched/sched_switch tracepoint */ snprintf(buf, sizeof(buf), @@ -886,6 +888,181 @@ static void test_stacktrace_map() bytes = read(efd, buf, sizeof(buf)); close(efd); + if (bytes <= 0 || bytes >= sizeof(buf)) + goto close_prog; + + /* Open the perf event and attach bpf progrram */ + attr.config = strtol(buf, NULL, 0); + attr.type = PERF_TYPE_TRACEPOINT; + attr.sample_type = PERF_SAMPLE_RAW | 
PERF_SAMPLE_CALLCHAIN; + attr.sample_period = 1; + attr.wakeup_events = 1; + pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, + 0 /* cpu 0 */, -1 /* group id */, + 0 /* flags */); + if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", + pmu_fd, errno)) + goto close_prog; + + err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0); + if (err) + goto disable_pmu; + + err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd); + if (err) + goto disable_pmu; + + /* find map fds */ + control_map_fd = bpf_find_map(__func__, obj, "control_map"); + if (control_map_fd < 0) + goto disable_pmu; + + stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); + if (stackid_hmap_fd < 0) + goto disable_pmu; + + stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); + if (stackmap_fd < 0) + goto disable_pmu; + + /* give some time for bpf program run */ + sleep(1); + + /* disable stack trace collection */ + key = 0; + val = 1; + bpf_map_update_elem(control_map_fd, &key, &val, 0); + + /* for every element in stackid_hmap, we can find a corresponding one + * in stackmap, and vise versa. + */ + err = compare_map_keys(stackid_hmap_fd, stackmap_fd); + if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", + "err %d errno %d\n", err, errno)) + goto disable_pmu_noerr; + + err = compare_map_keys(stackmap_fd, stackid_hmap_fd); + if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap", + "err %d errno %d\n", err, errno)) + goto disable_pmu_noerr; + + goto disable_pmu_noerr; +disable_pmu: + error_cnt++; +disable_pmu_noerr: + ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); + close(pmu_fd); +close_prog: + bpf_object__close(obj); +} + +static void test_stacktrace_map_raw_tp() +{ + int control_map_fd, stackid_hmap_fd, stackmap_fd; + const char *file = "./test_stacktrace_map.o"; + int efd, err, prog_fd; + __u32 key, val, duration = 0; + struct bpf_object *obj; + + err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd); + if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno)) + return; + + efd = bpf_raw_tracepoint_open("sched_switch", prog_fd); + if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno)) + goto close_prog; + + /* find map fds */ + control_map_fd = bpf_find_map(__func__, obj, "control_map"); + if (control_map_fd < 0) + goto close_prog; + + stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); + if (stackid_hmap_fd < 0) + goto close_prog; + + stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); + if (stackmap_fd < 0) + goto close_prog; + + /* give some time for bpf program run */ + sleep(1); + + /* disable stack trace collection */ + key = 0; + val = 1; + bpf_map_update_elem(control_map_fd, &key, &val, 0); + + /* for every element in stackid_hmap, we can find a corresponding one + * in stackmap, and vise versa. + */ + err = compare_map_keys(stackid_hmap_fd, stackmap_fd); + if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", + "err %d errno %d\n", err, errno)) + goto close_prog; + + err = compare_map_keys(stackmap_fd, stackid_hmap_fd); + if (CHECK(err, "compare_map_keys stackmap vs. 
stackid_hmap", + "err %d errno %d\n", err, errno)) + goto close_prog; + + goto close_prog_noerr; +close_prog: + error_cnt++; +close_prog_noerr: + bpf_object__close(obj); +} + +static int extract_build_id(char *build_id, size_t size) +{ + FILE *fp; + char *line = NULL; + size_t len = 0; + + fp = popen("readelf -n ./urandom_read | grep 'Build ID'", "r"); + if (fp == NULL) + return -1; + + if (getline(&line, &len, fp) == -1) + goto err; + fclose(fp); + + if (len > size) + len = size; + memcpy(build_id, line, len); + build_id[len] = '\0'; + return 0; +err: + fclose(fp); + return -1; +} + +static void test_stacktrace_build_id(void) +{ + int control_map_fd, stackid_hmap_fd, stackmap_fd; + const char *file = "./test_stacktrace_build_id.o"; + int bytes, efd, err, pmu_fd, prog_fd; + struct perf_event_attr attr = {}; + __u32 key, previous_key, val, duration = 0; + struct bpf_object *obj; + char buf[256]; + int i, j; + struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH]; + int build_id_matches = 0; + + err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); + if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno)) + goto out; + + /* Get the ID for the sched/sched_switch tracepoint */ + snprintf(buf, sizeof(buf), + "/sys/kernel/debug/tracing/events/random/urandom_read/id"); + efd = open(buf, O_RDONLY, 0); + if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno)) + goto close_prog; + + bytes = read(efd, buf, sizeof(buf)); + close(efd); if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read", "bytes %d errno %d\n", bytes, errno)) goto close_prog; @@ -929,9 +1106,9 @@ static void test_stacktrace_map() err, errno)) goto disable_pmu; - /* give some time for bpf program run */ - sleep(1); - + assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null") + == 0); + assert(system("./urandom_read if=/dev/urandom of=/dev/zero count=4 2> /dev/null") == 0); /* disable stack trace collection */ key = 0; val = 1; @@ -948,7 +1125,40 @@ static void test_stacktrace_map() err = compare_map_keys(stackmap_fd, stackid_hmap_fd); if (CHECK(err, "compare_map_keys stackmap vs. 
stackid_hmap", "err %d errno %d\n", err, errno)) - ; /* fall through */ + goto disable_pmu; + + err = extract_build_id(buf, 256); + + if (CHECK(err, "get build_id with readelf", + "err %d errno %d\n", err, errno)) + goto disable_pmu; + + err = bpf_map_get_next_key(stackmap_fd, NULL, &key); + if (CHECK(err, "get_next_key from stackmap", + "err %d, errno %d\n", err, errno)) + goto disable_pmu; + + do { + char build_id[64]; + + err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs); + if (CHECK(err, "lookup_elem from stackmap", + "err %d, errno %d\n", err, errno)) + goto disable_pmu; + for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i) + if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID && + id_offs[i].offset != 0) { + for (j = 0; j < 20; ++j) + sprintf(build_id + 2 * j, "%02x", + id_offs[i].build_id[j] & 0xff); + if (strstr(buf, build_id) != NULL) + build_id_matches = 1; + } + previous_key = key; + } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0); + + CHECK(build_id_matches < 1, "build id match", + "Didn't find expected build ID from the map"); disable_pmu: ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE); @@ -965,10 +1175,6 @@ out: int main(void) { - struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; - - setrlimit(RLIMIT_MEMLOCK, &rinf); - test_pkt_access(); test_xdp(); test_l4lb_all(); @@ -979,6 +1185,8 @@ int main(void) test_obj_name(); test_tp_attach_query(); test_stacktrace_map(); + test_stacktrace_build_id(); + test_stacktrace_map_raw_tp(); printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt); return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS; diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c new file mode 100644 index 000000000000..73bb20cfb9b7 --- /dev/null +++ b/tools/testing/selftests/bpf/test_sock.c @@ -0,0 +1,479 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018 Facebook + +#include <stdio.h> +#include <unistd.h> + +#include <arpa/inet.h> +#include <sys/types.h> +#include <sys/socket.h> + +#include <linux/filter.h> + +#include <bpf/bpf.h> + +#include "cgroup_helpers.h" + +#ifndef ARRAY_SIZE +# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + +#define CG_PATH "/foo" +#define MAX_INSNS 512 + +char bpf_log_buf[BPF_LOG_BUF_SIZE]; + +struct sock_test { + const char *descr; + /* BPF prog properties */ + struct bpf_insn insns[MAX_INSNS]; + enum bpf_attach_type expected_attach_type; + enum bpf_attach_type attach_type; + /* Socket properties */ + int domain; + int type; + /* Endpoint to bind() to */ + const char *ip; + unsigned short port; + /* Expected test result */ + enum { + LOAD_REJECT, + ATTACH_REJECT, + BIND_REJECT, + SUCCESS, + } result; +}; + +static struct sock_test tests[] = { + { + "bind4 load with invalid access: src_ip6", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, + offsetof(struct bpf_sock, src_ip6[0])), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + BPF_CGROUP_INET4_POST_BIND, + BPF_CGROUP_INET4_POST_BIND, + 0, + 0, + NULL, + 0, + LOAD_REJECT, + }, + { + "bind4 load with invalid access: mark", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, + offsetof(struct bpf_sock, mark)), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + BPF_CGROUP_INET4_POST_BIND, + BPF_CGROUP_INET4_POST_BIND, + 0, + 0, + NULL, + 0, + LOAD_REJECT, + }, + { + "bind6 load with invalid access: src_ip4", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, + 
offsetof(struct bpf_sock, src_ip4)), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + BPF_CGROUP_INET6_POST_BIND, + BPF_CGROUP_INET6_POST_BIND, + 0, + 0, + NULL, + 0, + LOAD_REJECT, + }, + { + "sock_create load with invalid access: src_port", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, + offsetof(struct bpf_sock, src_port)), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + BPF_CGROUP_INET_SOCK_CREATE, + BPF_CGROUP_INET_SOCK_CREATE, + 0, + 0, + NULL, + 0, + LOAD_REJECT, + }, + { + "sock_create load w/o expected_attach_type (compat mode)", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + 0, + BPF_CGROUP_INET_SOCK_CREATE, + AF_INET, + SOCK_STREAM, + "127.0.0.1", + 8097, + SUCCESS, + }, + { + "sock_create load w/ expected_attach_type", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + BPF_CGROUP_INET_SOCK_CREATE, + BPF_CGROUP_INET_SOCK_CREATE, + AF_INET, + SOCK_STREAM, + "127.0.0.1", + 8097, + SUCCESS, + }, + { + "attach type mismatch bind4 vs bind6", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + BPF_CGROUP_INET4_POST_BIND, + BPF_CGROUP_INET6_POST_BIND, + 0, + 0, + NULL, + 0, + ATTACH_REJECT, + }, + { + "attach type mismatch bind6 vs bind4", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + BPF_CGROUP_INET6_POST_BIND, + BPF_CGROUP_INET4_POST_BIND, + 0, + 0, + NULL, + 0, + ATTACH_REJECT, + }, + { + "attach type mismatch default vs bind4", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + 0, + BPF_CGROUP_INET4_POST_BIND, + 0, + 0, + NULL, + 0, + ATTACH_REJECT, + }, + { + "attach type mismatch bind6 vs sock_create", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + BPF_CGROUP_INET6_POST_BIND, + BPF_CGROUP_INET_SOCK_CREATE, + 0, + 0, + NULL, + 0, + ATTACH_REJECT, + }, + { + "bind4 reject all", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + BPF_CGROUP_INET4_POST_BIND, + BPF_CGROUP_INET4_POST_BIND, + AF_INET, + SOCK_STREAM, + "0.0.0.0", + 0, + BIND_REJECT, + }, + { + "bind6 reject all", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + BPF_CGROUP_INET6_POST_BIND, + BPF_CGROUP_INET6_POST_BIND, + AF_INET6, + SOCK_STREAM, + "::", + 0, + BIND_REJECT, + }, + { + "bind6 deny specific IP & port", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + + /* if (ip == expected && port == expected) */ + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, + offsetof(struct bpf_sock, src_ip6[3])), + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x01000000, 4), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, + offsetof(struct bpf_sock, src_port)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2), + + /* return DENY; */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_A(1), + + /* else return ALLOW; */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + BPF_CGROUP_INET6_POST_BIND, + BPF_CGROUP_INET6_POST_BIND, + AF_INET6, + SOCK_STREAM, + "::1", + 8193, + BIND_REJECT, + }, + { + "bind4 allow specific IP & port", + .insns = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + + /* if (ip == expected && port == expected) */ + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, + offsetof(struct bpf_sock, src_ip4)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x0100007F, 4), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, + offsetof(struct bpf_sock, src_port)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2), + + /* return ALLOW; */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_JMP_A(1), + + /* else return DENY; */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + 
BPF_CGROUP_INET4_POST_BIND, + BPF_CGROUP_INET4_POST_BIND, + AF_INET, + SOCK_STREAM, + "127.0.0.1", + 4098, + SUCCESS, + }, + { + "bind4 allow all", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + BPF_CGROUP_INET4_POST_BIND, + BPF_CGROUP_INET4_POST_BIND, + AF_INET, + SOCK_STREAM, + "0.0.0.0", + 0, + SUCCESS, + }, + { + "bind6 allow all", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + BPF_CGROUP_INET6_POST_BIND, + BPF_CGROUP_INET6_POST_BIND, + AF_INET6, + SOCK_STREAM, + "::", + 0, + SUCCESS, + }, +}; + +static size_t probe_prog_length(const struct bpf_insn *fp) +{ + size_t len; + + for (len = MAX_INSNS - 1; len > 0; --len) + if (fp[len].code != 0 || fp[len].imm != 0) + break; + return len + 1; +} + +static int load_sock_prog(const struct bpf_insn *prog, + enum bpf_attach_type attach_type) +{ + struct bpf_load_program_attr attr; + + memset(&attr, 0, sizeof(struct bpf_load_program_attr)); + attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK; + attr.expected_attach_type = attach_type; + attr.insns = prog; + attr.insns_cnt = probe_prog_length(attr.insns); + attr.license = "GPL"; + + return bpf_load_program_xattr(&attr, bpf_log_buf, BPF_LOG_BUF_SIZE); +} + +static int attach_sock_prog(int cgfd, int progfd, + enum bpf_attach_type attach_type) +{ + return bpf_prog_attach(progfd, cgfd, attach_type, BPF_F_ALLOW_OVERRIDE); +} + +static int bind_sock(int domain, int type, const char *ip, unsigned short port) +{ + struct sockaddr_storage addr; + struct sockaddr_in6 *addr6; + struct sockaddr_in *addr4; + int sockfd = -1; + socklen_t len; + int err = 0; + + sockfd = socket(domain, type, 0); + if (sockfd < 0) + goto err; + + memset(&addr, 0, sizeof(addr)); + + if (domain == AF_INET) { + len = sizeof(struct sockaddr_in); + addr4 = (struct sockaddr_in *)&addr; + addr4->sin_family = domain; + addr4->sin_port = htons(port); + if (inet_pton(domain, ip, (void *)&addr4->sin_addr) != 1) + goto err; + } else if (domain == AF_INET6) { + len = sizeof(struct sockaddr_in6); + addr6 = (struct sockaddr_in6 *)&addr; + addr6->sin6_family = domain; + addr6->sin6_port = htons(port); + if (inet_pton(domain, ip, (void *)&addr6->sin6_addr) != 1) + goto err; + } else { + goto err; + } + + if (bind(sockfd, (const struct sockaddr *)&addr, len) == -1) + goto err; + + goto out; +err: + err = -1; +out: + close(sockfd); + return err; +} + +static int run_test_case(int cgfd, const struct sock_test *test) +{ + int progfd = -1; + int err = 0; + + printf("Test case: %s .. ", test->descr); + progfd = load_sock_prog(test->insns, test->expected_attach_type); + if (progfd < 0) { + if (test->result == LOAD_REJECT) + goto out; + else + goto err; + } + + if (attach_sock_prog(cgfd, progfd, test->attach_type) == -1) { + if (test->result == ATTACH_REJECT) + goto out; + else + goto err; + } + + if (bind_sock(test->domain, test->type, test->ip, test->port) == -1) { + /* sys_bind() may fail for different reasons, errno has to be + * checked to confirm that BPF program rejected it. + */ + if (test->result == BIND_REJECT && errno == EPERM) + goto out; + else + goto err; + } + + + if (test->result != SUCCESS) + goto err; + + goto out; +err: + err = -1; +out: + /* Detaching w/o checking return code: best effort attempt. */ + if (progfd != -1) + bpf_prog_detach(cgfd, test->attach_type); + close(progfd); + printf("[%s]\n", err ? 
"FAIL" : "PASS"); + return err; +} + +static int run_tests(int cgfd) +{ + int passes = 0; + int fails = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(tests); ++i) { + if (run_test_case(cgfd, &tests[i])) + ++fails; + else + ++passes; + } + printf("Summary: %d PASSED, %d FAILED\n", passes, fails); + return fails ? -1 : 0; +} + +int main(int argc, char **argv) +{ + int cgfd = -1; + int err = 0; + + if (setup_cgroup_environment()) + goto err; + + cgfd = create_and_get_cgroup(CG_PATH); + if (!cgfd) + goto err; + + if (join_cgroup(CG_PATH)) + goto err; + + if (run_tests(cgfd)) + goto err; + + goto out; +err: + err = -1; +out: + close(cgfd); + cleanup_cgroup_environment(); + return err; +} diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c new file mode 100644 index 000000000000..d488f20926e8 --- /dev/null +++ b/tools/testing/selftests/bpf/test_sock_addr.c @@ -0,0 +1,588 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018 Facebook + +#include <stdio.h> +#include <stdlib.h> +#include <unistd.h> + +#include <arpa/inet.h> +#include <sys/types.h> +#include <sys/socket.h> + +#include <linux/filter.h> + +#include <bpf/bpf.h> +#include <bpf/libbpf.h> + +#include "cgroup_helpers.h" + +#define CG_PATH "/foo" +#define CONNECT4_PROG_PATH "./connect4_prog.o" +#define CONNECT6_PROG_PATH "./connect6_prog.o" + +#define SERV4_IP "192.168.1.254" +#define SERV4_REWRITE_IP "127.0.0.1" +#define SERV4_PORT 4040 +#define SERV4_REWRITE_PORT 4444 + +#define SERV6_IP "face:b00c:1234:5678::abcd" +#define SERV6_REWRITE_IP "::1" +#define SERV6_PORT 6060 +#define SERV6_REWRITE_PORT 6666 + +#define INET_NTOP_BUF 40 + +typedef int (*load_fn)(enum bpf_attach_type, const char *comment); +typedef int (*info_fn)(int, struct sockaddr *, socklen_t *); + +struct program { + enum bpf_attach_type type; + load_fn loadfn; + int fd; + const char *name; + enum bpf_attach_type invalid_type; +}; + +char bpf_log_buf[BPF_LOG_BUF_SIZE]; + +static int mk_sockaddr(int domain, const char *ip, unsigned short port, + struct sockaddr *addr, socklen_t addr_len) +{ + struct sockaddr_in6 *addr6; + struct sockaddr_in *addr4; + + if (domain != AF_INET && domain != AF_INET6) { + log_err("Unsupported address family"); + return -1; + } + + memset(addr, 0, addr_len); + + if (domain == AF_INET) { + if (addr_len < sizeof(struct sockaddr_in)) + return -1; + addr4 = (struct sockaddr_in *)addr; + addr4->sin_family = domain; + addr4->sin_port = htons(port); + if (inet_pton(domain, ip, (void *)&addr4->sin_addr) != 1) { + log_err("Invalid IPv4: %s", ip); + return -1; + } + } else if (domain == AF_INET6) { + if (addr_len < sizeof(struct sockaddr_in6)) + return -1; + addr6 = (struct sockaddr_in6 *)addr; + addr6->sin6_family = domain; + addr6->sin6_port = htons(port); + if (inet_pton(domain, ip, (void *)&addr6->sin6_addr) != 1) { + log_err("Invalid IPv6: %s", ip); + return -1; + } + } + + return 0; +} + +static int load_insns(enum bpf_attach_type attach_type, + const struct bpf_insn *insns, size_t insns_cnt, + const char *comment) +{ + struct bpf_load_program_attr load_attr; + int ret; + + memset(&load_attr, 0, sizeof(struct bpf_load_program_attr)); + load_attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR; + load_attr.expected_attach_type = attach_type; + load_attr.insns = insns; + load_attr.insns_cnt = insns_cnt; + load_attr.license = "GPL"; + + ret = bpf_load_program_xattr(&load_attr, bpf_log_buf, BPF_LOG_BUF_SIZE); + if (ret < 0 && comment) { + log_err(">>> Loading %s program error.\n" + ">>> Output from 
verifier:\n%s\n-------\n", + comment, bpf_log_buf); + } + + return ret; +} + +/* [1] These testing programs try to read different context fields, including + * narrow loads of different sizes from user_ip4 and user_ip6, and write to + * those allowed to be overridden. + * + * [2] BPF_LD_IMM64 & BPF_JMP_REG are used below whenever there is a need to + * compare a register with unsigned 32bit integer. BPF_JMP_IMM can't be used + * in such cases since it accepts only _signed_ 32bit integer as IMM + * argument. Also note that BPF_LD_IMM64 contains 2 instructions what matters + * to count jumps properly. + */ + +static int bind4_prog_load(enum bpf_attach_type attach_type, + const char *comment) +{ + union { + uint8_t u4_addr8[4]; + uint16_t u4_addr16[2]; + uint32_t u4_addr32; + } ip4; + struct sockaddr_in addr4_rw; + + if (inet_pton(AF_INET, SERV4_IP, (void *)&ip4) != 1) { + log_err("Invalid IPv4: %s", SERV4_IP); + return -1; + } + + if (mk_sockaddr(AF_INET, SERV4_REWRITE_IP, SERV4_REWRITE_PORT, + (struct sockaddr *)&addr4_rw, sizeof(addr4_rw)) == -1) + return -1; + + /* See [1]. */ + struct bpf_insn insns[] = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + + /* if (sk.family == AF_INET && */ + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, + offsetof(struct bpf_sock_addr, family)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, AF_INET, 16), + + /* (sk.type == SOCK_DGRAM || sk.type == SOCK_STREAM) && */ + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, + offsetof(struct bpf_sock_addr, type)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, SOCK_DGRAM, 1), + BPF_JMP_A(1), + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, SOCK_STREAM, 12), + + /* 1st_byte_of_user_ip4 == expected && */ + BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_6, + offsetof(struct bpf_sock_addr, user_ip4)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr8[0], 10), + + /* 1st_half_of_user_ip4 == expected && */ + BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_6, + offsetof(struct bpf_sock_addr, user_ip4)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip4.u4_addr16[0], 8), + + /* whole_user_ip4 == expected) { */ + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, + offsetof(struct bpf_sock_addr, user_ip4)), + BPF_LD_IMM64(BPF_REG_8, ip4.u4_addr32), /* See [2]. */ + BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_8, 4), + + /* user_ip4 = addr4_rw.sin_addr */ + BPF_MOV32_IMM(BPF_REG_7, addr4_rw.sin_addr.s_addr), + BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7, + offsetof(struct bpf_sock_addr, user_ip4)), + + /* user_port = addr4_rw.sin_port */ + BPF_MOV32_IMM(BPF_REG_7, addr4_rw.sin_port), + BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7, + offsetof(struct bpf_sock_addr, user_port)), + /* } */ + + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }; + + return load_insns(attach_type, insns, + sizeof(insns) / sizeof(struct bpf_insn), comment); +} + +static int bind6_prog_load(enum bpf_attach_type attach_type, + const char *comment) +{ + struct sockaddr_in6 addr6_rw; + struct in6_addr ip6; + + if (inet_pton(AF_INET6, SERV6_IP, (void *)&ip6) != 1) { + log_err("Invalid IPv6: %s", SERV6_IP); + return -1; + } + + if (mk_sockaddr(AF_INET6, SERV6_REWRITE_IP, SERV6_REWRITE_PORT, + (struct sockaddr *)&addr6_rw, sizeof(addr6_rw)) == -1) + return -1; + + /* See [1]. 
*/ + struct bpf_insn insns[] = { + BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), + + /* if (sk.family == AF_INET6 && */ + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, + offsetof(struct bpf_sock_addr, family)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, AF_INET6, 18), + + /* 5th_byte_of_user_ip6 == expected && */ + BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_6, + offsetof(struct bpf_sock_addr, user_ip6[1])), + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip6.s6_addr[4], 16), + + /* 3rd_half_of_user_ip6 == expected && */ + BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_6, + offsetof(struct bpf_sock_addr, user_ip6[1])), + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ip6.s6_addr16[2], 14), + + /* last_word_of_user_ip6 == expected) { */ + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, + offsetof(struct bpf_sock_addr, user_ip6[3])), + BPF_LD_IMM64(BPF_REG_8, ip6.s6_addr32[3]), /* See [2]. */ + BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_8, 10), + + +#define STORE_IPV6_WORD(N) \ + BPF_MOV32_IMM(BPF_REG_7, addr6_rw.sin6_addr.s6_addr32[N]), \ + BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7, \ + offsetof(struct bpf_sock_addr, user_ip6[N])) + + /* user_ip6 = addr6_rw.sin6_addr */ + STORE_IPV6_WORD(0), + STORE_IPV6_WORD(1), + STORE_IPV6_WORD(2), + STORE_IPV6_WORD(3), + + /* user_port = addr6_rw.sin6_port */ + BPF_MOV32_IMM(BPF_REG_7, addr6_rw.sin6_port), + BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7, + offsetof(struct bpf_sock_addr, user_port)), + + /* } */ + + /* return 1 */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }; + + return load_insns(attach_type, insns, + sizeof(insns) / sizeof(struct bpf_insn), comment); +} + +static int connect_prog_load_path(const char *path, + enum bpf_attach_type attach_type, + const char *comment) +{ + struct bpf_prog_load_attr attr; + struct bpf_object *obj; + int prog_fd; + + memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); + attr.file = path; + attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR; + attr.expected_attach_type = attach_type; + + if (bpf_prog_load_xattr(&attr, &obj, &prog_fd)) { + if (comment) + log_err(">>> Loading %s program at %s error.\n", + comment, path); + return -1; + } + + return prog_fd; +} + +static int connect4_prog_load(enum bpf_attach_type attach_type, + const char *comment) +{ + return connect_prog_load_path(CONNECT4_PROG_PATH, attach_type, comment); +} + +static int connect6_prog_load(enum bpf_attach_type attach_type, + const char *comment) +{ + return connect_prog_load_path(CONNECT6_PROG_PATH, attach_type, comment); +} + +static void print_ip_port(int sockfd, info_fn fn, const char *fmt) +{ + char addr_buf[INET_NTOP_BUF]; + struct sockaddr_storage addr; + struct sockaddr_in6 *addr6; + struct sockaddr_in *addr4; + socklen_t addr_len; + unsigned short port; + void *nip; + + addr_len = sizeof(struct sockaddr_storage); + memset(&addr, 0, addr_len); + + if (fn(sockfd, (struct sockaddr *)&addr, (socklen_t *)&addr_len) == 0) { + if (addr.ss_family == AF_INET) { + addr4 = (struct sockaddr_in *)&addr; + nip = (void *)&addr4->sin_addr; + port = ntohs(addr4->sin_port); + } else if (addr.ss_family == AF_INET6) { + addr6 = (struct sockaddr_in6 *)&addr; + nip = (void *)&addr6->sin6_addr; + port = ntohs(addr6->sin6_port); + } else { + return; + } + const char *addr_str = + inet_ntop(addr.ss_family, nip, addr_buf, INET_NTOP_BUF); + printf(fmt, addr_str ? 
addr_str : "??", port); + } +} + +static void print_local_ip_port(int sockfd, const char *fmt) +{ + print_ip_port(sockfd, getsockname, fmt); +} + +static void print_remote_ip_port(int sockfd, const char *fmt) +{ + print_ip_port(sockfd, getpeername, fmt); +} + +static int start_server(int type, const struct sockaddr_storage *addr, + socklen_t addr_len) +{ + + int fd; + + fd = socket(addr->ss_family, type, 0); + if (fd == -1) { + log_err("Failed to create server socket"); + goto out; + } + + if (bind(fd, (const struct sockaddr *)addr, addr_len) == -1) { + log_err("Failed to bind server socket"); + goto close_out; + } + + if (type == SOCK_STREAM) { + if (listen(fd, 128) == -1) { + log_err("Failed to listen on server socket"); + goto close_out; + } + } + + print_local_ip_port(fd, "\t Actual: bind(%s, %d)\n"); + + goto out; +close_out: + close(fd); + fd = -1; +out: + return fd; +} + +static int connect_to_server(int type, const struct sockaddr_storage *addr, + socklen_t addr_len) +{ + int domain; + int fd; + + domain = addr->ss_family; + + if (domain != AF_INET && domain != AF_INET6) { + log_err("Unsupported address family"); + return -1; + } + + fd = socket(domain, type, 0); + if (fd == -1) { + log_err("Failed to creating client socket"); + return -1; + } + + if (connect(fd, (const struct sockaddr *)addr, addr_len) == -1) { + log_err("Fail to connect to server"); + goto err; + } + + print_remote_ip_port(fd, "\t Actual: connect(%s, %d)"); + print_local_ip_port(fd, " from (%s, %d)\n"); + + return 0; +err: + close(fd); + return -1; +} + +static void print_test_case_num(int domain, int type) +{ + static int test_num; + + printf("Test case #%d (%s/%s):\n", ++test_num, + (domain == AF_INET ? "IPv4" : + domain == AF_INET6 ? "IPv6" : + "unknown_domain"), + (type == SOCK_STREAM ? "TCP" : + type == SOCK_DGRAM ? "UDP" : + "unknown_type")); +} + +static int run_test_case(int domain, int type, const char *ip, + unsigned short port) +{ + struct sockaddr_storage addr; + socklen_t addr_len = sizeof(addr); + int servfd = -1; + int err = 0; + + print_test_case_num(domain, type); + + if (mk_sockaddr(domain, ip, port, (struct sockaddr *)&addr, + addr_len) == -1) + return -1; + + printf("\tRequested: bind(%s, %d) ..\n", ip, port); + servfd = start_server(type, &addr, addr_len); + if (servfd == -1) + goto err; + + printf("\tRequested: connect(%s, %d) from (*, *) ..\n", ip, port); + if (connect_to_server(type, &addr, addr_len)) + goto err; + + goto out; +err: + err = -1; +out: + close(servfd); + return err; +} + +static void close_progs_fds(struct program *progs, size_t prog_cnt) +{ + size_t i; + + for (i = 0; i < prog_cnt; ++i) { + close(progs[i].fd); + progs[i].fd = -1; + } +} + +static int load_and_attach_progs(int cgfd, struct program *progs, + size_t prog_cnt) +{ + size_t i; + + for (i = 0; i < prog_cnt; ++i) { + printf("Load %s with invalid type (can pollute stderr) ", + progs[i].name); + fflush(stdout); + progs[i].fd = progs[i].loadfn(progs[i].invalid_type, NULL); + if (progs[i].fd != -1) { + log_err("Load with invalid type accepted for %s", + progs[i].name); + goto err; + } + printf("... REJECTED\n"); + + printf("Load %s with valid type", progs[i].name); + progs[i].fd = progs[i].loadfn(progs[i].type, progs[i].name); + if (progs[i].fd == -1) { + log_err("Failed to load program %s", progs[i].name); + goto err; + } + printf(" ... 
OK\n"); + + printf("Attach %s with invalid type", progs[i].name); + if (bpf_prog_attach(progs[i].fd, cgfd, progs[i].invalid_type, + BPF_F_ALLOW_OVERRIDE) != -1) { + log_err("Attach with invalid type accepted for %s", + progs[i].name); + goto err; + } + printf(" ... REJECTED\n"); + + printf("Attach %s with valid type", progs[i].name); + if (bpf_prog_attach(progs[i].fd, cgfd, progs[i].type, + BPF_F_ALLOW_OVERRIDE) == -1) { + log_err("Failed to attach program %s", progs[i].name); + goto err; + } + printf(" ... OK\n"); + } + + return 0; +err: + close_progs_fds(progs, prog_cnt); + return -1; +} + +static int run_domain_test(int domain, int cgfd, struct program *progs, + size_t prog_cnt, const char *ip, unsigned short port) +{ + int err = 0; + + if (load_and_attach_progs(cgfd, progs, prog_cnt) == -1) + goto err; + + if (run_test_case(domain, SOCK_STREAM, ip, port) == -1) + goto err; + + if (run_test_case(domain, SOCK_DGRAM, ip, port) == -1) + goto err; + + goto out; +err: + err = -1; +out: + close_progs_fds(progs, prog_cnt); + return err; +} + +static int run_test(void) +{ + size_t inet6_prog_cnt; + size_t inet_prog_cnt; + int cgfd = -1; + int err = 0; + + struct program inet6_progs[] = { + {BPF_CGROUP_INET6_BIND, bind6_prog_load, -1, "bind6", + BPF_CGROUP_INET4_BIND}, + {BPF_CGROUP_INET6_CONNECT, connect6_prog_load, -1, "connect6", + BPF_CGROUP_INET4_CONNECT}, + }; + inet6_prog_cnt = sizeof(inet6_progs) / sizeof(struct program); + + struct program inet_progs[] = { + {BPF_CGROUP_INET4_BIND, bind4_prog_load, -1, "bind4", + BPF_CGROUP_INET6_BIND}, + {BPF_CGROUP_INET4_CONNECT, connect4_prog_load, -1, "connect4", + BPF_CGROUP_INET6_CONNECT}, + }; + inet_prog_cnt = sizeof(inet_progs) / sizeof(struct program); + + if (setup_cgroup_environment()) + goto err; + + cgfd = create_and_get_cgroup(CG_PATH); + if (!cgfd) + goto err; + + if (join_cgroup(CG_PATH)) + goto err; + + if (run_domain_test(AF_INET, cgfd, inet_progs, inet_prog_cnt, SERV4_IP, + SERV4_PORT) == -1) + goto err; + + if (run_domain_test(AF_INET6, cgfd, inet6_progs, inet6_prog_cnt, + SERV6_IP, SERV6_PORT) == -1) + goto err; + + goto out; +err: + err = -1; +out: + close(cgfd); + cleanup_cgroup_environment(); + printf(err ? "### FAIL\n" : "### SUCCESS\n"); + return err; +} + +int main(int argc, char **argv) +{ + if (argc < 2) { + fprintf(stderr, + "%s has to be run via %s.sh. Skip direct run.\n", + argv[0], argv[0]); + exit(0); + } + return run_test(); +} diff --git a/tools/testing/selftests/bpf/test_sock_addr.sh b/tools/testing/selftests/bpf/test_sock_addr.sh new file mode 100755 index 000000000000..c6e1dcf992c4 --- /dev/null +++ b/tools/testing/selftests/bpf/test_sock_addr.sh @@ -0,0 +1,57 @@ +#!/bin/sh + +set -eu + +ping_once() +{ + ping -q -c 1 -W 1 ${1%%/*} >/dev/null 2>&1 +} + +wait_for_ip() +{ + local _i + echo -n "Wait for testing IPv4/IPv6 to become available " + for _i in $(seq ${MAX_PING_TRIES}); do + echo -n "." + if ping_once ${TEST_IPv4} && ping_once ${TEST_IPv6}; then + echo " OK" + return + fi + done + echo 1>&2 "ERROR: Timeout waiting for test IP to become available." + exit 1 +} + +setup() +{ + # Create testing interfaces not to interfere with current environment. 
+ ip link add dev ${TEST_IF} type veth peer name ${TEST_IF_PEER} + ip link set ${TEST_IF} up + ip link set ${TEST_IF_PEER} up + + ip -4 addr add ${TEST_IPv4} dev ${TEST_IF} + ip -6 addr add ${TEST_IPv6} dev ${TEST_IF} + wait_for_ip +} + +cleanup() +{ + ip link del ${TEST_IF} 2>/dev/null || : + ip link del ${TEST_IF_PEER} 2>/dev/null || : +} + +main() +{ + trap cleanup EXIT 2 3 6 15 + setup + ./test_sock_addr setup_done +} + +BASENAME=$(basename $0 .sh) +TEST_IF="${BASENAME}1" +TEST_IF_PEER="${BASENAME}2" +TEST_IPv4="127.0.0.4/8" +TEST_IPv6="::6/128" +MAX_PING_TRIES=5 + +main diff --git a/tools/testing/selftests/bpf/test_stacktrace_build_id.c b/tools/testing/selftests/bpf/test_stacktrace_build_id.c new file mode 100644 index 000000000000..b755bd783ce5 --- /dev/null +++ b/tools/testing/selftests/bpf/test_stacktrace_build_id.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018 Facebook + +#include <linux/bpf.h> +#include "bpf_helpers.h" + +#ifndef PERF_MAX_STACK_DEPTH +#define PERF_MAX_STACK_DEPTH 127 +#endif + +struct bpf_map_def SEC("maps") control_map = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(__u32), + .value_size = sizeof(__u32), + .max_entries = 1, +}; + +struct bpf_map_def SEC("maps") stackid_hmap = { + .type = BPF_MAP_TYPE_HASH, + .key_size = sizeof(__u32), + .value_size = sizeof(__u32), + .max_entries = 10000, +}; + +struct bpf_map_def SEC("maps") stackmap = { + .type = BPF_MAP_TYPE_STACK_TRACE, + .key_size = sizeof(__u32), + .value_size = sizeof(struct bpf_stack_build_id) + * PERF_MAX_STACK_DEPTH, + .max_entries = 128, + .map_flags = BPF_F_STACK_BUILD_ID, +}; + +/* taken from /sys/kernel/debug/tracing/events/random/urandom_read/format */ +struct random_urandom_args { + unsigned long long pad; + int got_bits; + int pool_left; + int input_left; +}; + +SEC("tracepoint/random/urandom_read") +int oncpu(struct random_urandom_args *args) +{ + __u32 key = 0, val = 0, *value_p; + + value_p = bpf_map_lookup_elem(&control_map, &key); + if (value_p && *value_p) + return 0; /* skip if non-zero *value_p */ + + /* The size of stackmap and stackid_hmap should be the same */ + key = bpf_get_stackid(args, &stackmap, BPF_F_USER_STACK); + if ((int)key >= 0) + bpf_map_update_elem(&stackid_hmap, &key, &val, 0); + + return 0; +} + +char _license[] SEC("license") = "GPL"; +__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */ diff --git a/tools/testing/selftests/bpf/test_tag.c b/tools/testing/selftests/bpf/test_tag.c index 8b201895c569..6272c784ca2a 100644 --- a/tools/testing/selftests/bpf/test_tag.c +++ b/tools/testing/selftests/bpf/test_tag.c @@ -12,7 +12,6 @@ #include <assert.h> #include <sys/socket.h> -#include <sys/resource.h> #include <linux/filter.h> #include <linux/bpf.h> @@ -21,6 +20,7 @@ #include <bpf/bpf.h> #include "../../../include/linux/filter.h" +#include "bpf_rlimit.h" static struct bpf_insn prog[BPF_MAXINSNS]; @@ -184,11 +184,9 @@ static void do_test(uint32_t *tests, int start_insns, int fd_map, int main(void) { - struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; uint32_t tests = 0; int i, fd_map; - setrlimit(RLIMIT_MEMLOCK, &rinf); fd_map = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(int), sizeof(int), 1, BPF_F_NO_PREALLOC); assert(fd_map > 0); diff --git a/tools/testing/selftests/bpf/test_tcpbpf_user.c b/tools/testing/selftests/bpf/test_tcpbpf_user.c index 95a370f3d378..84ab5163c828 100644 --- a/tools/testing/selftests/bpf/test_tcpbpf_user.c +++ b/tools/testing/selftests/bpf/test_tcpbpf_user.c @@ -11,12 +11,14 @@ 
#include <linux/ptrace.h> #include <linux/bpf.h> #include <sys/ioctl.h> +#include <sys/time.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <bpf/bpf.h> #include <bpf/libbpf.h> #include "bpf_util.h" +#include "bpf_rlimit.h" #include <linux/perf_event.h> #include "test_tcpbpf.h" diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 437c0b1c9d21..3e7718b1a9ae 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -24,7 +24,6 @@ #include <limits.h> #include <sys/capability.h> -#include <sys/resource.h> #include <linux/unistd.h> #include <linux/filter.h> @@ -41,7 +40,7 @@ # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1 # endif #endif - +#include "bpf_rlimit.h" #include "../../../include/linux/filter.h" #ifndef ARRAY_SIZE @@ -57,6 +56,9 @@ #define F_NEEDS_EFFICIENT_UNALIGNED_ACCESS (1 << 0) #define F_LOAD_WITH_STRICT_ALIGNMENT (1 << 1) +#define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled" +static bool unpriv_disabled = false; + struct bpf_test { const char *descr; struct bpf_insn insns[MAX_INSNS]; @@ -1595,6 +1597,60 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SK_SKB, }, { + "direct packet read for SK_MSG", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, + offsetof(struct sk_msg_md, data)), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, + offsetof(struct sk_msg_md, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_MSG, + }, + { + "direct packet write for SK_MSG", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, + offsetof(struct sk_msg_md, data)), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, + offsetof(struct sk_msg_md, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_MSG, + }, + { + "overlapping checks for direct packet access SK_MSG", + .insns = { + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, + offsetof(struct sk_msg_md, data)), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1, + offsetof(struct sk_msg_md, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_MSG, + }, + { "check skb->mark is not writeable by sockets", .insns = { BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, @@ -2587,17 +2643,74 @@ static struct bpf_test tests[] = { .result = ACCEPT, }, { + "runtime/jit: tail_call within bounds, prog once", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_prog = { 1 }, + .result = ACCEPT, + .retval = 42, + }, + { + "runtime/jit: tail_call within bounds, prog loop", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_LD_MAP_FD(BPF_REG_2, 
0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_prog = { 1 }, + .result = ACCEPT, + .retval = 41, + }, + { + "runtime/jit: tail_call within bounds, no prog", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, 2), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .fixup_prog = { 1 }, + .result = ACCEPT, + .retval = 1, + }, + { + "runtime/jit: tail_call out of bounds", + .insns = { + BPF_MOV64_IMM(BPF_REG_3, 256), + BPF_LD_MAP_FD(BPF_REG_2, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .fixup_prog = { 1 }, + .result = ACCEPT, + .retval = 2, + }, + { "runtime/jit: pass negative index to tail_call", .insns = { BPF_MOV64_IMM(BPF_REG_3, -1), BPF_LD_MAP_FD(BPF_REG_2, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 2), BPF_EXIT_INSN(), }, .fixup_prog = { 1 }, .result = ACCEPT, + .retval = 2, }, { "runtime/jit: pass > 32bit index to tail_call", @@ -2606,11 +2719,12 @@ static struct bpf_test tests[] = { BPF_LD_MAP_FD(BPF_REG_2, 0), BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call), - BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 2), BPF_EXIT_INSN(), }, .fixup_prog = { 2 }, .result = ACCEPT, + .retval = 42, }, { "stack pointer arithmetic", @@ -11164,6 +11278,94 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, { + "jit: lsh, rsh, arsh by 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_MOV64_IMM(BPF_REG_1, 0xff), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1), + BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1), + BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, + }, + { + "jit: mov32 for ldimm64, 1", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32), + BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL), + BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, + }, + { + "jit: mov32 for ldimm64, 2", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL), + BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL), + BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, + }, + { + "jit: various mul tests", + .insns = { + BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL), + BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL), + BPF_LD_IMM64(BPF_REG_1, 0xefefefULL), + BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1), + BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL), + BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1), + BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV32_REG(BPF_REG_2, BPF_REG_2), + BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL), + BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1), + BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2), + 
BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL), + BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1), + BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL), + BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL), + BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL), + BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1), + BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 2), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .retval = 2, + }, + { "xadd/w check unaligned stack", .insns = { BPF_MOV64_IMM(BPF_REG_0, 1), @@ -11245,16 +11447,61 @@ static int create_map(uint32_t size_value, uint32_t max_elem) return fd; } +static int create_prog_dummy1(void) +{ + struct bpf_insn prog[] = { + BPF_MOV64_IMM(BPF_REG_0, 42), + BPF_EXIT_INSN(), + }; + + return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog, + ARRAY_SIZE(prog), "GPL", 0, NULL, 0); +} + +static int create_prog_dummy2(int mfd, int idx) +{ + struct bpf_insn prog[] = { + BPF_MOV64_IMM(BPF_REG_3, idx), + BPF_LD_MAP_FD(BPF_REG_2, mfd), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_tail_call), + BPF_MOV64_IMM(BPF_REG_0, 41), + BPF_EXIT_INSN(), + }; + + return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog, + ARRAY_SIZE(prog), "GPL", 0, NULL, 0); +} + static int create_prog_array(void) { - int fd; + int p1key = 0, p2key = 1; + int mfd, p1fd, p2fd; - fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int), - sizeof(int), 4, 0); - if (fd < 0) + mfd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int), + sizeof(int), 4, 0); + if (mfd < 0) { printf("Failed to create prog array '%s'!\n", strerror(errno)); + return -1; + } - return fd; + p1fd = create_prog_dummy1(); + p2fd = create_prog_dummy2(mfd, p2key); + if (p1fd < 0 || p2fd < 0) + goto out; + if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0) + goto out; + if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0) + goto out; + close(p2fd); + close(p1fd); + + return mfd; +out: + close(p2fd); + close(p1fd); + close(mfd); + return -1; } static int create_map_in_map(void) @@ -11375,7 +11622,8 @@ static void do_test_single(struct bpf_test *test, bool unpriv, goto fail_log; } if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) { - printf("FAIL\nUnexpected error message!\n"); + printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n", + expected_err, bpf_vlog); goto fail_log; } } @@ -11459,9 +11707,20 @@ out: return ret; } +static void get_unpriv_disabled() +{ + char buf[2]; + FILE *fd; + + fd = fopen("/proc/sys/"UNPRIV_SYSCTL, "r"); + if (fgets(buf, 2, fd) == buf && atoi(buf)) + unpriv_disabled = true; + fclose(fd); +} + static int do_test(bool unpriv, unsigned int from, unsigned int to) { - int i, passes = 0, errors = 0; + int i, passes = 0, errors = 0, skips = 0; for (i = from; i < to; i++) { struct bpf_test *test = &tests[i]; @@ -11469,7 +11728,10 @@ static int do_test(bool unpriv, unsigned int from, unsigned int to) /* Program types that are not supported by non-root we * skip right away. 
*/ - if (!test->prog_type) { + if (!test->prog_type && unpriv_disabled) { + printf("#%d/u %s SKIP\n", i, test->descr); + skips++; + } else if (!test->prog_type) { if (!unpriv) set_admin(false); printf("#%d/u %s ", i, test->descr); @@ -11478,20 +11740,22 @@ static int do_test(bool unpriv, unsigned int from, unsigned int to) set_admin(true); } - if (!unpriv) { + if (unpriv) { + printf("#%d/p %s SKIP\n", i, test->descr); + skips++; + } else { printf("#%d/p %s ", i, test->descr); do_test_single(test, false, &passes, &errors); } } - printf("Summary: %d PASSED, %d FAILED\n", passes, errors); + printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes, + skips, errors); return errors ? EXIT_FAILURE : EXIT_SUCCESS; } int main(int argc, char **argv) { - struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; - struct rlimit rlim = { 1 << 20, 1 << 20 }; unsigned int from = 0, to = ARRAY_SIZE(tests); bool unpriv = !is_admin(); @@ -11512,6 +11776,12 @@ int main(int argc, char **argv) } } - setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf); + get_unpriv_disabled(); + if (unpriv && unpriv_disabled) { + printf("Cannot run as unprivileged user with sysctl %s.\n", + UNPRIV_SYSCTL); + return EXIT_FAILURE; + } + return do_test(unpriv, from, to); } diff --git a/tools/testing/selftests/bpf/test_verifier_log.c b/tools/testing/selftests/bpf/test_verifier_log.c index e9626cf5607a..8d6918c3b4a2 100644 --- a/tools/testing/selftests/bpf/test_verifier_log.c +++ b/tools/testing/selftests/bpf/test_verifier_log.c @@ -4,7 +4,6 @@ #include <string.h> #include <unistd.h> #include <sys/time.h> -#include <sys/resource.h> #include <linux/bpf.h> #include <linux/filter.h> @@ -12,6 +11,8 @@ #include <bpf/bpf.h> +#include "bpf_rlimit.h" + #define LOG_SIZE (1 << 20) #define err(str...) printf("ERROR: " str) @@ -133,16 +134,11 @@ static void test_log_bad(char *log, size_t log_len, int log_level) int main(int argc, char **argv) { - struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY }; char full_log[LOG_SIZE]; char log[LOG_SIZE]; size_t want_len; int i; - /* allow unlimited locked memory to have more consistent error code */ - if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0) - perror("Unable to lift memlock rlimit"); - memset(log, 1, LOG_SIZE); /* Test incorrect attr */ diff --git a/tools/testing/selftests/bpf/urandom_read.c b/tools/testing/selftests/bpf/urandom_read.c new file mode 100644 index 000000000000..4acfdebf36fa --- /dev/null +++ b/tools/testing/selftests/bpf/urandom_read.c @@ -0,0 +1,22 @@ +#include <stdio.h> +#include <unistd.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <stdlib.h> + +#define BUF_SIZE 256 +int main(void) +{ + int fd = open("/dev/urandom", O_RDONLY); + int i; + char buf[BUF_SIZE]; + + if (fd < 0) + return 1; + for (i = 0; i < 4; ++i) + read(fd, buf, BUF_SIZE); + + close(fd); + return 0; +} |
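
Note on the build-id stackmap: test_stacktrace_build_id.c above only fills the BPF_F_STACK_BUILD_ID stackmap from the random/urandom_read tracepoint; the user-space side that reads it back is not part of this section. As a rough sketch of how such a consumer could walk one entry (assuming the bpf_stack_build_id uapi definitions from the same series, and that the stackmap fd and a stack id are already known; the helper name is made up):

#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

#ifndef PERF_MAX_STACK_DEPTH
#define PERF_MAX_STACK_DEPTH 127
#endif

/* Hypothetical helper: print the valid frames of one build-id stack. */
void dump_build_id_stack(int stackmap_fd, __u32 stack_id)
{
	struct bpf_stack_build_id ids[PERF_MAX_STACK_DEPTH];
	int i, j;

	/* Each stackmap value is an array of bpf_stack_build_id entries,
	 * because the map was created with BPF_F_STACK_BUILD_ID.
	 */
	if (bpf_map_lookup_elem(stackmap_fd, &stack_id, ids) != 0) {
		perror("bpf_map_lookup_elem");
		return;
	}

	for (i = 0; i < PERF_MAX_STACK_DEPTH; i++) {
		if (ids[i].status != BPF_STACK_BUILD_ID_VALID)
			continue;
		printf("frame %d: offset 0x%llx build-id ",
		       i, (unsigned long long)ids[i].offset);
		for (j = 0; j < BPF_BUILD_ID_SIZE; j++)
			printf("%02x", ids[i].build_id[j]);
		printf("\n");
	}
}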
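
Note on bpf_rlimit.h: several hunks above (test_tag.c, test_tcpbpf_user.c, test_verifier.c, test_verifier_log.c) drop their private setrlimit(RLIMIT_MEMLOCK, ...) calls in favour of a shared bpf_rlimit.h include that is not shown in this section. A minimal sketch of what such a header is assumed to provide (the real file may differ):

/* Assumed contents of bpf_rlimit.h: a constructor that lifts
 * RLIMIT_MEMLOCK before main() runs, so each test no longer has to
 * call setrlimit() itself.
 */
#include <sys/resource.h>
#include <stdio.h>

static __attribute__((constructor)) void bpf_rlimit_ctor(void)
{
	struct rlimit rlim = { RLIM_INFINITY, RLIM_INFINITY };

	/* Lift the locked-memory limit so map and program creation in
	 * the tests does not fail on small default limits.
	 */
	if (setrlimit(RLIMIT_MEMLOCK, &rlim) < 0)
		perror("Unable to lift memlock rlimit");
}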
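
Note on the SK_MSG verifier cases: the three new BPF_PROG_TYPE_SK_MSG tests encode, in raw instructions, the usual data/data_end bounds-check idiom. For reference, a restricted-C sketch of the same pattern (section name and return policy are illustrative, not taken from the patch):

#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("sk_msg")
int msg_peek_first_byte(struct sk_msg_md *msg)
{
	void *data = (void *)(long)msg->data;
	void *data_end = (void *)(long)msg->data_end;

	/* The verifier only permits the dereference after the comparison
	 * proves that data + 8 does not pass data_end, which is exactly
	 * what the BPF_JGT check in the raw test does.
	 */
	if (data + 8 > data_end)
		return SK_PASS;

	return *(char *)data ? SK_PASS : SK_DROP;
}

char _license[] SEC("license") = "GPL";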
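
Note on the tail_call retvals: the new runtime/jit tail_call tests rely on create_prog_dummy1()/create_prog_dummy2() further down. Slot 0 of the prog array returns 42, slot 1 tail-calls itself until the kernel's tail-call limit cuts the chain and then returns 41, and a missing or out-of-range slot makes the call a no-op, leaving the caller's own return value in place; hence the expected retvals 42, 41, 1 and 2. A restricted-C sketch of the same two dummy programs (section names and map layout are illustrative):

#include <linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") jmp_table = {
	.type = BPF_MAP_TYPE_PROG_ARRAY,
	.key_size = sizeof(__u32),
	.value_size = sizeof(__u32),
	.max_entries = 4,
};

SEC("socket/dummy1")
int dummy1(struct __sk_buff *skb)
{
	return 42;	/* what "prog once" expects back */
}

SEC("socket/dummy2")
int dummy2(struct __sk_buff *skb)
{
	/* Tail-call slot 1, i.e. this program itself; once the tail-call
	 * limit is hit the call falls through and 41 is returned.
	 */
	bpf_tail_call(skb, &jmp_table, 1);
	return 41;
}

char _license[] SEC("license") = "GPL";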
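
Note on the mul constants: the values in the "jit: various mul tests" case can be sanity-checked on the host. The 64-bit multiply must yield the full product, while the ALU32 multiply keeps only the low 32 bits of the product of the operands' low 32-bit halves. A small stand-alone check (not part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* 64-bit: 0xfefefe * 0xefefef */
	uint64_t full = 0xfefefeULL * 0xefefefULL;
	/* ALU32: only the low 32 bits of each operand matter, and only
	 * the low 32 bits of the product survive.
	 */
	uint32_t low = (uint32_t)((0xeeff0d413122ULL & 0xffffffffULL) *
				  0xfefefeULL);

	printf("64-bit: %#llx (test expects 0xeeff0d413122)\n",
	       (unsigned long long)full);
	printf("32-bit: %#x (test expects 0x952a7bbc)\n", low);
	return 0;
}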