commit 148709bc278a2e49ff7b10cee2d8cdc33b894598
tree      c27a039d9886cae1572f81d80e35af60bf9cd740
parent    24dee0c7478d1a1e00abdf5625b7f921467325dc
parent    fe3300897cbfd76c6cb825776e5ac0ca50a91ca4
author    David S. Miller <davem@davemloft.net>  2019-12-11 20:13:45 -0800
committer David S. Miller <davem@davemloft.net>  2019-12-11 20:13:45 -0800
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Alexei Starovoitov says:
====================
pull-request: bpf 2019-12-11
The following pull-request contains BPF updates for your *net* tree.
We've added 8 non-merge commits during the last 1 day(s) which contain
a total of 10 files changed, 126 insertions(+), 18 deletions(-).
The main changes are:
1) Make BPF trampoline co-exist with ftrace-based tracers, from Alexei.
2) Fix build in minimal configurations, from Arnd.
3) Fix mips, riscv bpf_tail_call limit, from Paul.
4) Fix bpftool segfault, from Toke.
5) Fix samples/bpf, from Daniel.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
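
The heart of change 1) is a new dispatch in kernel/bpf/trampoline.c (full hunks below): if the target function's fentry site is already managed by ftrace, the trampoline is installed through the ftrace direct-call API instead of patching the call site with bpf_arch_text_poke(), so ftrace-based tracers and BPF trampolines can share the same functions. As a reading aid, here is a standalone userspace model of that decision — the kernel calls are reduced to prints and the struct is a stand-in, so this is a sketch of the control flow, not kernel code:

	#include <stdbool.h>
	#include <stdio.h>

	/* Userspace model of the new dispatch: once the patched address is an
	 * ftrace-managed fentry site, every attach/update/detach must go
	 * through the ftrace direct-call API rather than bpf_arch_text_poke().
	 */
	struct trampoline_model {
		bool ftrace_managed;	/* mirrors tr->func.ftrace_managed */
		unsigned int selector;	/* mirrors tr->selector */
	};

	static void update(struct trampoline_model *tr, int fentry_cnt, int fexit_cnt)
	{
		if (fentry_cnt + fexit_cnt == 0) {
			puts(tr->ftrace_managed ? "unregister_ftrace_direct()"
						: "bpf_arch_text_poke(..., old, NULL)");
			tr->selector = 0;
			return;
		}
		if (tr->selector)	/* progs already running at this address */
			puts(tr->ftrace_managed ? "modify_ftrace_direct()"
						: "bpf_arch_text_poke(..., old, new)");
		else			/* first time registering */
			puts(tr->ftrace_managed ? "register_ftrace_direct()"
						: "bpf_arch_text_poke(..., NULL, new)");
		tr->selector++;
	}

	int main(void)
	{
		struct trampoline_model tr = { .ftrace_managed = true };

		update(&tr, 1, 0);	/* first fentry prog attaches -> register   */
		update(&tr, 1, 1);	/* an fexit prog joins        -> modify     */
		update(&tr, 0, 0);	/* everything detaches        -> unregister */
		return 0;
	}

The full diffstat and patches follow.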
-rw-r--r--  arch/mips/net/ebpf_jit.c                    |  9
-rw-r--r--  arch/riscv/net/bpf_jit_comp.c               |  4
-rw-r--r--  include/linux/bpf.h                         |  1
-rw-r--r--  kernel/bpf/btf.c                            |  1
-rw-r--r--  kernel/bpf/trampoline.c                     | 64
-rw-r--r--  samples/bpf/syscall_tp_kern.c               | 18
-rw-r--r--  samples/bpf/trace_event_user.c              |  4
-rw-r--r--  tools/bpf/bpftool/prog.c                    |  2
-rw-r--r--  tools/bpf/bpftool/xlated_dumper.c           |  2
-rwxr-xr-x  tools/testing/selftests/bpf/test_ftrace.sh | 39

10 files changed, 126 insertions(+), 18 deletions(-)
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 46b76751f3a5..3ec69d9cbe88 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -604,6 +604,7 @@ static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
 static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
 {
 	int off, b_off;
+	int tcc_reg;
 
 	ctx->flags |= EBPF_SEEN_TC;
 	/*
@@ -616,14 +617,14 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
 	b_off = b_imm(this_idx + 1, ctx);
 	emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
 	/*
-	 * if (--TCC < 0)
+	 * if (TCC-- < 0)
 	 *     goto out;
 	 */
 	/* Delay slot */
-	emit_instr(ctx, daddiu, MIPS_R_T5,
-		   (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
+	tcc_reg = (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4;
+	emit_instr(ctx, daddiu, MIPS_R_T5, tcc_reg, -1);
 	b_off = b_imm(this_idx + 1, ctx);
-	emit_instr(ctx, bltz, MIPS_R_T5, b_off);
+	emit_instr(ctx, bltz, tcc_reg, b_off);
 	/*
 	 * prog = array->ptrs[index];
 	 * if (prog == NULL)
diff --git a/arch/riscv/net/bpf_jit_comp.c b/arch/riscv/net/bpf_jit_comp.c
index 5451ef3845f2..7fbf56aab661 100644
--- a/arch/riscv/net/bpf_jit_comp.c
+++ b/arch/riscv/net/bpf_jit_comp.c
@@ -631,14 +631,14 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
 		return -1;
 	emit(rv_bgeu(RV_REG_A2, RV_REG_T1, off >> 1), ctx);
 
-	/* if (--TCC < 0)
+	/* if (TCC-- < 0)
 	 *     goto out;
 	 */
 	emit(rv_addi(RV_REG_T1, tcc, -1), ctx);
 	off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
 	if (is_13b_check(off, insn))
 		return -1;
-	emit(rv_blt(RV_REG_T1, RV_REG_ZERO, off >> 1), ctx);
+	emit(rv_blt(tcc, RV_REG_ZERO, off >> 1), ctx);
 
 	/* prog = array->ptrs[index];
 	 * if (!prog)
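The two JIT hunks above fix the same off-by-one: the out-of-bounds branch must test the tail-call counter's value before the decrement (TCC-- < 0), not after (--TCC < 0), which raises the limit from 32 to 33 nested tail calls and brings MIPS and RISC-V in line with the interpreter. A standalone arithmetic check of the two predicates (ordinary userspace C; the loop stands in for repeated tail calls, and MAX_TAIL_CALL_CNT is the kernel's value at the time of this merge):

	#include <stdio.h>

	#define MAX_TAIL_CALL_CNT 32

	/* Old predicate: branch out when the decremented value is negative. */
	static int calls_allowed_pre_decrement(void)
	{
		int tcc = MAX_TAIL_CALL_CNT, calls = 0;

		for (;;) {
			if (--tcc < 0)		/* if (--TCC < 0) goto out; */
				break;
			calls++;
		}
		return calls;
	}

	/* New predicate: branch out on the value TCC held before decrementing. */
	static int calls_allowed_post_decrement(void)
	{
		int tcc = MAX_TAIL_CALL_CNT, calls = 0;

		for (;;) {
			if (tcc-- < 0)		/* if (TCC-- < 0) goto out; */
				break;
			calls++;
		}
		return calls;
	}

	int main(void)
	{
		printf("--TCC < 0 : %d tail calls\n", calls_allowed_pre_decrement());  /* 32 */
		printf("TCC-- < 0 : %d tail calls\n", calls_allowed_post_decrement()); /* 33 */
		return 0;
	}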
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 35903f148be5..ac7de5291509 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -461,6 +461,7 @@ struct bpf_trampoline {
 	struct {
 		struct btf_func_model model;
 		void *addr;
+		bool ftrace_managed;
 	} func;
 	/* list of BPF programs using this trampoline */
 	struct hlist_head progs_hlist[BPF_TRAMP_MAX];
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 7d40da240891..ed2075884724 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -3470,6 +3470,7 @@ static u8 bpf_ctx_convert_map[] = {
 	[_id] = __ctx_convert##_id,
 #include <linux/bpf_types.h>
 #undef BPF_PROG_TYPE
+	0, /* avoid empty array */
 };
 #undef BPF_MAP_TYPE
 
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 7e89f1f49d77..23b0d5cfd47e 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -3,6 +3,7 @@
 #include <linux/hash.h>
 #include <linux/bpf.h>
 #include <linux/filter.h>
+#include <linux/ftrace.h>
 
 /* btf_vmlinux has ~22k attachable functions. 1k htab is enough.
  */
 #define TRAMPOLINE_HASH_BITS 10
@@ -59,6 +60,60 @@ out:
 	return tr;
 }
 
+static int is_ftrace_location(void *ip)
+{
+	long addr;
+
+	addr = ftrace_location((long)ip);
+	if (!addr)
+		return 0;
+	if (WARN_ON_ONCE(addr != (long)ip))
+		return -EFAULT;
+	return 1;
+}
+
+static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
+{
+	void *ip = tr->func.addr;
+	int ret;
+
+	if (tr->func.ftrace_managed)
+		ret = unregister_ftrace_direct((long)ip, (long)old_addr);
+	else
+		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);
+	return ret;
+}
+
+static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr)
+{
+	void *ip = tr->func.addr;
+	int ret;
+
+	if (tr->func.ftrace_managed)
+		ret = modify_ftrace_direct((long)ip, (long)old_addr, (long)new_addr);
+	else
+		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
+	return ret;
+}
+
+/* first time registering */
+static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
+{
+	void *ip = tr->func.addr;
+	int ret;
+
+	ret = is_ftrace_location(ip);
+	if (ret < 0)
+		return ret;
+	tr->func.ftrace_managed = ret;
+
+	if (tr->func.ftrace_managed)
+		ret = register_ftrace_direct((long)ip, (long)new_addr);
+	else
+		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
+	return ret;
+}
+
 /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
  * bytes on x86. Pick a number to fit into PAGE_SIZE / 2
  */
@@ -77,8 +132,7 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
 	int err;
 
 	if (fentry_cnt + fexit_cnt == 0) {
-		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL,
-					 old_image, NULL);
+		err = unregister_fentry(tr, old_image);
 		tr->selector = 0;
 		goto out;
 	}
@@ -105,12 +159,10 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
 
 	if (tr->selector)
 		/* progs already running at this address */
-		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL,
-					 old_image, new_image);
+		err = modify_fentry(tr, old_image, new_image);
 	else
 		/* first time registering */
-		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_CALL, NULL,
-					 new_image);
+		err = register_fentry(tr, new_image);
 	if (err)
 		goto out;
 	tr->selector++;
diff --git a/samples/bpf/syscall_tp_kern.c b/samples/bpf/syscall_tp_kern.c
index 1d78819ffef1..630ce8c4d5a2 100644
--- a/samples/bpf/syscall_tp_kern.c
+++ b/samples/bpf/syscall_tp_kern.c
@@ -47,13 +47,27 @@ static __always_inline void count(void *map)
 SEC("tracepoint/syscalls/sys_enter_open")
 int trace_enter_open(struct syscalls_enter_open_args *ctx)
 {
-	count((void *)&enter_open_map);
+	count(&enter_open_map);
+	return 0;
+}
+
+SEC("tracepoint/syscalls/sys_enter_openat")
+int trace_enter_open_at(struct syscalls_enter_open_args *ctx)
+{
+	count(&enter_open_map);
 	return 0;
 }
 
 SEC("tracepoint/syscalls/sys_exit_open")
 int trace_enter_exit(struct syscalls_exit_open_args *ctx)
 {
-	count((void *)&exit_open_map);
+	count(&exit_open_map);
+	return 0;
+}
+
+SEC("tracepoint/syscalls/sys_exit_openat")
+int trace_enter_exit_at(struct syscalls_exit_open_args *ctx)
+{
+	count(&exit_open_map);
 	return 0;
 }
diff --git a/samples/bpf/trace_event_user.c b/samples/bpf/trace_event_user.c
index 16a16eadd509..749a50f2f9f3 100644
--- a/samples/bpf/trace_event_user.c
+++ b/samples/bpf/trace_event_user.c
@@ -37,9 +37,9 @@ static void print_ksym(__u64 addr)
 	}
 
 	printf("%s;", sym->name);
-	if (!strcmp(sym->name, "sys_read"))
+	if (!strstr(sym->name, "sys_read"))
 		sys_read_seen = true;
-	else if (!strcmp(sym->name, "sys_write"))
+	else if (!strstr(sym->name, "sys_write"))
 		sys_write_seen = true;
 }
 
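The trace_event_user.c hunk swaps exact symbol comparison for a substring search because modern kernels expose syscall entry points under arch-prefixed kallsyms names, e.g. __x64_sys_read on x86-64, which an exact strcmp against "sys_read" can never match. A minimal standalone illustration (the symbol name is an assumed x86-64 example):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* Syscall symbol as kallsyms reports it on an x86-64 kernel. */
		const char *sym = "__x64_sys_read";

		/* Exact match: the architecture prefix makes this fail. */
		printf("strcmp finds it: %s\n", !strcmp(sym, "sys_read") ? "yes" : "no");

		/* Substring search: still locates the syscall name. */
		printf("strstr finds it: %s\n", strstr(sym, "sys_read") ? "yes" : "no");
		return 0;
	}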
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 4535c863d2cd..2ce9c5ba1934 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -493,7 +493,7 @@ static int do_dump(int argc, char **argv)
 	info = &info_linear->info;
 
 	if (mode == DUMP_JITED) {
-		if (info->jited_prog_len == 0) {
+		if (info->jited_prog_len == 0 || !info->jited_prog_insns) {
 			p_info("no instructions returned");
 			goto err_free;
 		}
diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c
index 494d7ae3614d..5b91ee65a080 100644
--- a/tools/bpf/bpftool/xlated_dumper.c
+++ b/tools/bpf/bpftool/xlated_dumper.c
@@ -174,7 +174,7 @@ static const char *print_call(void *private_data,
 	struct kernel_sym *sym;
 
 	if (insn->src_reg == BPF_PSEUDO_CALL &&
-	    (__u32) insn->imm < dd->nr_jited_ksyms)
+	    (__u32) insn->imm < dd->nr_jited_ksyms && dd->jited_ksyms)
 		address = dd->jited_ksyms[insn->imm];
 
 	sym = kernel_syms_search(dd, address);
diff --git a/tools/testing/selftests/bpf/test_ftrace.sh b/tools/testing/selftests/bpf/test_ftrace.sh
new file mode 100755
index 000000000000..20de7bb873bc
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_ftrace.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+TR=/sys/kernel/debug/tracing/
+clear_trace() { # reset trace output
+	echo > $TR/trace
+}
+
+disable_tracing() { # stop trace recording
+	echo 0 > $TR/tracing_on
+}
+
+enable_tracing() { # start trace recording
+	echo 1 > $TR/tracing_on
+}
+
+reset_tracer() { # reset the current tracer
+	echo nop > $TR/current_tracer
+}
+
+disable_tracing
+clear_trace
+
+echo "" > $TR/set_ftrace_filter
+echo '*printk* *console* *wake* *serial* *lock*' > $TR/set_ftrace_notrace
+
+echo "bpf_prog_test*" > $TR/set_graph_function
+echo "" > $TR/set_graph_notrace
+
+echo function_graph > $TR/current_tracer
+
+enable_tracing
+./test_progs -t fentry
+./test_progs -t fexit
+disable_tracing
+clear_trace
+
+reset_tracer
+
+exit 0
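Both bpftool hunks above apply one defensive pattern: a nonzero length or an in-range index is not proof that the accompanying pointer was populated, since the kernel may return the counts while zeroing the pointers (for example for callers without the needed privileges), so the pointer itself must be checked before it is dereferenced. A standalone sketch of the pattern; the struct below only mimics the relevant bpf_prog_info fields and is not the real uapi layout:

	#include <stdio.h>

	/* Illustrative stand-in for the two bpf_prog_info fields bpftool
	 * checks; in the real uapi struct the pointer travels as a u64.
	 */
	struct prog_info_model {
		unsigned int jited_prog_len;
		unsigned long long jited_prog_insns;
	};

	static int dump_jited(const struct prog_info_model *info)
	{
		/* The fix: test the pointer as well as the length. */
		if (info->jited_prog_len == 0 || !info->jited_prog_insns) {
			fprintf(stderr, "no instructions returned\n");
			return -1;
		}
		printf("would dump %u bytes of jited code\n", info->jited_prog_len);
		return 0;
	}

	int main(void)
	{
		struct prog_info_model masked = { .jited_prog_len = 16, .jited_prog_insns = 0 };
		struct prog_info_model full = { .jited_prog_len = 16, .jited_prog_insns = 0x1000 };

		dump_jited(&masked);	/* rejected gracefully instead of crashing */
		dump_jited(&full);	/* proceeds */
		return 0;
	}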