summaryrefslogtreecommitdiffstats
path: root/arch/x86/net
diff options
context:
space:
mode:
authorAlexei Starovoitov <ast@kernel.org>2020-03-10 17:39:06 -0700
committerDaniel Borkmann <daniel@iogearbox.net>2020-03-11 14:07:32 +0100
commit13fac1d851e09109096b5862bf37c3da6908fb48 (patch)
tree927815f0128976e44e7f0435f52b91dd348ca4ea /arch/x86/net
parentbabf3164095b0670435910340c2a1eec37757b57 (diff)
downloadlinux-13fac1d851e09109096b5862bf37c3da6908fb48.tar.bz2
bpf: Fix trampoline generation for fmod_ret programs
fmod_ret progs are emitted as:

  start = __bpf_prog_enter();
  call fmod_ret
  *(u64 *)(rbp - 8) = rax
  __bpf_prog_exit(, start);
  test eax, eax
  jne do_fexit

That 'test eax, eax' is working by accident. The compiler is free to use rax
inside __bpf_prog_exit() or inside functions that __bpf_prog_exit() is calling.
Which caused "test_progs -t modify_return" to sporadically fail depending on
compiler version and kconfig. Fix it by using 'cmp [rbp - 8], 0' instead of
'test eax, eax'.

Fixes: ae24082331d9 ("bpf: Introduce BPF_MODIFY_RETURN")
Reported-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Acked-by: KP Singh <kpsingh@google.com>
Link: https://lore.kernel.org/bpf/20200311003906.3643037-1-ast@kernel.org
Diffstat (limited to 'arch/x86/net')
-rw-r--r--arch/x86/net/bpf_jit_comp.c31
1 files changed, 5 insertions, 26 deletions
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index b1fd000feb89..5ea7c2cf7ab4 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1449,23 +1449,6 @@ static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
return 0;
}
-static int emit_mod_ret_check_imm8(u8 **pprog, int value)
-{
- u8 *prog = *pprog;
- int cnt = 0;
-
- if (!is_imm8(value))
- return -EINVAL;
-
- if (value == 0)
- EMIT2(0x85, add_2reg(0xC0, BPF_REG_0, BPF_REG_0));
- else
- EMIT3(0x83, add_1reg(0xF8, BPF_REG_0), value);
-
- *pprog = prog;
- return 0;
-}
-
static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
struct bpf_tramp_progs *tp, int stack_size)
{
@@ -1485,7 +1468,7 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
u8 **branches)
{
u8 *prog = *pprog;
- int i;
+ int i, cnt = 0;
/* The first fmod_ret program will receive a garbage return value.
* Set this to 0 to avoid confusing the program.
@@ -1496,16 +1479,12 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true))
return -EINVAL;
- /* Generate a branch:
- *
- * if (ret != 0)
+ /* mod_ret prog stored return value into [rbp - 8]. Emit:
+ * if (*(u64 *)(rbp - 8) != 0)
* goto do_fexit;
- *
- * If needed this can be extended to any integer value which can
- * be passed by user-space when the program is loaded.
*/
- if (emit_mod_ret_check_imm8(&prog, 0))
- return -EINVAL;
+ /* cmp QWORD PTR [rbp - 0x8], 0x0 */
+ EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
/* Save the location of the branch and Generate 6 nops
* (4 bytes for an offset and 2 bytes for the jump) These nops