author      Daniel Borkmann <daniel@iogearbox.net>   2018-07-10 00:43:22 +0200
committer   Alexei Starovoitov <ast@kernel.org>      2018-07-10 08:15:30 -0700
commit      59ee4129a279070d8e2f9dc1660330f6593c7808 (patch)
tree        ae2b186ce2f62280bbd74b91a72a9166f8c34d39 /net/core
parent      b9626f45abccd044f8048269c67720f0719f2d4e (diff)
download    linux-59ee4129a279070d8e2f9dc1660330f6593c7808.tar.bz2
bpf: fix ldx in ld_abs rewrite for large offsets
Mark reported that syzkaller triggered a KASAN-detected slab-out-of-bounds bug in ___bpf_prog_run() with a BPF_LD | BPF_ABS word load at offset 0x8001. Further investigation made clear that the issue lies in BPF_LDX_MEM(), which takes the offset as an argument but cannot encode offsets larger than S16_MAX into the instruction's signed 16-bit offset field. For this synthetic case we need to move the full address into the tmp register instead and do the LDX without an immediate offset.

Fixes: e0cea7ce988c ("bpf: implement ld_abs/ld_ind in native bpf")
Reported-by: syzbot <syzkaller@googlegroups.com>
Reported-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
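As a quick illustration of the underlying limitation (a standalone userspace sketch, not part of the commit): the offset field of a BPF instruction is a signed 16-bit value, so an ld_abs offset such as 0x8001 wraps negative if encoded directly into BPF_LDX_MEM(), which is why the fix instead adds the offset into a temporary register and loads with offset 0.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t requested = 0x8001;           /* offset from the BPF_LD | BPF_ABS insn */
	int16_t encoded = (int16_t)requested; /* what a signed 16-bit off field would hold */

	printf("requested=%d encoded=%d\n", (int)requested, (int)encoded);
	/* prints: requested=32769 encoded=-32767 -> the load lands far out of bounds */
	return 0;
}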
Diffstat (limited to 'net/core')
-rw-r--r--   net/core/filter.c   16
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index 5fa66a33927f..a13f5b1f1636 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -459,11 +459,21 @@ static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
(!unaligned_ok && offset >= 0 &&
offset + ip_align >= 0 &&
offset + ip_align % size == 0))) {
+ bool ldx_off_ok = offset <= S16_MAX;
+
*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
*insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
- *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, size, 2 + endian);
- *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, BPF_REG_D,
- offset);
+ *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
+ size, 2 + endian + (!ldx_off_ok * 2));
+ if (ldx_off_ok) {
+ *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
+ BPF_REG_D, offset);
+ } else {
+ *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D);
+ *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset);
+ *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
+ BPF_REG_TMP, 0);
+ }
if (endian)
*insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
*insn++ = BPF_JMP_A(8);
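For reference, a small standalone sketch (not kernel code; it only mirrors the arithmetic in the hunk above) of how the bounds-check jump target 2 + endian + (!ldx_off_ok * 2) counts the instructions to skip: on a failed check the JSLT must jump over the whole fast path, i.e. the load itself (one LDX, or MOV+ADD+LDX when the offset does not fit into the s16 LDX offset), an optional endianness conversion, and the trailing JMP_A.

#include <stdbool.h>
#include <stdio.h>

/* Number of instructions the JSLT skips on a failed bounds check:
 * load (1 insn, or 3 for the large-offset fallback), optional
 * BPF_ENDIAN, plus the JMP_A that ends the fast path. */
static int jump_off(bool endian, bool ldx_off_ok)
{
	return 2 + (endian ? 1 : 0) + (!ldx_off_ok ? 2 : 0);
}

int main(void)
{
	for (int endian = 0; endian <= 1; endian++)
		for (int ok = 0; ok <= 1; ok++)
			printf("endian=%d ldx_off_ok=%d -> skip %d insns\n",
			       endian, ok, jump_off(endian, ok));
	return 0;
}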