 include/linux/bpf_verifier.h                |   1
 kernel/bpf/verifier.c                       |  86
 tools/testing/selftests/bpf/test_verifier.c | 156
 3 files changed, 225 insertions(+), 18 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index aaac589e490c..94a02ceb1246 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -194,6 +194,7 @@ struct bpf_verifier_env {
struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
struct bpf_verifer_log log;
u32 subprog_starts[BPF_MAX_SUBPROGS];
+ /* maximum stack depth of each bpf function; slot 0 is the
+ * main prog, subprogs follow at slot 1 (hence the + 1)
+ */
u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1];
u32 subprog_cnt;
};
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 82ae580440b8..52ad60b3b8be 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1430,33 +1430,80 @@ static int update_stack_depth(struct bpf_verifier_env *env,
const struct bpf_func_state *func,
int off)
{
- u16 stack = env->subprog_stack_depth[func->subprogno], total = 0;
- struct bpf_verifier_state *cur = env->cur_state;
- int i;
+ u16 stack = env->subprog_stack_depth[func->subprogno];
if (stack >= -off)
return 0;
/* update known max for given subprogram */
env->subprog_stack_depth[func->subprogno] = -off;
+ return 0;
+}
- /* compute the total for current call chain */
- for (i = 0; i <= cur->curframe; i++) {
- u32 depth = env->subprog_stack_depth[cur->frame[i]->subprogno];
-
- /* round up to 32-bytes, since this is granularity
- * of interpreter stack sizes
- */
- depth = round_up(depth, 32);
- total += depth;
- }
+/* starting from the main bpf function walk all instructions of the
+ * function and recursively walk all callees that the given function
+ * can call. Ignore jump and exit insns.
+ * Since recursion is prevented by check_cfg() this algorithm
+ * only needs a local stack of MAX_CALL_FRAMES to remember callsites
+ */
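+/* Example (mirrors the selftests added below): if main() calls B()
+ * and B() calls A(), the walk visits main -> B -> A, pushing a
+ * callsite at each call and summing the rounded per-function stack
+ * sizes, so every call chain is checked against MAX_BPF_STACK
+ * (512 bytes).
+ */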
+static int check_max_stack_depth(struct bpf_verifier_env *env)
+{
+ int depth = 0, frame = 0, subprog = 0, i = 0, subprog_end;
+ struct bpf_insn *insn = env->prog->insnsi;
+ int insn_cnt = env->prog->len;
+ int ret_insn[MAX_CALL_FRAMES];
+ int ret_prog[MAX_CALL_FRAMES];
- if (total > MAX_BPF_STACK) {
+process_func:
+ /* round up to 32-bytes, since this is granularity
+ * of interpreter stack size
+ */
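+ /* max_t with 1 makes even a function that touches no stack
+ * account for one 32-byte slot
+ */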
+ depth += round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
+ if (depth > MAX_BPF_STACK) {
verbose(env, "combined stack size of %d calls is %d. Too large\n",
- cur->curframe, total);
+ frame + 1, depth);
return -EACCES;
}
- return 0;
+continue_func:
+ if (env->subprog_cnt == subprog)
+ subprog_end = insn_cnt;
+ else
+ subprog_end = env->subprog_starts[subprog];
+ for (; i < subprog_end; i++) {
+ if (insn[i].code != (BPF_JMP | BPF_CALL))
+ continue;
+ if (insn[i].src_reg != BPF_PSEUDO_CALL)
+ continue;
+ /* remember insn and function to return to */
+ ret_insn[frame] = i + 1;
+ ret_prog[frame] = subprog;
+
+ /* find the callee */
+ i = i + insn[i].imm + 1;
+ subprog = find_subprog(env, i);
+ if (subprog < 0) {
+ WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
+ i);
+ return -EFAULT;
+ }
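+ /* find_subprog() indexes subprog_starts[], while
+ * subprog_stack_depth[] keeps the main prog at slot 0,
+ * so shift by one
+ */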
+ subprog++;
+ frame++;
+ if (frame >= MAX_CALL_FRAMES) {
+ WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
+ return -EFAULT;
+ }
+ goto process_func;
+ }
+ /* end of for() loop means the last insn of the 'subprog'
+ * was reached. Doesn't matter whether it was JA or EXIT
+ */
+ if (frame == 0)
+ return 0;
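+ /* pop the frame: drop the callee's stack contribution and
+ * resume the caller at the insn after the call
+ */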
+ depth -= round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
+ frame--;
+ i = ret_insn[frame];
+ subprog = ret_prog[frame];
+ goto continue_func;
}
static int get_callee_stack_depth(struct bpf_verifier_env *env,
@@ -2079,9 +2126,9 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
struct bpf_func_state *caller, *callee;
int i, subprog, target_insn;
- if (state->curframe >= MAX_CALL_FRAMES) {
+ if (state->curframe + 1 >= MAX_CALL_FRAMES) {
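+ /* curframe is 0-based and the callee adds one more frame,
+ * so the program would need curframe + 2 frames in total
+ */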
verbose(env, "the call stack of %d frames is too deep\n",
- state->curframe);
+ state->curframe + 2);
return -E2BIG;
}
@@ -5404,6 +5451,9 @@ skip_full_check:
sanitize_dead_code(env);
if (ret == 0)
+ ret = check_max_stack_depth(env);
+
+ if (ret == 0)
/* program is valid, convert *(u32*)(ctx + off) accesses */
ret = convert_ctx_accesses(env);
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 41dcc7dbba42..5d0a574ce270 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -8764,6 +8764,162 @@ static struct bpf_test tests[] = {
.result = REJECT,
},
{
+ "calls: stack depth check using three frames. test1",
+ .insns = {
+ /* main */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ /* stack_main=32, stack_A=256, stack_B=64
+ * and max(main+A, main+B+A) = 32+64+256 = 352 < 512
+ */
+ .result = ACCEPT,
+ },
+ {
+ "calls: stack depth check using three frames. test2",
+ .insns = {
+ /* main */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ /* stack_main=32, stack_A=64, stack_B=256
+ * and max(main+A, main+B+A) = 32+256+64 = 352 < 512
+ */
+ .result = ACCEPT,
+ },
+ {
+ "calls: stack depth check using three frames. test3",
+ .insns = {
+ /* main */
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
+ BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+ /* B */
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ /* stack_main=64, stack_A=224, stack_B=256
+ * and max(main+A, main+B+A) = 64+256+224 = 544 > 512
+ */
+ .errstr = "combined stack",
+ .result = REJECT,
+ },
+ {
+ "calls: stack depth check using three frames. test4",
+ /* void main(void) {
+ * func1(0);
+ * func1(1);
+ * func2(1);
+ * }
+ * void func1(int alloc_or_recurse) {
+ * if (alloc_or_recurse) {
+ * frame_pointer[-300] = 1;
+ * } else {
+ * func2(alloc_or_recurse);
+ * }
+ * }
+ * void func2(int alloc_or_recurse) {
+ * if (alloc_or_recurse) {
+ * frame_pointer[-300] = 1;
+ * }
+ * }
+ */
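+ /* The depth walk is static: the chain main -> func1 -> func2 sums
+ * the worst-case 300-byte frames of both functions even though no
+ * runtime path allocates both at once, hence the rejection.
+ */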
+ .insns = {
+ /* main */
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+ BPF_MOV64_IMM(BPF_REG_1, 1),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_EXIT_INSN(),
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+ BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .result = REJECT,
+ .errstr = "combined stack",
+ },
+ {
+ "calls: stack depth check using three frames. test5",
+ .insns = {
+ /* main */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
+ BPF_EXIT_INSN(),
+ /* A */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+ BPF_EXIT_INSN(),
+ /* B */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
+ BPF_EXIT_INSN(),
+ /* C */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
+ BPF_EXIT_INSN(),
+ /* D */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
+ BPF_EXIT_INSN(),
+ /* E */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
+ BPF_EXIT_INSN(),
+ /* F */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
+ BPF_EXIT_INSN(),
+ /* G */
+ BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
+ BPF_EXIT_INSN(),
+ /* H */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
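+ /* main plus A..H needs 9 call frames, one more than
+ * MAX_CALL_FRAMES (8)
+ */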
+ .errstr = "call stack",
+ .result = REJECT,
+ },
+ {
"calls: spill into caller stack frame",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),