author		Andrii Nakryiko <andrii@kernel.org>	2022-12-06 15:33:43 -0800
committer	Alexei Starovoitov <ast@kernel.org>	2022-12-06 19:14:38 -0800
commit		bffdeaa8a5af7200b0e74c9d5a41167f86626a36 (patch)
tree		aa79e45550ea589b05d5b86292bccdce0d1e4c85 /kernel/bpf
parent		d8939cb0a03ce7e4e69f65bbd31b79fe42f7d5e6 (diff)
bpf: decouple prune and jump points
The BPF verifier marks some instructions as prune points. Currently these
prune points serve two purposes.

It's a point where the verifier tries to find a previously verified state
and check the current state's equivalence to short-circuit verification
for the current code path. But also, currently, it's a point where jump
history, used for precision backtracking, is updated. This is done so
that a non-linear flow of execution can be properly backtracked.

Such coupling is coincidental and unnecessary. Some prune points are not
part of any non-linear jump path, so they don't need jump history
updates. On the other hand, not all instructions that have to be recorded
in jump history are necessarily good prune points.

This patch splits prune and jump points into independent flags. For now,
all prune points are also marked as jump points to minimize the amount of
changes in this patch, but the next patch will optimize prune vs jmp
point placement. No functional changes are intended.

Acked-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20221206233345.438540-2-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
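For reference, a condensed sketch of the resulting scheme. The helper
calls below mirror the diff; the flag fields themselves live in struct
bpf_insn_aux_data in include/linux/bpf_verifier.h, which is outside this
kernel/bpf diffstat, so the struct layout shown here is illustrative
rather than verbatim:

	struct bpf_insn_aux_data {
		/* ... */
		bool prune_point;	/* try state-equivalence search here */
		bool jmp_point;		/* record jump history here */
	};

	/* A conditional jump is marked as both, independently: */
	mark_prune_point(env, t);
	mark_jmp_point(env, t);

	/* push_jmp_history() now bails out anywhere that is not an
	 * explicitly marked jump point, so callers may invoke it
	 * unconditionally:
	 */
	if (!is_jmp_point(env, env->insn_idx))
		return 0;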
Diffstat (limited to 'kernel/bpf')
-rw-r--r--	kernel/bpf/verifier.c | 57
1 file changed, 43 insertions(+), 14 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 67a13110bc22..cf580e4c00f5 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2530,6 +2530,16 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
return 0;
}
+static void mark_jmp_point(struct bpf_verifier_env *env, int idx)
+{
+ env->insn_aux_data[idx].jmp_point = true;
+}
+
+static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
+{
+ return env->insn_aux_data[insn_idx].jmp_point;
+}
+
/* for any branch, call, exit record the history of jmps in the given state */
static int push_jmp_history(struct bpf_verifier_env *env,
struct bpf_verifier_state *cur)
@@ -2538,6 +2548,9 @@ static int push_jmp_history(struct bpf_verifier_env *env,
struct bpf_idx_pair *p;
size_t alloc_size;
+ if (!is_jmp_point(env, env->insn_idx))
+ return 0;
+
cnt++;
alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
p = krealloc(cur->jmp_history, alloc_size, GFP_USER);
@@ -12140,11 +12153,16 @@ static struct bpf_verifier_state_list **explored_state(
return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)];
}
-static void init_explored_state(struct bpf_verifier_env *env, int idx)
+static void mark_prune_point(struct bpf_verifier_env *env, int idx)
{
env->insn_aux_data[idx].prune_point = true;
}
+static bool is_prune_point(struct bpf_verifier_env *env, int insn_idx)
+{
+ return env->insn_aux_data[insn_idx].prune_point;
+}
+
enum {
DONE_EXPLORING = 0,
KEEP_EXPLORING = 1,
@@ -12173,9 +12191,11 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
return -EINVAL;
}
- if (e == BRANCH)
+ if (e == BRANCH) {
/* mark branch target for state pruning */
- init_explored_state(env, w);
+ mark_prune_point(env, w);
+ mark_jmp_point(env, w);
+ }
if (insn_state[w] == 0) {
/* tree-edge */
@@ -12213,10 +12233,13 @@ static int visit_func_call_insn(int t, int insn_cnt,
if (ret)
return ret;
- if (t + 1 < insn_cnt)
- init_explored_state(env, t + 1);
+ if (t + 1 < insn_cnt) {
+ mark_prune_point(env, t + 1);
+ mark_jmp_point(env, t + 1);
+ }
if (visit_callee) {
- init_explored_state(env, t);
+ mark_prune_point(env, t);
+ mark_jmp_point(env, t);
ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env,
/* It's ok to allow recursion from CFG point of
* view. __check_func_call() will do the actual
@@ -12250,13 +12273,15 @@ static int visit_insn(int t, int insn_cnt, struct bpf_verifier_env *env)
return DONE_EXPLORING;
case BPF_CALL:
- if (insns[t].imm == BPF_FUNC_timer_set_callback)
+ if (insns[t].imm == BPF_FUNC_timer_set_callback) {
/* Mark this call insn to trigger is_state_visited() check
* before call itself is processed by __check_func_call().
* Otherwise new async state will be pushed for further
* exploration.
*/
- init_explored_state(env, t);
+ mark_prune_point(env, t);
+ mark_jmp_point(env, t);
+ }
return visit_func_call_insn(t, insn_cnt, insns, env,
insns[t].src_reg == BPF_PSEUDO_CALL);
@@ -12274,18 +12299,22 @@ static int visit_insn(int t, int insn_cnt, struct bpf_verifier_env *env)
* but it's marked, since backtracking needs
* to record jmp history in is_state_visited().
*/
- init_explored_state(env, t + insns[t].off + 1);
+ mark_prune_point(env, t + insns[t].off + 1);
+ mark_jmp_point(env, t + insns[t].off + 1);
/* tell verifier to check for equivalent states
* after every call and jump
*/
- if (t + 1 < insn_cnt)
- init_explored_state(env, t + 1);
+ if (t + 1 < insn_cnt) {
+ mark_prune_point(env, t + 1);
+ mark_jmp_point(env, t + 1);
+ }
return ret;
default:
/* conditional jump with two edges */
- init_explored_state(env, t);
+ mark_prune_point(env, t);
+ mark_jmp_point(env, t);
ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
if (ret)
return ret;
@@ -13320,11 +13349,11 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
bool add_new_state = env->test_state_freq ? true : false;
cur->last_insn_idx = env->prev_insn_idx;
- if (!env->insn_aux_data[insn_idx].prune_point)
+ if (!is_prune_point(env, insn_idx))
/* this 'insn_idx' instruction wasn't marked, so we will not
* be doing state search here
*/
- return 0;
+ return push_jmp_history(env, cur);
/* bpf progs typically have pruning point every 4 instructions
* http://vger.kernel.org/bpfconf2019.html#session-1