author     Daniel Borkmann <daniel@iogearbox.net>  2016-05-13 19:08:31 +0200
committer  David S. Miller <davem@davemloft.net>   2016-05-16 13:49:32 -0400
commit     d1c55ab5e41fcd72cb0a8bef86d3f652ad9ad9f5 (patch)
tree       70d10a938de18a64f103de01ab2ffa8a66e52178 /kernel
parent     c237ee5eb33bf19fe0591c04ff8db19da7323a83 (diff)
bpf: prepare bpf_int_jit_compile/bpf_prog_select_runtime apis
Since the blinding is strictly only called from inside eBPF JITs, we need to change the signatures of bpf_int_jit_compile() and bpf_prog_select_runtime() first, in order to prepare for the fact that the eBPF program we're dealing with can change underneath us. Hence, call sites need to use the latest prog that is returned to them. No functional change in this patch.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
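To illustrate the call-site pattern described above, here is a minimal sketch, not code from this patch; the helper name, the assumed header locations, and the error handling are illustrative assumptions. The point is that callers must continue with the prog returned by bpf_prog_select_runtime(), since the JIT may have handed back a rewritten program:

#include <linux/filter.h>	/* struct bpf_prog, bpf_prog_select_runtime() -- header assumed */
#include <linux/err.h>		/* ERR_PTR() */

/* Sketch only: select_runtime_example() is a hypothetical helper. */
static struct bpf_prog *select_runtime_example(struct bpf_prog *prog)
{
	int err;

	/* The returned prog must be used from here on; the original
	 * pointer may have been replaced, e.g. by constant blinding.
	 */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		return ERR_PTR(err);	/* caller unwinds, e.g. frees used maps */

	return prog;
}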
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/core.c     18
-rw-r--r--  kernel/bpf/syscall.c   2
2 files changed, 15 insertions(+), 5 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 49b5538a5301..70f0821aca47 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -761,15 +761,22 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
 /**
  * bpf_prog_select_runtime - select exec runtime for BPF program
  * @fp: bpf_prog populated with internal BPF program
+ * @err: pointer to error variable
  *
  * Try to JIT eBPF program, if JIT is not available, use interpreter.
  * The BPF program will be executed via BPF_PROG_RUN() macro.
  */
-int bpf_prog_select_runtime(struct bpf_prog *fp)
+struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 {
 	fp->bpf_func = (void *) __bpf_prog_run;
 
-	bpf_int_jit_compile(fp);
+	/* eBPF JITs can rewrite the program in case constant
+	 * blinding is active. However, in case of error during
+	 * blinding, bpf_int_jit_compile() must always return a
+	 * valid program, which in this case would simply not
+	 * be JITed, but falls back to the interpreter.
+	 */
+	fp = bpf_int_jit_compile(fp);
 	bpf_prog_lock_ro(fp);
 
 	/* The tail call compatibility check can only be done at
@@ -777,7 +784,9 @@ int bpf_prog_select_runtime(struct bpf_prog *fp)
 	 * with JITed or non JITed program concatenations and not
 	 * all eBPF JITs might immediately support all features.
 	 */
-	return bpf_check_tail_call(fp);
+	*err = bpf_check_tail_call(fp);
+
+	return fp;
 }
 
 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
@@ -859,8 +868,9 @@ const struct bpf_func_proto bpf_tail_call_proto = {
 };
 
 /* For classic BPF JITs that don't implement bpf_int_jit_compile(). */
-void __weak bpf_int_jit_compile(struct bpf_prog *prog)
+struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog)
 {
+	return prog;
 }
 
 bool __weak bpf_helper_changes_skb_data(void *func)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index cf5e9f7ad13a..46ecce4b79ed 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -762,7 +762,7 @@ static int bpf_prog_load(union bpf_attr *attr)
 	fixup_bpf_calls(prog);
 
 	/* eBPF program is ready to be JITed */
-	err = bpf_prog_select_runtime(prog);
+	prog = bpf_prog_select_runtime(prog, &err);
 	if (err < 0)
 		goto free_used_maps;
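For reference, a minimal sketch of what an architecture JIT's bpf_int_jit_compile() override looks like under the new signature; only the prototype and the rule that a valid prog must always be returned come from this patch, while the body and the assumed header are illustrative:

#include <linux/filter.h>	/* struct bpf_prog -- header assumed */

/* Sketch only: an arch JIT returns whichever prog should actually run.
 * If blinding or native code generation fails, it must still hand back
 * a valid program, which then simply executes in the interpreter.
 */
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	/* ... attempt constant blinding and native code emission here ... */

	return prog;	/* possibly a rewritten clone, never NULL */
}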