author      Daniel Borkmann <daniel@iogearbox.net>      2019-11-29 23:29:11 +0100
committer   Alexei Starovoitov <ast@kernel.org>         2019-12-01 09:34:03 -0800
commit      e1608f3fa857b600045b6df7f7dadc70eeaa4496 (patch)
tree        5993631f992a9465dc0036dabe9c6890d948acf2
parent      df786c9b947639aedbc7bb44b5dae2a7824af360 (diff)
download    linux-e1608f3fa857b600045b6df7f7dadc70eeaa4496.tar.bz2
bpf: Avoid setting bpf insns pages read-only when prog is jited
For the case where the interpreter is compiled out or when the prog is jited it is completely unnecessary to set the BPF insn pages as read-only. In fact, on frequent churn of BPF programs, it could lead to performance degradation of the system over time since it would break the direct map down to 4k pages when calling set_memory_ro() for the insn buffer on x86-64 / arm64 and there is no reverse operation. Thus, avoid breaking up large pages for data maps, and only limit this to the module range used by the JIT where it is necessary to set the image read-only and executable.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20191129222911.3710-1-daniel@iogearbox.net
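For context (not part of the commit message): bpf_prog_lock_ro() is invoked once the program has been finalized, after any JIT step. A rough, paraphrased sketch of the call site in kernel/bpf/core.c around this kernel version, with the body heavily abbreviated, is shown below; treat it as an illustration rather than the exact source.

/* kernel/bpf/core.c (abbreviated sketch): the program is either given
 * the interpreter entry point or JIT-compiled, and only then locked
 * read-only. With this patch, bpf_prog_lock_ro() becomes a no-op for
 * jited programs, so the insn buffer no longer goes through
 * set_memory_ro() and the direct map stays intact.
 */
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
	/* ... pick the interpreter func or call bpf_int_jit_compile() ... */

	bpf_prog_lock_ro(fp);

	/* ... tail-call compatibility checks, error handling ... */
	return fp;
}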
 include/linux/filter.h | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 1b1e8b8f88da..a141cb07e76a 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -776,8 +776,12 @@ bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default)
 static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
 {
-	set_vm_flush_reset_perms(fp);
-	set_memory_ro((unsigned long)fp, fp->pages);
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+	if (!fp->jited) {
+		set_vm_flush_reset_perms(fp);
+		set_memory_ro((unsigned long)fp, fp->pages);
+	}
+#endif
 }
 
 static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
 {
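For contrast (context only, not touched by this patch): the JIT image itself still gets locked down via bpf_jit_binary_lock_ro(), the helper whose definition opens at the end of the visible hunk. Around this kernel version its body looked roughly as follows; the binary header is allocated in the module range rather than the direct map, so changing its permissions does not split the direct map's large pages.

/* include/linux/filter.h (context, unchanged by this patch): the JIT
 * binary header must remain read-only and executable. Since it lives
 * in the module range used by the JIT, set_memory_ro()/set_memory_x()
 * here do not fragment the kernel direct map.
 */
static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
	set_vm_flush_reset_perms(hdr);
	set_memory_ro((unsigned long)hdr, hdr->pages);
	set_memory_x((unsigned long)hdr, hdr->pages);
}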