author     Peter Zijlstra <peterz@infradead.org>  2019-11-11 14:02:10 +0100
committer  Ingo Molnar <mingo@kernel.org>         2019-11-27 07:44:25 +0100
commit     f2cb4f95b7571f2bebcf226cd92b448fd58950ca (patch)
tree       90fdcc1b05666267186ea47bd45fc7c3f71e0c6b  /arch/x86/kernel/kprobes/opt.c
parent     04ae87a52074e2d448fc66143f1bd2c7d694d2b9 (diff)
x86/kprobe: Add comments to arch_{,un}optimize_kprobes()
Add a few words describing how it is safe to overwrite the 4 bytes
after a kprobe. Specifically, it is possible that the JMP.d32 required
for the optimized kprobe overwrites multiple instructions.

Tested-by: Alexei Starovoitov <ast@kernel.org>
Tested-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20191111132458.401696663@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
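For context on why those 4 bytes are affected: an optimized kprobe replaces the 1-byte INT3 with a 5-byte JMP rel32 (opcode 0xE9 followed by a 32-bit displacement relative to the end of the jump), so the 4 bytes after the probe address are rewritten and may cut across the following instruction(s). A minimal, hypothetical user-space sketch of that encoding follows; the function name and buffer handling are illustrative only, not the kernel's implementation.

#include <stdint.h>
#include <string.h>

/*
 * Hypothetical sketch: lay a 5-byte JMP.d32 over the 1-byte INT3 plus the
 * 4 bytes that follow it. The displacement is relative to the address of
 * the instruction after the jump (site + 5).
 */
static void synthesize_jmp_d32(uint8_t *site, const uint8_t *target)
{
	int32_t disp = (int32_t)(target - (site + 5));	/* rel32 */

	site[0] = 0xE9;			/* JMP rel32 opcode */
	memcpy(&site[1], &disp, 4);	/* clobbers the 4 bytes after the INT3 */
}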
Diffstat (limited to 'arch/x86/kernel/kprobes/opt.c')
-rw-r--r--  arch/x86/kernel/kprobes/opt.c  14
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 26e0d6c4d8d6..3f45b5c43a71 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -414,8 +414,12 @@ err:
}
/*
- * Replace breakpoints (int3) with relative jumps.
+ * Replace breakpoints (INT3) with relative jumps (JMP.d32).
* Caller must call with locking kprobe_mutex and text_mutex.
+ *
+ * The caller will have installed a regular kprobe and after that issued
+ * synchronize_rcu_tasks(); this ensures that the instruction(s) that live in
+ * the 4 bytes after the INT3 are unused and can now be overwritten.
*/
void arch_optimize_kprobes(struct list_head *oplist)
{
@@ -441,7 +445,13 @@ void arch_optimize_kprobes(struct list_head *oplist)
}
}
-/* Replace a relative jump with a breakpoint (int3). */
+/*
+ * Replace a relative jump (JMP.d32) with a breakpoint (INT3).
+ *
+ * After that, we can restore the 4 bytes after the INT3 to undo what
+ * arch_optimize_kprobes() scribbled. This is safe since those bytes will be
+ * unused once the INT3 lands.
+ */
void arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
arch_arm_kprobe(&op->kp);
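
As an aside on the restore described in the new comment: once the INT3 is back at the first byte, the 4 displacement bytes are dead and the copy saved when the probe was optimized can be written back. A simplified sketch of that ordering on a plain byte buffer; the names and layout are hypothetical, not the kernel's code.

#include <stdint.h>
#include <string.h>

/*
 * Hypothetical sketch of the unoptimize order: re-arm the breakpoint at the
 * probe address first, then restore the 4 bytes the JMP.d32 displacement had
 * overwritten. Once the INT3 lands, nothing executes out of those 4 bytes.
 */
static void unoptimize_site(uint8_t *site, const uint8_t saved[4])
{
	site[0] = 0xCC;			/* INT3 back at the probe address */
	memcpy(&site[1], saved, 4);	/* undo what optimization scribbled */
}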