From bd0b90676c30fe640e7ead919b3e38846ac88ab7 Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Wed, 29 Mar 2017 13:56:56 +0900
Subject: kprobes/x86: Fix kprobe-booster not to boost far call instructions

Fix the kprobe-booster so that it does not boost far call instructions,
because a far call stores the return address (which, while
single-stepping, points into the single-step execution buffer) on the
stack, and that address has to be fixed up after single-stepping.
Currently this instruction is filtered out as not boostable in
resume_execution(), so this is not a critical issue.

Signed-off-by: Masami Hiramatsu
Cc: Ananth N Mavinakayanahalli
Cc: Andrey Ryabinin
Cc: Anil S Keshavamurthy
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: David S. Miller
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Ye Xiaolong
Link: http://lkml.kernel.org/r/149076340615.22469.14066273186134229909.stgit@devbox
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/kprobes/core.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 993fa4fe4f68..9eae5a6c5870 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -200,6 +200,8 @@ retry:
 		return (opcode != 0x62 && opcode != 0x67);
 	case 0x70:
 		return 0;	/* can't boost conditional jump */
+	case 0x90:
+		return opcode != 0x9a;	/* can't boost call far */
 	case 0xc0:
 		/* can't boost software-interruptions */
 		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
--
cgit v1.2.3
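For context on the fix above: 0x9a is the opcode of the far call, and
boosting executes the copied instruction directly from the single-step
slot, so a boosted far call would push a return address inside that slot
rather than inside the original code. Below is a minimal sketch (not part
of the patch) of spotting a far call with the x86 instruction decoder,
the same decoder a later patch in this series adopts for can_boost();
the helper name is hypothetical:

    #include <linux/types.h>
    #include <asm/insn.h>

    /*
     * Sketch only: decode one instruction at @kaddr and check for the
     * single-byte "call far" opcode (0x9a). The patch itself does the
     * equivalent check on the raw opcode byte in can_boost().
     */
    static bool is_far_call(const void *kaddr)
    {
            struct insn insn;

            kernel_insn_init(&insn, kaddr, MAX_INSN_SIZE);
            insn_get_opcode(&insn);

            return insn.opcode.nbytes == 1 && insn.opcode.bytes[0] == 0x9a;
    }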
From 129d17e8e8daf50f8aff4941fb4a9cda027ab9cf Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Wed, 29 Mar 2017 13:58:06 +0900
Subject: kprobes/x86: Fix the description of __copy_instruction()

Fix the description comment of the __copy_instruction() function, since
it has already been changed to return the length of the copied
instruction.

Signed-off-by: Masami Hiramatsu
Cc: Ananth N Mavinakayanahalli
Cc: Andrey Ryabinin
Cc: Anil S Keshavamurthy
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: David S. Miller
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Ye Xiaolong
Link: http://lkml.kernel.org/r/149076347582.22469.3775133607244923462.stgit@devbox
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/kprobes/core.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 9eae5a6c5870..81d4dc786dae 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -350,11 +350,10 @@ static int is_IF_modifier(kprobe_opcode_t *insn)
 }
 
 /*
- * Copy an instruction and adjust the displacement if the instruction
- * uses the %rip-relative addressing mode.
- * If it does, Return the address of the 32-bit displacement word.
- * If not, return null.
- * Only applicable to 64-bit x86.
+ * Copy an instruction with recovering modified instruction by kprobes
+ * and adjust the displacement if the instruction uses the %rip-relative
+ * addressing mode.
+ * This returns the length of copied instruction, or 0 if it has an error.
  */
 int __copy_instruction(u8 *dest, u8 *src)
 {
@@ -376,6 +375,7 @@ int __copy_instruction(u8 *dest, u8 *src)
 	memcpy(dest, insn.kaddr, length);
 
 #ifdef CONFIG_X86_64
+	/* Only x86_64 has RIP relative instructions */
 	if (insn_rip_relative(&insn)) {
 		s64 newdisp;
 		u8 *disp;
--
cgit v1.2.3

From 17880e4d5777df4770081ecf0750471cda57f86b Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Wed, 29 Mar 2017 13:59:15 +0900
Subject: kprobes/x86: Use instruction decoder for booster

Use the x86 instruction decoder to check whether the probed instruction
can be boosted, instead of hand-written opcode checks.

Signed-off-by: Masami Hiramatsu
Cc: Ananth N Mavinakayanahalli
Cc: Andrey Ryabinin
Cc: Anil S Keshavamurthy
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: David S. Miller
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Ye Xiaolong
Link: http://lkml.kernel.org/r/149076354563.22469.13379472209338986858.stgit@devbox
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/kprobes/core.c | 39 ++++++++++++++++-----------------------
 1 file changed, 16 insertions(+), 23 deletions(-)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 81d4dc786dae..6327f95832a0 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -169,35 +169,33 @@ NOKPROBE_SYMBOL(skip_prefixes);
  */
 int can_boost(kprobe_opcode_t *opcodes, void *addr)
 {
+	struct insn insn;
 	kprobe_opcode_t opcode;
-	kprobe_opcode_t *orig_opcodes = opcodes;
 
 	if (search_exception_tables((unsigned long)addr))
 		return 0;	/* Page fault may occur on this address. */
 
-retry:
-	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
-		return 0;
-	opcode = *(opcodes++);
+	kernel_insn_init(&insn, (void *)opcodes, MAX_INSN_SIZE);
+	insn_get_opcode(&insn);
 
 	/* 2nd-byte opcode */
-	if (opcode == 0x0f) {
-		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
-			return 0;
-		return test_bit(*opcodes,
+	if (insn.opcode.nbytes == 2)
+		return test_bit(insn.opcode.bytes[1],
 				(unsigned long *)twobyte_is_boostable);
-	}
+
+	if (insn.opcode.nbytes != 1)
+		return 0;
+
+	/* Can't boost Address-size override prefix */
+	if (unlikely(inat_is_address_size_prefix(insn.attr)))
+		return 0;
+
+	opcode = insn.opcode.bytes[0];
 
 	switch (opcode & 0xf0) {
-#ifdef CONFIG_X86_64
-	case 0x40:
-		goto retry; /* REX prefix is boostable */
-#endif
 	case 0x60:
-		if (0x63 < opcode && opcode < 0x67)
-			goto retry; /* prefixes */
-		/* can't boost Address-size override and bound */
-		return (opcode != 0x62 && opcode != 0x67);
+		/* can't boost "bound" */
+		return (opcode != 0x62);
 	case 0x70:
 		return 0;	/* can't boost conditional jump */
 	case 0x90:
@@ -212,14 +210,9 @@ retry:
 		/* can boost in/out and absolute jmps */
 		return ((opcode & 0x04) || opcode == 0xea);
 	case 0xf0:
-		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
-			goto retry; /* lock/rep(ne) prefix */
 		/* clear and set flags are boostable */
 		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
 	default:
-		/* segment override prefixes are boostable */
-		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
-			goto retry; /* prefixes */
 		/* CS override prefix and call are not boostable */
 		return (opcode != 0x2e && opcode != 0x9a);
 	}
--
cgit v1.2.3
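The rewrite works because kernel_insn_init() plus insn_get_opcode()
consume legacy and REX prefixes before recording the opcode, which is
what made the old retry-over-prefixes loop unnecessary. A small hedged
sketch (the byte buffer is an arbitrary example, not from the patch):

    #include <linux/types.h>
    #include <asm/insn.h>

    static void sketch_prefix_decode(void)
    {
            /* 0x66 is an operand-size override prefix, 0x90 is NOP */
            static const u8 code[] = { 0x66, 0x90 };
            struct insn insn;

            kernel_insn_init(&insn, code, sizeof(code));
            insn_get_opcode(&insn);

            /*
             * Here insn.opcode.nbytes == 1 and insn.opcode.bytes[0] is
             * 0x90: the prefix byte was recorded in insn.prefixes, not
             * mistaken for the opcode.
             */
    }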
From 804dec5bda9b4fcdab5f67fe61db4a0498af5221 Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Wed, 29 Mar 2017 14:00:25 +0900
Subject: kprobes/x86: Do not modify singlestep buffer while resuming

Do not modify the single-step execution buffer (kprobe.ainsn.insn)
while resuming from single-stepping; instead, add the jump-back
instruction to the buffer when the buffer is prepared.

Signed-off-by: Masami Hiramatsu
Cc: Ananth N Mavinakayanahalli
Cc: Andrey Ryabinin
Cc: Anil S Keshavamurthy
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: David S. Miller
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Ye Xiaolong
Link: http://lkml.kernel.org/r/149076361560.22469.1610155860343077495.stgit@devbox
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/kprobes/core.c | 42 ++++++++++++++++++++----------------------
 1 file changed, 20 insertions(+), 22 deletions(-)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 6327f95832a0..a654054eae7e 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -399,23 +399,36 @@ int __copy_instruction(u8 *dest, u8 *src)
 	return length;
 }
 
+/* Prepare reljump right after instruction to boost */
+static void prepare_boost(struct kprobe *p, int length)
+{
+	if (can_boost(p->ainsn.insn, p->addr) &&
+	    MAX_INSN_SIZE - length >= RELATIVEJUMP_SIZE) {
+		/*
+		 * These instructions can be executed directly if it
+		 * jumps back to correct address.
+		 */
+		synthesize_reljump(p->ainsn.insn + length, p->addr + length);
+		p->ainsn.boostable = 1;
+	} else {
+		p->ainsn.boostable = -1;
+	}
+}
+
 static int arch_copy_kprobe(struct kprobe *p)
 {
-	int ret;
+	int len;
 
 	/* Copy an instruction with recovering if other optprobe modifies it.*/
-	ret = __copy_instruction(p->ainsn.insn, p->addr);
-	if (!ret)
+	len = __copy_instruction(p->ainsn.insn, p->addr);
+	if (!len)
 		return -EINVAL;
 
 	/*
 	 * __copy_instruction can modify the displacement of the instruction,
 	 * but it doesn't affect boostable check.
 	 */
-	if (can_boost(p->ainsn.insn, p->addr))
-		p->ainsn.boostable = 0;
-	else
-		p->ainsn.boostable = -1;
+	prepare_boost(p, len);
 
 	/* Check whether the instruction modifies Interrupt Flag or not */
 	p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);
@@ -878,21 +891,6 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
 		break;
 	}
 
-	if (p->ainsn.boostable == 0) {
-		if ((regs->ip > copy_ip) &&
-		    (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
-			/*
-			 * These instructions can be executed directly if it
-			 * jumps back to correct address.
-			 */
-			synthesize_reljump((void *)regs->ip,
-				(void *)orig_ip + (regs->ip - copy_ip));
-			p->ainsn.boostable = 1;
-		} else {
-			p->ainsn.boostable = -1;
-		}
-	}
-
 	regs->ip += orig_ip - copy_ip;
 
 no_change:
--
cgit v1.2.3
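For reference, the synthesize_reljump() call that prepare_boost() now
issues emits a 5-byte near jump: opcode 0xe9 followed by a signed 32-bit
displacement relative to the end of the jump itself. A self-contained
sketch of that encoding (illustration only; the sketch_ names are
hypothetical, and the real kernel also takes care to patch text safely):

    #include <linux/string.h>
    #include <linux/types.h>

    #define SKETCH_RELJUMP_OPCODE 0xe9 /* jmp rel32 */
    #define SKETCH_RELJUMP_SIZE 5

    /* Write "jmp rel32" at @from so that execution continues at @to. */
    static void sketch_reljump(void *from, void *to)
    {
            u8 *buf = from;
            /* rel32 is relative to the end of the 5-byte instruction */
            s32 rel = (s32)((unsigned long)to -
                            ((unsigned long)from + SKETCH_RELJUMP_SIZE));

            buf[0] = SKETCH_RELJUMP_OPCODE;
            memcpy(buf + 1, &rel, sizeof(rel));
    }

This is also why prepare_boost() requires MAX_INSN_SIZE - length >=
RELATIVEJUMP_SIZE: the 5-byte jump has to fit into the slot right behind
the copied instruction.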
From 490154bc68d15de9e38fbb850fe470e32cc66407 Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Wed, 29 Mar 2017 14:01:35 +0900
Subject: kprobes/x86: Make boostable flag boolean

Make arch_specific_insn.boostable a boolean, since it has only two
states: boostable or not. A bool makes the code easier to read.

Signed-off-by: Masami Hiramatsu
Cc: Ananth N Mavinakayanahalli
Cc: Andrey Ryabinin
Cc: Anil S Keshavamurthy
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: David S. Miller
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Ye Xiaolong
Link: http://lkml.kernel.org/r/149076368566.22469.6322906866458231844.stgit@devbox
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/kprobes.h   |  7 +++----
 arch/x86/kernel/kprobes/core.c   | 12 ++++++------
 arch/x86/kernel/kprobes/ftrace.c |  2 +-
 3 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 200581691c6e..34b984c60790 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -72,14 +72,13 @@ struct arch_specific_insn {
 	/* copy of the original instruction */
 	kprobe_opcode_t *insn;
 	/*
-	 * boostable = -1: This instruction type is not boostable.
-	 * boostable = 0: This instruction type is boostable.
-	 * boostable = 1: This instruction has been boosted: we have
+	 * boostable = false: This instruction type is not boostable.
+	 * boostable = true: This instruction has been boosted: we have
 	 * added a relative jump after the instruction copy in insn,
 	 * so no single-step and fixup are needed (unless there's
 	 * a post_handler or break_handler).
 	 */
-	int boostable;
+	bool boostable;
 	bool if_modifier;
 };

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index a654054eae7e..3f084a0ca722 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -409,9 +409,9 @@ static void prepare_boost(struct kprobe *p, int length)
 		 * jumps back to correct address.
 		 */
 		synthesize_reljump(p->ainsn.insn + length, p->addr + length);
-		p->ainsn.boostable = 1;
+		p->ainsn.boostable = true;
 	} else {
-		p->ainsn.boostable = -1;
+		p->ainsn.boostable = false;
 	}
 }
 
@@ -467,7 +467,7 @@ void arch_disarm_kprobe(struct kprobe *p)
 void arch_remove_kprobe(struct kprobe *p)
 {
 	if (p->ainsn.insn) {
-		free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
+		free_insn_slot(p->ainsn.insn, p->ainsn.boostable);
 		p->ainsn.insn = NULL;
 	}
 }
@@ -539,7 +539,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
 		return;
 
 #if !defined(CONFIG_PREEMPT)
-	if (p->ainsn.boostable == 1 && !p->post_handler) {
+	if (p->ainsn.boostable && !p->post_handler) {
 		/* Boost up -- we can execute copied instructions directly */
 		if (!reenter)
 			reset_current_kprobe();
@@ -859,7 +859,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
 	case 0xcf:
 	case 0xea:	/* jmp absolute -- ip is correct */
 		/* ip is already adjusted, no more changes required */
-		p->ainsn.boostable = 1;
+		p->ainsn.boostable = true;
 		goto no_change;
 	case 0xe8:	/* call relative - Fix return addr */
 		*tos = orig_ip + (*tos - copy_ip);
@@ -884,7 +884,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
 			 * jmp near and far, absolute indirect
 			 * ip is correct. And this is boostable
 			 */
-			p->ainsn.boostable = 1;
+			p->ainsn.boostable = true;
 			goto no_change;
 		}
 	default:
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 5f8f0b3cc674..041f7b6dfa0f 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -94,6 +94,6 @@ NOKPROBE_SYMBOL(kprobe_ftrace_handler);
 int arch_prepare_kprobe_ftrace(struct kprobe *p)
 {
 	p->ainsn.insn = NULL;
-	p->ainsn.boostable = -1;
+	p->ainsn.boostable = false;
 	return 0;
 }
--
cgit v1.2.3
From d0381c81c2f782fa2131178d11e0cfb23d50d631 Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Wed, 29 Mar 2017 14:02:46 +0900
Subject: kprobes/x86: Set kprobes pages read-only

Set the pages that are used for kprobes' single-step buffer and
optprobes' trampoline instruction buffer to read-only. This prevents
unexpected (or unintended) instruction modification.

This also passes rodata_test as below.

Without this patch, rodata_test shows a warning:

  WARNING: CPU: 0 PID: 1 at arch/x86/mm/dump_pagetables.c:235 note_page+0x7a9/0xa20
  x86/mm: Found insecure W+X mapping at address ffffffffa0000000/0xffffffffa0000000

With this fix, no W+X pages are found:

  x86/mm: Checked W+X mappings: passed, no W+X pages found.
  rodata_test: all tests were successful

Reported-by: Andrey Ryabinin
Signed-off-by: Masami Hiramatsu
Cc: Ananth N Mavinakayanahalli
Cc: Anil S Keshavamurthy
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: David S. Miller
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Ye Xiaolong
Link: http://lkml.kernel.org/r/149076375592.22469.14174394514338612247.stgit@devbox
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/kprobes/core.c | 4 ++++
 arch/x86/kernel/kprobes/opt.c  | 3 +++
 2 files changed, 7 insertions(+)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 3f084a0ca722..0dc24e6cdd1e 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -419,6 +419,8 @@ static int arch_copy_kprobe(struct kprobe *p)
 {
 	int len;
 
+	set_memory_rw((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
+
 	/* Copy an instruction with recovering if other optprobe modifies it.*/
 	len = __copy_instruction(p->ainsn.insn, p->addr);
 	if (!len)
@@ -430,6 +432,8 @@ static int arch_copy_kprobe(struct kprobe *p)
 	 */
 	prepare_boost(p, len);
 
+	set_memory_ro((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
+
 	/* Check whether the instruction modifies Interrupt Flag or not */
 	p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 3e7c6e5a08ff..b121037739e4 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -350,6 +350,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
 	}
 
 	buf = (u8 *)op->optinsn.insn;
+	set_memory_rw((unsigned long)buf & PAGE_MASK, 1);
 
 	/* Copy instructions into the out-of-line buffer */
 	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
@@ -372,6 +373,8 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
 	synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
 			   (u8 *)op->kp.addr + op->optinsn.size);
 
+	set_memory_ro((unsigned long)buf & PAGE_MASK, 1);
+
 	flush_icache_range((unsigned long) buf,
 			   (unsigned long) buf + TMPL_END_IDX +
 			   op->optinsn.size + RELATIVEJUMP_SIZE);
--
cgit v1.2.3
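The write/seal pattern the patch wraps around every buffer update is the
usual page-permission bracket. A minimal sketch, assuming the slot stays
within one page (the helper name is hypothetical; in kernels of this
vintage set_memory_rw()/set_memory_ro() come via <asm/cacheflush.h>):

    #include <linux/mm.h>       /* PAGE_MASK */
    #include <linux/string.h>
    #include <asm/cacheflush.h> /* set_memory_rw(), set_memory_ro() */

    /* Patch @len bytes into an otherwise read-only instruction slot. */
    static void sketch_patch_slot(u8 *slot, const u8 *insn, int len)
    {
            unsigned long page = (unsigned long)slot & PAGE_MASK;

            set_memory_rw(page, 1);   /* drop write protection, 1 page */
            memcpy(slot, insn, len);  /* modify the buffer */
            set_memory_ro(page, 1);   /* seal the page again */
    }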
From ea1e34fc366b84e4449b37d86f2222935e29412d Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Wed, 29 Mar 2017 14:03:56 +0900
Subject: kprobes/x86: Use probe_kernel_read() instead of memcpy()

Use probe_kernel_read() to avoid unexpected faults while copying kernel
text in __recover_probed_insn(), __recover_optprobed_insn() and
__copy_instruction().

Signed-off-by: Masami Hiramatsu
Cc: Ananth N Mavinakayanahalli
Cc: Andrey Ryabinin
Cc: Anil S Keshavamurthy
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: David S. Miller
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Ye Xiaolong
Link: http://lkml.kernel.org/r/149076382624.22469.10091613887942958518.stgit@devbox
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/kprobes/core.c | 12 +++++++++---
 arch/x86/kernel/kprobes/opt.c  |  5 ++++-
 2 files changed, 13 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 0dc24e6cdd1e..722f54440e7e 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -259,7 +259,10 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
 	 * Fortunately, we know that the original code is the ideal 5-byte
 	 * long NOP.
 	 */
-	memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+	if (probe_kernel_read(buf, (void *)addr,
+		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
+		return 0UL;
+
 	if (faddr)
 		memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
 	else
@@ -271,7 +274,7 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
  * Recover the probed instruction at addr for further analysis.
  * Caller must lock kprobes by kprobe_mutex, or disable preemption
  * for preventing to release referencing kprobes.
- * Returns zero if the instruction can not get recovered.
+ * Returns zero if the instruction can not get recovered (or access failed).
  */
 unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
 {
@@ -365,7 +368,10 @@ int __copy_instruction(u8 *dest, u8 *src)
 	/* Another subsystem puts a breakpoint, failed to recover */
 	if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
 		return 0;
-	memcpy(dest, insn.kaddr, length);
+
+	/* This can access kernel text if given address is not recovered */
+	if (probe_kernel_read(dest, insn.kaddr, length))
+		return 0;
 
 #ifdef CONFIG_X86_64
 	/* Only x86_64 has RIP relative instructions */
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index b121037739e4..5b5233441d30 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -65,7 +65,10 @@ found:
 	 * overwritten by jump destination address. In this case, original
 	 * bytes must be recovered from op->optinsn.copied_insn buffer.
 	 */
-	memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+	if (probe_kernel_read(buf, (void *)addr,
+		MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
+		return 0UL;
+
 	if (addr == (unsigned long)kp->addr) {
 		buf[0] = kp->opcode;
 		memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
--
cgit v1.2.3
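Unlike a plain memcpy(), probe_kernel_read() returns -EFAULT instead of
faulting when the source is unmapped, which is the point of the
conversion. A minimal usage sketch (the wrapper is hypothetical):

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <linux/uaccess.h> /* probe_kernel_read() */

    /* Safely snapshot @len bytes of kernel text at @addr into @buf. */
    static int sketch_snapshot_text(u8 *buf, unsigned long addr, size_t len)
    {
            if (probe_kernel_read(buf, (void *)addr, len))
                    return -EFAULT; /* source not readable; no oops */
            return 0;
    }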
From a8d11cd0714f51877587f5ec891013ca46e163ac Mon Sep 17 00:00:00 2001
From: Masami Hiramatsu
Date: Wed, 29 Mar 2017 14:05:06 +0900
Subject: kprobes/x86: Consolidate insn decoder users for copying code

Consolidate the x86 instruction decoder users on the path that copies
original code for kprobes. Kprobes decodes the same instruction up to
three times when preparing the instruction buffer:

 - the first time to get the length of the instruction,
 - the second time to adjust the displacement,
 - and the third time to check whether the instruction is boostable.

The decode target address differs slightly each time (the first decode
looks at the original address or the recovered instruction buffer, the
second and third point at the copied buffer), but all of them decode
the same instruction. Thus, decode from the copied buffer once, up
front, and reuse the decoded "insn" for the displacement adjustment
and the boostability check.

Signed-off-by: Masami Hiramatsu
Cc: Ananth N Mavinakayanahalli
Cc: Andrey Ryabinin
Cc: Anil S Keshavamurthy
Cc: Borislav Petkov
Cc: Brian Gerst
Cc: David S. Miller
Cc: Denys Vlasenko
Cc: H. Peter Anvin
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Ye Xiaolong
Link: http://lkml.kernel.org/r/149076389643.22469.13151892839998777373.stgit@devbox
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/kprobes/common.h |  4 +--
 arch/x86/kernel/kprobes/core.c   | 66 +++++++++++++++++++---------------------
 arch/x86/kernel/kprobes/opt.c    |  5 +--
 3 files changed, 36 insertions(+), 39 deletions(-)

diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index d688826e5736..db2182d63ed0 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -67,7 +67,7 @@
 #endif
 
 /* Ensure if the instruction can be boostable */
-extern int can_boost(kprobe_opcode_t *instruction, void *addr);
+extern int can_boost(struct insn *insn, void *orig_addr);
 /* Recover instruction if given address is probed */
 extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf,
 					 unsigned long addr);
@@ -75,7 +75,7 @@ extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf,
  * Copy an instruction and adjust the displacement if the instruction
  * uses the %rip-relative addressing mode.
  */
-extern int __copy_instruction(u8 *dest, u8 *src);
+extern int __copy_instruction(u8 *dest, u8 *src, struct insn *insn);
 
 /* Generate a relative-jump/call instruction */
 extern void synthesize_reljump(void *from, void *to);
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 722f54440e7e..19e1f2a6d7b0 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -164,33 +164,29 @@ NOKPROBE_SYMBOL(skip_prefixes);
 
 /*
- * Returns non-zero if opcode is boostable.
+ * Returns non-zero if INSN is boostable.
  * RIP relative instructions are adjusted at copying time in 64 bits mode
  */
-int can_boost(kprobe_opcode_t *opcodes, void *addr)
+int can_boost(struct insn *insn, void *addr)
 {
-	struct insn insn;
 	kprobe_opcode_t opcode;
 
 	if (search_exception_tables((unsigned long)addr))
 		return 0;	/* Page fault may occur on this address. */
 
-	kernel_insn_init(&insn, (void *)opcodes, MAX_INSN_SIZE);
-	insn_get_opcode(&insn);
-
 	/* 2nd-byte opcode */
-	if (insn.opcode.nbytes == 2)
-		return test_bit(insn.opcode.bytes[1],
+	if (insn->opcode.nbytes == 2)
+		return test_bit(insn->opcode.bytes[1],
 				(unsigned long *)twobyte_is_boostable);
 
-	if (insn.opcode.nbytes != 1)
+	if (insn->opcode.nbytes != 1)
 		return 0;
 
 	/* Can't boost Address-size override prefix */
-	if (unlikely(inat_is_address_size_prefix(insn.attr)))
+	if (unlikely(inat_is_address_size_prefix(insn->attr)))
 		return 0;
 
-	opcode = insn.opcode.bytes[0];
+	opcode = insn->opcode.bytes[0];
 
 	switch (opcode & 0xf0) {
 	case 0x60:
@@ -351,35 +347,31 @@ static int is_IF_modifier(kprobe_opcode_t *insn)
  * addressing mode.
 * This returns the length of copied instruction, or 0 if it has an error.
  */
-int __copy_instruction(u8 *dest, u8 *src)
+int __copy_instruction(u8 *dest, u8 *src, struct insn *insn)
 {
-	struct insn insn;
 	kprobe_opcode_t buf[MAX_INSN_SIZE];
-	int length;
 	unsigned long recovered_insn =
 		recover_probed_instruction(buf, (unsigned long)src);
 
-	if (!recovered_insn)
+	if (!recovered_insn || !insn)
 		return 0;
 
-	kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
-	insn_get_length(&insn);
-	length = insn.length;
-
-	/* Another subsystem puts a breakpoint, failed to recover */
-	if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+	/* This can access kernel text if given address is not recovered */
+	if (probe_kernel_read(dest, (void *)recovered_insn, MAX_INSN_SIZE))
 		return 0;
 
-	/* This can access kernel text if given address is not recovered */
-	if (probe_kernel_read(dest, insn.kaddr, length))
+	kernel_insn_init(insn, dest, MAX_INSN_SIZE);
+	insn_get_length(insn);
+
+	/* Another subsystem puts a breakpoint, failed to recover */
+	if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
 		return 0;
 
 #ifdef CONFIG_X86_64
 	/* Only x86_64 has RIP relative instructions */
-	if (insn_rip_relative(&insn)) {
+	if (insn_rip_relative(insn)) {
 		s64 newdisp;
 		u8 *disp;
-		kernel_insn_init(&insn, dest, length);
-		insn_get_displacement(&insn);
 		/*
 		 * The copied instruction uses the %rip-relative addressing
 		 * mode. Adjust the displacement for the difference between
@@ -392,29 +384,32 @@ int __copy_instruction(u8 *dest, u8 *src)
 		 * extension of the original signed 32-bit displacement would
 		 * have given.
 		 */
-		newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
+		newdisp = (u8 *) src + (s64) insn->displacement.value
+			  - (u8 *) dest;
 		if ((s64) (s32) newdisp != newdisp) {
 			pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
-			pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", src, dest, insn.displacement.value);
+			pr_err("\tSrc: %p, Dest: %p, old disp: %x\n",
+				src, dest, insn->displacement.value);
 			return 0;
 		}
-		disp = (u8 *) dest + insn_offset_displacement(&insn);
+		disp = (u8 *) dest + insn_offset_displacement(insn);
 		*(s32 *) disp = (s32) newdisp;
 	}
 #endif
-	return length;
+	return insn->length;
 }
 
 /* Prepare reljump right after instruction to boost */
-static void prepare_boost(struct kprobe *p, int length)
+static void prepare_boost(struct kprobe *p, struct insn *insn)
 {
-	if (can_boost(p->ainsn.insn, p->addr) &&
-	    MAX_INSN_SIZE - length >= RELATIVEJUMP_SIZE) {
+	if (can_boost(insn, p->addr) &&
+	    MAX_INSN_SIZE - insn->length >= RELATIVEJUMP_SIZE) {
 		/*
 		 * These instructions can be executed directly if it
 		 * jumps back to correct address.
 		 */
-		synthesize_reljump(p->ainsn.insn + length, p->addr + length);
+		synthesize_reljump(p->ainsn.insn + insn->length,
+				   p->addr + insn->length);
 		p->ainsn.boostable = true;
 	} else {
 		p->ainsn.boostable = false;
 	}
@@ -423,12 +418,13 @@ static void prepare_boost(struct kprobe *p, int length)
 
 static int arch_copy_kprobe(struct kprobe *p)
 {
+	struct insn insn;
 	int len;
 
 	set_memory_rw((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
 
 	/* Copy an instruction with recovering if other optprobe modifies it.*/
-	len = __copy_instruction(p->ainsn.insn, p->addr);
+	len = __copy_instruction(p->ainsn.insn, p->addr, &insn);
 	if (!len)
 		return -EINVAL;
 
 	/*
 	 * __copy_instruction can modify the displacement of the instruction,
 	 * but it doesn't affect boostable check.
 	 */
-	prepare_boost(p, len);
+	prepare_boost(p, &insn);
 
 	set_memory_ro((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
 
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 5b5233441d30..9aadff3d0902 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -177,11 +177,12 @@ NOKPROBE_SYMBOL(optimized_callback);
 
 static int copy_optimized_instructions(u8 *dest, u8 *src)
 {
+	struct insn insn;
 	int len = 0, ret;
 
 	while (len < RELATIVEJUMP_SIZE) {
-		ret = __copy_instruction(dest + len, src + len);
-		if (!ret || !can_boost(dest + len, src + len))
+		ret = __copy_instruction(dest + len, src + len, &insn);
+		if (!ret || !can_boost(&insn, src + len))
 			return -EINVAL;
 		len += ret;
 	}
--
cgit v1.2.3
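With the consolidation in place, a caller decodes once and reuses the
result everywhere. A minimal caller-side sketch of the new calling
convention (the wrapper function is hypothetical; it assumes the
declarations from kprobes/common.h shown above, and mirrors what
copy_optimized_instructions() does in the last hunk):

    /* Copy one instruction and verify it is boostable, decoding once. */
    static int sketch_copy_and_check(u8 *dest, u8 *src)
    {
            struct insn insn;
            int len;

            /* Fills "insn" as a side effect; 0 means copy/decode failed. */
            len = __copy_instruction(dest, src, &insn);
            if (!len)
                    return -EINVAL;

            /* Reuse the already-decoded insn; no second decode needed. */
            if (!can_boost(&insn, src))
                    return -EINVAL;

            return len;
    }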