author     Linus Torvalds <torvalds@linux-foundation.org>  2019-05-15 16:05:47 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-05-15 16:05:47 -0700
commit     d2d8b146043ae7e250aef1fb312971f6f479d487 (patch)
tree       22db8758a5aa0bc850ba8f83fe57b1f679924d0a /arch/x86
parent     2bbacd1a92788ee334c7e92b765ea16ebab68dfe (diff)
parent     693713cbdb3a4bda5a8a678c31f06560bbb14657 (diff)
download   linux-d2d8b146043ae7e250aef1fb312971f6f479d487.tar.bz2
Merge tag 'trace-v5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
"The major changes in this tracing update includes:
- Removal of non-DYNAMIC_FTRACE from 32bit x86
- Removal of mcount support from x86
- Emulating a call from int3 on x86_64, fixes live kernel patching
- Consolidated Tracing Error logs file
Minor updates:
- Removal of klp_check_compiler_support()
- kdb ftrace dumping output changes
- Accessing and creating ftrace instances from inside the kernel
- Clean up of #define if macro
- Introduction of TRACE_EVENT_NOP() to disable trace events based on
config options (a usage sketch follows the commit list below)
And other minor fixes and clean ups"
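The int3 call-emulation change is the one worth pausing on. Below is a minimal
user-space sketch of the arithmetic only — not kernel code: the fake_regs
struct, the word-indexed demo stack, and all addresses are invented for the
illustration; the real helpers are int3_emulate_push()/int3_emulate_call() in
the arch/x86 diff further down. When a 5-byte call site has its first byte
patched to int3, the handler pushes the address of the instruction that
follows the original call, then branches to the target:

#include <stdio.h>

#define INT3_INSN_SIZE 1	/* int3 is the one-byte opcode 0xcc */
#define CALL_INSN_SIZE 5	/* call rel32 is five bytes */

/* Stand-in for the few pt_regs fields the emulation touches. */
struct fake_regs {
	unsigned long ip;		/* points just past the int3 byte */
	unsigned long sp;		/* index into the demo stack below */
	unsigned long stack[16];
};

static void emulate_push(struct fake_regs *regs, unsigned long val)
{
	regs->sp -= 1;			/* kernel: regs->sp -= sizeof(unsigned long) */
	regs->stack[regs->sp] = val;
}

static void emulate_call(struct fake_regs *regs, unsigned long func)
{
	/* Return address = start of the patched call + its full length. */
	emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
	regs->ip = func;		/* kernel: int3_emulate_jmp() */
}

int main(void)
{
	/* Pretend the call at 0x1000 had its first byte replaced by int3;
	 * the trap leaves ip at 0x1001. */
	struct fake_regs regs = { .ip = 0x1000 + INT3_INSN_SIZE, .sp = 16 };

	emulate_call(&regs, 0x2000 /* hypothetical patched-in target */);

	printf("branch target:  %#lx\n", regs.ip);		/* 0x2000 */
	printf("return address: %#lx\n", regs.stack[regs.sp]);	/* 0x1005 */
	return 0;
}

Compiled and run, it prints the branch target and the pushed return address
(0x1005 = 0x1000 + CALL_INSN_SIZE), which is exactly what a real call would
have left on the stack.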
* tag 'trace-v5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (44 commits)
x86: Hide the int3_emulate_call/jmp functions from UML
livepatch: Remove klp_check_compiler_support()
ftrace/x86: Remove mcount support
ftrace/x86_32: Remove support for non DYNAMIC_FTRACE
tracing: Simplify "if" macro code
tracing: Fix documentation about disabling options using trace_options
tracing: Replace kzalloc with kcalloc
tracing: Fix partial reading of trace event's id file
tracing: Allow RCU to run between postponed startup tests
tracing: Fix white space issues in parse_pred() function
tracing: Eliminate const char[] auto variables
ring-buffer: Fix mispelling of Calculate
tracing: probeevent: Fix to make the type of $comm string
tracing: probeevent: Do not accumulate on ret variable
tracing: uprobes: Re-enable $comm support for uprobe events
ftrace/x86_64: Emulate call function while updating in breakpoint handler
x86_64: Allow breakpoints to emulate call instructions
x86_64: Add gap to int3 to allow for call emulation
tracing: kdb: Allow ftdump to skip all but the last few entries
tracing: Add trace_total_entries() / trace_total_entries_cpu()
...
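As context for the TRACE_EVENT_NOP() item above: it lets a subsystem keep its
tracepoint call sites compiling while the events themselves become empty
static-inline stubs when a config option is off. A sketch of the usage
pattern as it would appear inside a trace header — the event name and config
symbol here are hypothetical, and this is not a standalone compilable file,
since TRACE_EVENT expansion needs the usual trace-header plumbing:

/* When CONFIG_FOO_TRACE is off, trace_foo_event() becomes a no-op stub. */
#ifdef CONFIG_FOO_TRACE
#define TRACE_EVENT_FOO TRACE_EVENT
#else
#define TRACE_EVENT_FOO TRACE_EVENT_NOP
#endif

TRACE_EVENT_FOO(foo_event,

	TP_PROTO(int value),

	TP_ARGS(value),

	TP_STRUCT__entry(
		__field(int, value)
	),

	TP_fast_assign(
		__entry->value = value;
	),

	TP_printk("value=%d", __entry->value)
)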
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                      11
-rw-r--r--  arch/x86/entry/entry_64.S             18
-rw-r--r--  arch/x86/include/asm/ftrace.h          8
-rw-r--r--  arch/x86/include/asm/livepatch.h       8
-rw-r--r--  arch/x86/include/asm/text-patching.h  30
-rw-r--r--  arch/x86/kernel/ftrace.c              32
-rw-r--r--  arch/x86/kernel/ftrace_32.S           75
-rw-r--r--  arch/x86/kernel/ftrace_64.S           28
8 files changed, 93 insertions, 117 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 326b2d5bab9d..21e9f2fac04b 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -31,6 +31,17 @@ config X86_64
 	select SWIOTLB
 	select ARCH_HAS_SYSCALL_WRAPPER
 
+config FORCE_DYNAMIC_FTRACE
+	def_bool y
+	depends on X86_32
+	depends on FUNCTION_TRACER
+	select DYNAMIC_FTRACE
+	help
+	 We keep the static function tracing (!DYNAMIC_FTRACE) around
+	 in order to test the non static function tracing in the
+	 generic code, as other architectures still use it. But we
+	 only need to keep it around for x86_64. No need to keep it
+	 for x86_32. For x86_32, force DYNAMIC_FTRACE.
 #
 # Arch settings
 #
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 20e45d9b4e15..11aa3b2afa4d 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -878,7 +878,7 @@ apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
  * @paranoid == 2 is special: the stub will never switch stacks.  This is for
  * #DF: if the thread stack is somehow unusable, we'll still get a useful OOPS.
  */
-.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ist_offset=0
+.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 ist_offset=0 create_gap=0
 ENTRY(\sym)
 	UNWIND_HINT_IRET_REGS offset=\has_error_code*8
@@ -898,6 +898,20 @@ ENTRY(\sym)
 	jnz	.Lfrom_usermode_switch_stack_\@
 	.endif
 
+	.if \create_gap == 1
+	/*
+	 * If coming from kernel space, create a 6-word gap to allow the
+	 * int3 handler to emulate a call instruction.
+	 */
+	testb	$3, CS-ORIG_RAX(%rsp)
+	jnz	.Lfrom_usermode_no_gap_\@
+	.rept	6
+	pushq	5*8(%rsp)
+	.endr
+	UNWIND_HINT_IRET_REGS offset=8
+.Lfrom_usermode_no_gap_\@:
+	.endif
+
 	.if \paranoid
 	call	paranoid_entry
 	.else
@@ -1129,7 +1143,7 @@ apicinterrupt3 HYPERV_STIMER0_VECTOR \
 #endif /* CONFIG_HYPERV */
 
 idtentry debug			do_debug		has_error_code=0	paranoid=1 shift_ist=IST_INDEX_DB ist_offset=DB_STACK_OFFSET
-idtentry int3			do_int3			has_error_code=0
+idtentry int3			do_int3			has_error_code=0	create_gap=1
 idtentry stack_segment		do_stack_segment	has_error_code=1
 
 #ifdef CONFIG_XEN_PV
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index cf350639e76d..287f1f7b2e52 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -3,12 +3,10 @@
 #define _ASM_X86_FTRACE_H
 
 #ifdef CONFIG_FUNCTION_TRACER
-#ifdef CC_USING_FENTRY
-# define MCOUNT_ADDR		((unsigned long)(__fentry__))
-#else
-# define MCOUNT_ADDR		((unsigned long)(mcount))
-# define HAVE_FUNCTION_GRAPH_FP_TEST
+#ifndef CC_USING_FENTRY
+# error Compiler does not support fentry?
 #endif
+# define MCOUNT_ADDR		((unsigned long)(__fentry__))
 #define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */
 
 #ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index ed80003ce3e2..a66f6706c2de 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -24,14 +24,6 @@
 #include <asm/setup.h>
 #include <linux/ftrace.h>
 
-static inline int klp_check_compiler_support(void)
-{
-#ifndef CC_USING_FENTRY
-	return 1;
-#endif
-	return 0;
-}
-
 static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 {
 	regs->ip = ip;
diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index c90678fd391a..880b5515b1d6 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -42,4 +42,34 @@ extern int after_bootmem;
 extern __ro_after_init struct mm_struct *poking_mm;
 extern __ro_after_init unsigned long poking_addr;
 
+#ifndef CONFIG_UML_X86
+static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
+{
+	regs->ip = ip;
+}
+
+#define INT3_INSN_SIZE 1
+#define CALL_INSN_SIZE 5
+
+#ifdef CONFIG_X86_64
+static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
+{
+	/*
+	 * The int3 handler in entry_64.S adds a gap between the
+	 * stack where the break point happened, and the saving of
+	 * pt_regs. We can extend the original stack because of
+	 * this gap. See the idtentry macro's create_gap option.
+	 */
+	regs->sp -= sizeof(unsigned long);
+	*(unsigned long *)regs->sp = val;
+}
+
+static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
+{
+	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
+	int3_emulate_jmp(regs, func);
+}
+#endif /* CONFIG_X86_64 */
+#endif /* !CONFIG_UML_X86 */
+
 #endif /* _ASM_X86_TEXT_PATCHING_H */
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 0caf8122d680..0927bb158ffc 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -29,6 +29,7 @@
 #include <asm/kprobes.h>
 #include <asm/ftrace.h>
 #include <asm/nops.h>
+#include <asm/text-patching.h>
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
@@ -231,6 +232,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 }
 
 static unsigned long ftrace_update_func;
+static unsigned long ftrace_update_func_call;
 
 static int update_ftrace_func(unsigned long ip, void *new)
 {
@@ -259,6 +261,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 	unsigned char *new;
 	int ret;
 
+	ftrace_update_func_call = (unsigned long)func;
+
 	new = ftrace_call_replace(ip, (unsigned long)func);
 	ret = update_ftrace_func(ip, new);
 
@@ -294,13 +298,28 @@ int ftrace_int3_handler(struct pt_regs *regs)
 	if (WARN_ON_ONCE(!regs))
 		return 0;
 
-	ip = regs->ip - 1;
-	if (!ftrace_location(ip) && !is_ftrace_caller(ip))
-		return 0;
+	ip = regs->ip - INT3_INSN_SIZE;
 
-	regs->ip += MCOUNT_INSN_SIZE - 1;
+#ifdef CONFIG_X86_64
+	if (ftrace_location(ip)) {
+		int3_emulate_call(regs, (unsigned long)ftrace_regs_caller);
+		return 1;
+	} else if (is_ftrace_caller(ip)) {
+		if (!ftrace_update_func_call) {
+			int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
+			return 1;
+		}
+		int3_emulate_call(regs, ftrace_update_func_call);
+		return 1;
+	}
+#else
+	if (ftrace_location(ip) || is_ftrace_caller(ip)) {
+		int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
+		return 1;
+	}
+#endif
 
-	return 1;
+	return 0;
 }
 NOKPROBE_SYMBOL(ftrace_int3_handler);
 
@@ -865,6 +884,8 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
 
 	func = ftrace_ops_get_func(ops);
 
+	ftrace_update_func_call = (unsigned long)func;
+
 	/* Do a safe modify in case the trampoline is executing */
 	new = ftrace_call_replace(ip, (unsigned long)func);
 	ret = update_ftrace_func(ip, new);
@@ -966,6 +987,7 @@ static int ftrace_mod_jmp(unsigned long ip, void *func)
 {
 	unsigned char *new;
 
+	ftrace_update_func_call = 0UL;
 	new = ftrace_jmp_replace(ip, (unsigned long)func);
 
 	return update_ftrace_func(ip, new);
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index 4c8440de3355..2ba914a34b06 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -10,22 +10,10 @@
 #include <asm/ftrace.h>
 #include <asm/nospec-branch.h>
 
-#ifdef CC_USING_FENTRY
 # define function_hook	__fentry__
 EXPORT_SYMBOL(__fentry__)
-#else
-# define function_hook	mcount
-EXPORT_SYMBOL(mcount)
-#endif
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-
-/* mcount uses a frame pointer even if CONFIG_FRAME_POINTER is not set */
-#if !defined(CC_USING_FENTRY) || defined(CONFIG_FRAME_POINTER)
-# define USING_FRAME_POINTER
-#endif
 
-#ifdef USING_FRAME_POINTER
+#ifdef CONFIG_FRAME_POINTER
 # define MCOUNT_FRAME			1	/* using frame = true  */
 #else
 # define MCOUNT_FRAME			0	/* using frame = false */
@@ -37,8 +25,7 @@ END(function_hook)
 
 ENTRY(ftrace_caller)
 
-#ifdef USING_FRAME_POINTER
-# ifdef CC_USING_FENTRY
+#ifdef CONFIG_FRAME_POINTER
 	/*
 	 * Frame pointers are of ip followed by bp.
 	 * Since fentry is an immediate jump, we are left with
@@ -49,7 +36,7 @@ ENTRY(ftrace_caller)
 	pushl	%ebp
 	movl	%esp, %ebp
 	pushl	2*4(%esp)	/* function ip */
-# endif
+
 	/* For mcount, the function ip is directly above */
 	pushl	%ebp
 	movl	%esp, %ebp
@@ -59,7 +46,7 @@ ENTRY(ftrace_caller)
 	pushl	%edx
 	pushl	$0	/* Pass NULL as regs pointer */
 
-#ifdef USING_FRAME_POINTER
+#ifdef CONFIG_FRAME_POINTER
 	/* Load parent ebp into edx */
 	movl	4*4(%esp), %edx
 #else
@@ -82,13 +69,11 @@ ftrace_call:
 	popl	%edx
 	popl	%ecx
 	popl	%eax
-#ifdef USING_FRAME_POINTER
+#ifdef CONFIG_FRAME_POINTER
 	popl	%ebp
-# ifdef CC_USING_FENTRY
 	addl	$4,%esp		/* skip function ip */
 	popl	%ebp		/* this is the orig bp */
 	addl	$4, %esp	/* skip parent ip */
-# endif
 #endif
 .Lftrace_ret:
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -133,11 +118,7 @@ ENTRY(ftrace_regs_caller)
 
 	movl	12*4(%esp), %eax	/* Load ip (1st parameter) */
 	subl	$MCOUNT_INSN_SIZE, %eax	/* Adjust ip */
-#ifdef CC_USING_FENTRY
 	movl	15*4(%esp), %edx	/* Load parent ip (2nd parameter) */
-#else
-	movl	0x4(%ebp), %edx		/* Load parent ip (2nd parameter) */
-#endif
 	movl	function_trace_op, %ecx	/* Save ftrace_pos in 3rd parameter */
 
 	pushl	%esp			/* Save pt_regs as 4th parameter */
@@ -170,43 +151,6 @@ GLOBAL(ftrace_regs_call)
 	lea	3*4(%esp), %esp		/* Skip orig_ax, ip and cs */
 	jmp	.Lftrace_ret
 
-#else /* ! CONFIG_DYNAMIC_FTRACE */
-
-ENTRY(function_hook)
-	cmpl	$__PAGE_OFFSET, %esp
-	jb	ftrace_stub		/* Paging not enabled yet? */
-
-	cmpl	$ftrace_stub, ftrace_trace_function
-	jnz	.Ltrace
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	cmpl	$ftrace_stub, ftrace_graph_return
-	jnz	ftrace_graph_caller
-
-	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
-	jnz	ftrace_graph_caller
-#endif
-.globl ftrace_stub
-ftrace_stub:
-	ret
-
-	/* taken from glibc */
-.Ltrace:
-	pushl	%eax
-	pushl	%ecx
-	pushl	%edx
-	movl	0xc(%esp), %eax
-	movl	0x4(%ebp), %edx
-	subl	$MCOUNT_INSN_SIZE, %eax
-
-	movl	ftrace_trace_function, %ecx
-	CALL_NOSPEC %ecx
-
-	popl	%edx
-	popl	%ecx
-	popl	%eax
-	jmp	ftrace_stub
-END(function_hook)
-#endif /* CONFIG_DYNAMIC_FTRACE */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
@@ -215,13 +159,8 @@ ENTRY(ftrace_graph_caller)
 	pushl	%edx
 	movl	3*4(%esp), %eax
 	/* Even with frame pointers, fentry doesn't have one here */
-#ifdef CC_USING_FENTRY
 	lea	4*4(%esp), %edx
 	movl	$0, %ecx
-#else
-	lea	0x4(%ebp), %edx
-	movl	(%ebp), %ecx
-#endif
 	subl	$MCOUNT_INSN_SIZE, %eax
 	call	prepare_ftrace_return
 	popl	%edx
@@ -234,11 +173,7 @@ END(ftrace_graph_caller)
 return_to_handler:
 	pushl	%eax
 	pushl	%edx
-#ifdef CC_USING_FENTRY
 	movl	$0, %eax
-#else
-	movl	%ebp, %eax
-#endif
 	call	ftrace_return_to_handler
 	movl	%eax, %ecx
 	popl	%edx
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 75f2b36b41a6..10eb2760ef2c 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -13,22 +13,12 @@
 	.code64
 	.section .entry.text, "ax"
 
-#ifdef CC_USING_FENTRY
 # define function_hook	__fentry__
 EXPORT_SYMBOL(__fentry__)
-#else
-# define function_hook	mcount
-EXPORT_SYMBOL(mcount)
-#endif
 
 #ifdef CONFIG_FRAME_POINTER
-# ifdef CC_USING_FENTRY
 /* Save parent and function stack frames (rip and rbp) */
 #  define MCOUNT_FRAME_SIZE	(8+16*2)
-# else
-/* Save just function stack frame (rip and rbp) */
-#  define MCOUNT_FRAME_SIZE	(8+16)
-# endif
 #else
 /* No need to save a stack frame */
 # define MCOUNT_FRAME_SIZE	0
@@ -75,17 +65,13 @@ EXPORT_SYMBOL(mcount)
  * fentry is called before the stack frame is set up, where as mcount
  * is called afterward.
  */
-#ifdef CC_USING_FENTRY
+
 	/* Save the parent pointer (skip orig rbp and our return address) */
 	pushq \added+8*2(%rsp)
 	pushq %rbp
 	movq %rsp, %rbp
 	/* Save the return address (now skip orig rbp, rbp and parent) */
 	pushq \added+8*3(%rsp)
-#else
-	/* Can't assume that rip is before this (unless added was zero) */
-	pushq \added+8(%rsp)
-#endif
 	pushq %rbp
 	movq %rsp, %rbp
 #endif /* CONFIG_FRAME_POINTER */
@@ -113,12 +99,7 @@ EXPORT_SYMBOL(mcount)
 	movq %rdx, RBP(%rsp)
 
 	/* Copy the parent address into %rsi (second parameter) */
-#ifdef CC_USING_FENTRY
 	movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi
-#else
-	/* %rdx contains original %rbp */
-	movq 8(%rdx), %rsi
-#endif
 
 	/* Move RIP to its proper location */
 	movq MCOUNT_REG_SIZE+\added(%rsp), %rdi
@@ -303,15 +284,8 @@ ENTRY(ftrace_graph_caller)
 	/* Saves rbp into %rdx and fills first parameter */
 	save_mcount_regs
 
-#ifdef CC_USING_FENTRY
 	leaq MCOUNT_REG_SIZE+8(%rsp), %rsi
 	movq $0, %rdx	/* No framepointers needed */
-#else
-	/* Save address of the return address of traced function */
-	leaq 8(%rdx), %rsi
-	/* ftrace does sanity checks against frame pointers */
-	movq (%rdx), %rdx
-#endif
 
 	call	prepare_ftrace_return
 	restore_mcount_regs
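A closing note on why ftrace_int3_handler() must emulate the call rather than
simply skip it, as it did before this series: dynamic ftrace rewrites live
call sites with the classic three-step breakpoint sequence, and a task that
hit the int3 mid-update used to have the traced call silently skipped, which
live kernel patching cannot tolerate. Below is a user-space simulation of
that sequence under stated assumptions — the byte array stands in for kernel
text, and sync_all_cpus() is an empty stub for the kernel's
serialize-every-CPU IPI; the real logic lives in arch/x86/kernel/ftrace.c:

#include <stdio.h>
#include <string.h>

/* Fake "kernel text": a 5-byte call instruction, e8 + rel32. */
static unsigned char text[5] = { 0xe8, 0x10, 0x00, 0x00, 0x00 };

/* Stub: the kernel instead forces every CPU through a serializing
 * instruction before proceeding to the next step. */
static void sync_all_cpus(void) { }

static void patch_call_site(unsigned char *insn, const unsigned char *new_call)
{
	/* 1. Plant int3 so no CPU can execute a half-written call. */
	insn[0] = 0xcc;
	sync_all_cpus();

	/* 2. Rewrite the 4-byte rel32 body behind the breakpoint. A CPU
	 *    landing on the int3 here runs ftrace_int3_handler(), which
	 *    now emulates the call instead of jumping past it. */
	memcpy(insn + 1, new_call + 1, sizeof(text) - 1);
	sync_all_cpus();

	/* 3. Restore the call opcode; the site is fully rewritten. */
	insn[0] = new_call[0];
	sync_all_cpus();
}

int main(void)
{
	static const unsigned char new_call[5] = { 0xe8, 0x20, 0x00, 0x00, 0x00 };

	patch_call_site(text, new_call);
	for (int i = 0; i < 5; i++)
		printf("%02x%c", text[i], i == 4 ? '\n' : ' ');
	return 0;
}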