author     Linus Torvalds <torvalds@linux-foundation.org>  2014-08-04 11:50:00 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-08-04 11:50:00 -0700
commit     b8c0aa46b3e86083721b57ed2eec6bd2c29ebfba (patch)
tree       45e349bf8a14aa99279d323fdc515e849fd349f3 /arch
parent     c7ed326fa7cafb83ced5a8b02517a61672fe9e90 (diff)
parent     dc6f03f26f570104a2bb03f9d1deb588026d7c75 (diff)
download   linux-b8c0aa46b3e86083721b57ed2eec6bd2c29ebfba.tar.bz2
Merge tag 'trace-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
"This pull request has a lot of work done. The main thing is the
changes to the ftrace function callback infrastructure. It's
introducing a way to allow different functions to call directly
different trampolines instead of all calling the same "mcount" one.
The only user of this for now is the function graph tracer, which
always had a different trampoline, but the function tracer trampoline
was called and did basically nothing, and then the function graph
tracer trampoline was called. The difference now, is that the
function graph tracer trampoline can be called directly if a function
is only being traced by the function graph trampoline. If function
tracing is also happening on the same function, the old way is still
done.
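(A rough, self-contained sketch of that dispatch idea follows. The
struct, helper, and trampoline names are invented for illustration
and are not the kernel's actual implementation:)

#include <stdio.h>

/* Toy model of per-function trampoline selection. */
typedef void (*trampoline_t)(void);

static void list_trampoline(void)  { puts("shared \"mcount\"/list trampoline"); }
static void graph_trampoline(void) { puts("function graph trampoline"); }

struct func_rec {
	const char *name;
	int traced_by_function;	/* function tracer attached? */
	int traced_by_graph;	/* function graph tracer attached? */
};

static trampoline_t pick_trampoline(const struct func_rec *rec)
{
	/* Traced only by the graph tracer: jump straight to its trampoline. */
	if (rec->traced_by_graph && !rec->traced_by_function)
		return graph_trampoline;
	/* Anything else keeps the old behavior: the shared trampoline. */
	return list_trampoline;
}

int main(void)
{
	struct func_rec a = { "vfs_read", 0, 1 };	/* graph only */
	struct func_rec b = { "do_fork",  1, 1 };	/* both tracers */

	printf("%s -> ", a.name); pick_trampoline(&a)();
	printf("%s -> ", b.name); pick_trampoline(&b)();
	return 0;
}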
The accounting for this takes up more memory when function graph
tracing is activated, as it needs to keep track of which functions it
uses. I have a new way that won't take as much memory, but it's not
ready yet for this merge window, and will have to wait for the next
one.
Another big change is the removal of the ftrace_start/stop() calls
that the suspend/resume code used to stop function tracing when
entering the suspend and resume paths. Ftrace was stopped there
because some function on those paths would crash the system if it
called smp_processor_id()! The stop/start was a big hammer to solve
the issue at the time, back when ftrace was first introduced into
Linux. Now ftrace has better infrastructure for debugging such
issues: I found the problem function, labeled it with "notrace", and
function tracing can now safely be activated all the way down into
the guts of suspend and resume.
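(The "notrace" marker is a function attribute telling the compiler
not to emit the mcount/fentry call for that function. A minimal
sketch: the #define matches the kernel's usual definition in
include/linux/compiler.h, though config-specific variants exist, and
the function name below is hypothetical — the real fix in this pull
marks __restore_processor_state() and restore_processor_state() in
arch/x86/power/cpu.c, as the last hunk of the diff shows:)

#define notrace __attribute__((no_instrument_function))

/* Hypothetical example: code on the resume path that must never hit
 * the tracing hook, even while function tracing is active. */
static void notrace restore_low_level_state(void)
{
	/* ...runs before per-CPU state is fully restored... */
}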
Other changes include cleanups of the uprobe code, cleanup of the
trace_seq() code, and various other small fixes and cleanups to
ftrace and tracing"
* tag 'trace-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (57 commits)
ftrace: Add warning if tramp hash does not match nr_trampolines
ftrace: Fix trampoline hash update check on rec->flags
ring-buffer: Use rb_page_size() instead of open coded head_page size
ftrace: Rename ftrace_ops field from trampolines to nr_trampolines
tracing: Convert local function_graph functions to static
ftrace: Do not copy old hash when resetting
tracing: let user specify tracing_thresh after selecting function_graph
ring-buffer: Always run per-cpu ring buffer resize with schedule_work_on()
tracing: Remove function_trace_stop and HAVE_FUNCTION_TRACE_MCOUNT_TEST
s390/ftrace: remove check of obsolete variable function_trace_stop
arm64, ftrace: Remove check of obsolete variable function_trace_stop
Blackfin: ftrace: Remove check of obsolete variable function_trace_stop
metag: ftrace: Remove check of obsolete variable function_trace_stop
microblaze: ftrace: Remove check of obsolete variable function_trace_stop
MIPS: ftrace: Remove check of obsolete variable function_trace_stop
parisc: ftrace: Remove check of obsolete variable function_trace_stop
sh: ftrace: Remove check of obsolete variable function_trace_stop
sparc64,ftrace: Remove check of obsolete variable function_trace_stop
tile: ftrace: Remove check of obsolete variable function_trace_stop
ftrace: x86: Remove check of obsolete variable function_trace_stop
...
Diffstat (limited to 'arch')
31 files changed, 31 insertions(+), 144 deletions(-)
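Two recurring edits run through the per-arch hunks below: the
open-coded function_trace_stop test is deleted from each
mcount/ftrace_caller entry stub, and prepare_ftrace_return() gains an
ftrace_graph_is_dead() bail-out. Condensed from the hunks that follow
(argument lists and locals vary by architecture), the C side of the
change has this shape:

void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	/* New: bail out if the graph tracer shut itself down on an error. */
	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/* ...hook the return address so return_to_handler runs on exit... */
}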
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index aa5f9fcbf9ee..38e704e597f7 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -96,11 +96,6 @@
  * - ftrace_graph_caller to set up an exit hook
  */
 ENTRY(_mcount)
-#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-	ldr	x0, =ftrace_trace_stop
-	ldr	x0, [x0]		// if ftrace_trace_stop
-	ret				// return;
-#endif
 	mcount_enter
 
 	ldr	x0, =ftrace_trace_function
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index f81e7b989fff..ed30699cc635 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -18,7 +18,6 @@ config BLACKFIN
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_IDE
 	select HAVE_KERNEL_GZIP if RAMKERNEL
 	select HAVE_KERNEL_BZIP2 if RAMKERNEL
diff --git a/arch/blackfin/kernel/ftrace-entry.S b/arch/blackfin/kernel/ftrace-entry.S
index 7eed00bbd26d..28d059540424 100644
--- a/arch/blackfin/kernel/ftrace-entry.S
+++ b/arch/blackfin/kernel/ftrace-entry.S
@@ -33,15 +33,6 @@ ENDPROC(__mcount)
  * function will be waiting there.  mmmm pie.
  */
 ENTRY(_ftrace_caller)
-# ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-	/* optional micro optimization: return if stopped */
-	p1.l = _function_trace_stop;
-	p1.h = _function_trace_stop;
-	r3 = [p1];
-	cc = r3 == 0;
-	if ! cc jump _ftrace_stub (bp);
-# endif
-
 	/* save first/second/third function arg and the return register */
 	[--sp] = r2;
 	[--sp] = r0;
@@ -83,15 +74,6 @@ ENDPROC(_ftrace_caller)
 
 /* See documentation for _ftrace_caller */
 ENTRY(__mcount)
-# ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-	/* optional micro optimization: return if stopped */
-	p1.l = _function_trace_stop;
-	p1.h = _function_trace_stop;
-	r3 = [p1];
-	cc = r3 == 0;
-	if ! cc jump _ftrace_stub (bp);
-# endif
-
 	/* save third function arg early so we can do testing below */
 	[--sp] = r2;
diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig
index 499b7610eaaf..0b389a81c43a 100644
--- a/arch/metag/Kconfig
+++ b/arch/metag/Kconfig
@@ -13,7 +13,6 @@ config METAG
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_TRACER
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZO
diff --git a/arch/metag/kernel/ftrace_stub.S b/arch/metag/kernel/ftrace_stub.S
index e70bff745bdd..3acc288217c0 100644
--- a/arch/metag/kernel/ftrace_stub.S
+++ b/arch/metag/kernel/ftrace_stub.S
@@ -16,13 +16,6 @@ _mcount_wrapper:
 	.global	_ftrace_caller
 	.type	_ftrace_caller,function
 _ftrace_caller:
-	MOVT	D0Re0,#HI(_function_trace_stop)
-	ADD	D0Re0,D0Re0,#LO(_function_trace_stop)
-	GETD	D0Re0,[D0Re0]
-	CMP	D0Re0,#0
-	BEQ	$Lcall_stub
-	MOV	PC,D0.4
-$Lcall_stub:
 	MSETL	[A0StP], D0Ar6, D0Ar4, D0Ar2, D0.4
 	MOV	D1Ar1, D0.4
 	MOV	D0Ar2, D1RtP
@@ -42,13 +35,6 @@ _ftrace_call:
 	.global	_mcount_wrapper
 	.type	_mcount_wrapper,function
 _mcount_wrapper:
-	MOVT	D0Re0,#HI(_function_trace_stop)
-	ADD	D0Re0,D0Re0,#LO(_function_trace_stop)
-	GETD	D0Re0,[D0Re0]
-	CMP	D0Re0,#0
-	BEQ	$Lcall_mcount
-	MOV	PC,D0.4
-$Lcall_mcount:
 	MSETL	[A0StP], D0Ar6, D0Ar4, D0Ar2, D0.4
 	MOV	D1Ar1, D0.4
 	MOV	D0Ar2, D1RtP
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 9ae08541e30d..40e1c1dd0e24 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -22,7 +22,6 @@ config MICROBLAZE
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_FUNCTION_TRACER
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c
index bbcd2533766c..fc7b48a52cd5 100644
--- a/arch/microblaze/kernel/ftrace.c
+++ b/arch/microblaze/kernel/ftrace.c
@@ -27,6 +27,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	unsigned long return_hooker = (unsigned long) &return_to_handler;
 
+	if (unlikely(ftrace_graph_is_dead()))
+		return;
+
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
diff --git a/arch/microblaze/kernel/mcount.S b/arch/microblaze/kernel/mcount.S
index fc1e1322ce4c..fed9da5de8c4 100644
--- a/arch/microblaze/kernel/mcount.S
+++ b/arch/microblaze/kernel/mcount.S
@@ -91,11 +91,6 @@ ENTRY(ftrace_caller)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 	SAVE_REGS
 	swi	r15, r1, 0;
-	/* MS: HAVE_FUNCTION_TRACE_MCOUNT_TEST begin of checking */
-	lwi	r5, r0, function_trace_stop;
-	bneid	r5, end;
-	nop;
-	/* MS: HAVE_FUNCTION_TRACE_MCOUNT_TEST end of checking */
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 #ifndef CONFIG_DYNAMIC_FTRACE
 	lwi	r5, r0, ftrace_graph_return;
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 4e238e6e661c..10f270bd3e25 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -15,7 +15,6 @@ config MIPS
 	select HAVE_BPF_JIT if !CPU_MICROMIPS
 	select ARCH_HAVE_CUSTOM_GPIO_H
 	select HAVE_FUNCTION_TRACER
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_C_RECORDMCOUNT
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 60e7e5e45af1..8b6538750fe1 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -302,6 +302,9 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
 			   &return_to_handler;
 	int faulted, insns;
 
+	if (unlikely(ftrace_graph_is_dead()))
+		return;
+
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index 539b6294b613..00940d1d5c4f 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -74,10 +74,6 @@ _mcount:
 #endif
 
 	/* When tracing is activated, it calls ftrace_caller+8 (aka here) */
-	lw	t1, function_trace_stop
-	bnez	t1, ftrace_stub
-	nop
-
 	MCOUNT_SAVE_REGS
 #ifdef KBUILD_MCOUNT_RA_ADDRESS
 	PTR_S	MCOUNT_RA_ADDRESS_REG, PT_R12(sp)
@@ -105,9 +101,6 @@ ftrace_stub:
 #else	/* ! CONFIG_DYNAMIC_FTRACE */
 
 NESTED(_mcount, PT_SIZE, ra)
-	lw	t1, function_trace_stop
-	bnez	t1, ftrace_stub
-	nop
 	PTR_LA	t1, ftrace_stub
 	PTR_L	t2, ftrace_trace_function /* Prepare t2 for (1) */
 	bne	t1, t2, static_trace
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 108d48e652af..6e75e2030927 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -6,7 +6,6 @@ config PARISC
 	select HAVE_OPROFILE
 	select HAVE_FUNCTION_TRACER if 64BIT
 	select HAVE_FUNCTION_GRAPH_TRACER if 64BIT
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST if 64BIT
 	select ARCH_WANT_FRAME_POINTERS
 	select RTC_CLASS
 	select RTC_DRV_GENERIC
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index 5beb97bafbb1..559d400f9385 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -112,6 +112,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	unsigned long long calltime;
 	struct ftrace_graph_ent trace;
 
+	if (unlikely(ftrace_graph_is_dead()))
+		return;
+
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
@@ -152,9 +155,6 @@ void ftrace_function_trampoline(unsigned long parent,
 {
 	extern ftrace_func_t ftrace_trace_function;
 
-	if (function_trace_stop)
-		return;
-
 	if (ftrace_trace_function != ftrace_stub) {
 		ftrace_trace_function(parent, self_addr);
 		return;
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index d178834fe508..390311c0f03d 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -525,6 +525,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	struct ftrace_graph_ent trace;
 	unsigned long return_hooker = (unsigned long)&return_to_handler;
 
+	if (unlikely(ftrace_graph_is_dead()))
+		return;
+
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index bb63499fc5d3..f5af5f6ef0f4 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -116,7 +116,6 @@ config S390
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_GZIP
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 08dcf21cb8df..433c6dbfa442 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -21,13 +21,9 @@ ENTRY(_mcount)
 
 ENTRY(ftrace_caller)
 #endif
 	stm	%r2,%r5,16(%r15)
-	bras	%r1,2f
+	bras	%r1,1f
 0:	.long	ftrace_trace_function
-1:	.long	function_trace_stop
-2:	l	%r2,1b-0b(%r1)
-	icm	%r2,0xf,0(%r2)
-	jnz	3f
-	st	%r14,56(%r15)
+1:	st	%r14,56(%r15)
 	lr	%r0,%r15
 	ahi	%r15,-96
 	l	%r3,100(%r15)
@@ -50,7 +46,7 @@ ENTRY(ftrace_graph_caller)
 #endif
 	ahi	%r15,96
 	l	%r14,56(%r15)
-3:	lm	%r2,%r5,16(%r15)
+	lm	%r2,%r5,16(%r15)
 	br	%r14
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
index 1c52eae3396a..c67a8bf0fd9a 100644
--- a/arch/s390/kernel/mcount64.S
+++ b/arch/s390/kernel/mcount64.S
@@ -20,9 +20,6 @@ ENTRY(_mcount)
 
 ENTRY(ftrace_caller)
 #endif
-	larl	%r1,function_trace_stop
-	icm	%r1,0xf,0(%r1)
-	bnzr	%r14
 	stmg	%r2,%r5,32(%r15)
 	stg	%r14,112(%r15)
 	lgr	%r1,%r15
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 834b67c4db5a..aa2df3eaeb29 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -57,7 +57,6 @@ config SUPERH32
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_DYNAMIC_FTRACE
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select HAVE_FUNCTION_GRAPH_TRACER
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 3c74f53db6db..079d70e6d74b 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -344,6 +344,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	struct ftrace_graph_ent trace;
 	unsigned long return_hooker = (unsigned long)&return_to_handler;
 
+	if (unlikely(ftrace_graph_is_dead()))
+		return;
+
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S
index 52aa2011d753..7a8572f9d58b 100644
--- a/arch/sh/lib/mcount.S
+++ b/arch/sh/lib/mcount.S
@@ -92,13 +92,6 @@ mcount:
 	rts
 	 nop
 #else
-#ifndef CONFIG_DYNAMIC_FTRACE
-	mov.l	.Lfunction_trace_stop, r0
-	mov.l	@r0, r0
-	tst	r0, r0
-	bf	ftrace_stub
-#endif
-
 	MCOUNT_ENTER()
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -174,11 +167,6 @@ ftrace_graph_call:
 
 	.globl ftrace_caller
 ftrace_caller:
-	mov.l	.Lfunction_trace_stop, r0
-	mov.l	@r0, r0
-	tst	r0, r0
-	bf	ftrace_stub
-
 	MCOUNT_ENTER()
 
 	.globl ftrace_call
@@ -196,8 +184,6 @@ ftrace_call:
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 	.align 2
-.Lfunction_trace_stop:
-	.long	function_trace_stop
 
 /*
  * NOTE: From here on the locations of the .Lftrace_stub label and
@@ -217,12 +203,7 @@ ftrace_stub:
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	.globl	ftrace_graph_caller
ftrace_graph_caller:
-	mov.l	2f, r0
-	mov.l	@r0, r0
-	tst	r0, r0
-	bt	1f
-
-	mov.l	3f, r1
+	mov.l	2f, r1
 	jmp	@r1
 	 nop
 1:
@@ -242,8 +223,7 @@ ftrace_graph_caller:
 	MCOUNT_LEAVE()
 
 	.align 2
-2:	.long	function_trace_stop
-3:	.long	skip_trace
+2:	.long	skip_trace
 .Lprepare_ftrace_return:
 	.long	prepare_ftrace_return
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 407c87d9879a..4692c90936f1 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -55,7 +55,6 @@ config SPARC64
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_GRAPH_FP_TEST
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_KRETPROBES
 	select HAVE_KPROBES
 	select HAVE_RCU_TABLE_FREE if SMP
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S
index 3ad6cbdc2163..0b0ed4d34219 100644
--- a/arch/sparc/lib/mcount.S
+++ b/arch/sparc/lib/mcount.S
@@ -24,10 +24,7 @@ mcount:
 #ifdef CONFIG_DYNAMIC_FTRACE
 	/* Do nothing, the retl/nop below is all we need.  */
 #else
-	sethi		%hi(function_trace_stop), %g1
-	lduw		[%g1 + %lo(function_trace_stop)], %g2
-	brnz,pn		%g2, 2f
-	 sethi		%hi(ftrace_trace_function), %g1
+	sethi		%hi(ftrace_trace_function), %g1
 	sethi		%hi(ftrace_stub), %g2
 	ldx		[%g1 + %lo(ftrace_trace_function)], %g1
 	or		%g2, %lo(ftrace_stub), %g2
@@ -80,11 +77,8 @@ ftrace_stub:
 	.globl		ftrace_caller
 	.type		ftrace_caller,#function
 ftrace_caller:
-	sethi		%hi(function_trace_stop), %g1
 	mov		%i7, %g2
-	lduw		[%g1 + %lo(function_trace_stop)], %g1
-	brnz,pn		%g1, ftrace_stub
-	 mov		%fp, %g3
+	mov		%fp, %g3
 	save		%sp, -176, %sp
 	mov		%g2, %o1
 	mov		%g2, %l0
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 4f3006b600e3..7fcd492adbfc 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -128,7 +128,6 @@ config TILEGX
 	select SPARSE_IRQ
 	select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
 	select HAVE_FUNCTION_TRACER
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
diff --git a/arch/tile/kernel/mcount_64.S b/arch/tile/kernel/mcount_64.S
index 70d7bb0c4d8f..3c2b8d5e1d1a 100644
--- a/arch/tile/kernel/mcount_64.S
+++ b/arch/tile/kernel/mcount_64.S
@@ -77,15 +77,6 @@ STD_ENDPROC(__mcount)
 
 	.align	64
 STD_ENTRY(ftrace_caller)
-	moveli	r11, hw2_last(function_trace_stop)
-	{ shl16insli r11, r11, hw1(function_trace_stop); move r12, lr }
-	{ shl16insli r11, r11, hw0(function_trace_stop); move lr, r10 }
-	ld	r11, r11
-	beqz	r11, 1f
-	jrp	r12
-
-1:
-	{ move r10, lr; move lr, r12 }
 	MCOUNT_SAVE_REGS
 
 	/* arg1: self return address */
@@ -119,15 +110,6 @@ STD_ENDPROC(ftrace_caller)
 
 	.align	64
 STD_ENTRY(__mcount)
-	moveli	r11, hw2_last(function_trace_stop)
-	{ shl16insli r11, r11, hw1(function_trace_stop); move r12, lr }
-	{ shl16insli r11, r11, hw0(function_trace_stop); move lr, r10 }
-	ld	r11, r11
-	beqz	r11, 1f
-	jrp	r12
-
-1:
-	{ move r10, lr; move lr, r12 }
 	{
 	 moveli	r11, hw2_last(ftrace_trace_function)
 	 moveli	r13, hw2_last(ftrace_stub)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d24887b645dc..2840c27d4479 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -54,7 +54,6 @@ config X86
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_GRAPH_FP_TEST
-	select HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	select HAVE_SYSCALL_TRACEPOINTS
 	select SYSCTL_EXCEPTION_TRACE
 	select HAVE_KVM
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 0525a8bdf65d..e1f7fecaa7d6 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -68,6 +68,8 @@ struct dyn_arch_ftrace {
 
 int ftrace_int3_handler(struct pt_regs *regs);
 
+#define FTRACE_GRAPH_TRAMP_ADDR FTRACE_GRAPH_ADDR
+
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 0d0c9d4ab6d5..47c410d99f5d 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1059,9 +1059,6 @@ ENTRY(mcount)
 END(mcount)
 
 ENTRY(ftrace_caller)
-	cmpl $0, function_trace_stop
-	jne  ftrace_stub
-
 	pushl %eax
 	pushl %ecx
 	pushl %edx
@@ -1093,8 +1090,6 @@ END(ftrace_caller)
 
 ENTRY(ftrace_regs_caller)
 	pushf	/* push flags before compare (in cs location) */
-	cmpl $0, function_trace_stop
-	jne ftrace_restore_flags
 
 	/*
 	 * i386 does not save SS and ESP when coming from kernel.
@@ -1153,7 +1148,6 @@ GLOBAL(ftrace_regs_call)
 	popf			/* Pop flags at end (no addl to corrupt flags) */
 	jmp ftrace_ret
 
-ftrace_restore_flags:
 	popf
 	jmp ftrace_stub
 
 #else /* ! CONFIG_DYNAMIC_FTRACE */
@@ -1162,9 +1156,6 @@ ENTRY(mcount)
 	cmpl $__PAGE_OFFSET, %esp
 	jb ftrace_stub		/* Paging not enabled yet? */
 
-	cmpl $0, function_trace_stop
-	jne  ftrace_stub
-
 	cmpl $ftrace_stub, ftrace_trace_function
 	jnz trace
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index cbc4a91b131e..3386dc9aa333 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -703,6 +703,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 	unsigned long return_hooker = (unsigned long)
 				&return_to_handler;
 
+	if (unlikely(ftrace_graph_is_dead()))
+		return;
+
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index c050a0153168..c73aecf10d34 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -46,10 +46,6 @@ END(function_hook)
 .endm
 
 ENTRY(ftrace_caller)
-	/* Check if tracing was disabled (quick check) */
-	cmpl $0, function_trace_stop
-	jne  ftrace_stub
-
 	ftrace_caller_setup
 	/* regs go into 4th parameter (but make it NULL) */
 	movq $0, %rcx
@@ -73,10 +69,6 @@ ENTRY(ftrace_regs_caller)
 	/* Save the current flags before compare (in SS location)*/
 	pushfq
 
-	/* Check if tracing was disabled (quick check) */
-	cmpl $0, function_trace_stop
-	jne  ftrace_restore_flags
-
 	/* skip=8 to skip flags saved in SS */
 	ftrace_caller_setup 8
 
@@ -131,7 +123,7 @@ GLOBAL(ftrace_regs_call)
 	popfq
 	jmp ftrace_return
 
-ftrace_restore_flags:
+
 	popfq
 	jmp  ftrace_stub
 
@@ -141,9 +133,6 @@ END(ftrace_regs_caller)
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 
 ENTRY(function_hook)
-	cmpl $0, function_trace_stop
-	jne  ftrace_stub
-
 	cmpq $ftrace_stub, ftrace_trace_function
 	jnz trace
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index 9d2e0ffcb190..2e5652b62fd6 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -22,7 +22,7 @@
 	__entry->unsync = sp->unsync;
 
 #define KVM_MMU_PAGE_PRINTK() ({				        \
-	const char *ret = p->buffer + p->len;			        \
+	const char *ret = trace_seq_buffer_ptr(p);		        \
 	static const char *access_str[] = {			        \
 		"---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux"  \
 	};							        \
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 424f4c97a44d..6ec7910f59bf 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -165,7 +165,7 @@ static void fix_processor_context(void)
  *	by __save_processor_state()
  *	@ctxt - structure to load the registers contents from
  */
-static void __restore_processor_state(struct saved_context *ctxt)
+static void notrace __restore_processor_state(struct saved_context *ctxt)
 {
 	if (ctxt->misc_enable_saved)
 		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
@@ -239,7 +239,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
 }
 
 /* Needed by apm.c */
-void restore_processor_state(void)
+void notrace restore_processor_state(void)
 {
 	__restore_processor_state(&saved_context);
 }
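The arch/x86/kvm/mmutrace.h hunk belongs to the trace_seq cleanup
mentioned in the pull message: callers stop reaching into struct
trace_seq fields and use an accessor instead. Its shape is roughly
the following (a sketch based on the 3.17-era helper in
include/linux/trace_seq.h; the exact definition may differ):

static inline unsigned char *trace_seq_buffer_ptr(struct trace_seq *s)
{
	/* Pointer just past the data already written into the buffer. */
	return s->buffer + s->len;
}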