author     Linus Torvalds <torvalds@linux-foundation.org>   2016-12-15 13:49:34 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-12-15 13:49:34 -0800
commit     179a7ba6806805bd4cd7a5e4574b83353c5615ad (patch)
tree       58855a59ba3bd66f947c3f9781cd44a7329c7d75 /arch/powerpc
parent     5e176d6973bdac04d9f298ca384c39f08eb084cb (diff)
parent     3dbb16b87b57bb1088044ad2a0432e4769075002 (diff)
download   linux-179a7ba6806805bd4cd7a5e4574b83353c5615ad.tar.bz2
Merge tag 'trace-v4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
"This release has a few updates:
- STM can hook into the function tracer
- Function filtering now supports more advanced glob matching
- Ftrace selftests updates and added tests
- Softirq tag in traces now shows only softirqs
- ARM nop added to non-traced locations at compile time
- New trace_marker_raw file that allows for binary input (see the sketch after this message)
- Optimizations to the ring buffer
- Removal of kmap in trace_marker
- Wakeup and irqsoff tracers now adhere to the set_graph_notrace file
- Other various fixes and clean ups"
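
The trace_marker_raw item above is easy to show concretely. Below is a minimal, hedged sketch of a user-space writer: the tracefs path and the record layout (struct my_marker) are assumptions made for illustration, not taken from this merge. The point is only that raw bytes, rather than a text string, can be written and later decoded by a tool that knows the layout.

/*
 * Hedged sketch: write one binary record to the new trace_marker_raw file.
 * Path and struct layout are illustrative assumptions.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

struct my_marker {              /* hypothetical record layout */
	uint32_t id;
	uint64_t value;
};

int main(void)
{
	struct my_marker m = { .id = 42, .value = 1234 };
	int fd = open("/sys/kernel/debug/tracing/trace_marker_raw", O_WRONLY);

	if (fd < 0) {
		perror("open trace_marker_raw");
		return 1;
	}
	if (write(fd, &m, sizeof(m)) < 0)   /* raw bytes, not text */
		perror("write");
	close(fd);
	return 0;
}

Unlike trace_marker, the data lands in the trace as a raw record, so it is most useful when paired with a user-space tool that knows how to decode the bytes it wrote.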
* tag 'trace-v4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (42 commits)
selftests: ftrace: Shift down default message verbosity
kprobes/trace: Fix kprobe selftest for newer gcc
tracing/kprobes: Add a helper method to return number of probe hits
tracing/rb: Init the CPU mask on allocation
tracing: Use SOFTIRQ_OFFSET for softirq detection for more accurate results
tracing/fgraph: Have wakeup and irqsoff tracers ignore graph functions too
fgraph: Handle a case where a tracer ignores set_graph_notrace
tracing: Replace kmap with copy_from_user() in trace_marker writing
ftrace/x86_32: Set ftrace_stub to weak to prevent gcc from using short jumps to it
tracing: Allow benchmark to be enabled at early_initcall()
tracing: Have system enable return error if one of the events fail
tracing: Do not start benchmark on boot up
tracing: Have the reg function allow to fail
ring-buffer: Force rb_end_commit() and rb_set_commit_to_write() inline
ring-buffer: Force rb_update_write_stamp() to be inlined
ring-buffer: Force inline of hotpath helper functions
tracing: Make __buffer_unlock_commit() always_inline
tracing: Make tracepoint_printk a static_key
ring-buffer: Always inline rb_event_data()
ring-buffer: Make rb_reserve_next_event() always inlined
...
Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/include/asm/trace.h                    4
-rw-r--r--   arch/powerpc/platforms/powernv/opal-tracepoints.c   6
-rw-r--r--   arch/powerpc/platforms/pseries/lpar.c               6
3 files changed, 10 insertions, 6 deletions
diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h
index 32e36b16773f..c05cef6ee06c 100644
--- a/arch/powerpc/include/asm/trace.h
+++ b/arch/powerpc/include/asm/trace.h
@@ -54,7 +54,7 @@ DEFINE_EVENT(ppc64_interrupt_class, timer_interrupt_exit,
 );
 
 #ifdef CONFIG_PPC_PSERIES
-extern void hcall_tracepoint_regfunc(void);
+extern int hcall_tracepoint_regfunc(void);
 extern void hcall_tracepoint_unregfunc(void);
 
 TRACE_EVENT_FN_COND(hcall_entry,
@@ -104,7 +104,7 @@ TRACE_EVENT_FN_COND(hcall_exit,
 #endif
 
 #ifdef CONFIG_PPC_POWERNV
-extern void opal_tracepoint_regfunc(void);
+extern int opal_tracepoint_regfunc(void);
 extern void opal_tracepoint_unregfunc(void);
 
 TRACE_EVENT_FN(opal_entry,
diff --git a/arch/powerpc/platforms/powernv/opal-tracepoints.c b/arch/powerpc/platforms/powernv/opal-tracepoints.c
index 1e496b780efd..3c447002edff 100644
--- a/arch/powerpc/platforms/powernv/opal-tracepoints.c
+++ b/arch/powerpc/platforms/powernv/opal-tracepoints.c
@@ -6,9 +6,10 @@
 #ifdef HAVE_JUMP_LABEL
 struct static_key opal_tracepoint_key = STATIC_KEY_INIT;
 
-void opal_tracepoint_regfunc(void)
+int opal_tracepoint_regfunc(void)
 {
 	static_key_slow_inc(&opal_tracepoint_key);
+	return 0;
 }
 
 void opal_tracepoint_unregfunc(void)
@@ -25,9 +26,10 @@ void opal_tracepoint_unregfunc(void)
 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
 extern long opal_tracepoint_refcount;
 
-void opal_tracepoint_regfunc(void)
+int opal_tracepoint_regfunc(void)
 {
 	opal_tracepoint_refcount++;
+	return 0;
 }
 
 void opal_tracepoint_unregfunc(void)
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index f2c98f6c1c9c..a78da511ffeb 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -661,9 +661,10 @@ EXPORT_SYMBOL(arch_free_page);
 #ifdef HAVE_JUMP_LABEL
 struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;
 
-void hcall_tracepoint_regfunc(void)
+int hcall_tracepoint_regfunc(void)
 {
 	static_key_slow_inc(&hcall_tracepoint_key);
+	return 0;
 }
 
 void hcall_tracepoint_unregfunc(void)
@@ -680,9 +681,10 @@ void hcall_tracepoint_unregfunc(void)
 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
 extern long hcall_tracepoint_refcount;
 
-void hcall_tracepoint_regfunc(void)
+int hcall_tracepoint_regfunc(void)
 {
 	hcall_tracepoint_refcount++;
+	return 0;
 }
 
 void hcall_tracepoint_unregfunc(void)
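
All three files above make the same mechanical change: the tracepoint registration callbacks now return int instead of void, so a failure during registration can be reported (this is the arch side of the "tracing: Have the reg function allow to fail" commit in the list above). A minimal sketch of the new shape follows; the my_tracepoint_* names are hypothetical, not from this patch, and only the shape mirrors the hunks above.

/*
 * Hedged sketch of a tracepoint reg/unreg pair in the new style.
 * Registration returns 0 on success or a negative errno on failure;
 * this particular body, like the powerpc ones, cannot fail.
 */
#include <linux/jump_label.h>

struct static_key my_tracepoint_key = STATIC_KEY_INIT;

int my_tracepoint_regfunc(void)
{
	static_key_slow_inc(&my_tracepoint_key);
	return 0;	/* nothing here can fail */
}

void my_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&my_tracepoint_key);
}

The unregister side still returns void, which matches the hunks above: only registration gained a way to propagate errors.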