author     Linus Torvalds <torvalds@linux-foundation.org>  2022-12-15 18:01:16 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-12-15 18:01:16 -0800
commit     fe36bb8736ee9e38fa6173e1271ed8c5cf7bc907 (patch)
tree       15948d02fe68c2b8c8d6de803006a82f226853de /kernel/trace/ftrace.c
parent     851f657a86421dded42b6175c6ea0f4f5e86af97 (diff)
parent     eb9d58947d40699d93e5e69e1ddc54e41da7e132 (diff)
Merge tag 'trace-v6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull tracing updates from Steven Rostedt:

 - Add options to the osnoise tracer:
     - 'panic_on_stop' option that panics the kernel if osnoise is
       greater than some user defined threshold.
     - 'preempt' option, to test noise while preemption is disabled
     - 'irq' option, to test noise when interrupts are disabled

 - Add .percent and .graph suffix to histograms to give different
   outputs

 - Add nohitcount to disable showing hitcount in histogram output

 - Add new __cpumask() to trace event fields to annotate that an
   unsigned long array is a cpumask to user space and should be
   treated as one.

 - Add trace_trigger kernel command line parameter to enable trace
   event triggers at boot up. Useful to trace stack traces, disable
   tracing and take snapshots.

 - Fix x86/kmmio mmio tracer to work with the updates to lockdep

 - Unify the panic and die notifiers

 - Add back ftrace_expect reference that is used to extract more
   information in the ftrace_bug() code.

 - Have trigger filter parsing errors show up in the tracing error
   log.

 - Updated MAINTAINERS file to add kernel tracing mailing list and
   patchwork info

 - Use IDA to keep track of event type numbers.

 - And minor fixes and clean ups

* tag 'trace-v6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace: (44 commits)
  tracing: Fix cpumask() example typo
  tracing: Improve panic/die notifiers
  ftrace: Prevent RCU stall on PREEMPT_VOLUNTARY kernels
  tracing: Do not synchronize freeing of trigger filter on boot up
  tracing: Remove pointer (asterisk) and brackets from cpumask_t field
  tracing: Have trigger filter parsing errors show up in error_log
  x86/mm/kmmio: Remove redundant preempt_disable()
  tracing: Fix infinite loop in tracing_read_pipe on overflowed print_trace_line
  Documentation/osnoise: Add osnoise/options documentation
  tracing/osnoise: Add preempt and/or irq disabled options
  tracing/osnoise: Add PANIC_ON_STOP option
  Documentation/osnoise: Escape underscore of NO_ prefix
  tracing: Fix some checker warnings
  tracing/osnoise: Make osnoise_options static
  tracing: remove unnecessary trace_trigger ifdef
  ring-buffer: Handle resize in early boot up
  tracing/hist: Fix issue of losting command info in error_log
  tracing: Fix issue of missing one synthetic field
  tracing/hist: Fix out-of-bound write on 'action_data.var_ref_idx'
  tracing/hist: Fix wrong return value in parse_action_params()
  ...
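As an aside on the new __cpumask() field type mentioned above, a minimal
sketch of how a trace event might use it is shown below. The event name and
TP_PROTO arguments are hypothetical, and the __assign_cpumask()/__get_cpumask()
helpers are assumed to mirror the existing __bitmask() helpers.

/*
 * Hypothetical trace event (not part of this merge) showing the new
 * __cpumask() field type.  This would live in a normal trace header
 * with the usual TRACE_SYSTEM boilerplate.
 */
TRACE_EVENT(sample_cpumask_event,

	TP_PROTO(const struct cpumask *mask),

	TP_ARGS(mask),

	TP_STRUCT__entry(
		__cpumask(	cpus	)
	),

	TP_fast_assign(
		__assign_cpumask(cpus, cpumask_bits(mask));
	),

	TP_printk("cpus=%s", __get_cpumask(cpus))
);

User space tooling that parses the event format can then treat the recorded
unsigned long array as a cpumask rather than a raw bitmask.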
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--  kernel/trace/ftrace.c  28
1 file changed, 19 insertions, 9 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8e842f68b9a5..442438b93fe9 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -163,7 +163,7 @@ static void ftrace_sync_ipi(void *data)
 static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
 {
 	/*
-	 * If this is a dynamic, RCU, or per CPU ops, or we force list func,
+	 * If this is a dynamic or RCU ops, or we force list func,
 	 * then it needs to call the list anyway.
 	 */
 	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
@@ -2762,6 +2762,19 @@ void __weak ftrace_arch_code_modify_post_process(void)
 {
 }
 
+static int update_ftrace_func(ftrace_func_t func)
+{
+	static ftrace_func_t save_func;
+
+	/* Avoid updating if it hasn't changed */
+	if (func == save_func)
+		return 0;
+
+	save_func = func;
+
+	return ftrace_update_ftrace_func(func);
+}
+
 void ftrace_modify_all_code(int command)
 {
 	int update = command & FTRACE_UPDATE_TRACE_FUNC;
@@ -2782,7 +2795,7 @@ void ftrace_modify_all_code(int command)
 	 * traced.
 	 */
 	if (update) {
-		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
+		err = update_ftrace_func(ftrace_ops_list_func);
 		if (FTRACE_WARN_ON(err))
 			return;
 	}
@@ -2798,7 +2811,7 @@ void ftrace_modify_all_code(int command)
 		/* If irqs are disabled, we are in stop machine */
 		if (!irqs_disabled())
 			smp_call_function(ftrace_sync_ipi, NULL, 1);
-		err = ftrace_update_ftrace_func(ftrace_trace_function);
+		err = update_ftrace_func(ftrace_trace_function);
 		if (FTRACE_WARN_ON(err))
 			return;
 	}
@@ -3070,8 +3083,6 @@ out:
 	/*
 	 * Dynamic ops may be freed, we must make sure that all
 	 * callers are done before leaving this function.
-	 * The same goes for freeing the per_cpu data of the per_cpu
-	 * ops.
 	 */
 	if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
 		/*
@@ -4192,6 +4203,7 @@ match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
 			}
 			found = 1;
 		}
+		cond_resched();
 	} while_for_each_ftrace_rec();
  out_unlock:
 	mutex_unlock(&ftrace_lock);
@@ -7518,8 +7530,6 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 	/*
 	 * Check the following for each ops before calling their func:
 	 *  if RCU flag is set, then rcu_is_watching() must be true
-	 *  if PER_CPU is set, then ftrace_function_local_disable()
-	 *  must be false
 	 *  Otherwise test if the ip matches the ops filter
 	 *
 	 * If any of the above fails then the op->func() is not executed.
@@ -7569,8 +7579,8 @@ NOKPROBE_SYMBOL(arch_ftrace_ops_list_func);
 
 /*
  * If there's only one function registered but it does not support
- * recursion, needs RCU protection and/or requires per cpu handling, then
- * this function will be called by the mcount trampoline.
+ * recursion, needs RCU protection, then this function will be called
+ * by the mcount trampoline.
  */
 static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
 				   struct ftrace_ops *op, struct ftrace_regs *fregs)
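
As context for the ftrace_ops_assist_func() comment in the final hunk, below
is a minimal, hypothetical sketch of a module that registers an ftrace_ops
relying on RCU protection. The callback and ops names are invented for
illustration; ftrace itself decides, based on the ops flags, whether calls go
through the list/assist paths touched by this patch.

#include <linux/ftrace.h>
#include <linux/module.h>

/* Hypothetical callback, invoked for every traced function entry. */
static void my_trace_callback(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* Must be fast and must not sleep: this runs in tracing context. */
}

static struct ftrace_ops my_ops = {
	.func	= my_trace_callback,
	/* The callback relies on RCU being active when it runs. */
	.flags	= FTRACE_OPS_FL_RCU,
};

static int __init my_trace_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_trace_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_trace_init);
module_exit(my_trace_exit);
MODULE_LICENSE("GPL");

Because FTRACE_OPS_FL_RCU is set, the callback is only invoked while RCU is
watching, which is the rcu_is_watching() check described in the
__ftrace_ops_list_func() comment above.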