From 78a01feb4024ffb6c6321e45dc2bfcafb2d1d1e5 Mon Sep 17 00:00:00 2001
From: Zheng Yejian <zhengyejian1@huawei.com>
Date: Tue, 25 Oct 2022 15:39:23 +0000
Subject: ftrace: Clean comments related to FTRACE_OPS_FL_PER_CPU

Commit b3a88803ac5b ("ftrace: Kill FTRACE_OPS_FL_PER_CPU") didn't
completely remove the comments related to FTRACE_OPS_FL_PER_CPU.

Link: https://lkml.kernel.org/r/20221025153923.1995973-1-zhengyejian1@huawei.com

Fixes: b3a88803ac5b ("ftrace: Kill FTRACE_OPS_FL_PER_CPU")
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
 kernel/trace/ftrace.c | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 33236241f236..65a5d36463e0 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -163,7 +163,7 @@ static void ftrace_sync_ipi(void *data)
 static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
 {
 	/*
-	 * If this is a dynamic, RCU, or per CPU ops, or we force list func,
+	 * If this is a dynamic or RCU ops, or we force list func,
 	 * then it needs to call the list anyway.
 	 */
 	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
@@ -3071,8 +3071,6 @@ out:
 	/*
 	 * Dynamic ops may be freed, we must make sure that all
 	 * callers are done before leaving this function.
-	 * The same goes for freeing the per_cpu data of the per_cpu
-	 * ops.
 	 */
 	if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
 		/*
@@ -7519,8 +7517,6 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 	/*
 	 * Check the following for each ops before calling their func:
 	 *  if RCU flag is set, then rcu_is_watching() must be true
-	 *  if PER_CPU is set, then ftrace_function_local_disable()
-	 *  must be false
 	 *  Otherwise test if the ip matches the ops filter
 	 *
 	 * If any of the above fails then the op->func() is not executed.
@@ -7570,8 +7566,8 @@ NOKPROBE_SYMBOL(arch_ftrace_ops_list_func);
 
 /*
  * If there's only one function registered but it does not support
- * recursion, needs RCU protection and/or requires per cpu handling, then
- * this function will be called by the mcount trampoline.
+ * recursion, needs RCU protection, then this function will be called
+ * by the mcount trampoline.
  */
 static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
				   struct ftrace_ops *op, struct ftrace_regs *fregs)
--
cgit v1.2.3

From bd604f3db49c5b21171abea0414a2020dcbf2646 Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (Google)" <rostedt@goodmis.org>
Date: Tue, 22 Nov 2022 18:09:05 -0500
Subject: ftrace: Avoid needless updates of the ftrace function call

Song Shuai reported:

    The list func (ftrace_ops_list_func) will be patched first
    before the transition between old and new calls are set,
    which fixed the race described in this commit `59338f75`.

    While ftrace_trace_function changes from the list func to a
    ftrace_ops func, like unregistering the klp_ops to leave the
    only global_ops in ftrace_ops_list, the ftrace_[regs]_call
    will be replaced with the list func although it already exists.

    So there should be a condition to avoid this.

And suggested using another variable to keep track of what the ftrace
function is set to. But this could be simplified by using a helper
function that does the same with a static variable.

Link: https://lore.kernel.org/lkml/20221026132039.2236233-1-suagrfillet@gmail.com/
Link: https://lore.kernel.org/linux-trace-kernel/20221122180905.737b6f52@gandalf.local.home

Reported-by: Song Shuai <suagrfillet@gmail.com>
Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
 kernel/trace/ftrace.c | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 65a5d36463e0..d04552c0c275 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2763,6 +2763,19 @@ void __weak ftrace_arch_code_modify_post_process(void)
 {
 }
 
+static int update_ftrace_func(ftrace_func_t func)
+{
+	static ftrace_func_t save_func;
+
+	/* Avoid updating if it hasn't changed */
+	if (func == save_func)
+		return 0;
+
+	save_func = func;
+
+	return ftrace_update_ftrace_func(func);
+}
+
 void ftrace_modify_all_code(int command)
 {
 	int update = command & FTRACE_UPDATE_TRACE_FUNC;
@@ -2783,7 +2796,7 @@ void ftrace_modify_all_code(int command)
 	 * traced.
	 */
 	if (update) {
-		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
+		err = update_ftrace_func(ftrace_ops_list_func);
 		if (FTRACE_WARN_ON(err))
 			return;
 	}
@@ -2799,7 +2812,7 @@ void ftrace_modify_all_code(int command)
 		/* If irqs are disabled, we are in stop machine */
 		if (!irqs_disabled())
 			smp_call_function(ftrace_sync_ipi, NULL, 1);
-		err = ftrace_update_ftrace_func(ftrace_trace_function);
+		err = update_ftrace_func(ftrace_trace_function);
 		if (FTRACE_WARN_ON(err))
 			return;
 	}
--
cgit v1.2.3
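The update_ftrace_func() helper above is a general pattern worth noting:
cache the last value written and turn redundant writes into no-ops, which
matters when the write itself is expensive (here, live-patching the ftrace
call sites). A minimal user-space C sketch of the same idea follows;
expensive_patch() and the dummy callbacks are hypothetical stand-ins for
the arch-level call-site rewrite, not kernel API:

    #include <stdio.h>

    typedef void (*trace_func_t)(void);

    /* Hypothetical stand-in for the costly call-site rewrite that
     * ftrace_update_ftrace_func() performs in the kernel. */
    static int expensive_patch(trace_func_t func)
    {
        (void)func;
        printf("patching call site\n");
        return 0;
    }

    /* Same shape as update_ftrace_func() in the patch: a static
     * variable remembers what was last installed, and a matching
     * request returns early without touching the call site. */
    static int update_func(trace_func_t func)
    {
        static trace_func_t save_func;

        if (func == save_func) /* avoid updating if it hasn't changed */
            return 0;

        save_func = func;
        return expensive_patch(func);
    }

    static void list_func(void) { }
    static void ops_func(void) { }

    int main(void)
    {
        update_func(list_func); /* patches */
        update_func(list_func); /* no-op: already installed */
        update_func(ops_func);  /* patches again */
        return 0;
    }

Because save_func has static storage duration it starts out NULL, so the
first registration always patches; that matches the kernel case, where the
call site initially points at ftrace_stub rather than a registered function.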
From d0b24b4e91fcb8408c4979888547f86be514e337 Mon Sep 17 00:00:00 2001
From: "Guilherme G. Piccoli" <gpiccoli@igalia.com>
Date: Tue, 15 Nov 2022 17:48:47 -0300
Subject: ftrace: Prevent RCU stall on PREEMPT_VOLUNTARY kernels

The function match_records() may take a while due to a large number
of string comparisons, so on PREEMPT_VOLUNTARY kernels we could face
RCU stalls because of that. Add a cond_resched() to prevent it.

Link: https://lkml.kernel.org/r/20221115204847.593616-1-gpiccoli@igalia.com

Cc: Mark Rutland <mark.rutland@arm.com>
Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Paul E. McKenney <paulmck@kernel.org> # from RCU CPU stall warning perspective
Signed-off-by: Guilherme G. Piccoli <gpiccoli@igalia.com>
Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
---
 kernel/trace/ftrace.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d04552c0c275..b8e374a372e5 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4204,6 +4204,7 @@ match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
 			}
 			found = 1;
 		}
+		cond_resched();
 	} while_for_each_ftrace_rec();
 out_unlock:
 	mutex_unlock(&ftrace_lock);
--
cgit v1.2.3
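The fix relies on cond_resched() being an explicit preemption point: on
PREEMPT_VOLUNTARY kernels, code running in kernel context reschedules only
at such points, so a long non-sleeping loop can hold the CPU long enough
for RCU to report a stall. As a rough user-space analogue (not kernel
code: sched_yield() stands in for cond_resched(), and the loop body is
invented for illustration):

    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
        long matches = 0;

        /* Long linear scan, standing in for the walk over every
         * ftrace record that match_records() performs. */
        for (long i = 0; i < 50000000L; i++) {
            if (i % 9973 == 0) /* stand-in for a filter match */
                matches++;

            /* Periodic, explicit chance to give up the CPU; in the
             * patch, cond_resched() once per record plays this role. */
            if (i % (1L << 20) == 0)
                sched_yield();
        }

        printf("matches: %ld\n", matches);
        return 0;
    }

In the kernel, cond_resched() is cheap when no reschedule is pending (it
only checks a flag), which is why calling it once per record inside the
while_for_each_ftrace_rec() loop is acceptable.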