author		Steven Rostedt (VMware) <rostedt@goodmis.org>	2021-10-15 13:42:40 -0400
committer	Steven Rostedt (VMware) <rostedt@goodmis.org>	2021-10-19 20:33:20 -0400
commit		9b84fadc444de5456ab5f5487e2108311c724c3f (patch)
tree		0bbafa429f2728795926fbe1b152ea2415a339f0
parent		7ce1bb83a14019f8c396d57ec704d19478747716 (diff)
download	linux-9b84fadc444de5456ab5f5487e2108311c724c3f.tar.bz2
tracing: Reuse logic from perf's get_recursion_context()
Instead of having branches that add noise to the branch prediction, use the
addition logic to set the bit for the level of interrupt context that the
state is currently in. This copies the logic from perf's
get_recursion_context() function.

Link: https://lore.kernel.org/all/20211015161702.GF174703@worktop.programming.kicks-ass.net/

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
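For illustration, here is a minimal user-space sketch of the branchless
mapping the patch adopts. The mask values and the context enum are
simplified stand-ins for the kernel's NMI_MASK, HARDIRQ_MASK,
SOFTIRQ_OFFSET and TRACE_CTX_* definitions, not the real kernel values:

/*
 * Illustrative sketch only (not kernel code). The masks below are
 * made-up stand-ins for NMI_MASK, HARDIRQ_MASK and SOFTIRQ_OFFSET.
 */
#include <stdio.h>

#define FAKE_NMI_MASK		0x00100000UL
#define FAKE_HARDIRQ_MASK	0x00010000UL
#define FAKE_SOFTIRQ_OFFSET	0x00000100UL

enum { CTX_NMI, CTX_IRQ, CTX_SOFTIRQ, CTX_NORMAL };

static int context_level(unsigned long pc)
{
	unsigned char bit = 0;

	/*
	 * Each test is true in that context or any harder one, so "bit"
	 * counts how deep into interrupt context the state is (0..3).
	 */
	bit += !!(pc & (FAKE_NMI_MASK));
	bit += !!(pc & (FAKE_NMI_MASK | FAKE_HARDIRQ_MASK));
	bit += !!(pc & (FAKE_NMI_MASK | FAKE_HARDIRQ_MASK | FAKE_SOFTIRQ_OFFSET));

	return CTX_NORMAL - bit;	/* NMI=0, IRQ=1, SOFTIRQ=2, NORMAL=3 */
}

int main(void)
{
	/* Prints "3 2 1 0": normal, softirq, hardirq, NMI. */
	printf("%d %d %d %d\n",
	       context_level(0),
	       context_level(FAKE_SOFTIRQ_OFFSET),
	       context_level(FAKE_HARDIRQ_MASK),
	       context_level(FAKE_NMI_MASK));
	return 0;
}

The single subtraction at the end replaces the nested ternaries of the old
code, so computing the context level no longer takes data-dependent branches.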
-rw-r--r--	include/linux/trace_recursion.h	11
-rw-r--r--	kernel/trace/ring_buffer.c	12
2 files changed, 12 insertions, 11 deletions
diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h
index a9f9c5714e65..f6da7a03bff0 100644
--- a/include/linux/trace_recursion.h
+++ b/include/linux/trace_recursion.h
@@ -137,12 +137,13 @@ enum {
 static __always_inline int trace_get_context_bit(void)
 {
 	unsigned long pc = preempt_count();
+	unsigned char bit = 0;
 
-	if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
-		return TRACE_CTX_NORMAL;
-	else
-		return pc & NMI_MASK ? TRACE_CTX_NMI :
-			pc & HARDIRQ_MASK ? TRACE_CTX_IRQ : TRACE_CTX_SOFTIRQ;
+	bit += !!(pc & (NMI_MASK));
+	bit += !!(pc & (NMI_MASK | HARDIRQ_MASK));
+	bit += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+
+	return TRACE_CTX_NORMAL - bit;
 }
 
 #ifdef CONFIG_FTRACE_RECORD_RECURSION
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index c5a3fbf19617..15d4380006e3 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3168,13 +3168,13 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	unsigned int val = cpu_buffer->current_context;
 	unsigned long pc = preempt_count();
-	int bit;
+	int bit = 0;
 
-	if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
-		bit = RB_CTX_NORMAL;
-	else
-		bit = pc & NMI_MASK ? RB_CTX_NMI :
-			pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
+	bit += !!(pc & (NMI_MASK));
+	bit += !!(pc & (NMI_MASK | HARDIRQ_MASK));
+	bit += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));
+
+	bit = RB_CTX_NORMAL - bit;
 
 	if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
 		/*