author:    Seiji Aguchi <seiji.aguchi@hds.com>    2013-06-28 14:02:11 -0400
committer: Ingo Molnar <mingo@kernel.org>         2013-07-02 09:52:31 +0200
commit:    4787c368a9bca39e173d702389ee2eaf0520abc1 (patch)
tree:      4c195c5ca8147b5924f1dbd3cfc069061dafe53d /arch/x86/kernel/smp.c
parent:    5236eb968ec21c693d463d0494e39b00c1bc174d (diff)
x86/tracing: Add irq_enter/exit() in smp_trace_reschedule_interrupt()
Reschedule vector tracepoints may be called while the CPU is idle.
This triggers the lockdep warning shown below.

The tracepoint requires RCU, but for accuracy it also requires
irq_enter() (tracepoints record the irq context), so the tracepoint
interrupt handler should call irq_enter() rather than just
rcu_irq_enter() (irq_enter() calls rcu_irq_enter() itself).

So, add irq_enter()/irq_exit() to smp_trace_reschedule_interrupt()
via common pre/post-processing helpers, smp_entering_irq() and
exiting_irq() (exiting_irq() just calls irq_exit() and is defined
in arch/x86/include/asm/apic.h), because these can be shared among
the reschedule, call_function, and call_function_single vectors.
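In outline, the reschedule trace handler after this change looks as
follows (a condensed sketch of the full patch at the bottom of this
page; the comments are added here for explanation):

static inline void smp_entering_irq(void)
{
	ack_APIC_irq();
	irq_enter();		/* irq_enter() also calls rcu_irq_enter() */
}

void smp_trace_reschedule_interrupt(struct pt_regs *regs)
{
	smp_entering_irq();	/* must run before the tracepoints */
	trace_reschedule_entry(RESCHEDULE_VECTOR);
	__smp_reschedule_interrupt();
	trace_reschedule_exit(RESCHEDULE_VECTOR);
	exiting_irq();		/* just irq_exit(), from <asm/apic.h> */
}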
[ 50.720557] Testing event reschedule_exit:
[ 50.721349]
[ 50.721502] ===============================
[ 50.721835] [ INFO: suspicious RCU usage. ]
[ 50.722169] 3.10.0-rc6-00004-gcf910e8 #190 Not tainted
[ 50.722582] -------------------------------
[ 50.722915] /c/kernel-tests/src/linux/arch/x86/include/asm/trace/irq_vectors.h:50 suspicious rcu_dereference_check() usage!
[ 50.723770]
[ 50.723770] other info that might help us debug this:
[ 50.723770]
[ 50.724385]
[ 50.724385] RCU used illegally from idle CPU!
[ 50.724385] rcu_scheduler_active = 1, debug_locks = 0
[ 50.725232] RCU used illegally from extended quiescent state!
[ 50.725690] no locks held by swapper/0/0.
[ 50.726010]
[ 50.726010] stack backtrace:
[...]
Signed-off-by: Seiji Aguchi <seiji.aguchi@hds.com>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Link: http://lkml.kernel.org/r/51CDCFA3.9080101@hds.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kernel/smp.c')
 -rw-r--r--   arch/x86/kernel/smp.c | 29
 1 file changed, 18 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index f4fe0b8879e0..cdaa347dfcad 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -265,23 +265,30 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
 	 */
 }
 
-void smp_trace_reschedule_interrupt(struct pt_regs *regs)
+static inline void smp_entering_irq(void)
 {
 	ack_APIC_irq();
+	irq_enter();
+}
+
+void smp_trace_reschedule_interrupt(struct pt_regs *regs)
+{
+	/*
+	 * Need to call irq_enter() before calling the trace point.
+	 * __smp_reschedule_interrupt() calls irq_enter/exit() too (in
+	 * scheduler_ipi(). This is OK, since those functions are allowed
+	 * to nest.
+	 */
+	smp_entering_irq();
 	trace_reschedule_entry(RESCHEDULE_VECTOR);
 	__smp_reschedule_interrupt();
 	trace_reschedule_exit(RESCHEDULE_VECTOR);
+	exiting_irq();
 	/*
 	 * KVM uses this interrupt to force a cpu out of guest mode
 	 */
 }
 
-static inline void call_function_entering_irq(void)
-{
-	ack_APIC_irq();
-	irq_enter();
-}
-
 static inline void __smp_call_function_interrupt(void)
 {
 	generic_smp_call_function_interrupt();
@@ -290,14 +297,14 @@ static inline void __smp_call_function_interrupt(void)
 
 void smp_call_function_interrupt(struct pt_regs *regs)
 {
-	call_function_entering_irq();
+	smp_entering_irq();
 	__smp_call_function_interrupt();
 	exiting_irq();
 }
 
 void smp_trace_call_function_interrupt(struct pt_regs *regs)
 {
-	call_function_entering_irq();
+	smp_entering_irq();
 	trace_call_function_entry(CALL_FUNCTION_VECTOR);
 	__smp_call_function_interrupt();
 	trace_call_function_exit(CALL_FUNCTION_VECTOR);
@@ -312,14 +319,14 @@ static inline void __smp_call_function_single_interrupt(void)
 
 void smp_call_function_single_interrupt(struct pt_regs *regs)
 {
-	call_function_entering_irq();
+	smp_entering_irq();
 	__smp_call_function_single_interrupt();
 	exiting_irq();
 }
 
 void smp_trace_call_function_single_interrupt(struct pt_regs *regs)
 {
-	call_function_entering_irq();
+	smp_entering_irq();
 	trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
 	__smp_call_function_single_interrupt();
 	trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR);
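For reference, exiting_irq() is not part of this diff; as the commit
message notes, it is defined in arch/x86/include/asm/apic.h and, at the
time of this commit, reduces to a plain irq_exit() call, roughly:

static inline void exiting_irq(void)
{
	irq_exit();
}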