author    Thomas Gleixner <tglx@linutronix.de>  2009-07-25 17:13:33 +0200
committer Ingo Molnar <mingo@elte.hu>  2011-09-13 11:11:52 +0200
commit    5389f6fad27019f2ba78f1b332f719ec05f12a42 (patch)
tree      01b9511a75c147808f48a7f4408bf2e35b12623d /kernel/trace/trace_irqsoff.c
parent    740969f91e950b64a18fdd0a25164cdee042abf0 (diff)
locking, tracing: Annotate tracing locks as raw
The tracing locks can be taken in atomic context and therefore
cannot be preempted on -rt - annotate it.

In mainline this change documents the low level nature of the
lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
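A minimal sketch of the annotation pattern this commit applies (kernel C; example_lock, example_max and example_update_max are hypothetical names, not from this file). On PREEMPT_RT a spinlock_t is substituted by a sleeping rtmutex, while a raw_spinlock_t remains a true spinning lock, so a lock that must be taken in atomic context is declared with the raw API:

#include <linux/spinlock.h>

/*
 * Illustrative only: example_lock and example_max stand in for
 * real tracer state. A raw spinlock keeps spinning on PREEMPT_RT,
 * whereas spinlock_t is converted to a sleeping rtmutex there.
 */
static DEFINE_RAW_SPINLOCK(example_lock);
static unsigned long example_max;

static void example_update_max(unsigned long val)
{
	unsigned long flags;

	/* Safe in atomic context on -rt: the raw variants never sleep. */
	raw_spin_lock_irqsave(&example_lock, flags);
	if (val > example_max)
		example_max = val;
	raw_spin_unlock_irqrestore(&example_lock, flags);
}

The raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() calls pair exactly like their spin_* counterparts; only the lock's type annotation changes, which is why mainline sees no functional difference.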
Diffstat (limited to 'kernel/trace/trace_irqsoff.c')
-rw-r--r--  kernel/trace/trace_irqsoff.c | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 667aa8cc0cfc..11186212068c 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -23,7 +23,7 @@ static int tracer_enabled __read_mostly;
 
 static DEFINE_PER_CPU(int, tracing_cpu);
 
-static DEFINE_SPINLOCK(max_trace_lock);
+static DEFINE_RAW_SPINLOCK(max_trace_lock);
 
 enum {
 	TRACER_IRQS_OFF		= (1 << 1),
@@ -321,7 +321,7 @@ check_critical_timing(struct trace_array *tr,
 	if (!report_latency(delta))
 		goto out;
 
-	spin_lock_irqsave(&max_trace_lock, flags);
+	raw_spin_lock_irqsave(&max_trace_lock, flags);
 
 	/* check if we are still the max latency */
 	if (!report_latency(delta))
@@ -344,7 +344,7 @@ check_critical_timing(struct trace_array *tr,
 	max_sequence++;
 
 out_unlock:
-	spin_unlock_irqrestore(&max_trace_lock, flags);
+	raw_spin_unlock_irqrestore(&max_trace_lock, flags);
 
 out:
 	data->critical_sequence = max_sequence;
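The hunks above preserve the existing double-check: report_latency() is tested once without the lock and again under it, since another CPU may record a larger latency in between. A hedged sketch of that pattern (report_latency(), max_trace_lock and update_max_tr_single() are the real identifiers from the kernel; the wrapper function and the delta type are assumptions for illustration):

/*
 * Sketch of the double-checked update around max_trace_lock;
 * record_if_still_max() is a hypothetical wrapper, not a real
 * function in trace_irqsoff.c.
 */
static void record_if_still_max(struct trace_array *tr, u64 delta)
{
	unsigned long flags;

	if (!report_latency(delta))	/* cheap unlocked pre-check */
		return;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/*
	 * Another CPU may have recorded a larger latency meanwhile,
	 * so the test is repeated under the lock before recording.
	 */
	if (report_latency(delta))
		update_max_tr_single(tr, current, raw_smp_processor_id());

	raw_spin_unlock_irqrestore(&max_trace_lock, flags);
}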