author | Steven Rostedt <srostedt@redhat.com> | 2008-11-15 15:48:29 -0500
committer | Ingo Molnar <mingo@elte.hu> | 2008-11-16 07:35:37 +0100
commit | d51ad7ac48f991c4a8834485727efa99a691cb87 (patch)
tree | 7491150df5bda0b3c04e5a7f1fafdb35f3aec5c4 /kernel
parent | 918c115410c6cc57033835b6a401e57697f9ea4f (diff)
download | linux-d51ad7ac48f991c4a8834485727efa99a691cb87.tar.bz2
ftrace: replace raw_local_irq_save with local_irq_save
Impact: fix lockdep disabling itself when function tracing is enabled
The raw_local_irq_save() calls used in ftrace are causing problems
with lockdep (it thinks the irq flags are out of sync and disables
itself with a warning).
The raw ops here are not needed, and the normal local_irq_save is fine.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
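
For context, a simplified sketch of why the non-raw variants keep lockdep in
sync. With CONFIG_TRACE_IRQFLAGS, local_irq_save()/local_irq_restore() wrap
the raw flag manipulation with the trace_hardirqs_off()/trace_hardirqs_on()
hooks that lockdep uses to track hardirq state, while raw_local_irq_save()
changes the hardware state behind lockdep's back, so lockdep later sees irq
flags that do not match its bookkeeping. This is a paraphrase of the kernel's
irqflags helpers of that era, not the verbatim macros:

	/* Simplified sketch, assuming CONFIG_TRACE_IRQFLAGS; not verbatim. */
	#define local_irq_save(flags)				\
		do {						\
			raw_local_irq_save(flags);		\
			trace_hardirqs_off();	/* tell lockdep */ \
		} while (0)

	#define local_irq_restore(flags)			\
		do {						\
			if (raw_irqs_disabled_flags(flags)) {	\
				raw_local_irq_restore(flags);	\
				trace_hardirqs_off();		\
			} else {				\
				trace_hardirqs_on();		\
				raw_local_irq_restore(flags);	\
			}					\
		} while (0)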
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/trace/trace.c | 4
-rw-r--r-- | kernel/trace/trace_selftest.c | 4
2 files changed, 4 insertions, 4 deletions
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 4a904623e05d..dff4bee591b9 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1051,7 +1051,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	 * Need to use raw, since this must be called before the
 	 * recursive protection is performed.
 	 */
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
@@ -1062,7 +1062,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	}

 	atomic_dec(&data->disabled);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 }

 #ifdef CONFIG_FUNCTION_RET_TRACER
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 24e6e075e6d6..5cb64ea061b5 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -52,7 +52,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 	int cpu, ret = 0;

 	/* Don't allow flipping of max traces now */
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	__raw_spin_lock(&ftrace_max_lock);

 	cnt = ring_buffer_entries(tr->buffer);
@@ -63,7 +63,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 			break;
 	}
 	__raw_spin_unlock(&ftrace_max_lock);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);

 	if (count)
 		*count = cnt;
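
As an aside, the context around the first hunk is the tracer's per-CPU
recursion guard: with interrupts off, it bumps data->disabled and only records
an event when it is the sole active caller, so a callback that re-enters the
tracer cannot recurse. Below is a minimal userspace sketch of that pattern
(not kernel code; irq disabling is not modeled, C11 atomics stand in for the
kernel's atomic_t, and the names cpu_data, record_event and
function_trace_call_sketch are hypothetical):

	#include <stdatomic.h>
	#include <stdio.h>

	struct cpu_data {
		atomic_int disabled;	/* > 0: tracing is off for this CPU */
	};

	static struct cpu_data cpu_data;	/* a single CPU, for simplicity */

	static void record_event(unsigned long ip)
	{
		printf("traced ip=%#lx\n", ip);
	}

	static void function_trace_call_sketch(unsigned long ip)
	{
		/* like atomic_inc_return(): value after the increment */
		int disabled = atomic_fetch_add(&cpu_data.disabled, 1) + 1;

		if (disabled == 1)	/* first one in: safe to trace */
			record_event(ip);

		atomic_fetch_sub(&cpu_data.disabled, 1);
	}

	int main(void)
	{
		function_trace_call_sketch(0x1234);
		return 0;
	}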