author		Frederic Weisbecker <fweisbec@gmail.com>	2008-11-25 21:07:04 +0100
committer	Ingo Molnar <mingo@elte.hu>	2008-11-26 01:59:45 +0100
commit		fb52607afcd0629776f1dc9e657647ceae81dd50 (patch)
tree		7bf43b41ff8510d3098c089913cce56a9049f0fd /kernel/trace
parent		509dceef6470442d8c7b8a43ec34125205840b3c (diff)
tracing/function-return-tracer: change the name into function-graph-tracer
Impact: cleanup

This patch changes the name of the "return function tracer" into
function-graph-tracer, which is a more suitable name for a tracer
that lets one retrieve the ordered call stack during the code flow.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig                 | 19
-rw-r--r--  kernel/trace/Makefile                |  2
-rw-r--r--  kernel/trace/ftrace.c                | 26
-rw-r--r--  kernel/trace/trace.c                 | 18
-rw-r--r--  kernel/trace/trace.h                 | 12
-rw-r--r--  kernel/trace/trace_functions_graph.c | 98
6 files changed, 138 insertions(+), 37 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 620feadff67a..eb9b901e0777 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -12,7 +12,7 @@ config NOP_TRACER
config HAVE_FUNCTION_TRACER
bool
-config HAVE_FUNCTION_RET_TRACER
+config HAVE_FUNCTION_GRAPH_TRACER
bool
config HAVE_FUNCTION_TRACE_MCOUNT_TEST
@@ -63,15 +63,18 @@ config FUNCTION_TRACER
(the bootup default), then the overhead of the instructions is very
small and not measurable even in micro-benchmarks.
-config FUNCTION_RET_TRACER
- bool "Kernel Function return Tracer"
- depends on HAVE_FUNCTION_RET_TRACER
+config FUNCTION_GRAPH_TRACER
+ bool "Kernel Function Graph Tracer"
+ depends on HAVE_FUNCTION_GRAPH_TRACER
depends on FUNCTION_TRACER
help
- Enable the kernel to trace a function at its return.
- It's first purpose is to trace the duration of functions.
- This is done by setting the current return address on the thread
- info structure of the current task.
+ Enable the kernel to trace a function at both its entry
+ and its return.
+ Its first purpose is to trace the duration of functions and
+ draw a call graph for each thread, with some information such
+ as the return value.
+ This is done by saving the current return address into a stack
+ of calls kept on the current task structure.
config IRQSOFF_TRACER
bool "Interrupts-off Latency Tracer"
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index cef4bcb4e822..08c5fe6ddc09 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_NOP_TRACER) += trace_nop.o
obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
-obj-$(CONFIG_FUNCTION_RET_TRACER) += trace_functions_return.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
obj-$(CONFIG_BTS_TRACER) += trace_bts.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 53042f118f23..9e19976af727 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -395,11 +395,11 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
unsigned long ip, fl;
unsigned long ftrace_addr;
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (ftrace_tracing_type == FTRACE_TYPE_ENTER)
ftrace_addr = (unsigned long)ftrace_caller;
else
- ftrace_addr = (unsigned long)ftrace_return_caller;
+ ftrace_addr = (unsigned long)ftrace_graph_caller;
#else
ftrace_addr = (unsigned long)ftrace_caller;
#endif
@@ -1496,13 +1496,13 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
return ret;
}
-#ifdef CONFIG_FUNCTION_RET_TRACER
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static atomic_t ftrace_retfunc_active;
/* The callback that hooks the return of a function */
-trace_function_return_t ftrace_function_return =
- (trace_function_return_t)ftrace_stub;
+trace_function_graph_t ftrace_graph_function =
+ (trace_function_graph_t)ftrace_stub;
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
@@ -1549,7 +1549,7 @@ free:
}
/* Allocate a return stack for each task */
-static int start_return_tracing(void)
+static int start_graph_tracing(void)
{
struct ftrace_ret_stack **ret_stack_list;
int ret;
@@ -1569,7 +1569,7 @@ static int start_return_tracing(void)
return ret;
}
-int register_ftrace_return(trace_function_return_t func)
+int register_ftrace_graph(trace_function_graph_t func)
{
int ret = 0;
@@ -1584,13 +1584,13 @@ int register_ftrace_return(trace_function_return_t func)
goto out;
}
atomic_inc(&ftrace_retfunc_active);
- ret = start_return_tracing();
+ ret = start_graph_tracing();
if (ret) {
atomic_dec(&ftrace_retfunc_active);
goto out;
}
ftrace_tracing_type = FTRACE_TYPE_RETURN;
- ftrace_function_return = func;
+ ftrace_graph_function = func;
ftrace_startup();
out:
@@ -1598,12 +1598,12 @@ out:
return ret;
}
-void unregister_ftrace_return(void)
+void unregister_ftrace_graph(void)
{
mutex_lock(&ftrace_sysctl_lock);
atomic_dec(&ftrace_retfunc_active);
- ftrace_function_return = (trace_function_return_t)ftrace_stub;
+ ftrace_graph_function = (trace_function_graph_t)ftrace_stub;
ftrace_shutdown();
/* Restore normal tracing type */
ftrace_tracing_type = FTRACE_TYPE_ENTER;
@@ -1612,7 +1612,7 @@ void unregister_ftrace_return(void)
}
/* Allocate a return stack for newly created task */
-void ftrace_retfunc_init_task(struct task_struct *t)
+void ftrace_graph_init_task(struct task_struct *t)
{
if (atomic_read(&ftrace_retfunc_active)) {
t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
@@ -1626,7 +1626,7 @@ void ftrace_retfunc_init_task(struct task_struct *t)
t->ret_stack = NULL;
}
-void ftrace_retfunc_exit_task(struct task_struct *t)
+void ftrace_graph_exit_task(struct task_struct *t)
{
struct ftrace_ret_stack *ret_stack = t->ret_stack;
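With the rename, a client hooks function returns by passing a callback of the new trace_function_graph_t type to register_ftrace_graph(), exactly as graph_trace_init() does in the new file below. Here is a hedged usage sketch; my_graph_return and its duration computation are illustrative, and it assumes struct ftrace_graph_ret carries the calltime/rettime fields that print_graph_function() reads below.

#include <linux/ftrace.h>

/* Hypothetical client of the renamed API -- a sketch, not kernel code. */
static void my_graph_return(struct ftrace_graph_ret *trace)
{
	/* e.g. compute how long the returning function ran */
	unsigned long long duration = trace->rettime - trace->calltime;
	(void)duration;	/* a real tracer would record this somewhere */
}

static int my_tracer_start(void)
{
	/* allocates the per-task return stacks, then enables the hook */
	return register_ftrace_graph(my_graph_return);
}

static void my_tracer_stop(void)
{
	unregister_ftrace_graph();
}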
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8df8fdd69c95..f21ab2c68fd4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -878,15 +878,15 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data,
ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
}
-#ifdef CONFIG_FUNCTION_RET_TRACER
-static void __trace_function_return(struct trace_array *tr,
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static void __trace_function_graph(struct trace_array *tr,
struct trace_array_cpu *data,
- struct ftrace_retfunc *trace,
+ struct ftrace_graph_ret *trace,
unsigned long flags,
int pc)
{
struct ring_buffer_event *event;
- struct ftrace_ret_entry *entry;
+ struct ftrace_graph_entry *entry;
unsigned long irq_flags;
if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
@@ -1177,8 +1177,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
local_irq_restore(flags);
}
-#ifdef CONFIG_FUNCTION_RET_TRACER
-void trace_function_return(struct ftrace_retfunc *trace)
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void trace_function_graph(struct ftrace_graph_ret *trace)
{
struct trace_array *tr = &global_trace;
struct trace_array_cpu *data;
@@ -1193,12 +1193,12 @@ void trace_function_return(struct ftrace_retfunc *trace)
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
pc = preempt_count();
- __trace_function_return(tr, data, trace, flags, pc);
+ __trace_function_graph(tr, data, trace, flags, pc);
}
atomic_dec(&data->disabled);
raw_local_irq_restore(flags);
}
-#endif /* CONFIG_FUNCTION_RET_TRACER */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static struct ftrace_ops trace_ops __read_mostly =
{
@@ -2001,7 +2001,7 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
break;
}
case TRACE_FN_RET: {
- return print_return_function(iter);
+ return print_graph_function(iter);
break;
}
case TRACE_BRANCH: {
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 3abd645e8af2..72b5ef868765 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -57,7 +57,7 @@ struct ftrace_entry {
};
/* Function return entry */
-struct ftrace_ret_entry {
+struct ftrace_graph_entry {
struct trace_entry ent;
unsigned long ip;
unsigned long parent_ip;
@@ -264,7 +264,7 @@ extern void __ftrace_bad_type(void);
IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \
- IF_ASSIGN(var, ent, struct ftrace_ret_entry, TRACE_FN_RET);\
+ IF_ASSIGN(var, ent, struct ftrace_graph_entry, TRACE_FN_RET);\
IF_ASSIGN(var, ent, struct bts_entry, TRACE_BTS);\
__ftrace_bad_type(); \
} while (0)
@@ -398,7 +398,7 @@ void trace_function(struct trace_array *tr,
unsigned long parent_ip,
unsigned long flags, int pc);
void
-trace_function_return(struct ftrace_retfunc *trace);
+trace_function_graph(struct ftrace_graph_ret *trace);
void trace_bts(struct trace_array *tr,
unsigned long from,
@@ -489,11 +489,11 @@ extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern unsigned long trace_flags;
/* Standard output formatting function used for function return traces */
-#ifdef CONFIG_FUNCTION_RET_TRACER
-extern enum print_line_t print_return_function(struct trace_iterator *iter);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+extern enum print_line_t print_graph_function(struct trace_iterator *iter);
#else
static inline enum print_line_t
-print_return_function(struct trace_iterator *iter)
+print_graph_function(struct trace_iterator *iter)
{
return TRACE_TYPE_UNHANDLED;
}
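Note the stub pattern above: when CONFIG_FUNCTION_GRAPH_TRACER is off, print_graph_function() collapses to an inline returning TRACE_TYPE_UNHANDLED, so print_trace_fmt() in trace.c can call it without any #ifdef at the call site. A generic sketch of the same idiom follows; CONFIG_MY_FEATURE and my_feature_print() are hypothetical names, and the types are those declared in kernel/trace/trace.h.

#ifdef CONFIG_MY_FEATURE
extern enum print_line_t my_feature_print(struct trace_iterator *iter);
#else
static inline enum print_line_t
my_feature_print(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;	/* callers need no #ifdef */
}
#endif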
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
new file mode 100644
index 000000000000..f5bad4624d2b
--- /dev/null
+++ b/kernel/trace/trace_functions_graph.c
@@ -0,0 +1,98 @@
+/*
+ *
+ * Function graph tracer.
+ * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
+ * Mostly borrowed from function tracer which
+ * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/ftrace.h>
+#include <linux/fs.h>
+
+#include "trace.h"
+
+
+#define TRACE_GRAPH_PRINT_OVERRUN 0x1
+static struct tracer_opt trace_opts[] = {
+ /* Display overruns or not */
+ { TRACER_OPT(overrun, TRACE_GRAPH_PRINT_OVERRUN) },
+ { } /* Empty entry */
+};
+
+static struct tracer_flags tracer_flags = {
+ .val = 0, /* Don't display overruns by default */
+ .opts = trace_opts
+};
+
+
+static int graph_trace_init(struct trace_array *tr)
+{
+ int cpu;
+ for_each_online_cpu(cpu)
+ tracing_reset(tr, cpu);
+
+ return register_ftrace_graph(&trace_function_graph);
+}
+
+static void graph_trace_reset(struct trace_array *tr)
+{
+ unregister_ftrace_graph();
+}
+
+
+enum print_line_t
+print_graph_function(struct trace_iterator *iter)
+{
+ struct trace_seq *s = &iter->seq;
+ struct trace_entry *entry = iter->ent;
+ struct ftrace_graph_entry *field;
+ int ret;
+
+ if (entry->type == TRACE_FN_RET) {
+ trace_assign_type(field, entry);
+ ret = trace_seq_printf(s, "%pF -> ", (void *)field->parent_ip);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ ret = seq_print_ip_sym(s, field->ip,
+ trace_flags & TRACE_ITER_SYM_MASK);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ ret = trace_seq_printf(s, " (%llu ns)",
+ field->rettime - field->calltime);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
+ ret = trace_seq_printf(s, " (Overruns: %lu)",
+ field->overrun);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
+
+ ret = trace_seq_printf(s, "\n");
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ return TRACE_TYPE_HANDLED;
+ }
+ return TRACE_TYPE_UNHANDLED;
+}
+
+static struct tracer graph_trace __read_mostly = {
+ .name = "function-graph",
+ .init = graph_trace_init,
+ .reset = graph_trace_reset,
+ .print_line = print_graph_function,
+ .flags = &tracer_flags,
+};
+
+static __init int init_graph_trace(void)
+{
+ return register_tracer(&graph_trace);
+}
+
+device_initcall(init_graph_trace);
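Given the format strings in print_graph_function() above, a single traced return would render roughly as follows. This is an illustrative line, not captured output, and the symbol names are made up:

	sys_open+0x2e/0x40 -> do_sys_open (1523 ns) (Overruns: 0)

The "(Overruns: ...)" suffix appears only when the tracer's "overrun" option (TRACE_GRAPH_PRINT_OVERRUN) is set, which is off by default.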