author	Steven Rostedt (VMware) <rostedt@goodmis.org>	2017-03-29 22:45:18 -0400
committer	Steven Rostedt (VMware) <rostedt@goodmis.org>	2017-03-31 18:00:45 -0400
commit	5bd84629a7a0e2462c28ca52e213ebe27fadfee8 (patch)
tree	9fef370dae724a4822358278d0591ba5da1c3a4f /kernel/trace
parent	43ff926a0c3a0cfd6aa313c3232420f009ab43e8 (diff)
download	linux-5bd84629a7a0e2462c28ca52e213ebe27fadfee8.tar.bz2
ftrace: Create separate t_func_next() to simplify the function / hash logic
I noticed that if I use dd to read the set_ftrace_filter file, the first
hash command is repeated.

 # cd /sys/kernel/debug/tracing
 # echo schedule > set_ftrace_filter
 # echo do_IRQ >> set_ftrace_filter
 # echo schedule:traceoff >> set_ftrace_filter
 # echo do_IRQ:traceoff >> set_ftrace_filter

 # cat set_ftrace_filter
 schedule
 do_IRQ
 schedule:traceoff:unlimited
 do_IRQ:traceoff:unlimited

 # dd if=set_ftrace_filter bs=1
 schedule
 do_IRQ
 schedule:traceoff:unlimited
 schedule:traceoff:unlimited
 do_IRQ:traceoff:unlimited
 98+0 records in
 98+0 records out
 98 bytes copied, 0.00265011 s, 37.0 kB/s

This is due to the way t_start() calls t_next(), as well as the way the
seq_file code calls t_next(); the state is slightly different between the
two. Namely, t_start() will call t_next() with a local "pos" variable.

By separating the function listing out of t_next() into its own function,
we can have better control of outputting the functions and the hash of
triggers. This simplifies the code.

Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
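The underlying issue is the seq_file start/next contract. The sketch below is a
minimal, self-contained illustration of that contract, not code from ftrace.c;
the my_* names are made up for illustration. ->start() must return the record at
*pos, and ->next() must advance *pos exactly once per record. When a reader such
as "dd bs=1" drains the file a byte at a time, the seq_file core repeatedly calls
->stop() and later ->start() again with the saved position, so any drift between
how ->start() re-walks to *pos and how ->next() counts entries shows up as
repeated (or skipped) records.

#include <linux/kernel.h>
#include <linux/seq_file.h>

static const char *const my_items[] = { "alpha", "beta", "gamma" };

static void *my_seq_start(struct seq_file *m, loff_t *pos)
{
	/* Re-walk from the beginning up to *pos, as t_start() does. */
	if (*pos >= (loff_t)ARRAY_SIZE(my_items))
		return NULL;
	return (void *)&my_items[*pos];
}

static void *my_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	/* Advance the position exactly once per record returned. */
	(*pos)++;
	if (*pos >= (loff_t)ARRAY_SIZE(my_items))
		return NULL;
	return (void *)&my_items[*pos];
}

static void my_seq_stop(struct seq_file *m, void *v)
{
}

static int my_seq_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", *(const char *const *)v);
	return 0;
}

static const struct seq_operations my_seq_ops = {
	.start = my_seq_start,
	.next  = my_seq_next,
	.stop  = my_seq_stop,
	.show  = my_seq_show,
};

In ftrace's case the drift came from t_start() stepping through entries via
t_next() with a local position while t_next() mixed function entries and hash
(trigger) entries; splitting the plain function walk into t_func_next() lets
each caller account for the position consistently, which is what the patch
below does.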
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/ftrace.c	45
1 file changed, 31 insertions, 14 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d4b18ce9ba88..aff7a2c08387 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3154,22 +3154,12 @@ t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
 }
 
 static void *
-t_next(struct seq_file *m, void *v, loff_t *pos)
+t_func_next(struct seq_file *m, loff_t *pos)
 {
 	struct ftrace_iterator *iter = m->private;
 	struct dyn_ftrace *rec = NULL;
 
-	if (unlikely(ftrace_disabled))
-		return NULL;
-
-	if (iter->flags & FTRACE_ITER_HASH)
-		return t_hash_next(m, pos);
-
 	(*pos)++;
-	iter->pos = iter->func_pos = *pos;
-
-	if (iter->flags & FTRACE_ITER_PRINTALL)
-		return t_hash_start(m, pos);
 
  retry:
 	if (iter->idx >= iter->pg->index) {
@@ -3192,13 +3182,40 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 	}
 
 	if (!rec)
-		return t_hash_start(m, pos);
+		return NULL;
 
+	iter->pos = iter->func_pos = *pos;
 	iter->func = rec;
 
 	return iter;
 }
 
+static void *
+t_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct ftrace_iterator *iter = m->private;
+	void *ret;
+
+	if (unlikely(ftrace_disabled))
+		return NULL;
+
+	if (iter->flags & FTRACE_ITER_HASH)
+		return t_hash_next(m, pos);
+
+	if (iter->flags & FTRACE_ITER_PRINTALL) {
+		/* next must increment pos, and t_hash_start does not */
+		(*pos)++;
+		return t_hash_start(m, pos);
+	}
+
+	ret = t_func_next(m, pos);
+
+	if (!ret)
+		return t_hash_start(m, pos);
+
+	return ret;
+}
+
 static void reset_iter_read(struct ftrace_iterator *iter)
 {
 	iter->pos = 0;
@@ -3250,13 +3267,13 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 	iter->pg = ftrace_pages_start;
 	iter->idx = 0;
 	for (l = 0; l <= *pos; ) {
-		p = t_next(m, p, &l);
+		p = t_func_next(m, &l);
 		if (!p)
 			break;
 	}
 
 	if (!p)
-		return NULL;
+		return t_hash_start(m, pos);
 
 	return iter;
 }