author     Steven Rostedt (VMware) <rostedt@goodmis.org>  2017-02-02 20:16:29 -0500
committer  Steven Rostedt (VMware) <rostedt@goodmis.org>  2017-02-03 10:59:42 -0500
commit     649b988b12ddb9aed16047a3d9bb4d7bfdb47221 (patch)
tree       00dd85ce86d799d9deeb26987c77fb6542f0e38a /kernel/trace
parent     0e684b6578ee463ecb5c9a1cd0c20069f063d9f0 (diff)
download   linux-649b988b12ddb9aed16047a3d9bb4d7bfdb47221.tar.bz2
ftrace: Do not hold references of ftrace_graph_{notrace_}hash out of graph_lock
The hashes ftrace_graph_hash and ftrace_graph_notrace_hash are modified while the graph_lock is held. Holding a pointer to them and passing it along can lead to a use of a stale pointer (fgd->hash). Move the assignment of the pointer, and its use, to within the holding of the lock.

Note, this is rcu_sched protected data, and other instances of referencing it are done with preemption disabled. But the file manipulation code must be protected by the lock.

The fgd->hash pointer is set to NULL when the lock is being released.

Acked-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
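The fix applies a standard RCU updater pattern: while the lock serializing updates is held, the shared pointer is fetched with rcu_dereference_protected() (with lockdep_is_held() both documenting and checking that requirement), a replacement is published with rcu_assign_pointer(), and no cached copy of the pointer is kept once the lock is dropped. The sketch below shows that pattern in isolation; it is not the ftrace code, and my_lock, my_data, struct my_struct and updater() are hypothetical names.

/*
 * Minimal sketch of the updater-side pattern used by this patch.
 * Hypothetical names throughout; only the RCU/locking APIs are real.
 */
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_struct {
        int val;
};

static DEFINE_MUTEX(my_lock);
static struct my_struct __rcu *my_data;

static void updater(struct my_struct *new_data)
{
        struct my_struct *old;

        mutex_lock(&my_lock);
        /*
         * Safe only while my_lock is held: lockdep_is_held() lets
         * lockdep verify the protection that makes this legal.
         */
        old = rcu_dereference_protected(my_data,
                                        lockdep_is_held(&my_lock));
        rcu_assign_pointer(my_data, new_data);
        mutex_unlock(&my_lock);
        /*
         * 'old' must not be treated as current after the unlock; wait
         * for readers before freeing it (in current kernels
         * synchronize_rcu() also covers preempt-disabled readers).
         */
        synchronize_rcu();
        kfree(old);
}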
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ftrace.c  28
1 file changed, 23 insertions(+), 5 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0233c8cb45f4..b3a4896ef78a 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4616,6 +4616,13 @@ static void *g_start(struct seq_file *m, loff_t *pos)
 	mutex_lock(&graph_lock);
+	if (fgd->type == GRAPH_FILTER_FUNCTION)
+		fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
+					lockdep_is_held(&graph_lock));
+	else
+		fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
+					lockdep_is_held(&graph_lock));
+
 	/* Nothing, tell g_show to print all functions are enabled */
 	if (ftrace_hash_empty(fgd->hash) && !*pos)
 		return FTRACE_GRAPH_EMPTY;
@@ -4695,6 +4702,14 @@ __ftrace_graph_open(struct inode *inode, struct file *file,
 out:
 	fgd->new_hash = new_hash;
+
+	/*
+	 * All uses of fgd->hash must be taken with the graph_lock
+	 * held. The graph_lock is going to be released, so force
+	 * fgd->hash to be reinitialized when it is taken again.
+	 */
+	fgd->hash = NULL;
+
 	return ret;
 }
@@ -4713,7 +4728,8 @@ ftrace_graph_open(struct inode *inode, struct file *file)
 	mutex_lock(&graph_lock);
-	fgd->hash = ftrace_graph_hash;
+	fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
+					lockdep_is_held(&graph_lock));
 	fgd->type = GRAPH_FILTER_FUNCTION;
 	fgd->seq_ops = &ftrace_graph_seq_ops;
@@ -4740,7 +4756,8 @@ ftrace_graph_notrace_open(struct inode *inode, struct file *file)
 	mutex_lock(&graph_lock);
-	fgd->hash = ftrace_graph_notrace_hash;
+	fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
+					lockdep_is_held(&graph_lock));
 	fgd->type = GRAPH_FILTER_NOTRACE;
 	fgd->seq_ops = &ftrace_graph_seq_ops;
@@ -4859,17 +4876,18 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
 		ret = ftrace_graph_set_hash(fgd->new_hash,
 					    parser.buffer);
-	old_hash = fgd->hash;
 	new_hash = __ftrace_hash_move(fgd->new_hash);
 	if (!new_hash)
 		ret = -ENOMEM;
 	if (fgd->type == GRAPH_FILTER_FUNCTION) {
+		old_hash = rcu_dereference_protected(ftrace_graph_hash,
+				lockdep_is_held(&graph_lock));
 		rcu_assign_pointer(ftrace_graph_hash, new_hash);
-		fgd->hash = ftrace_graph_hash;
 	} else {
+		old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
+				lockdep_is_held(&graph_lock));
 		rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
-		fgd->hash = ftrace_graph_notrace_hash;
 	}
 	mutex_unlock(&graph_lock);
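As the changelog notes, readers of these rcu_sched-protected hashes do not take graph_lock; they access the data with preemption disabled. Continuing the hypothetical my_data sketch above (and again only as an illustration, not the actual ftrace reader code), a lock-free reader could look like this:

/*
 * Illustration only; my_data and struct my_struct are the hypothetical
 * objects from the earlier sketch, not ftrace symbols.
 * Needs <linux/preempt.h> in addition to the includes above.
 */
static bool reader_matches(int key)
{
        struct my_struct *p;
        bool ret = false;

        preempt_disable();
        /*
         * rcu_dereference_sched() is legal here because disabling
         * preemption acts as an rcu_sched read-side critical section.
         */
        p = rcu_dereference_sched(my_data);
        if (p)
                ret = (p->val == key);
        preempt_enable();

        return ret;
}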