author    Steven Rostedt <srostedt@redhat.com>    2008-12-03 11:04:51 -0500
committer Ingo Molnar <mingo@elte.hu>             2008-12-03 17:15:03 +0100
commit    e8e1abe92fd7ea9d823a3aaf81d10e2cba593b6b (patch)
tree      9ddeaf43558b2ad13c6b4758b17b84781f0217a7 /kernel
parent    0a37119d963e876ca86912497346ec50dea2541b (diff)
download  linux-e8e1abe92fd7ea9d823a3aaf81d10e2cba593b6b.tar.bz2
ftrace: fix race in function graph during fork
Impact: graph tracer race/crash fix

There is a nasty race in startup of a new process running the
function graph tracer. In fork.c:

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	ftrace_graph_init_task(p);
	proc_fork_connector(p);
	cgroup_post_fork(p);
	return p;

The new task is free to run as soon as the tasklist_lock is released.
This is before the ftrace_graph_init_task. If the task does run,
it will be using the same ret_stack and curr_ret_stack as the parent.
This will cause crashes that are difficult to debug.

This patch moves the ftrace_graph_init_task to just after the alloc_pid
code. This fixes the above race.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
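Editor's note: to make the ordering hazard concrete, here is a minimal user-space
analogy in plain C with pthreads. This is not kernel code; struct task,
init_task_state() and worker() are invented names for illustration. Publishing
the child before its per-task state exists is the buggy ordering; the patch's
fix corresponds to calling init_task_state() before pthread_create().

	/*
	 * User-space sketch of a publish-before-init race (analogy only).
	 * Build with: cc -pthread race_sketch.c
	 */
	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct task {
		int *ret_stack;      /* stands in for p->ret_stack      */
		int  curr_ret_stack; /* stands in for p->curr_ret_stack */
	};

	static void *worker(void *arg)
	{
		struct task *t = arg;

		/* With init-after-publish, this could observe NULL/garbage. */
		if (t->ret_stack == NULL)
			fprintf(stderr, "raced: ran before init_task_state()\n");
		else
			printf("ok: ret_stack ready, depth=%d\n", t->curr_ret_stack);
		return NULL;
	}

	static void init_task_state(struct task *t)
	{
		t->ret_stack = calloc(64, sizeof(*t->ret_stack));
		t->curr_ret_stack = -1;
	}

	int main(void)
	{
		struct task t = { 0 };
		pthread_t tid;

		/*
		 * Buggy ordering (what fork.c effectively did):
		 *     pthread_create(&tid, NULL, worker, &t);  -- publish
		 *     init_task_state(&t);                      -- init too late
		 * Fixed ordering (what the patch does): init first, then publish.
		 */
		init_task_state(&t);
		pthread_create(&tid, NULL, worker, &t);

		pthread_join(tid, NULL);
		free(t.ret_stack);
		return 0;
	}

In the kernel, the "publish" step is write_unlock_irq(&tasklist_lock): once it
runs, the scheduler may execute the child on another CPU, so any per-task
initialization placed after it comes too late.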
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/fork.c  9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 5f82a999c032..7407ab319875 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1137,6 +1137,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		}
 	}
 
+	ftrace_graph_init_task(p);
+
 	p->pid = pid_nr(pid);
 	p->tgid = p->pid;
 	if (clone_flags & CLONE_THREAD)
@@ -1145,7 +1147,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if (current->nsproxy != p->nsproxy) {
 		retval = ns_cgroup_clone(p, pid);
 		if (retval)
-			goto bad_fork_free_pid;
+			goto bad_fork_free_graph;
 	}
 
 	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
@@ -1238,7 +1240,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		spin_unlock(&current->sighand->siglock);
 		write_unlock_irq(&tasklist_lock);
 		retval = -ERESTARTNOINTR;
-		goto bad_fork_free_pid;
+		goto bad_fork_free_graph;
 	}
 
 	if (clone_flags & CLONE_THREAD) {
@@ -1271,11 +1273,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	total_forks++;
 	spin_unlock(&current->sighand->siglock);
 	write_unlock_irq(&tasklist_lock);
-	ftrace_graph_init_task(p);
 	proc_fork_connector(p);
 	cgroup_post_fork(p);
 	return p;
 
+bad_fork_free_graph:
+	ftrace_graph_exit_task(p);
 bad_fork_free_pid:
 	if (pid != &init_struct_pid)
 		free_pid(pid);
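Editor's note on the new label: copy_process() unwinds failures through a chain
of goto labels that fall through from the newest resource to the oldest, so
bad_fork_free_graph tears down the graph state and then falls into
bad_fork_free_pid. A stripped-down sketch of that convention follows; the names
acquire(), release() and do_fork_like_setup() are hypothetical stand-ins, not
the real kernel helpers.

	/* Sketch of fall-through error unwinding, newest state released first. */
	#include <stdlib.h>

	static void *acquire(void)   { return malloc(1); }
	static void release(void *p) { free(p); }

	static int do_fork_like_setup(void)
	{
		void *pid_res, *graph_res, *later_res;

		pid_res = acquire();            /* ~ alloc_pid()               */
		if (!pid_res)
			return -1;

		graph_res = acquire();          /* ~ ftrace_graph_init_task()  */
		if (!graph_res)
			goto bad_free_pid;

		later_res = acquire();          /* anything set up afterwards  */
		if (!later_res)
			goto bad_free_graph;    /* undo newest state first     */

		release(later_res);
		release(graph_res);
		release(pid_res);
		return 0;

	bad_free_graph:
		release(graph_res);             /* ~ ftrace_graph_exit_task()  */
	bad_free_pid:
		release(pid_res);               /* ~ free_pid(), via fallthrough */
		return -1;
	}

	int main(void)
	{
		return do_fork_like_setup() ? 1 : 0;
	}

Placing ftrace_graph_init_task() just after the pid allocation keeps this
ordering intact: every failure point that follows it now jumps to
bad_fork_free_graph instead of bad_fork_free_pid.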