author     Eric W. Biederman <ebiederm@xmission.com>   2006-04-18 22:20:16 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>       2006-04-19 09:13:49 -0700
commit     5e85d4abe3f43bb5362f384bab0e20ef082ce0b5 (patch)
tree       cd3a29086e5274fd08bc8d22d15568deab144755
parent     181ae4005d0a4010802be534d929b38c42b9ac06 (diff)
download   linux-5e85d4abe3f43bb5362f384bab0e20ef082ce0b5.tar.bz2
[PATCH] task: Make task list manipulations RCU safe
While we can currently walk through thread groups, process groups, and sessions with just the rcu_read_lock, this opens the door to walking the entire task list. We already have all of the other RCU guarantees, so there is no cost in doing this; it should be enough for proc to stop taking the tasklist lock during readdir.

prev_task was killed because it has no users, and using it would miss new tasks when doing an rcu traversal.

Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
 fs/exec.c             | 2 +-
 include/linux/sched.h | 3 +--
 kernel/exit.c         | 2 +-
 kernel/fork.c         | 2 +-
 4 files changed, 4 insertions(+), 5 deletions(-)
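
As the commit message above notes, the point of using rcu_dereference() in next_task() and the list_*_rcu() helpers is to let readers walk the whole task list under rcu_read_lock() alone, without the tasklist lock. A minimal sketch of such a reader, assuming a hypothetical count_live_tasks() helper that is not part of this patch:

#include <linux/sched.h>
#include <linux/rcupdate.h>

/*
 * Illustrative only: a lockless walk of the full task list, relying on
 * the rcu_dereference() in next_task() and the list_*_rcu() updates
 * made by this patch.
 */
static int count_live_tasks(void)
{
	struct task_struct *p;
	int count = 0;

	rcu_read_lock();		/* pins the current RCU grace period */
	for_each_process(p)		/* safe without tasklist_lock now */
		count++;
	rcu_read_unlock();

	return count;
}
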
diff --git a/fs/exec.c b/fs/exec.c
index 4121bb559739..3a79d97ac234 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -712,7 +712,7 @@ static int de_thread(struct task_struct *tsk)
attach_pid(current, PIDTYPE_PID, current->pid);
attach_pid(current, PIDTYPE_PGID, current->signal->pgrp);
attach_pid(current, PIDTYPE_SID, current->signal->session);
- list_add_tail(&current->tasks, &init_task.tasks);
+ list_add_tail_rcu(&current->tasks, &init_task.tasks);
current->group_leader = current;
leader->group_leader = current;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b7d31e2e1729..29b7d4f87d20 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1192,8 +1192,7 @@ extern void wait_task_inactive(task_t * p);
#define remove_parent(p) list_del_init(&(p)->sibling)
#define add_parent(p) list_add_tail(&(p)->sibling,&(p)->parent->children)
-#define next_task(p) list_entry((p)->tasks.next, struct task_struct, tasks)
-#define prev_task(p) list_entry((p)->tasks.prev, struct task_struct, tasks)
+#define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
#define for_each_process(p) \
for (p = &init_task ; (p = next_task(p)) != &init_task ; )
diff --git a/kernel/exit.c b/kernel/exit.c
index 1a9787ac6173..f86434d7b3d1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -56,7 +56,7 @@ static void __unhash_process(struct task_struct *p)
detach_pid(p, PIDTYPE_PGID);
detach_pid(p, PIDTYPE_SID);
- list_del_init(&p->tasks);
+ list_del_rcu(&p->tasks);
__get_cpu_var(process_counts)--;
}
list_del_rcu(&p->thread_group);
diff --git a/kernel/fork.c b/kernel/fork.c
index 54b15f8cda53..34515772611e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1204,7 +1204,7 @@ static task_t *copy_process(unsigned long clone_flags,
attach_pid(p, PIDTYPE_PGID, process_group(p));
attach_pid(p, PIDTYPE_SID, p->signal->session);
- list_add_tail(&p->tasks, &init_task.tasks);
+ list_add_tail_rcu(&p->tasks, &init_task.tasks);
__get_cpu_var(process_counts)++;
}
attach_pid(p, PIDTYPE_PID, p->pid);
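
The writer side (fork.c adding a task, exit.c unhashing one) still runs under the usual tasklist_lock write lock; the _rcu list primitives only make those updates safe to race against lockless readers, and the unlinked object must not be reclaimed until a grace period has elapsed. A generic sketch of that update pattern, not taken from this patch (struct item, item_lock, and the helpers are made up for illustration):

#include <linux/list.h>		/* list_*_rcu() live in <linux/rculist.h> on modern kernels */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head node;
	struct rcu_head rcu;
	int payload;
};

static LIST_HEAD(item_list);
static DEFINE_SPINLOCK(item_lock);	/* stands in for the tasklist write lock */

static void item_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct item, rcu));
}

static void item_add(struct item *it)
{
	spin_lock(&item_lock);
	list_add_tail_rcu(&it->node, &item_list);	/* publish to lockless readers */
	spin_unlock(&item_lock);
}

static void item_del(struct item *it)
{
	spin_lock(&item_lock);
	list_del_rcu(&it->node);		/* unlink; readers may still hold a reference */
	spin_unlock(&item_lock);
	call_rcu(&it->rcu, item_free_rcu);	/* defer the free past a grace period */
}
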