author    Eric W. Biederman <ebiederm@xmission.com>  2012-12-21 20:27:12 -0800
committer Eric W. Biederman <ebiederm@xmission.com>  2012-12-25 16:10:05 -0800
commit    c876ad7682155958d0c9c27afe9017925c230d64 (patch)
tree      926064bd7909f60daed3b6b963555e57cab7b520 /kernel/pid.c
parent    8382fcac1b813ad0a4e68a838fc7ae93fa39eda0 (diff)
download  linux-c876ad7682155958d0c9c27afe9017925c230d64.tar.bz2
pidns: Stop pid allocation when init dies
Oleg pointed out that in a pid namespace the sequence:

 - pid 1 becomes a zombie
 - setns(thepidns), fork, ...
 - reaping pid 1
 - the injected processes exiting

can lead to processes attempting to access their child reaper and
instead following a stale pointer.

That waitpid for init can return before all of the processes in the
pid namespace have exited is also unfortunate.

Avoid these problems by disabling the allocation of new pids in a pid
namespace when init dies, instead of when the last process in a pid
namespace is reaped.

Pointed-out-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
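[Editor's note] As an illustration of the injection step, a minimal userspace
sketch (not part of the commit; the pid in the /proc path is a hypothetical
placeholder). Before this patch the fork() could succeed and the child could
later follow a stale ns->child_reaper; with the patch, alloc_pid() bails out
once pid 1 has died and the fork() fails:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <sched.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* A pid namespace whose init (pid 1) is already a zombie;
		 * 1234 is a hypothetical pid used only for illustration. */
		int fd = open("/proc/1234/ns/pid", O_RDONLY);
		if (fd < 0 || setns(fd, CLONE_NEWPID) < 0)
			return 1;

		/* With this patch, pid allocation in the namespace is
		 * disabled as soon as init dies, so this fork() fails
		 * (returning -1) instead of injecting a process that
		 * would outlive its child reaper. */
		if (fork() < 0)
			perror("fork");
		return 0;
	}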
Diffstat (limited to 'kernel/pid.c')
-rw-r--r--	kernel/pid.c | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/kernel/pid.c b/kernel/pid.c
index 36aa02ff17d6..de9af600006f 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -270,7 +270,6 @@ void free_pid(struct pid *pid)
 			wake_up_process(ns->child_reaper);
 			break;
 		case 0:
-			ns->nr_hashed = -1;
 			schedule_work(&ns->proc_work);
 			break;
 		}
@@ -319,7 +318,7 @@ struct pid *alloc_pid(struct pid_namespace *ns)
 	upid = pid->numbers + ns->level;
 	spin_lock_irq(&pidmap_lock);
-	if (ns->nr_hashed < 0)
+	if (!(ns->nr_hashed & PIDNS_HASH_ADDING))
 		goto out_unlock;
 	for ( ; upid >= pid->numbers; --upid) {
 		hlist_add_head_rcu(&upid->pid_chain,
 				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
@@ -342,6 +341,13 @@ out_free:
 	goto out;
 }
 
+void disable_pid_allocation(struct pid_namespace *ns)
+{
+	spin_lock_irq(&pidmap_lock);
+	ns->nr_hashed &= ~PIDNS_HASH_ADDING;
+	spin_unlock_irq(&pidmap_lock);
+}
+
 struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
 {
 	struct hlist_node *elem;
@@ -573,6 +579,9 @@ void __init pidhash_init(void)
 
 void __init pidmap_init(void)
 {
+	/* Verify no one has done anything silly */
+	BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING);
+
 	/* bump default and minimum pid_max based on number of cpus */
 	pid_max = min(pid_max_max, max_t(int, pid_max,
 				PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
@@ -584,7 +593,7 @@ void __init pidmap_init(void)
 	/* Reserve PID 0. We never call free_pidmap(0) */
 	set_bit(0, init_pid_ns.pidmap[0].page);
 	atomic_dec(&init_pid_ns.pidmap[0].nr_free);
-	init_pid_ns.nr_hashed = 1;
+	init_pid_ns.nr_hashed = PIDNS_HASH_ADDING;
 
 	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
 			SLAB_HWCACHE_ALIGN | SLAB_PANIC);
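[Editor's note] The caller of disable_pid_allocation() lives in
kernel/pid_namespace.c and so falls outside this kernel/pid.c-limited view.
A sketch of the intended use, abbreviated here: PIDNS_HASH_ADDING is defined
elsewhere in the commit as the high bit of nr_hashed, which is why the
BUILD_BUG_ON above insists PID_MAX_LIMIT stay below it -- the hashed-pid
count and the flag can then share the field without colliding.

	/* Abbreviated sketch of the call site in kernel/pid_namespace.c;
	 * the rest of the function body is elided. */
	void zap_pid_ns_processes(struct pid_namespace *pid_ns)
	{
		/* Init has died: refuse any further pid allocation in
		 * this namespace, so setns() + fork() can no longer
		 * inject processes that would outlive their child
		 * reaper. */
		disable_pid_allocation(pid_ns);

		/* ... signal and reap the remaining tasks ... */
	}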