| author | Eric W. Biederman <ebiederm@xmission.com> | 2011-11-16 23:20:58 -0800 |
| --- | --- | --- |
| committer | Eric W. Biederman <ebiederm@xmission.com> | 2012-04-07 17:11:46 -0700 |
| commit | 7b44ab978b77a91b327058a0f4db7e6fcdb90b92 (patch) | |
| tree | 632c872f0b88d001f1bddce2c0aacd77bf062454 /kernel/user.c | |
| parent | 5673a94c14574d7c6495c320c6b0e480673d54bd (diff) | |
| download | linux-7b44ab978b77a91b327058a0f4db7e6fcdb90b92.tar.bz2 | |
userns: Disassociate user_struct from the user_namespace.
Modify alloc_uid to take a kuid and make the user hash table global.
Stop holding a reference to the user namespace in struct user_struct.
This simplifies the code and makes the per user accounting not
care about which user namespace a uid happens to appear in.
Acked-by: Serge Hallyn <serge.hallyn@canonical.com>
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
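
For readers following the series: the kuid_t that alloc_uid() and find_user() now take comes from include/linux/uidgid.h and holds the kernel-internal, namespace-independent uid; callers compare it with uid_eq() and read the raw value with __kuid_val(), which is why the diff below replaces `user->uid == uid` with `uid_eq(user->uid, uid)`. The standalone userspace sketch below is only a simplified model of that wrapper (the names mirror the kernel helpers, the definitions are illustrative stand-ins, not kernel code), showing why an explicit comparison helper is used instead of `==`:

```c
/* Simplified userspace model of the kuid_t wrapper used by this patch.
 * The real definitions live in include/linux/uidgid.h; these are
 * illustrative stand-ins only. */
#include <stdbool.h>
#include <stdio.h>

typedef struct {
	unsigned int val;               /* kernel-internal (global) uid */
} kuid_t;                               /* model, not the kernel type */

#define KUIDT_INIT(v)   ((kuid_t){ .val = (v) })
#define GLOBAL_ROOT_UID KUIDT_INIT(0)   /* models the kernel constant */

static inline unsigned int __kuid_val(kuid_t uid)
{
	return uid.val;
}

/* A struct cannot be compared with '==', so an explicit helper is
 * needed -- the same reason the diff switches to uid_eq(). */
static inline bool uid_eq(kuid_t left, kuid_t right)
{
	return __kuid_val(left) == __kuid_val(right);
}

int main(void)
{
	kuid_t app = KUIDT_INIT(1000);

	printf("app is root? %s\n", uid_eq(app, GLOBAL_ROOT_UID) ? "yes" : "no");
	printf("app is app?  %s\n", uid_eq(app, app) ? "yes" : "no");
	return 0;
}
```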
Diffstat (limited to 'kernel/user.c')
-rw-r--r--  kernel/user.c | 28
1 files changed, 13 insertions, 15 deletions
```diff
diff --git a/kernel/user.c b/kernel/user.c
index d65fec0615a0..025077e54a7c 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -34,11 +34,14 @@ EXPORT_SYMBOL_GPL(init_user_ns);
  * when changing user ID's (ie setuid() and friends).
  */
 
+#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
+#define UIDHASH_SZ	(1 << UIDHASH_BITS)
 #define UIDHASH_MASK		(UIDHASH_SZ - 1)
 #define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
-#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
+#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))
 
 static struct kmem_cache *uid_cachep;
+struct hlist_head uidhash_table[UIDHASH_SZ];
 
 /*
  * The uidhash_lock is mostly taken from process context, but it is
@@ -58,7 +61,7 @@ struct user_struct root_user = {
 	.files		= ATOMIC_INIT(0),
 	.sigpending	= ATOMIC_INIT(0),
 	.locked_shm     = 0,
-	._user_ns	= &init_user_ns,
+	.uid		= GLOBAL_ROOT_UID,
 };
 
 /*
@@ -72,16 +75,15 @@ static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
 static void uid_hash_remove(struct user_struct *up)
 {
 	hlist_del_init(&up->uidhash_node);
-	put_user_ns(up->_user_ns); /* It is safe to free the uid hash table now */
 }
 
-static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
+static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
 {
 	struct user_struct *user;
 	struct hlist_node *h;
 
 	hlist_for_each_entry(user, h, hashent, uidhash_node) {
-		if (user->uid == uid) {
+		if (uid_eq(user->uid, uid)) {
 			atomic_inc(&user->__count);
 			return user;
 		}
@@ -110,14 +112,13 @@ static void free_user(struct user_struct *up, unsigned long flags)
  *
  * If the user_struct could not be found, return NULL.
  */
-struct user_struct *find_user(uid_t uid)
+struct user_struct *find_user(kuid_t uid)
 {
 	struct user_struct *ret;
 	unsigned long flags;
-	struct user_namespace *ns = current_user_ns();
 
 	spin_lock_irqsave(&uidhash_lock, flags);
-	ret = uid_hash_find(uid, uidhashentry(ns, uid));
+	ret = uid_hash_find(uid, uidhashentry(uid));
 	spin_unlock_irqrestore(&uidhash_lock, flags);
 	return ret;
 }
@@ -136,9 +137,9 @@ void free_uid(struct user_struct *up)
 	local_irq_restore(flags);
 }
 
-struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
+struct user_struct *alloc_uid(kuid_t uid)
 {
-	struct hlist_head *hashent = uidhashentry(ns, uid);
+	struct hlist_head *hashent = uidhashentry(uid);
 	struct user_struct *up, *new;
 
 	spin_lock_irq(&uidhash_lock);
@@ -153,8 +154,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 		new->uid = uid;
 		atomic_set(&new->__count, 1);
 
-		new->_user_ns = get_user_ns(ns);
-
 		/*
 		 * Before adding this, check whether we raced
 		 * on adding the same user already..
@@ -162,7 +161,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 	spin_lock_irq(&uidhash_lock);
 	up = uid_hash_find(uid, hashent);
 	if (up) {
-		put_user_ns(ns);
 		key_put(new->uid_keyring);
 		key_put(new->session_keyring);
 		kmem_cache_free(uid_cachep, new);
@@ -187,11 +185,11 @@ static int __init uid_cache_init(void)
 			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
 	for(n = 0; n < UIDHASH_SZ; ++n)
-		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);
+		INIT_HLIST_HEAD(uidhash_table + n);
 
 	/* Insert the root user immediately (init already runs as root) */
 	spin_lock_irq(&uidhash_lock);
-	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
+	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
 	spin_unlock_irq(&uidhash_lock);
 	return 0;
 }
```
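
One way to see why the user_namespace parameter could drop out of uidhashentry(), find_user() and alloc_uid(): after this patch the bucket is computed purely from the global kuid value, so the same kuid always lands in the same slot of the single, global uidhash_table no matter which namespace the uid was mapped from. The userspace sketch below copies the UIDHASH_* arithmetic and the shape of the new uidhashentry() from the first hunk above (using the !CONFIG_BASE_SMALL value of 7 bits); the kuid_t and hlist_head definitions are simplified models, not kernel code:

```c
/* Userspace sketch of the global uid hash introduced by this patch.
 * The UIDHASH_* macros, __uidhashfn() and uidhashentry() follow the
 * patched kernel/user.c; kuid_t and hlist_head are modelled here. */
#include <stdio.h>

#define UIDHASH_BITS	7			/* !CONFIG_BASE_SMALL case */
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK	(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)

typedef struct { unsigned int val; } kuid_t;	/* model of kuid_t */
#define __kuid_val(k)	((k).val)

struct hlist_head { void *first; };		/* model of the list head */
static struct hlist_head uidhash_table[UIDHASH_SZ];

/* Same shape as the patched macro: the bucket depends only on the
 * global kuid value, so no user_namespace argument is needed. */
#define uidhashentry(uid)	(uidhash_table + __uidhashfn(__kuid_val(uid)))

int main(void)
{
	kuid_t root = { 0 };			/* models GLOBAL_ROOT_UID */
	kuid_t app  = { 1000 };

	printf("root bucket:     %ld\n", (long)(uidhashentry(root) - uidhash_table));
	printf("uid 1000 bucket: %ld\n", (long)(uidhashentry(app)  - uidhash_table));
	return 0;
}
```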