author     Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>   2007-10-15 17:00:09 +0200
committer  Ingo Molnar <mingo@elte.hu>                     2007-10-15 17:00:09 +0200
commit     24e377a83220ef05c9b5bec7e01d65eed6609aa6 (patch)
tree       9303b3d9f91ee39517d379aaac06c0432be8a9b8
parent     9b5b77512dce239fa168183fa71896712232e95a (diff)
download   linux-24e377a83220ef05c9b5bec7e01d65eed6609aa6.tar.bz2
sched: add fair-user scheduler
Enable user-id based fair group scheduling. This is useful for anyone
who wants to test the group scheduler without having to enable
CONFIG_CGROUPS.
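With this patch applied, testing the group scheduler would only require
selecting the new choice at configure time; a hypothetical .config
fragment (option names taken from the init/Kconfig hunk below):

    CONFIG_FAIR_GROUP_SCHED=y
    CONFIG_FAIR_USER_SCHED=y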
A separate scheduling group (i.e. struct task_grp) is automatically created
for every new user added to the system. When a task's uid changes, the task
is moved to the corresponding scheduling group.
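For example, a task that calls setuid() is moved out of its old user's group
by the sched_switch_user() hook added in kernel/user.c below. A minimal
userspace sketch of the resulting behaviour (the uids 1001 and 1002 are made
up, and the program must start as root for setuid() to succeed):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Fork one CPU hog running under the given uid. */
    static void spin_as(uid_t uid)
    {
    	if (fork() == 0) {
    		if (setuid(uid) < 0) {	/* task moves to uid's task_grp here */
    			perror("setuid");
    			exit(1);
    		}
    		for (;;)
    			;		/* burn cpu; watch shares with top(1) */
    	}
    }

    int main(void)
    {
    	spin_as(1001);	/* one hog for user 1001 */
    	spin_as(1002);	/* two hogs for user 1002 */
    	spin_as(1002);
    	pause();
    	return 0;
    }

On a single cpu, fair-user scheduling should give each user roughly half the
machine: the lone hog of user 1001 gets about as much cpu time as both hogs
of user 1002 combined, rather than each of the three tasks getting a third.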
A /proc tunable (/proc/root_user_share) is also provided to tune the root
user's quota of cpu bandwidth.
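A hedged sketch of exercising that tunable from userspace; it assumes the
patch is applied, and the default value noted in the comment comes from the
kernel/sched.c hunk below (2*NICE_0_LOAD, i.e. 2048 if NICE_0_LOAD is 1024):

    #include <stdio.h>

    int main(void)
    {
    	unsigned long shares;
    	FILE *f = fopen("/proc/root_user_share", "r");

    	if (!f || fscanf(f, "%lu", &shares) != 1)
    		return 1;
    	fclose(f);
    	printf("root user share: %lu\n", shares);	/* 2048 by default */

    	/* Halve root's quota; the kernel clamps 0 back to NICE_0_LOAD. */
    	f = fopen("/proc/root_user_share", "w");
    	if (!f)
    		return 1;
    	fprintf(f, "%lu", shares / 2);
    	fclose(f);
    	return 0;
    }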
Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  include/linux/sched.h  |  4
-rw-r--r--  init/Kconfig           | 13
-rw-r--r--  kernel/sched.c         |  9
-rw-r--r--  kernel/sched_debug.c   | 52
-rw-r--r--  kernel/user.c          | 43
5 files changed, 121 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 03c13b663e4b..d0cc58311b13 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -597,6 +597,10 @@ struct user_struct {
 	/* Hash table maintenance information */
 	struct hlist_node uidhash_node;
 	uid_t uid;
+
+#ifdef CONFIG_FAIR_USER_SCHED
+	struct task_grp *tg;
+#endif
 };
 
 extern struct user_struct *find_user(uid_t);
diff --git a/init/Kconfig b/init/Kconfig
index ef90a154dd90..37711fe3c01c 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -289,6 +289,19 @@ config FAIR_GROUP_SCHED
 	  This feature lets cpu scheduler recognize task groups and control cpu
 	  bandwidth allocation to such task groups.
 
+choice
+	depends on FAIR_GROUP_SCHED
+	prompt "Basis for grouping tasks"
+	default FAIR_USER_SCHED
+
+config FAIR_USER_SCHED
+	bool "user id"
+	help
+	  This option will choose userid as the basis for grouping
+	  tasks, thus providing equal cpu bandwidth to each user.
+
+endchoice
+
 config SYSFS_DEPRECATED
 	bool "Create deprecated sysfs files"
 	default y
diff --git a/kernel/sched.c b/kernel/sched.c
index e10c403b1213..f33608e9e1a2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -200,7 +200,12 @@ struct task_grp init_task_grp = {
 	.cfs_rq	= init_cfs_rq_p,
 };
 
+#ifdef CONFIG_FAIR_USER_SCHED
+#define INIT_TASK_GRP_LOAD	2*NICE_0_LOAD
+#else
 #define INIT_TASK_GRP_LOAD	NICE_0_LOAD
+#endif
+
 static int init_task_grp_load = INIT_TASK_GRP_LOAD;
 
 /* return group to which a task belongs */
@@ -208,7 +213,11 @@ static inline struct task_grp *task_grp(struct task_struct *p)
 {
 	struct task_grp *tg;
 
+#ifdef CONFIG_FAIR_USER_SCHED
+	tg = p->user->tg;
+#else
 	tg = &init_task_grp;
+#endif
 
 	return tg;
 }
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 3e47e870b043..57ee9d5630a8 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -212,6 +212,49 @@ static void sysrq_sched_debug_show(void)
 	sched_debug_show(NULL, NULL);
 }
 
+#ifdef CONFIG_FAIR_USER_SCHED
+
+static DEFINE_MUTEX(root_user_share_mutex);
+
+static int
+root_user_share_read_proc(char *page, char **start, off_t off, int count,
+			  int *eof, void *data)
+{
+	int len;
+
+	len = sprintf(page, "%d\n", init_task_grp_load);
+
+	return len;
+}
+
+static int
+root_user_share_write_proc(struct file *file, const char __user *buffer,
+			   unsigned long count, void *data)
+{
+	unsigned long shares;
+	char kbuf[sizeof(unsigned long)+1];
+	int rc = 0;
+
+	if (copy_from_user(kbuf, buffer, sizeof(kbuf)))
+		return -EFAULT;
+
+	shares = simple_strtoul(kbuf, NULL, 0);
+
+	if (!shares)
+		shares = NICE_0_LOAD;
+
+	mutex_lock(&root_user_share_mutex);
+
+	init_task_grp_load = shares;
+	rc = sched_group_set_shares(&init_task_grp, shares);
+
+	mutex_unlock(&root_user_share_mutex);
+
+	return (rc < 0 ? rc : count);
+}
+
+#endif	/* CONFIG_FAIR_USER_SCHED */
+
 static int sched_debug_open(struct inode *inode, struct file *filp)
 {
 	return single_open(filp, sched_debug_show, NULL);
@@ -234,6 +277,15 @@ static int __init init_sched_debug_procfs(void)
 
 	pe->proc_fops = &sched_debug_fops;
 
+#ifdef CONFIG_FAIR_USER_SCHED
+	pe = create_proc_entry("root_user_share", 0644, NULL);
+	if (!pe)
+		return -ENOMEM;
+
+	pe->read_proc = root_user_share_read_proc;
+	pe->write_proc = root_user_share_write_proc;
+#endif
+
 	return 0;
 }
diff --git a/kernel/user.c b/kernel/user.c
index 9ca2848fc356..c6387fac932d 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -50,8 +50,41 @@ struct user_struct root_user = {
 	.uid_keyring	= &root_user_keyring,
 	.session_keyring = &root_session_keyring,
 #endif
+#ifdef CONFIG_FAIR_USER_SCHED
+	.tg		= &init_task_grp,
+#endif
 };
 
+#ifdef CONFIG_FAIR_USER_SCHED
+static void sched_destroy_user(struct user_struct *up)
+{
+	sched_destroy_group(up->tg);
+}
+
+static int sched_create_user(struct user_struct *up)
+{
+	int rc = 0;
+
+	up->tg = sched_create_group();
+	if (IS_ERR(up->tg))
+		rc = -ENOMEM;
+
+	return rc;
+}
+
+static void sched_switch_user(struct task_struct *p)
+{
+	sched_move_task(p);
+}
+
+#else	/* CONFIG_FAIR_USER_SCHED */
+
+static void sched_destroy_user(struct user_struct *up) { }
+static int sched_create_user(struct user_struct *up) { return 0; }
+static void sched_switch_user(struct task_struct *p) { }
+
+#endif	/* CONFIG_FAIR_USER_SCHED */
+
 /*
  * These routines must be called with the uidhash spinlock held!
  */
@@ -109,6 +142,7 @@ void free_uid(struct user_struct *up)
 	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
 		uid_hash_remove(up);
 		spin_unlock_irqrestore(&uidhash_lock, flags);
+		sched_destroy_user(up);
 		key_put(up->uid_keyring);
 		key_put(up->session_keyring);
 		kmem_cache_free(uid_cachep, up);
@@ -150,6 +184,13 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 			return NULL;
 		}
 
+		if (sched_create_user(new) < 0) {
+			key_put(new->uid_keyring);
+			key_put(new->session_keyring);
+			kmem_cache_free(uid_cachep, new);
+			return NULL;
+		}
+
 		/*
 		 * Before adding this, check whether we raced
 		 * on adding the same user already..
@@ -157,6 +198,7 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 		spin_lock_irq(&uidhash_lock);
 		up = uid_hash_find(uid, hashent);
 		if (up) {
+			sched_destroy_user(new);
 			key_put(new->uid_keyring);
 			key_put(new->session_keyring);
 			kmem_cache_free(uid_cachep, new);
@@ -184,6 +226,7 @@ void switch_uid(struct user_struct *new_user)
 	atomic_dec(&old_user->processes);
 	switch_uid_keyring(new_user);
 	current->user = new_user;
+	sched_switch_user(current);
 
 	/*
 	 * We need to synchronize with __sigqueue_alloc()