author		Wei Yang <richard.weiyang@gmail.com>	2020-04-23 21:44:43 +0000
committer	Peter Zijlstra <peterz@infradead.org>	2020-04-30 20:14:42 +0200
commit		b1d1779e5ef7a60b192b61fd97201f322e1e9303
tree		8785d35fdcd991fe0c3db1077a08ade462ab3892 /kernel/sched
parent		12ac6782a40ad7636b6ef45680741825b64ab221
download	linux-b1d1779e5ef7a60b192b61fd97201f322e1e9303.tar.bz2
sched/core: Simplify sched_init()
Currently root_task_group.shares and cfs_bandwidth are initialized once
per online CPU, which is not necessary: neither depends on the CPU being
set up.

Let's take this initialization out of the per-CPU loop and do it only
once (a standalone sketch of the pattern follows the diff below).
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200423214443.29994-1-richard.weiyang@gmail.com
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f6ae2629f4e1..b58efb1156eb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6597,6 +6597,8 @@ void __init sched_init(void)
 		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
+		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
+		init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 #ifdef CONFIG_RT_GROUP_SCHED
 		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
@@ -6649,7 +6651,6 @@ void __init sched_init(void)
 		init_rt_rq(&rq->rt);
 		init_dl_rq(&rq->dl);
 #ifdef CONFIG_FAIR_GROUP_SCHED
-		root_task_group.shares = ROOT_TASK_GROUP_LOAD;
 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
 		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
 		/*
@@ -6671,7 +6672,6 @@ void __init sched_init(void)
 		 * We achieve this by letting root_task_group's tasks sit
 		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
 		 */
-		init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
 		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
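
For readers skimming the diff: the two moved lines touch only
root_task_group and never read or write per-CPU state, so running them
once before the per-CPU loop is equivalent to running them on every
iteration. Below is a minimal standalone C sketch of this hoisting
pattern; every name in it (fake_task_group, fake_rq, NR_CPUS_SKETCH,
ROOT_LOAD_SKETCH, init_sketch) is invented for illustration and is not
the kernel's actual type, constant, or function.

/*
 * Standalone sketch, NOT kernel code: initialization that does not
 * depend on the loop variable is hoisted out of the per-CPU loop so
 * it runs once instead of once per CPU.
 */
#include <stdio.h>

#define NR_CPUS_SKETCH   4	/* stand-in for the number of CPUs */
#define ROOT_LOAD_SKETCH 1024	/* stand-in for ROOT_TASK_GROUP_LOAD */

struct fake_task_group {
	unsigned long shares;	/* loop-invariant: one global value */
};

struct fake_rq {
	int cpu;		/* genuinely per-CPU state */
};

static struct fake_task_group root_tg;
static struct fake_rq runqueues[NR_CPUS_SKETCH];

static void init_sketch(void)
{
	int i;

	/*
	 * Before the patch, the equivalent assignment lived inside the
	 * loop and was redone for every CPU; hoisting it preserves
	 * behavior because it never touches per-CPU state.
	 */
	root_tg.shares = ROOT_LOAD_SKETCH;

	for (i = 0; i < NR_CPUS_SKETCH; i++)
		runqueues[i].cpu = i;	/* per-CPU work stays in the loop */
}

int main(void)
{
	init_sketch();
	printf("shares=%lu, last cpu=%d\n",
	       root_tg.shares, runqueues[NR_CPUS_SKETCH - 1].cpu);
	return 0;
}

The same reasoning covers the init_cfs_bandwidth() call: per the commit
message it only needs to run once for root_task_group, so the repeated
per-CPU calls it replaces were redundant rather than per-CPU work.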