author    | Ingo Molnar <mingo@kernel.org> | 2013-08-29 12:02:08 +0200
committer | Ingo Molnar <mingo@kernel.org> | 2013-08-29 12:02:08 +0200
commit    | aee2bce3cfdcb9bf2c51c24496ee776e8202ed11 (patch)
tree      | 66ff8e345cf693cfb39383f25ad796e2f59ab6ad /kernel/sched/fair.c
parent    | 5ec4c599a52362896c3e7c6a31ba6145dca9c6f5 (diff)
parent    | c95389b4cd6a4b52af78bea706a274453e886251 (diff)
download  | linux-aee2bce3cfdcb9bf2c51c24496ee776e8202ed11.tar.bz2
Merge branch 'linus' into perf/core
Pick up the latest upstream fixes.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r-- | kernel/sched/fair.c | 10
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 10d729b02696..8977a249816f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2032,6 +2032,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 	 */
 	update_entity_load_avg(curr, 1);
 	update_cfs_rq_blocked_load(cfs_rq, 1);
+	update_cfs_shares(cfs_rq);
 
 #ifdef CONFIG_SCHED_HRTICK
 	/*
@@ -4324,6 +4325,8 @@ struct sg_lb_stats {
  * get_sd_load_idx - Obtain the load index for a given sched domain.
  * @sd: The sched_domain whose load_idx is to be obtained.
  * @idle: The Idle status of the CPU for whose sd load_icx is obtained.
+ *
+ * Return: The load index.
  */
 static inline int get_sd_load_idx(struct sched_domain *sd,
 					enum cpu_idle_type idle)
@@ -4618,6 +4621,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
  *
  * Determine if @sg is a busier group than the previously selected
  * busiest group.
+ *
+ * Return: %true if @sg is a busier group than the previously selected
+ * busiest group. %false otherwise.
  */
 static bool update_sd_pick_busiest(struct lb_env *env,
 				   struct sd_lb_stats *sds,
@@ -4735,7 +4741,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
  * assuming lower CPU number will be equivalent to lower a SMT thread
  * number.
  *
- * Returns 1 when packing is required and a task should be moved to
+ * Return: 1 when packing is required and a task should be moved to
  * this CPU. The amount of the imbalance is returned in *imbalance.
  *
  * @env: The load balancing environment.
@@ -4913,7 +4919,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
  * @balance: Pointer to a variable indicating if this_cpu
  *	is the appropriate cpu to perform load balancing at this_level.
  *
- * Returns:	- the busiest group if imbalance exists.
+ * Return:	- The busiest group if imbalance exists.
  *		- If no imbalance and user has opted for power-savings balance,
  *		   return the least loaded group whose CPUs can be
  *		   put to idle by rebalancing its tasks onto our group.
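For context on the comment-only hunks above: they move these functions toward the kernel-doc convention of documenting return values in a dedicated "Return:" section, which the kernel-doc tooling recognizes, instead of free-form "Returns ..." prose. A minimal sketch of that layout follows; the function and its parameter are hypothetical and used only to illustrate the comment structure, not taken from this patch.

/**
 * my_example_idx - Pick a load index for a CPU (hypothetical helper).
 * @busy: Non-zero if the CPU is currently busy.
 *
 * Kernel-doc expects the brief description and parameter lines first,
 * an optional longer description, and then a dedicated "Return:" section.
 *
 * Return: 1 when the CPU is busy, 0 otherwise.
 */
static inline int my_example_idx(int busy)
{
	return busy ? 1 : 0;
}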