author		Peter Zijlstra <peterz@infradead.org>	2017-12-21 11:20:23 +0100
committer	Ingo Molnar <mingo@kernel.org>	2018-03-09 07:59:19 +0100
commit		e022e0d38ad475fc650f22efa3deb2fb96e62542 (patch)
tree		5160fc74cda2c713b7a6b8d2b50721e713cecb1a /kernel/sched
parent		a4064fb614f83c0a097c5ff7fe433c4aa139c7af (diff)
sched/fair: Update blocked load from NEWIDLE
Since we already iterate CPUs looking for work on NEWIDLE, use this
iteration to age the blocked load. If the domain for which this is
done completely spans the idle set, we can push the ILB-based aging
forward.

Suggested-by: Brendan Jackman <brendan.jackman@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
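In outline, the patch wires the aging into the existing NEWIDLE balance walk in two places; a condensed sketch of the control flow (names as in the hunks below, surrounding code elided):

	/* update_sd_lb_stats(): a newly-idle balance opts in to stats aging. */
	if (env->idle == CPU_NEWLY_IDLE)
		env->flags |= LBF_NOHZ_STATS;

	/* update_sg_lb_stats(): age the blocked load of each CPU we visit anyway. */
	if (env->flags & LBF_NOHZ_STATS)
		update_nohz_stats(rq);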
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	1
-rw-r--r--	kernel/sched/fair.c	49
-rw-r--r--	kernel/sched/sched.h	1
3 files changed, 45 insertions(+), 6 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 69c9a6b07b61..8a10a2ce30a4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6074,6 +6074,7 @@ void __init sched_init(void)
 		rq_attach_root(rq, &def_root_domain);
 #ifdef CONFIG_NO_HZ_COMMON
 		rq->last_load_update_tick = jiffies;
+		rq->last_blocked_load_update_tick = jiffies;
 		atomic_set(&rq->nohz_flags, 0);
 #endif
 #endif /* CONFIG_SMP */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d8693fa9e7c5..85232dad89c9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5376,6 +5376,14 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
 	}
 	return load;
 }
+
+static struct {
+	cpumask_var_t idle_cpus_mask;
+	atomic_t nr_cpus;
+	unsigned long next_balance;	/* in jiffy units */
+	unsigned long next_stats;
+} nohz ____cacheline_aligned;
+
 #endif /* CONFIG_NO_HZ_COMMON */
 
 /**
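Note the nohz control structure itself is unchanged: it is moved up here from its old home next to the ILB code (see the removal hunk further down) so that the new update_nohz_stats(), defined earlier in the file, can reference nohz.idle_cpus_mask.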
@@ -7022,6 +7030,7 @@ enum fbq_type { regular, remote, all };
 #define LBF_NEED_BREAK	0x02
 #define LBF_DST_PINNED	0x04
 #define LBF_SOME_PINNED	0x08
+#define LBF_NOHZ_STATS	0x10
 
 struct lb_env {
 	struct sched_domain	*sd;
@@ -7460,6 +7469,10 @@ static void update_blocked_averages(int cpu)
 		if (cfs_rq_is_decayed(cfs_rq))
 			list_del_leaf_cfs_rq(cfs_rq);
 	}
+
+#ifdef CONFIG_NO_HZ_COMMON
+	rq->last_blocked_load_update_tick = jiffies;
+#endif
 	rq_unlock_irqrestore(rq, &rf);
 }
@@ -7519,6 +7532,9 @@ static inline void update_blocked_averages(int cpu)
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
 	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+#ifdef CONFIG_NO_HZ_COMMON
+	rq->last_blocked_load_update_tick = jiffies;
+#endif
 	rq_unlock_irqrestore(rq, &rf);
 }
@@ -7853,6 +7869,21 @@ group_type group_classify(struct sched_group *group,
 	return group_other;
 }
 
+static void update_nohz_stats(struct rq *rq)
+{
+#ifdef CONFIG_NO_HZ_COMMON
+	unsigned int cpu = rq->cpu;
+
+	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
+		return;
+
+	if (!time_after(jiffies, rq->last_blocked_load_update_tick))
+		return;
+
+	update_blocked_averages(cpu);
+#endif
+}
+
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
  * @env: The load balancing environment.
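The helper added above is cheap to call repeatedly: the jiffy stamp written by update_blocked_averages() doubles as a rate limiter. A commented restatement of its two early-outs:

	/* Only nohz-idle CPUs carry blocked load that nobody else is aging. */
	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
		return;

	/* Already stamped this jiffy (by any path): nothing to age yet. */
	if (!time_after(jiffies, rq->last_blocked_load_update_tick))
		return;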
@@ -7875,6 +7906,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
+		if (env->flags & LBF_NOHZ_STATS)
+			update_nohz_stats(rq);
+
 		/* Bias balancing toward CPUs of our domain: */
 		if (local_group)
 			load = target_load(i, load_idx);
@@ -8030,6 +8064,15 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
 	if (child && child->flags & SD_PREFER_SIBLING)
 		prefer_sibling = 1;
 
+#ifdef CONFIG_NO_HZ_COMMON
+	if (env->idle == CPU_NEWLY_IDLE) {
+		env->flags |= LBF_NOHZ_STATS;
+
+		if (cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd)))
+			nohz.next_stats = jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD);
+	}
+#endif
+
 	load_idx = get_sd_load_idx(env->sd, env->idle);
 
 	do {
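About the cpumask_subset() test above: if every nohz-idle CPU falls within this domain's span, the NEWIDLE pass will have aged all blocked load in the system, so the ILB-driven stats update can safely be pushed out by one PELT half-life (LOAD_AVG_PERIOD is 32, taken here as milliseconds):

	/* All idle CPUs covered: defer the next ILB stats kick by ~32ms. */
	if (cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd)))
		nohz.next_stats = jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD);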
@@ -9049,12 +9092,6 @@ static inline int on_null_domain(struct rq *rq)
  * needed, they will kick the idle load balancer, which then does idle
  * load balancing for all the idle CPUs.
  */
-static struct {
-	cpumask_var_t idle_cpus_mask;
-	atomic_t nr_cpus;
-	unsigned long next_balance;	/* in jiffy units */
-	unsigned long next_stats;
-} nohz ____cacheline_aligned;
 
 static inline int find_new_ilb(void)
 {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 5295f274053b..21381d276709 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -762,6 +762,7 @@ struct rq {
 #ifdef CONFIG_NO_HZ_COMMON
 #ifdef CONFIG_SMP
 	unsigned long last_load_update_tick;
+	unsigned long last_blocked_load_update_tick;
 #endif /* CONFIG_SMP */
 	atomic_t nohz_flags;
 #endif /* CONFIG_NO_HZ_COMMON */