From 63928384faefba1b31c3bb77361965715a9fc71c Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 13 Feb 2018 16:54:17 +0100
Subject: sched/nohz: Optimize nohz_idle_balance()

Avoid calling update_blocked_averages() on runqueues that do not in
fact have any blocked load, by re-using/extending update_nohz_stats().

Signed-off-by: Peter Zijlstra (Intel)
Cc: Frederic Weisbecker
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar
---
 kernel/sched/fair.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index aad7c03dbad8..5c357561db5d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7898,7 +7898,7 @@ group_type group_classify(struct sched_group *group,
 	return group_other;
 }
 
-static bool update_nohz_stats(struct rq *rq)
+static bool update_nohz_stats(struct rq *rq, bool force)
 {
 #ifdef CONFIG_NO_HZ_COMMON
 	unsigned int cpu = rq->cpu;
@@ -7909,7 +7909,7 @@ static bool update_nohz_stats(struct rq *rq)
 	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
 		return false;
 
-	if (!time_after(jiffies, rq->last_blocked_load_update_tick))
+	if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick))
 		return true;
 
 	update_blocked_averages(cpu);
@@ -7942,7 +7942,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
-		if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq))
+		if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
 			env->flags |= LBF_NOHZ_AGAIN;
 
 		/* Bias balancing toward CPUs of our domain: */
@@ -9552,8 +9552,7 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 
 		rq = cpu_rq(balance_cpu);
 
-		update_blocked_averages(rq->cpu);
-		has_blocked_load |= rq->has_blocked_load;
+		has_blocked_load |= update_nohz_stats(rq, true);
 
 		/*
 		 * If time for next balance is due,
--
cgit v1.2.3
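
For context, this is roughly what update_nohz_stats() looks like after the
patch, reassembled from the hunks above. The diff context does not show the
function's early exit or its return statement, so the has_blocked_load check
and the final return value below are assumptions inferred from how
nohz_idle_balance() consumes the result; treat this as a sketch, not the
verbatim kernel source:

static bool update_nohz_stats(struct rq *rq, bool force)
{
#ifdef CONFIG_NO_HZ_COMMON
	unsigned int cpu = rq->cpu;

	/* Assumed early exit: no blocked load to decay on this runqueue. */
	if (!rq->has_blocked_load)
		return false;

	/* Only NOHZ-idle CPUs have stale stats worth refreshing. */
	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
		return false;

	/*
	 * Rate limit: unless the caller forces a refresh (as
	 * nohz_idle_balance() now does with force=true), skip the
	 * expensive update_blocked_averages() call when the stats
	 * were updated within the last jiffy.
	 */
	if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick))
		return true;

	update_blocked_averages(cpu);

	/* Assumed: report whether blocked load remains after the update. */
	return rq->has_blocked_load;
#else
	return false;
#endif
}

The net effect is that nohz_idle_balance() replaces its unconditional
update_blocked_averages() call with update_nohz_stats(rq, true), picking up
the has_blocked_load early exit for free while keeping the forced-refresh
semantics it had before.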