author		Rafael J. Wysocki <rafael.j.wysocki@intel.com>		2016-08-10 03:11:17 +0200
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>		2016-08-16 22:16:03 +0200
commit		12bde33dbb3eadd60343a8a71c39766073c1d752 (patch)
tree		840fa6eddcbdab3682e5ba0263c9efc236c01193 /kernel/sched
parent		58919e83c85c3a3c5fb34025dc0e95ddd998c478 (diff)
download	linux-12bde33dbb3eadd60343a8a71c39766073c1d752.tar.bz2
cpufreq / sched: Pass runqueue pointer to cpufreq_update_util()
All of the callers of cpufreq_update_util() pass rq_clock(rq) to it
as the time argument, and some of them check whether cpu_of(rq) is
equal to smp_processor_id() before calling it. Rework
cpufreq_update_util() to take a runqueue pointer as its argument and
move the rq_clock(rq) evaluation into it.
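In condensed form (both signatures appear verbatim in the sched.h
hunk below):

	/* Old: each caller evaluated the clock itself. */
	static inline void cpufreq_update_util(u64 time, unsigned int flags);

	/* New: the runqueue is passed in and rq_clock(rq) is read internally. */
	static inline void cpufreq_update_util(struct rq *rq, unsigned int flags);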
Additionally, provide a cpufreq_update_this_cpu() wrapper that checks
cpu_of(rq) against smp_processor_id() for the cpufreq_update_util()
callers that need it.
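The wrapper, taken from the sched.h hunk below, folds that check into
one place:

	static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
	{
		if (cpu_of(rq) == smp_processor_id())
			cpufreq_update_util(rq, flags);
	}

With it, the open-coded checks in update_curr_dl() and update_curr_rt()
collapse to single calls such as cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_DL).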
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/deadline.c	 3
-rw-r--r--	kernel/sched/fair.c		 9
-rw-r--r--	kernel/sched/rt.c		 3
-rw-r--r--	kernel/sched/sched.h		15
4 files changed, 15 insertions, 15 deletions
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 4464cc3e4f3d..974779656999 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -736,8 +736,7 @@ static void update_curr_dl(struct rq *rq)
 	}
 
 	/* kick cpufreq (see the comment in kernel/sched/sched.h). */
-	if (cpu_of(rq) == smp_processor_id())
-		cpufreq_update_util(rq_clock(rq), SCHED_CPUFREQ_DL);
+	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_DL);
 
 	schedstat_set(curr->se.statistics.exec_max,
 		      max(curr->se.statistics.exec_max, delta_exec));
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f91fa5796e50..5d558cc91f08 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2876,8 +2876,6 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
 {
 	if (&this_rq()->cfs == cfs_rq) {
-		struct rq *rq = rq_of(cfs_rq);
-
 		/*
 		 * There are a few boundary cases this might miss but it should
 		 * get called often enough that that should (hopefully) not be
@@ -2894,7 +2892,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
 		 *
 		 * See cpu_util().
 		 */
-		cpufreq_update_util(rq_clock(rq), 0);
+		cpufreq_update_util(rq_of(cfs_rq), 0);
 	}
 }
 
@@ -3155,10 +3153,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 
 static inline void update_load_avg(struct sched_entity *se, int not_used)
 {
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
-	struct rq *rq = rq_of(cfs_rq);
-
-	cpufreq_update_util(rq_clock(rq), 0);
+	cpufreq_update_util(rq_of(cfs_rq_of(se)), 0);
 }
 
 static inline void
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 8a9cd9ba5153..2516b8df6dbb 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -958,8 +958,7 @@ static void update_curr_rt(struct rq *rq)
 		return;
 
 	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
-	if (cpu_of(rq) == smp_processor_id())
-		cpufreq_update_util(rq_clock(rq), SCHED_CPUFREQ_RT);
+	cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
 
 	schedstat_set(curr->se.statistics.exec_max,
 		      max(curr->se.statistics.exec_max, delta_exec));
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 82fc5542708c..b7fc1ced4380 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1763,7 +1763,7 @@ DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
 
 /**
  * cpufreq_update_util - Take a note about CPU utilization changes.
- * @time: Current time.
+ * @rq: Runqueue to carry out the update for.
  * @flags: Update reason flags.
  *
  * This function is called by the scheduler on the CPU whose utilization is
@@ -1783,16 +1783,23 @@ DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
  * but that really is a band-aid. Going forward it should be replaced with
  * solutions targeted more specifically at RT and DL tasks.
  */
-static inline void cpufreq_update_util(u64 time, unsigned int flags)
+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
 {
 	struct update_util_data *data;
 
 	data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
 	if (data)
-		data->func(data, time, flags);
+		data->func(data, rq_clock(rq), flags);
+}
+
+static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags)
+{
+	if (cpu_of(rq) == smp_processor_id())
+		cpufreq_update_util(rq, flags);
 }
 #else
-static inline void cpufreq_update_util(u64 time, unsigned int flags) {}
+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
+static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {}
 #endif /* CONFIG_CPU_FREQ */
 
 #ifdef arch_scale_freq_capacity