path: root/kernel/sched/cpufreq_schedutil.c
author     Vincent Guittot <vincent.guittot@linaro.org>  2018-06-28 17:45:06 +0200
committer  Ingo Molnar <mingo@kernel.org>  2018-07-15 23:51:20 +0200
commit     3ae117c6cd7c4783819a0766aa97b9493a8a0f62 (patch)
tree       eb2dc43c697ce8286661c1dc9940505126be6870 /kernel/sched/cpufreq_schedutil.c
parent     371bf42732694d142b0de026e152266c039b97d3 (diff)
download   linux-3ae117c6cd7c4783819a0766aa97b9493a8a0f62.tar.bz2
cpufreq/schedutil: Use RT utilization tracking
Add both CFS and RT utilization when selecting an OPP for CFS tasks, as RT can preempt and steal CFS's running time.

RT util_avg is used to take the utilization of RT tasks on the CPU into account when selecting an OPP. If an RT task migrates, its RT utilization does not migrate with it but decays over time. On an overloaded CPU, CFS utilization reflects the remaining utilization available on the CPU. When the RT task migrates away, CFS utilization increases as tasks start to use the newly available capacity. At the same pace, RT utilization decays, and the two variations compensate each other to keep the overall utilization unchanged and prevent any OPP drop.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Morten.Rasmussen@arm.com
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: claudio@evidence.eu.com
Cc: daniel.lezcano@linaro.org
Cc: dietmar.eggemann@arm.com
Cc: joel@joelfernandes.org
Cc: juri.lelli@redhat.com
Cc: luca.abeni@santannapisa.it
Cc: patrick.bellasi@arm.com
Cc: quentin.perret@arm.com
Cc: rjw@rjwysocki.net
Cc: valentin.schneider@arm.com
Link: http://lkml.kernel.org/r/1530200714-4504-4-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
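The compensation described above can be illustrated numerically. What follows is a rough stand-alone user-space sketch, not kernel code: it assumes a PELT-like geometric signal with a 32 ms half-life, that the CFS tasks immediately absorb the capacity freed by the departing RT task, and purely illustrative starting values (rt = 400, cfs = 300 against the usual capacity scale of 1024). Build it as ordinary C (link with -lm).

/*
 * Illustration of the changelog's claim (user-space sketch, not kernel
 * code): after an RT task migrates away, its util_avg decays while CFS
 * utilization ramps up into the freed capacity, so the sum - and hence
 * the selected OPP - stays roughly constant.
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
	const double y = pow(0.5, 1.0 / 32.0);	/* per-ms decay, 32 ms half-life */
	const double rt0 = 400.0;		/* RT util_avg when the task leaves */
	const double cfs0 = 300.0;		/* CFS util_avg at the same instant */
	const double cfs_target = cfs0 + rt0;	/* CFS absorbs the freed capacity */

	for (int ms = 0; ms <= 128; ms += 32) {
		double rt  = rt0 * pow(y, ms);			/* RT decays */
		double cfs = cfs_target - rt0 * pow(y, ms);	/* CFS ramps up */

		printf("t=%3d ms  rt=%6.1f  cfs=%6.1f  sum=%6.1f\n",
		       ms, rt, cfs, rt + cfs);
	}
	return 0;
}

Under these assumptions the sum column stays at 700 for every sample, which is the behaviour the patch relies on to avoid an OPP drop while the RT signal decays.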
Diffstat (limited to 'kernel/sched/cpufreq_schedutil.c')
-rw-r--r--  kernel/sched/cpufreq_schedutil.c | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index c907fde01eaa..da29b5a33adb 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -56,6 +56,7 @@ struct sugov_cpu {
 	/* The fields below are only needed when sharing a policy: */
 	unsigned long util_cfs;
 	unsigned long util_dl;
+	unsigned long util_rt;
 	unsigned long max;
 
 	/* The field below is for single-CPU policies only: */
@@ -186,15 +187,21 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
 	sg_cpu->max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
 	sg_cpu->util_cfs = cpu_util_cfs(rq);
 	sg_cpu->util_dl = cpu_util_dl(rq);
+	sg_cpu->util_rt = cpu_util_rt(rq);
 }
 
 static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
 {
 	struct rq *rq = cpu_rq(sg_cpu->cpu);
+	unsigned long util;
 
 	if (rt_rq_is_runnable(&rq->rt))
 		return sg_cpu->max;
 
+	util = sg_cpu->util_dl;
+	util += sg_cpu->util_cfs;
+	util += sg_cpu->util_rt;
+
 	/*
 	 * Utilization required by DEADLINE must always be granted while, for
 	 * FAIR, we use blocked utilization of IDLE CPUs as a mechanism to
@@ -205,7 +212,7 @@ static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
 	 * util_cfs + util_dl as requested freq. However, cpufreq is not yet
 	 * ready for such an interface. So, we only do the latter for now.
 	 */
-	return min(sg_cpu->max, (sg_cpu->util_dl + sg_cpu->util_cfs));
+	return min(sg_cpu->max, util);
 }
 
 /**
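For context, the value returned by sugov_aggregate_util() above feeds schedutil's frequency selection, which applies roughly 25% headroom (next_freq is about 1.25 * max_freq * util / max in get_next_freq()). The stand-alone sketch below mimics that mapping to show how summing util_rt raises the requested frequency; the helper name pick_freq(), the capacities and the frequencies are illustrative assumptions, and the clamp mirrors the min(sg_cpu->max, util) above.

/*
 * Stand-alone sketch (not kernel code) of how the aggregated utilization
 * is turned into a frequency request.  The 25% headroom mirrors
 * schedutil's get_next_freq(); all values below are illustrative only.
 */
#include <stdio.h>

static unsigned long pick_freq(unsigned long util_cfs, unsigned long util_rt,
			       unsigned long util_dl, unsigned long max,
			       unsigned long max_freq)
{
	unsigned long util = util_dl + util_cfs + util_rt;

	if (util > max)			/* same clamp as min(sg_cpu->max, util) */
		util = max;

	/* next_freq ~= 1.25 * max_freq * util / max */
	return (max_freq + (max_freq >> 2)) * util / max;
}

int main(void)
{
	unsigned long max = 1024;		/* CPU capacity scale */
	unsigned long max_freq = 2000000;	/* kHz, illustrative */

	/* Before this patch, only CFS and DL utilization were summed. */
	printf("CFS only : %lu kHz\n", pick_freq(300, 0, 0, max, max_freq));
	/* With util_rt added, an RT task worth 400/1024 raises the request. */
	printf("CFS + RT : %lu kHz\n", pick_freq(300, 400, 0, max, max_freq));
	return 0;
}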