From f1d88b4468188ddcd2620b8d612068faf6662a62 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 26 Apr 2018 16:00:50 +0530 Subject: sched/fair: Rearrange select_task_rq_fair() to optimize it Rearrange select_task_rq_fair() a bit to avoid executing some conditional statements in few specific code-paths. That gets rid of the goto as well. This shouldn't result in any functional changes. Tested-by: Rohit Jain Signed-off-by: Viresh Kumar Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vincent Guittot Link: http://lkml.kernel.org/r/20831b8d237bf3a20e4e328286f678b425ff04c9.1524738578.git.viresh.kumar@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 37 ++++++++++++++++--------------------- 1 file changed, 16 insertions(+), 21 deletions(-) (limited to 'kernel/sched') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index e3002e5ada31..4b346f358005 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6613,7 +6613,7 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu) static int select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags) { - struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL; + struct sched_domain *tmp, *sd = NULL; int cpu = smp_processor_id(); int new_cpu = prev_cpu; int want_affine = 0; @@ -6636,7 +6636,10 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f */ if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { - affine_sd = tmp; + if (cpu != prev_cpu) + new_cpu = wake_affine(tmp, p, cpu, prev_cpu, sync); + + sd = NULL; /* Prefer wake_affine over balance flags */ break; } @@ -6646,33 +6649,25 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f break; } - if (affine_sd) { - sd = NULL; /* Prefer wake_affine over balance flags */ - if (cpu == prev_cpu) - goto pick_cpu; - - new_cpu = wake_affine(affine_sd, p, cpu, prev_cpu, sync); - } + if (unlikely(sd)) { + /* Slow path */ - if (sd && !(sd_flag & SD_BALANCE_FORK)) { /* * We're going to need the task's util for capacity_spare_wake * in find_idlest_group. Sync it up to prev_cpu's * last_update_time. */ - sync_entity_load_avg(&p->se); - } - - if (!sd) { -pick_cpu: - if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */ - new_cpu = select_idle_sibling(p, prev_cpu, new_cpu); + if (!(sd_flag & SD_BALANCE_FORK)) + sync_entity_load_avg(&p->se); - if (want_affine) - current->recent_used_cpu = cpu; - } - } else { new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag); + } else if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */ + /* Fast path */ + + new_cpu = select_idle_sibling(p, prev_cpu, new_cpu); + + if (want_affine) + current->recent_used_cpu = cpu; } rcu_read_unlock(); -- cgit v1.2.3 From c976a862ba4869c1e75c39b9b8f1e9ebfe90cdfc Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 26 Apr 2018 16:00:51 +0530 Subject: sched/fair: Avoid calling sync_entity_load_avg() unnecessarily Call sync_entity_load_avg() directly from find_idlest_cpu() instead of select_task_rq_fair(), as that's where we need to use task's utilization value. And call sync_entity_load_avg() only after making sure sched domain spans over one of the allowed CPUs for the task. 
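[Illustrative aside, not part of the patch:] As a rough sketch of the ordering this change establishes, here is a small standalone C model. The struct, helper names and return values are made-up stand-ins rather than the kernel's types; the point is only that the allowed-CPU check runs first, and the comparatively expensive load-average sync happens once the slow path is actually taken and the wakeup is not a fork balance.

/*
 * Standalone toy model (not kernel code) of the ordering described above.
 * All helpers and fields are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct task { bool load_avg_synced; };

/* Stand-in for cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed). */
static bool domain_spans_allowed_cpu(void) { return true; }

/* Stand-in for sync_entity_load_avg(&p->se); assume this is the costly step. */
static void sync_entity_load_avg(struct task *p)
{
        p->load_avg_synced = true;
        puts("synced task load average");
}

static int find_idlest_cpu(struct task *p, int prev_cpu, bool balance_fork)
{
        if (!domain_spans_allowed_cpu())
                return prev_cpu;        /* early exit: no sync performed */

        /* Only the slow path needs an up-to-date utilization estimate. */
        if (!balance_fork)
                sync_entity_load_avg(p);

        return 0;                       /* pretend CPU 0 is the idlest */
}

int main(void)
{
        struct task p = { .load_avg_synced = false };

        printf("placed on CPU %d\n", find_idlest_cpu(&p, 3, false));
        return 0;
}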
Signed-off-by: Viresh Kumar Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vincent Guittot Link: http://lkml.kernel.org/r/cd019d1753824c81130eae7b43e2bbcec47cc1ad.1524738578.git.viresh.kumar@linaro.org Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) (limited to 'kernel/sched') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 4b346f358005..1f6a23a5b451 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6199,6 +6199,13 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed)) return prev_cpu; + /* + * We need task's util for capacity_spare_wake, sync it up to prev_cpu's + * last_update_time. + */ + if (!(sd_flag & SD_BALANCE_FORK)) + sync_entity_load_avg(&p->se); + while (sd) { struct sched_group *group; struct sched_domain *tmp; @@ -6651,15 +6658,6 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f if (unlikely(sd)) { /* Slow path */ - - /* - * We're going to need the task's util for capacity_spare_wake - * in find_idlest_group. Sync it up to prev_cpu's - * last_update_time. - */ - if (!(sd_flag & SD_BALANCE_FORK)) - sync_entity_load_avg(&p->se); - new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag); } else if (sd_flag & SD_BALANCE_WAKE) { /* XXX always ? */ /* Fast path */ -- cgit v1.2.3 From 247f2f6f3c706b40b5f3886646f3eb53671258bf Mon Sep 17 00:00:00 2001 From: Rohit Jain Date: Wed, 2 May 2018 13:52:10 -0700 Subject: sched/core: Don't schedule threads on pre-empted vCPUs In paravirt configurations today, spinlocks figure out whether a vCPU is running to determine whether or not spinlock should bother spinning. We can use the same logic to prioritize CPUs when scheduling threads. If a vCPU has been pre-empted, it will incur the extra cost of VMENTER and the time it actually spends to be running on the host CPU. If we had other vCPUs which were actually running on the host CPU and idle we should schedule threads there. Performance numbers: Note: With patch is referred to as Paravirt in the following and without patch is referred to as Base. 
1) When only 1 VM is running: a) Hackbench test on KVM 8 vCPUs, 10,000 loops (lower is better): +-------+-----------------+----------------+ |Number |Paravirt |Base | |of +---------+-------+-------+--------+ |Threads|Average |Std Dev|Average| Std Dev| +-------+---------+-------+-------+--------+ |1 |1.817 |0.076 |1.721 | 0.067 | |2 |3.467 |0.120 |3.468 | 0.074 | |4 |6.266 |0.035 |6.314 | 0.068 | |8 |11.437 |0.105 |11.418 | 0.132 | |16 |21.862 |0.167 |22.161 | 0.129 | |25 |33.341 |0.326 |33.692 | 0.147 | +-------+---------+-------+-------+--------+ 2) When two VMs are running with same CPU affinities: a) tbench test on VM 8 cpus Base: VM1: Throughput 220.59 MB/sec 1 clients 1 procs max_latency=12.872 ms Throughput 448.716 MB/sec 2 clients 2 procs max_latency=7.555 ms Throughput 861.009 MB/sec 4 clients 4 procs max_latency=49.501 ms Throughput 1261.81 MB/sec 7 clients 7 procs max_latency=76.990 ms VM2: Throughput 219.937 MB/sec 1 clients 1 procs max_latency=12.517 ms Throughput 470.99 MB/sec 2 clients 2 procs max_latency=12.419 ms Throughput 841.299 MB/sec 4 clients 4 procs max_latency=37.043 ms Throughput 1240.78 MB/sec 7 clients 7 procs max_latency=77.489 ms Paravirt: VM1: Throughput 222.572 MB/sec 1 clients 1 procs max_latency=7.057 ms Throughput 485.993 MB/sec 2 clients 2 procs max_latency=26.049 ms Throughput 947.095 MB/sec 4 clients 4 procs max_latency=45.338 ms Throughput 1364.26 MB/sec 7 clients 7 procs max_latency=145.124 ms VM2: Throughput 224.128 MB/sec 1 clients 1 procs max_latency=4.564 ms Throughput 501.878 MB/sec 2 clients 2 procs max_latency=11.061 ms Throughput 965.455 MB/sec 4 clients 4 procs max_latency=45.370 ms Throughput 1359.08 MB/sec 7 clients 7 procs max_latency=168.053 ms b) Hackbench with 4 fd 1,000,000 loops +-------+--------------------------------------+----------------------------------------+ |Number |Paravirt |Base | |of +----------+--------+---------+--------+----------+--------+---------+----------+ |Threads|Average1 |Std Dev1|Average2 | Std Dev|Average1 |Std Dev1|Average2 | Std Dev 2| +-------+----------+--------+---------+--------+----------+--------+---------+----------+ | 1 | 3.748 | 0.620 | 3.576 | 0.432 | 4.006 | 0.395 | 3.446 | 0.787 | +-------+----------+--------+---------+--------+----------+--------+---------+----------+ Note that this test was run just to show the interference effect over-subscription can have in baseline c) schbench results with 2 message groups on 8 vCPU VMs +-----------+-------+---------------+--------------+------------+ | | | Paravirt | Base | | +-----------+-------+-------+-------+-------+------+------------+ | |Threads| VM1 | VM2 | VM1 | VM2 |%Improvement| +-----------+-------+-------+-------+-------+------+------------+ |50.0000th | 1 | 52 | 53 | 58 | 54 | +6.25% | |75.0000th | 1 | 69 | 61 | 83 | 59 | +8.45% | |90.0000th | 1 | 80 | 80 | 89 | 83 | +6.98% | |95.0000th | 1 | 83 | 83 | 93 | 87 | +7.78% | |*99.0000th | 1 | 92 | 94 | 99 | 97 | +5.10% | |99.5000th | 1 | 95 | 100 | 102 | 103 | +4.88% | |99.9000th | 1 | 107 | 123 | 105 | 203 | +25.32% | +-----------+-------+-------+-------+-------+------+------------+ |50.0000th | 2 | 56 | 62 | 67 | 59 | +6.35% | |75.0000th | 2 | 69 | 75 | 80 | 71 | +4.64% | |90.0000th | 2 | 80 | 82 | 90 | 81 | +5.26% | |95.0000th | 2 | 85 | 87 | 97 | 91 | +8.51% | |*99.0000th | 2 | 98 | 99 | 107 | 109 | +8.79% | |99.5000th | 2 | 107 | 105 | 109 | 116 | +5.78% | |99.9000th | 2 | 9968 | 609 | 875 | 3116 | -165.02% | +-----------+-------+-------+-------+-------+------+------------+ |50.0000th | 4 | 
78 | 77 | 78 | 79 | +1.27% | |75.0000th | 4 | 98 | 106 | 100 | 104 | 0.00% | |90.0000th | 4 | 987 | 1001 | 995 | 1015 | +1.09% | |95.0000th | 4 | 4136 | 5368 | 5752 | 5192 | +13.16% | |*99.0000th | 4 | 11632 | 11344 | 11024| 10736| -5.59% | |99.5000th | 4 | 12624 | 13040 | 12720| 12144| -3.22% | |99.9000th | 4 | 13168 | 18912 | 14992| 17824| +2.24% | +-----------+-------+-------+-------+-------+------+------------+ Note: Improvement is measured for (VM1+VM2) Signed-off-by: Rohit Jain Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dhaval.giani@oracle.com Cc: matt@codeblueprint.co.uk Cc: steven.sistare@oracle.com Cc: subhra.mazumdar@oracle.com Link: http://lkml.kernel.org/r/1525294330-7759-1-git-send-email-rohit.k.jain@oracle.com Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel/sched') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index ffde9eebc846..71bdb86e07f9 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4028,6 +4028,9 @@ int idle_cpu(int cpu) return 0; #endif + if (vcpu_is_preempted(cpu)) + return 0; + return 1; } -- cgit v1.2.3 From 1378447598432513d94ce2c607c412dc4f260f31 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Fri, 4 May 2018 16:41:09 +0100 Subject: sched/numa: Stagger NUMA balancing scan periods for new threads Threads share an address space and each can change the protections of the same address space to trap NUMA faults. This is redundant and potentially counter-productive as any thread doing the update will suffice. Potentially only one thread is required but that thread may be idle or it may not have any locality concerns and pick an unsuitable scan rate. This patch uses independent scan period but they are staggered based on the number of address space users when the thread is created. The intent is that threads will avoid scanning at the same time and have a chance to adapt their scan rate later if necessary. This reduces the total scan activity early in the lifetime of the threads. The different in headline performance across a range of machines and workloads is marginal but the system CPU usage is reduced as well as overall scan activity. The following is the time reported by NAS Parallel Benchmark using unbound openmp threads and a D size class: 4.17.0-rc1 4.17.0-rc1 vanilla stagger-v1r1 Time bt.D 442.77 ( 0.00%) 419.70 ( 5.21%) Time cg.D 171.90 ( 0.00%) 180.85 ( -5.21%) Time ep.D 33.10 ( 0.00%) 32.90 ( 0.60%) Time is.D 9.59 ( 0.00%) 9.42 ( 1.77%) Time lu.D 306.75 ( 0.00%) 304.65 ( 0.68%) Time mg.D 54.56 ( 0.00%) 52.38 ( 4.00%) Time sp.D 1020.03 ( 0.00%) 903.77 ( 11.40%) Time ua.D 400.58 ( 0.00%) 386.49 ( 3.52%) Note it's not a universal win but we have no prior knowledge of which thread matters but the number of threads created often exceeds the size of the node when the threads are not bound. 
However, there is a reducation of overall system CPU usage: 4.17.0-rc1 4.17.0-rc1 vanilla stagger-v1r1 sys-time-bt.D 48.78 ( 0.00%) 48.22 ( 1.15%) sys-time-cg.D 25.31 ( 0.00%) 26.63 ( -5.22%) sys-time-ep.D 1.65 ( 0.00%) 0.62 ( 62.42%) sys-time-is.D 40.05 ( 0.00%) 24.45 ( 38.95%) sys-time-lu.D 37.55 ( 0.00%) 29.02 ( 22.72%) sys-time-mg.D 47.52 ( 0.00%) 34.92 ( 26.52%) sys-time-sp.D 119.01 ( 0.00%) 109.05 ( 8.37%) sys-time-ua.D 51.52 ( 0.00%) 45.13 ( 12.40%) NUMA scan activity is also reduced: NUMA alloc local 1042828 1342670 NUMA base PTE updates 140481138 93577468 NUMA huge PMD updates 272171 180766 NUMA page range updates 279832690 186129660 NUMA hint faults 1395972 1193897 NUMA hint local faults 877925 855053 NUMA hint local percent 62 71 NUMA pages migrated 12057909 9158023 Similar observations are made for other thread-intensive workloads. System CPU usage is lower even though the headline gains in performance tend to be small. For example, specjbb 2005 shows almost no difference in performance but scan activity is reduced by a third on a 4-socket box. I didn't find a workload (thread intensive or otherwise) that suffered badly. Signed-off-by: Mel Gorman Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Matt Fleming Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Link: http://lkml.kernel.org/r/20180504154109.mvrha2qo5wdl65vr@techsingularity.net Signed-off-by: Ingo Molnar --- kernel/sched/core.c | 22 +--------------------- kernel/sched/fair.c | 41 +++++++++++++++++++++++++++++++++++++++++ kernel/sched/sched.h | 6 ++++++ 3 files changed, 48 insertions(+), 21 deletions(-) (limited to 'kernel/sched') diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 4e0ebae045dc..102c36c317dc 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2177,27 +2177,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) INIT_HLIST_HEAD(&p->preempt_notifiers); #endif -#ifdef CONFIG_NUMA_BALANCING - if (p->mm && atomic_read(&p->mm->mm_users) == 1) { - p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay); - p->mm->numa_scan_seq = 0; - } - - if (clone_flags & CLONE_VM) - p->numa_preferred_nid = current->numa_preferred_nid; - else - p->numa_preferred_nid = -1; - - p->node_stamp = 0ULL; - p->numa_scan_seq = p->mm ? p->mm->numa_scan_seq : 0; - p->numa_scan_period = sysctl_numa_balancing_scan_delay; - p->numa_work.next = &p->numa_work; - p->numa_faults = NULL; - p->last_task_numa_placement = 0; - p->last_sum_exec_runtime = 0; - - p->numa_group = NULL; -#endif /* CONFIG_NUMA_BALANCING */ + init_numa_balancing(clone_flags, p); } DEFINE_STATIC_KEY_FALSE(sched_numa_balancing); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 43c7b45f20be..f32b97d4c63b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1139,6 +1139,47 @@ static unsigned int task_scan_max(struct task_struct *p) return max(smin, smax); } +void init_numa_balancing(unsigned long clone_flags, struct task_struct *p) +{ + int mm_users = 0; + struct mm_struct *mm = p->mm; + + if (mm) { + mm_users = atomic_read(&mm->mm_users); + if (mm_users == 1) { + mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay); + mm->numa_scan_seq = 0; + } + } + p->node_stamp = 0; + p->numa_scan_seq = mm ? 
mm->numa_scan_seq : 0; + p->numa_scan_period = sysctl_numa_balancing_scan_delay; + p->numa_work.next = &p->numa_work; + p->numa_faults = NULL; + p->numa_group = NULL; + p->last_task_numa_placement = 0; + p->last_sum_exec_runtime = 0; + + /* New address space, reset the preferred nid */ + if (!(clone_flags & CLONE_VM)) { + p->numa_preferred_nid = -1; + return; + } + + /* + * New thread, keep existing numa_preferred_nid which should be copied + * already by arch_dup_task_struct but stagger when scans start. + */ + if (mm) { + unsigned int delay; + + delay = min_t(unsigned int, task_scan_max(current), + current->numa_scan_period * mm_users * NSEC_PER_MSEC); + delay += 2 * TICK_NSEC; + p->node_stamp = delay; + } +} + static void account_numa_enqueue(struct rq *rq, struct task_struct *p) { rq->nr_numa_running += (p->numa_preferred_nid != -1); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 15750c222ca2..c9895d35c5f7 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1069,6 +1069,12 @@ enum numa_faults_stats { extern void sched_setnuma(struct task_struct *p, int node); extern int migrate_task_to(struct task_struct *p, int cpu); extern int migrate_swap(struct task_struct *, struct task_struct *); +extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p); +#else +static inline void +init_numa_balancing(unsigned long clone_flags, struct task_struct *p) +{ +} #endif /* CONFIG_NUMA_BALANCING */ #ifdef CONFIG_SMP -- cgit v1.2.3 From 943d355d7feef380e15a95892be3dff1095ef54b Mon Sep 17 00:00:00 2001 From: Rohit Jain Date: Wed, 9 May 2018 09:39:48 -0700 Subject: sched/core: Distinguish between idle_cpu() calls based on desired effect, introduce available_idle_cpu() In the following commit: 247f2f6f3c70 ("sched/core: Don't schedule threads on pre-empted vCPUs") ... we distinguish between idle_cpu() when the vCPU is not running for scheduling threads. However, the idle_cpu() function is used in other places for actually checking whether the state of the CPU is idle or not. Hence split the use of that function based on the desired return value, by introducing the available_idle_cpu() function. This fixes a (slight) regression in that initial vCPU commit, because some code paths (like the load-balancer) don't care and shouldn't care if the vCPU is preempted or not, they just want to know if there's any tasks on the CPU. 
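[Illustrative aside, not part of the patch:] To make the new distinction concrete, here is a minimal standalone C sketch of the two predicates with stubbed-out helpers; it is not the kernel's implementation, only a model of the split. idle_cpu() answers "is there anything to run here?" (what the load balancer wants), while available_idle_cpu() additionally rejects CPUs whose vCPU is currently preempted by the hypervisor (what wakeup placement wants).

/*
 * Standalone toy model (not kernel code); the stubs below invent a small
 * 4-CPU system where CPUs 1 and 2 are idle and CPU 2's vCPU is preempted.
 */
#include <stdbool.h>
#include <stdio.h>

/* Stub: pretend CPU 2's vCPU has been preempted by the host. */
static bool vcpu_is_preempted(int cpu) { return cpu == 2; }

/* Stub: pretend CPUs 1 and 2 have empty runqueues. */
static bool runqueue_is_empty(int cpu) { return cpu == 1 || cpu == 2; }

/* "Does this CPU have any tasks?" -- the load balancer's question. */
static bool idle_cpu(int cpu)
{
        return runqueue_is_empty(cpu);
}

/* "Is this a good place to enqueue work right now?" -- wakeup placement. */
static bool available_idle_cpu(int cpu)
{
        return idle_cpu(cpu) && !vcpu_is_preempted(cpu);
}

int main(void)
{
        for (int cpu = 0; cpu < 4; cpu++)
                printf("cpu%d: idle=%d available_idle=%d\n",
                       cpu, idle_cpu(cpu), available_idle_cpu(cpu));
        return 0;
}

In this toy system CPU 1 is both idle and available, while CPU 2 is idle but not available, which is exactly the case the load balancer should still count as idle and the wakeup path should skip.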
Signed-off-by: Rohit Jain Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: dhaval.giani@oracle.com Cc: linux-kernel@vger.kernel.org Cc: matt@codeblueprint.co.uk Cc: steven.sistare@oracle.com Cc: subhra.mazumdar@oracle.com Link: http://lkml.kernel.org/r/1525883988-10356-1-git-send-email-rohit.k.jain@oracle.com Signed-off-by: Ingo Molnar --- include/linux/sched.h | 1 + kernel/sched/core.c | 14 ++++++++++++++ kernel/sched/fair.c | 20 ++++++++++---------- 3 files changed, 25 insertions(+), 10 deletions(-) (limited to 'kernel/sched') diff --git a/include/linux/sched.h b/include/linux/sched.h index c2413703f45d..959a8588e365 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1504,6 +1504,7 @@ static inline int task_nice(const struct task_struct *p) extern int can_nice(const struct task_struct *p, const int nice); extern int task_curr(const struct task_struct *p); extern int idle_cpu(int cpu); +extern int available_idle_cpu(int cpu); extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); extern int sched_setattr(struct task_struct *, const struct sched_attr *); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 102c36c317dc..d1555185c054 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4009,6 +4009,20 @@ int idle_cpu(int cpu) return 0; #endif + return 1; +} + +/** + * available_idle_cpu - is a given CPU idle for enqueuing work. + * @cpu: the CPU in question. + * + * Return: 1 if the CPU is currently idle. 0 otherwise. + */ +int available_idle_cpu(int cpu) +{ + if (!idle_cpu(cpu)) + return 0; + if (vcpu_is_preempted(cpu)) return 0; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index f32b97d4c63b..748cb054fefd 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5899,8 +5899,8 @@ wake_affine_idle(int this_cpu, int prev_cpu, int sync) * a cpufreq perspective, it's better to have higher utilisation * on one CPU. */ - if (idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu)) - return idle_cpu(prev_cpu) ? prev_cpu : this_cpu; + if (available_idle_cpu(this_cpu) && cpus_share_cache(this_cpu, prev_cpu)) + return available_idle_cpu(prev_cpu) ? 
prev_cpu : this_cpu; if (sync && cpu_rq(this_cpu)->nr_running == 1) return this_cpu; @@ -6143,7 +6143,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this /* Traverse only the allowed CPUs */ for_each_cpu_and(i, sched_group_span(group), &p->cpus_allowed) { - if (idle_cpu(i)) { + if (available_idle_cpu(i)) { struct rq *rq = cpu_rq(i); struct cpuidle_state *idle = idle_get_state(rq); if (idle && idle->exit_latency < min_exit_latency) { @@ -6272,7 +6272,7 @@ void __update_idle_core(struct rq *rq) if (cpu == core) continue; - if (!idle_cpu(cpu)) + if (!available_idle_cpu(cpu)) goto unlock; } @@ -6304,7 +6304,7 @@ static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int for_each_cpu(cpu, cpu_smt_mask(core)) { cpumask_clear_cpu(cpu, cpus); - if (!idle_cpu(cpu)) + if (!available_idle_cpu(cpu)) idle = false; } @@ -6333,7 +6333,7 @@ static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int t for_each_cpu(cpu, cpu_smt_mask(target)) { if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) continue; - if (idle_cpu(cpu)) + if (available_idle_cpu(cpu)) return cpu; } @@ -6396,7 +6396,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t return -1; if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) continue; - if (idle_cpu(cpu)) + if (available_idle_cpu(cpu)) break; } @@ -6416,13 +6416,13 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) struct sched_domain *sd; int i, recent_used_cpu; - if (idle_cpu(target)) + if (available_idle_cpu(target)) return target; /* * If the previous CPU is cache affine and idle, don't be stupid: */ - if (prev != target && cpus_share_cache(prev, target) && idle_cpu(prev)) + if (prev != target && cpus_share_cache(prev, target) && available_idle_cpu(prev)) return prev; /* Check a recently used CPU as a potential idle candidate: */ @@ -6430,7 +6430,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) if (recent_used_cpu != prev && recent_used_cpu != target && cpus_share_cache(recent_used_cpu, target) && - idle_cpu(recent_used_cpu) && + available_idle_cpu(recent_used_cpu) && cpumask_test_cpu(p->recent_used_cpu, &p->cpus_allowed)) { /* * Replace recent_used_cpu with prev as it is a potential -- cgit v1.2.3 From 8ecf04e11283a28ca88b8b8049ac93c3a99fcd2c Mon Sep 17 00:00:00 2001 From: Patrick Bellasi Date: Thu, 24 May 2018 15:10:22 +0100 Subject: sched/cpufreq: Modify aggregate utilization to always include blocked FAIR utilization Since the refactoring introduced by: commit 8f111bc357aa ("cpufreq/schedutil: Rewrite CPUFREQ_RT support") we aggregate FAIR utilization only if this class has runnable tasks. This was mainly due to avoid the risk to stay on an high frequency just because of the blocked utilization of a CPU not being properly decayed while the CPU was idle. However, since: commit 31e77c93e432 ("sched/fair: Update blocked load when newly idle") the FAIR blocked utilization is properly decayed also for IDLE CPUs. This allows us to use the FAIR blocked utilization as a safe mechanism to gracefully reduce the frequency only if no FAIR tasks show up on a CPU for a reasonable period of time. Moreover, we also reduce the frequency drops of CPUs running periodic tasks which, depending on the task periodicity and the time required for a frequency switch, was increasing the chances to introduce some undesirable performance variations. 
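[Illustrative aside, not part of the patch:] A small standalone C model of the aggregation rule described above (illustrative field names and numbers, not the kernel's sugov code) shows how a decaying blocked CFS contribution lets the requested utilization ramp down gradually once the CPU goes idle, instead of collapsing immediately to the DEADLINE share.

/*
 * Standalone toy model (not kernel code): RT pins the request at max;
 * otherwise the request is DL plus all CFS utilization, blocked
 * contributions included, clamped to the CPU's capacity.
 */
#include <stdio.h>

struct cpu_util {
        unsigned long max;              /* CPU capacity */
        unsigned long util_dl;          /* DEADLINE utilization */
        unsigned long util_cfs;         /* CFS utilization, blocked included */
        unsigned int  rt_nr_running;
};

static unsigned long aggregate_util(const struct cpu_util *c)
{
        if (c->rt_nr_running)
                return c->max;

        unsigned long util = c->util_dl + c->util_cfs;
        return util < c->max ? util : c->max;
}

int main(void)
{
        /* An idle CPU whose CFS utilization is still decaying: halving per
         * step is a crude stand-in for PELT decay. */
        struct cpu_util c = { .max = 1024, .util_dl = 0, .util_cfs = 300 };

        for (int step = 0; step < 4; step++, c.util_cfs /= 2)
                printf("step %d: requested util = %lu / %lu\n",
                       step, aggregate_util(&c), c.max);
        return 0;
}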
Reported-by: Vincent Guittot Tested-by: Vincent Guittot Signed-off-by: Patrick Bellasi Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Thomas Gleixner Acked-by: Viresh Kumar Acked-by: Vincent Guittot Cc: Dietmar Eggemann Cc: Joel Fernandes Cc: Juri Lelli Cc: Linus Torvalds Cc: Morten Rasmussen Cc: Peter Zijlstra Cc: Rafael J . Wysocki Cc: Steve Muckle Link: http://lkml.kernel.org/r/20180524141023.13765-2-patrick.bellasi@arm.com Signed-off-by: Ingo Molnar --- kernel/sched/cpufreq_schedutil.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) (limited to 'kernel/sched') diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index e13df951aca7..28592b62b1d5 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -183,22 +183,21 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu) static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu) { struct rq *rq = cpu_rq(sg_cpu->cpu); - unsigned long util; - if (rq->rt.rt_nr_running) { - util = sg_cpu->max; - } else { - util = sg_cpu->util_dl; - if (rq->cfs.h_nr_running) - util += sg_cpu->util_cfs; - } + if (rq->rt.rt_nr_running) + return sg_cpu->max; /* + * Utilization required by DEADLINE must always be granted while, for + * FAIR, we use blocked utilization of IDLE CPUs as a mechanism to + * gracefully reduce the frequency when no tasks show up for longer + * periods of time. + * * Ideally we would like to set util_dl as min/guaranteed freq and * util_cfs + util_dl as requested freq. However, cpufreq is not yet * ready for such an interface. So, we only do the latter for now. */ - return min(util, sg_cpu->max); + return min(sg_cpu->max, (sg_cpu->util_dl + sg_cpu->util_cfs)); } static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time, unsigned int flags) -- cgit v1.2.3 From 2539fc82aa9b07d968cf9ba1ffeec3e0416ac721 Mon Sep 17 00:00:00 2001 From: Patrick Bellasi Date: Thu, 24 May 2018 15:10:23 +0100 Subject: sched/fair: Update util_est before updating schedutil When a task is enqueued the estimated utilization of a CPU is updated to better support the selection of the required frequency. However, schedutil is (implicitly) updated by update_load_avg() which always happens before util_est_{en,de}queue(), thus potentially introducing a latency between estimated utilization updates and frequency selections. Let's update util_est at the beginning of enqueue_task_fair(), which will ensure that all schedutil updates will see the most updated estimated utilization value for a CPU. Reported-by: Vincent Guittot Signed-off-by: Patrick Bellasi Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Thomas Gleixner Acked-by: Viresh Kumar Acked-by: Vincent Guittot Cc: Dietmar Eggemann Cc: Joel Fernandes Cc: Juri Lelli Cc: Linus Torvalds Cc: Morten Rasmussen Cc: Peter Zijlstra Cc: Rafael J . 
Wysocki Cc: Steve Muckle Fixes: 7f65ea42eb00 ("sched/fair: Add util_est on top of PELT") Link: http://lkml.kernel.org/r/20180524141023.13765-3-patrick.bellasi@arm.com Signed-off-by: Ingo Molnar --- kernel/sched/fair.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'kernel/sched') diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 748cb054fefd..e497c05aab7f 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5385,6 +5385,14 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) struct cfs_rq *cfs_rq; struct sched_entity *se = &p->se; + /* + * The code below (indirectly) updates schedutil which looks at + * the cfs_rq utilization to select a frequency. + * Let's add the task's estimated utilization to the cfs_rq's + * estimated utilization, before we update schedutil. + */ + util_est_enqueue(&rq->cfs, p); + /* * If in_iowait is set, the code below may not trigger any cpufreq * utilization updates, so do it here explicitly with the IOWAIT flag @@ -5426,7 +5434,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) if (!se) add_nr_running(rq, 1); - util_est_enqueue(&rq->cfs, p); hrtick_update(rq); } -- cgit v1.2.3
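[Illustrative aside, not part of the patch:] As a toy illustration of why the call order matters here, the standalone C model below uses a made-up utilization-to-frequency mapping in place of the real schedutil hook. With the old order, the frequency is chosen before the task's estimated utilization is added to the runqueue's estimate; with the new order, the estimate is added first, so the request already reflects the incoming task.

/*
 * Standalone toy model (not kernel code) of the enqueue ordering fix.
 * Names and numbers are illustrative stand-ins for util_est/schedutil.
 */
#include <stdio.h>

struct cpu { unsigned long util_est; };

/* Stand-in for the schedutil hook that maps utilization to a frequency. */
static unsigned long pick_freq(const struct cpu *c)
{
        return 1000 + c->util_est;      /* toy mapping: higher util, higher freq */
}

static void util_est_enqueue(struct cpu *c, unsigned long task_util_est)
{
        c->util_est += task_util_est;
}

int main(void)
{
        struct cpu before = { .util_est = 100 }, after = { .util_est = 100 };
        unsigned long task_util = 200;

        /* Old order: frequency chosen before the estimate is added. */
        unsigned long f_old = pick_freq(&before);
        util_est_enqueue(&before, task_util);

        /* New order: add the estimate first, then let "schedutil" look at it. */
        util_est_enqueue(&after, task_util);
        unsigned long f_new = pick_freq(&after);

        printf("old order picks %lu, new order picks %lu\n", f_old, f_new);
        return 0;
}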