path: root/kernel/sched/rt.c
author    Peter Zijlstra <peterz@infradead.org>  2020-10-01 15:54:14 +0200
committer Peter Zijlstra <peterz@infradead.org>  2020-11-10 18:39:00 +0100
commit    14e292f8d45380c519a83d9b0f37089a17eedcdf (patch)
tree      50b8a4826a5fa1a806304a497930b46e8282d0ae /kernel/sched/rt.c
parent    3015ef4b98f53fe7eba4f5f82f562c0e074d213c (diff)
download  linux-14e292f8d45380c519a83d9b0f37089a17eedcdf.tar.bz2
sched,rt: Use cpumask_any*_distribute()
Replace a bunch of cpumask_any*() instances with cpumask_any*_distribute(). By injecting this little bit of randomness into the CPU selection, we reduce the chance that two competing balance operations working off the same lowest_mask pick the same CPU.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Reviewed-by: Daniel Bristot de Oliveira <bristot@redhat.com>
Link: https://lkml.kernel.org/r/20201023102347.190759694@infradead.org
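For context, here is a minimal user-space sketch of the rotor idea behind cpumask_any*_distribute(): instead of always returning the first set bit (as cpumask_first_and() does), remember the previous pick and resume the scan after it, so successive callers working off the same mask spread out. The cpumask_t bitmask and the mask_next()/mask_first()/mask_any_distribute() helpers below are illustrative stand-ins, not the kernel's per-cpu implementation.

/*
 * Sketch only: a toy bitmask and a static rotor stand in for the
 * kernel's struct cpumask and per-cpu rotor state.
 */
#include <stdio.h>

#define NR_CPUS 8
#define nr_cpu_ids NR_CPUS

typedef unsigned int cpumask_t;    /* one bit per CPU */

/* First set bit at or after 'start'; nr_cpu_ids if none. */
static int mask_next(int start, cpumask_t mask)
{
    for (int cpu = start; cpu < NR_CPUS; cpu++)
        if (mask & (1u << cpu))
            return cpu;
    return nr_cpu_ids;
}

/* Deterministic pick: always the lowest set bit, like cpumask_first_and(). */
static int mask_first(cpumask_t mask)
{
    return mask_next(0, mask);
}

/* Rotor pick: resume the scan after the previous result, wrapping around. */
static int mask_any_distribute(cpumask_t mask)
{
    static int prev = -1;          /* the kernel keeps this per-cpu */
    int cpu = mask_next(prev + 1, mask);

    if (cpu >= nr_cpu_ids)         /* ran past the end: wrap */
        cpu = mask_first(mask);
    if (cpu < nr_cpu_ids)
        prev = cpu;
    return cpu;
}

int main(void)
{
    cpumask_t lowest_mask = 0x36;  /* CPUs 1, 2, 4 and 5 are eligible */

    /* Two back-to-back "balance operations" with each strategy. */
    printf("first_and:  %d %d\n", mask_first(lowest_mask),
           mask_first(lowest_mask));

    int a = mask_any_distribute(lowest_mask);
    int b = mask_any_distribute(lowest_mask);
    printf("distribute: %d %d\n", a, b);
    return 0;
}

With the deterministic pick both passes land on CPU 1; the rotor returns 1 and then 2, avoiding exactly the collision the commit message describes.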
Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--  kernel/sched/rt.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 40a46639f78a..2525a1beed26 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1752,8 +1752,8 @@ static int find_lowest_rq(struct task_struct *task)
 				return this_cpu;
 			}
 
-			best_cpu = cpumask_first_and(lowest_mask,
-						     sched_domain_span(sd));
+			best_cpu = cpumask_any_and_distribute(lowest_mask,
+							      sched_domain_span(sd));
 			if (best_cpu < nr_cpu_ids) {
 				rcu_read_unlock();
 				return best_cpu;
@@ -1770,7 +1770,7 @@ static int find_lowest_rq(struct task_struct *task)
 	if (this_cpu != -1)
 		return this_cpu;
 
-	cpu = cpumask_any(lowest_mask);
+	cpu = cpumask_any_distribute(lowest_mask);
 	if (cpu < nr_cpu_ids)
 		return cpu;