author    Mel Gorman <mgorman@techsingularity.net>    2022-05-20 11:35:17 +0100
committer Peter Zijlstra <peterz@infradead.org>       2022-06-13 10:29:59 +0200
commit    13ede33150877d44756171e33570076882b17b0b (patch)
tree      c1ed544c0396affd5bb7a68be5e6e5cd01e34993 /kernel/sched
parent    70ce3ea9aa4ed901c8a90de667df5ef307766e71 (diff)
sched/numa: Do not swap tasks between nodes when spare capacity is available
If a destination node has spare capacity but there is an imbalance then
two tasks are selected for swapping. If the tasks have no NUMA group or
are within the same NUMA group, the swap simply shuffles tasks around
without having any impact on the compute imbalance. Instead, it just
punishes one task to help another.

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
Link: https://lore.kernel.org/r/20220520103519.1863-3-mgorman@techsingularity.net
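For readers outside the tree, a minimal standalone sketch of the rule this
patch adds follows. The struct layouts, the integer numa_group field, and
the skip_swap() helper are simplified stand-ins (assumptions), not the real
task_numa_compare() path; only the node_has_spare comparison mirrors the
hunk below.

	/*
	 * Standalone sketch, not kernel code: models the decision this
	 * patch adds to task_numa_compare().
	 */
	#include <stdbool.h>
	#include <stdio.h>

	enum numa_type { node_has_spare, node_fully_busy, node_overloaded };

	struct numa_stats { enum numa_type node_type; };

	struct task { int numa_group; };	/* 0 = no group (simplified) */

	struct task_numa_env { struct numa_stats dst_stats; };

	/*
	 * Skip the swap when the candidate tasks share a NUMA group (or
	 * both have none) and the destination already has spare capacity:
	 * swapping then only shuffles load, punishing one task to help
	 * the other.
	 */
	static bool skip_swap(struct task_numa_env *env,
			      struct task *p, struct task *cur)
	{
		return cur->numa_group == p->numa_group &&
		       env->dst_stats.node_type == node_has_spare;
	}

	int main(void)
	{
		struct task_numa_env env = { .dst_stats = { node_has_spare } };
		struct task p = { .numa_group = 1 }, cur = { .numa_group = 1 };

		printf("skip swap: %s\n", skip_swap(&env, &p, &cur) ? "yes" : "no");
		return 0;
	}

The underlying point: with spare capacity on the destination, moving p on
its own can address the imbalance, so there is no need to demote cur.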
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/fair.c | 9 +++++++++
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 51836efe5931..23da36c9cacb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1790,6 +1790,15 @@ static bool task_numa_compare(struct task_numa_env *env,
*/
cur_ng = rcu_dereference(cur->numa_group);
if (cur_ng == p_ng) {
+ /*
+ * Do not swap within a group or between tasks that have
+ * no group if there is spare capacity. Swapping does
+ * not address the load imbalance and helps one task at
+ * the cost of punishing another.
+ */
+ if (env->dst_stats.node_type == node_has_spare)
+ goto unlock;
+
imp = taskimp + task_weight(cur, env->src_nid, dist) -
task_weight(cur, env->dst_nid, dist);
/*
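For context on where env->dst_stats.node_type comes from, here is a rough,
simplified model (an assumption, not a line-for-line copy) of the node
classification done by numa_classify() in kernel/sched/fair.c, which buckets
a node as overloaded, fully busy, or having spare capacity. The real code
also factors in runnable load; this sketch keeps only the task-count and
utilization terms.

	#include <stdio.h>

	enum numa_type { node_has_spare, node_fully_busy, node_overloaded };

	struct numa_stats {
		unsigned int nr_running;	/* runnable tasks on the node */
		unsigned int weight;		/* number of CPUs on the node */
		unsigned long util;		/* aggregated CPU utilization */
		unsigned long compute_capacity;	/* aggregated CPU capacity */
	};

	/* Simplified classification; imbalance_pct is typically ~117. */
	static enum numa_type classify(unsigned int imbalance_pct,
				       struct numa_stats *ns)
	{
		/* More tasks than CPUs and utilization over capacity. */
		if (ns->nr_running > ns->weight &&
		    ns->compute_capacity * 100 < ns->util * imbalance_pct)
			return node_overloaded;

		/* Idle CPUs, or utilization comfortably below capacity. */
		if (ns->nr_running < ns->weight ||
		    ns->compute_capacity * 100 > ns->util * imbalance_pct)
			return node_has_spare;

		return node_fully_busy;
	}

	int main(void)
	{
		struct numa_stats ns = { .nr_running = 3, .weight = 8,
					 .util = 200, .compute_capacity = 1024 };

		printf("has spare: %d\n", classify(117, &ns) == node_has_spare);
		return 0;
	}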