author    Linus Torvalds <torvalds@linux-foundation.org>  2022-04-17 09:42:03 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-04-17 09:42:03 -0700
commit    7e1777f5ec178542adf0506e5ba2308f5be862a7 (patch)
tree      6e17556baba215426e93d55ce55bcd67ec1cdddb
parent    9a921a6ff7a616863b981220d0a2e0d1de2d050b (diff)
parent    08d835dff916bfe8f45acc7b92c7af6c4081c8a7 (diff)
Merge tag 'irq-urgent-2022-04-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq fix from Thomas Gleixner:

 "A single fix for the interrupt affinity spreading logic to take into
  account that there can be an imbalance between present and possible
  CPUs, which causes already assigned bits to be overwritten"

* tag 'irq-urgent-2022-04-17' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  genirq/affinity: Consider that CPUs on nodes can be unbalanced
 kernel/irq/affinity.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index f7ff8919dc9b..fdf170404650 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -269,8 +269,9 @@ static int __irq_build_affinity_masks(unsigned int startvec,
 	 */
 	if (numvecs <= nodes) {
 		for_each_node_mask(n, nodemsk) {
-			cpumask_or(&masks[curvec].mask, &masks[curvec].mask,
-				   node_to_cpumask[n]);
+			/* Ensure that only CPUs which are in both masks are set */
+			cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
+			cpumask_or(&masks[curvec].mask, &masks[curvec].mask, nmsk);
 			if (++curvec == last_affv)
 				curvec = firstvec;
 		}
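
For illustration, a minimal self-contained userspace sketch of the bug and the
fix. This is not the kernel code: cpumasks are modeled as single unsigned long
words and the node/CPU layout is made up. It only shows why OR-ing the full
per-node mask can set bits for CPUs that are outside the mask being spread in
the current pass, and how intersecting first avoids that.

#include <stdio.h>

int main(void)
{
	unsigned long node_cpus = 0x0FUL; /* node owns CPUs 0-3 (hypothetical) */
	unsigned long cpu_mask  = 0x05UL; /* only CPUs 0 and 2 belong to this
					   * pass (e.g. the present CPUs)     */
	unsigned long vec_mask  = 0UL;    /* affinity mask of the vector       */

	/* Old behaviour: OR in the whole node mask. CPUs 1 and 3 leak in
	 * even though they are not in cpu_mask for this pass. */
	unsigned long old = vec_mask | node_cpus;

	/* Fixed behaviour: intersect with cpu_mask first, then OR. */
	unsigned long fixed = vec_mask | (node_cpus & cpu_mask);

	printf("old:   %#lx\n", old);   /* 0xf - includes disallowed CPUs */
	printf("fixed: %#lx\n", fixed); /* 0x5 - only allowed CPUs        */
	return 0;
}

The kernel patch applies the same idea with real cpumask operations: it first
does cpumask_and() of cpu_mask and node_to_cpumask[n] into the scratch mask
nmsk, and only then cpumask_or()s that intersection into the vector's mask.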