author	Thomas Gleixner <tglx@linutronix.de>	2017-06-20 01:37:20 +0200
committer	Thomas Gleixner <tglx@linutronix.de>	2017-06-22 18:21:13 +0200
commit	8e7b632237df8b17526411d1d98f838580bb6aa3 (patch)
tree	6f2b4b78b13dcd94c62f45c415bea92751240dcf /arch/x86
parent	cdd16365b0bd7c0cd19e2cc768b6bdc8021f32c3 (diff)
download	linux-8e7b632237df8b17526411d1d98f838580bb6aa3.tar.bz2
x86/irq: Cleanup pending irq move in fixup_irqs()
If a CPU goes offline, the interrupts are migrated away, but an
eventually pending interrupt move, which has not yet been made
effective, is kept pending even if the outgoing CPU is the sole target
of the pending affinity mask. What's worse is that the pending affinity
mask is discarded even if it contains a valid subset of the online
CPUs.

Use the newly introduced helper to:

 - Discard a pending move when the outgoing CPU is the only target in
   the pending mask.

 - Use the pending mask instead of the affinity mask to find a valid
   target for the CPU if the pending mask intersects with the online
   CPUs.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Christoph Hellwig <hch@lst.de>
Link: http://lkml.kernel.org/r/20170619235444.774068557@linutronix.de
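The helper referenced above is irq_fixup_move_pending(), added in the
parent commit. A minimal sketch of its contract, reconstructed from the
description above (the exact upstream body may differ):

bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);

	/* Nothing to do if no affinity change is pending. */
	if (!irqd_is_setaffinity_pending(data))
		return false;

	/*
	 * The outgoing CPU may be the last online target in the pending
	 * mask. In that case the pending move can never be carried out,
	 * so discard it.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) {
		irqd_clr_move_pending(data);
		return false;
	}
	if (force_clear)
		irqd_clr_move_pending(data);
	return true;
}

With force_clear == false the caller merely wants a stale pending move
discarded; with force_clear == true the caller takes over the pending
mask itself, as fixup_irqs() does below.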
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/irq.c	25
1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index f34fe7444836..9696007df67b 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -440,9 +440,9 @@ void fixup_irqs(void)
 	int ret;
 
 	for_each_irq_desc(irq, desc) {
+		const struct cpumask *affinity;
 		int break_affinity = 0;
 		int set_affinity = 1;
-		const struct cpumask *affinity;
 
 		if (!desc)
 			continue;
@@ -454,19 +454,36 @@ void fixup_irqs(void)
 
 		data = irq_desc_get_irq_data(desc);
 		affinity = irq_data_get_affinity_mask(data);
+
 		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
 		    cpumask_subset(affinity, cpu_online_mask)) {
+			irq_fixup_move_pending(desc, false);
 			raw_spin_unlock(&desc->lock);
 			continue;
 		}
 
 		/*
-		 * Complete the irq move. This cpu is going down and for
-		 * non intr-remapping case, we can't wait till this interrupt
-		 * arrives at this cpu before completing the irq move.
+		 * Complete an eventually pending irq move cleanup. If this
+		 * interrupt was moved in hard irq context, then the
+		 * vectors need to be cleaned up. It can't wait until this
+		 * interrupt actually happens and this CPU was involved.
 		 */
 		irq_force_complete_move(desc);
 
+		/*
+		 * If there is a setaffinity pending, then try to reuse the
+		 * pending mask, so the last change of the affinity does
+		 * not get lost. If there is no move pending or the pending
+		 * mask does not contain any online CPU, use the current
+		 * affinity mask.
+		 */
+		if (irq_fixup_move_pending(desc, true))
+			affinity = desc->pending_mask;
+
+		/*
+		 * If the mask does not contain any online CPU, break
+		 * affinity and use cpu_online_mask as fall back.
+		 */
 		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
 			break_affinity = 1;
 			affinity = cpu_online_mask;
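
Taken together, the hunk establishes a fallback order for the migration
target: the pending mask if it still intersects the online CPUs,
otherwise the current affinity mask, and cpu_online_mask as the last
resort. A standalone sketch of that decision; the helper name
pick_target_mask() is made up for illustration and is not part of the
patch:

static const struct cpumask *pick_target_mask(struct irq_desc *desc,
					      int *break_affinity)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = irq_data_get_affinity_mask(data);

	/* Prefer a not yet applied affinity update, if still usable. */
	if (irq_fixup_move_pending(desc, true))
		affinity = desc->pending_mask;

	/* No online CPU left in the mask: fall back to all online CPUs. */
	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		*break_affinity = 1;
		affinity = cpu_online_mask;
	}
	return affinity;
}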