author		Linus Torvalds <torvalds@linux-foundation.org>	2015-07-12 09:15:02 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-07-12 09:15:02 -0700
commit		c4bc680cf7bcd257865dac97dd2debbc9dcffad4 (patch)
tree		ceb5df05f889df160d04ebf952fc4c3e50de7981 /kernel
parent		59c3cb553f5fc4ed6868eeaae6ffd8e1daf6d93e (diff)
parent		a899418167264c7bac574b1a0f1b2c26c5b0995a (diff)
download	linux-c4bc680cf7bcd257865dac97dd2debbc9dcffad4.tar.bz2
Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq fix from Thomas Gleixner:
 "A single fix for a cpu hotplug race vs. interrupt descriptors:

  Prevent irq setup/teardown across the cpu starting/dying parts of cpu
  hotplug so that the starting/dying cpu has a stable view of the
  descriptor space.  This has been an issue for all architectures in
  the cpu dying phase, where interrupts are migrated away from the
  dying cpu.  In the starting phase it's mostly an x86 issue vs. the
  vector space update."

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  hotplug: Prevent alloc/free of irq descriptors during cpu up/down
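The fix works by holding the lock that already serializes irq descriptor
allocation and free across the hotplug-critical sections. A minimal sketch of
the mechanism, assuming the new irq_lock_sparse()/irq_unlock_sparse() helpers
simply wrap the pre-existing sparse_irq_lock mutex in kernel/irq/irqdesc.c (a
sketch of the idea, not the verbatim patch):

/* kernel/irq/irqdesc.c -- sketch, not the verbatim patch */
static DEFINE_MUTEX(sparse_irq_lock);	/* already taken by irq_alloc_descs()/irq_free_descs() */

/*
 * Held by the hotplug core across the cpu starting/dying phases so the
 * coming/going cpu sees a stable irq descriptor space.
 */
void irq_lock_sparse(void)
{
	mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
	mutex_unlock(&sparse_irq_lock);
}

Callers then simply bracket the hotplug phase with irq_lock_sparse() and
irq_unlock_sparse(), as the diff below shows.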
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cpu.c		22
-rw-r--r--	kernel/irq/internals.h	4
2 files changed, 21 insertions, 5 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 9c9c9fab16cc..6a374544d495 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -21,6 +21,7 @@
 #include <linux/suspend.h>
 #include <linux/lockdep.h>
 #include <linux/tick.h>
+#include <linux/irq.h>
 #include <trace/events/power.h>
 
 #include "smpboot.h"
@@ -392,13 +393,19 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	smpboot_park_threads(cpu);
 
 	/*
-	 * So now all preempt/rcu users must observe !cpu_active().
+	 * Prevent irq alloc/free while the dying cpu reorganizes the
+	 * interrupt affinities.
 	 */
+	irq_lock_sparse();
 
+	/*
+	 * So now all preempt/rcu users must observe !cpu_active().
+	 */
 	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
 	if (err) {
 		/* CPU didn't die: tell everyone.  Can't complain. */
 		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
+		irq_unlock_sparse();
 		goto out_release;
 	}
 	BUG_ON(cpu_online(cpu));
@@ -415,6 +422,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
 	per_cpu(cpu_dead_idle, cpu) = false;
 
+	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
+	irq_unlock_sparse();
+
 	hotplug_cpu__broadcast_tick_pull(cpu);
 	/* This actually kills the CPU. */
 	__cpu_die(cpu);
@@ -517,8 +527,18 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen)
 		goto out_notify;
 	}
 
+	/*
+	 * Some architectures have to walk the irq descriptors to
+	 * setup the vector space for the cpu which comes online.
+	 * Prevent irq alloc/free across the bringup.
+	 */
+	irq_lock_sparse();
+
 	/* Arch-specific enabling code. */
 	ret = __cpu_up(cpu, idle);
+
+	irq_unlock_sparse();
+
 	if (ret != 0)
 		goto out_notify;
 	BUG_ON(!cpu_online(cpu));
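For context on why _cpu_down() must hold the lock across __stop_machine(): the
dying cpu walks the irq descriptors to move interrupts elsewhere, and a
concurrent irq_free_descs() could yank a descriptor out from under that walk.
An illustrative sketch of the pattern, loosely modeled on the arch-side
migration code (e.g. x86's fixup_irqs()); the function name is hypothetical
and this is not code from this patch:

/* Hypothetical sketch of the dying-cpu side; not part of this patch. */
static void migrate_irqs_off_dying_cpu(unsigned int cpu)
{
	struct irq_desc *desc;
	unsigned int irq;

	/*
	 * Safe only while the hotplug core holds the sparse irq lock:
	 * otherwise a concurrent irq_free_descs() could free a desc
	 * while this loop is still looking at it.
	 */
	for_each_irq_desc(irq, desc) {
		raw_spin_lock(&desc->lock);
		/* ... retarget the irq if its affinity includes @cpu ... */
		raw_spin_unlock(&desc->lock);
	}
}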
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 4834ee828c41..61008b8433ab 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -76,12 +76,8 @@ extern void unmask_threaded_irq(struct irq_desc *desc);
 
 #ifdef CONFIG_SPARSE_IRQ
 static inline void irq_mark_irq(unsigned int irq) { }
-extern void irq_lock_sparse(void);
-extern void irq_unlock_sparse(void);
 #else
 extern void irq_mark_irq(unsigned int irq);
-static inline void irq_lock_sparse(void) { }
-static inline void irq_unlock_sparse(void) { }
 #endif
 
 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
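The diffstat above is limited to 'kernel', so the counterpart hunk of this
commit is not shown here: the irq_lock_sparse()/irq_unlock_sparse()
declarations leave the irq-internal header because kernel/cpu.c now calls them
(hence the new #include <linux/irq.h> in the first hunk). Presumably the
public header ends up with something like the following; the exact header
name is an assumption, but the shape follows from internals.h losing both the
CONFIG_SPARSE_IRQ externs and the !CONFIG_SPARSE_IRQ stubs:

/* Public-header counterpart (sketch; the include/ hunk is outside the
 * 'kernel' diffstat filter shown above). */
#ifdef CONFIG_SPARSE_IRQ
/* The real lock lives in kernel/irq/irqdesc.c */
extern void irq_lock_sparse(void);
extern void irq_unlock_sparse(void);
#else
/* With a fixed descriptor array there is nothing to serialize */
static inline void irq_lock_sparse(void) { }
static inline void irq_unlock_sparse(void) { }
#endif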