author | Ingo Molnar <mingo@kernel.org> | 2018-12-03 11:44:51 +0100
committer | Ingo Molnar <mingo@kernel.org> | 2018-12-18 14:22:28 +0100
commit | c5f48c0a7aa1a8c82d81cdf27e63aa0a5544c6e6 (patch)
tree | 64731929e3abf3b2afbb9ec0a6cbceef8d076db7 /kernel
parent | 989a4222c13a3e148772730d362fceb0727852f5 (diff)
genirq: Fix various typos in comments
Go over the IRQ subsystem source code (including irqchip drivers) and
fix common typos in comments.
No change in functionality intended.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Jason Cooper <jason@lakedaemon.net>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/irq/chip.c | 2
-rw-r--r-- | kernel/irq/ipi.c | 4
-rw-r--r-- | kernel/irq/manage.c | 2
-rw-r--r-- | kernel/irq/spurious.c | 6 |
4 files changed, 7 insertions, 7 deletions
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index a2b3d9de999c..34e969069488 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -929,7 +929,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
 				break;
 			/*
 			 * Bail out if the outer chip is not set up
-			 * and the interrrupt supposed to be started
+			 * and the interrupt supposed to be started
 			 * right away.
 			 */
 			if (WARN_ON(is_chained))
diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
index 8b778e37dc6d..43e3d1be622c 100644
--- a/kernel/irq/ipi.c
+++ b/kernel/irq/ipi.c
@@ -56,7 +56,7 @@ int irq_reserve_ipi(struct irq_domain *domain,
 		unsigned int next;
 
 		/*
-		 * The IPI requires a seperate HW irq on each CPU. We require
+		 * The IPI requires a separate HW irq on each CPU. We require
 		 * that the destination mask is consecutive. If an
 		 * implementation needs to support holes, it can reserve
 		 * several IPI ranges.
@@ -172,7 +172,7 @@ irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
 
 	/*
 	 * Get the real hardware irq number if the underlying implementation
-	 * uses a seperate irq per cpu. If the underlying implementation uses
+	 * uses a separate irq per cpu. If the underlying implementation uses
 	 * a single hardware irq for all cpus then the IPI send mechanism
 	 * needs to take care of the cpu destinations.
 	 */
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 9dbdccab3b6a..a4888ce4667a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -915,7 +915,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
 #endif
 
 /*
- * Interrupts which are not explicitely requested as threaded
+ * Interrupts which are not explicitly requested as threaded
  * interrupts rely on the implicit bh/preempt disable of the hard irq
  * context. So we need to disable bh here to avoid deadlocks and other
  * side effects.
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index d867d6ddafdd..6d2fa6914b30 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -66,7 +66,7 @@ static int try_one_irq(struct irq_desc *desc, bool force)
 	raw_spin_lock(&desc->lock);
 
 	/*
-	 * PER_CPU, nested thread interrupts and interrupts explicitely
+	 * PER_CPU, nested thread interrupts and interrupts explicitly
 	 * marked polled are excluded from polling.
 	 */
 	if (irq_settings_is_per_cpu(desc) ||
@@ -76,7 +76,7 @@ static int try_one_irq(struct irq_desc *desc, bool force)
 
 	/*
 	 * Do not poll disabled interrupts unless the spurious
-	 * disabled poller asks explicitely.
+	 * disabled poller asks explicitly.
 	 */
 	if (irqd_irq_disabled(&desc->irq_data) && !force)
 		goto out;
@@ -292,7 +292,7 @@ void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
 	 * So in case a thread is woken, we just note the fact and
 	 * defer the analysis to the next hardware interrupt.
 	 *
-	 * The threaded handlers store whether they sucessfully
+	 * The threaded handlers store whether they successfully
 	 * handled an interrupt and we check whether that number
 	 * changed versus the last invocation.
 	 *