From c8446b75be6f85b3d40066922876cb7adc948afb Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Tue, 30 Oct 2012 13:33:54 +0100
Subject: irq_work: Fix racy IRQ_WORK_BUSY flag setting

The IRQ_WORK_BUSY flag is set right before we execute the work. Once
this flag value is set, the work enters a claimable state again.

So if we have specific data to compute in our work, we ensure it's
either handled by another CPU or locally by enqueuing the work again.
This state machine is guaranteed by atomic operations on the flags.

So when we set IRQ_WORK_BUSY without using an xchg-like operation, we
break this guarantee as in the following summarized scenario:

        CPU 1                                   CPU 2
        -----                                   -----
        (flags = 0)
        old_flags = flags;
        (flags = 0)
        cmpxchg(flags, old_flags,
                old_flags | IRQ_WORK_FLAGS)
        (flags = 3)
        [...]
        flags = IRQ_WORK_BUSY
        (flags = 2)
        func()
                                                (sees flags = 3)
                                                cmpxchg(flags, old_flags,
                                                        old_flags | IRQ_WORK_FLAGS)
                                                (give up)

        cmpxchg(flags, 2, 0);
        (flags = 0)

CPU 1 claims a work and executes it, so it sets IRQ_WORK_BUSY and the
work is again in a claimable state. Now CPU 2 has new data to process
and tries to claim that work, but it may see a stale value of the flags
and think the work is still pending somewhere that will handle our data.
This is because CPU 1 doesn't set IRQ_WORK_BUSY atomically.

As a result, the data expected to be handled by CPU 2 won't get handled.

To fix this, use xchg() to set IRQ_WORK_BUSY. This way we ensure that
CPU 2 will see the correct value with cmpxchg() using the expected
ordering.

Changelog-heavily-inspired-by: Steven Rostedt
Signed-off-by: Frederic Weisbecker
Acked-by: Steven Rostedt
Cc: Peter Zijlstra
Cc: Ingo Molnar
Cc: Thomas Gleixner
Cc: Andrew Morton
Cc: Paul Gortmaker
Cc: Anish Kumar
---
 kernel/irq_work.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'kernel/irq_work.c')

diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 1588e3b2871b..57be1a6cd8da 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -119,8 +119,11 @@ void irq_work_run(void)
 		/*
 		 * Clear the PENDING bit, after this point the @work
 		 * can be re-used.
+		 * Make it immediately visible so that other CPUs trying
+		 * to claim that work don't rely on us to handle their data
+		 * while we are in the middle of the func.
 		 */
-		work->flags = IRQ_WORK_BUSY;
+		xchg(&work->flags, IRQ_WORK_BUSY);
 		work->func(work);
 		/*
 		 * Clear the BUSY bit and return to the free state if
--
cgit v1.2.3
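For readers without the tree at hand, the runner side of this protocol can
be modeled in plain C11 atomics (an illustrative sketch, not kernel code;
the names and the seq_cst mapping are ours). The point of the patch is that
the BUSY transition must be a full-barrier atomic operation, which
atomic_exchange() mirrors:

        #include <stdatomic.h>

        #define IRQ_WORK_PENDING 1UL
        #define IRQ_WORK_BUSY    2UL

        struct work_model {
                atomic_ulong flags;
                void (*func)(struct work_model *);
        };

        /* Runner side of the protocol, modeled after the fixed code path. */
        static void run_work(struct work_model *w)
        {
                /*
                 * Full-barrier RMW: clears PENDING and publishes BUSY in one
                 * atomic step. A plain "w->flags = IRQ_WORK_BUSY" store would
                 * let a concurrent claimer read a stale PENDING value and
                 * wrongly assume its data will still be processed.
                 */
                atomic_exchange(&w->flags, IRQ_WORK_BUSY);

                w->func(w);

                /* Return to the free state unless someone re-claimed us. */
                unsigned long busy = IRQ_WORK_BUSY;
                atomic_compare_exchange_strong(&w->flags, &busy, 0UL);
        }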
From e0bbe2d80c415bd4063d894ec2ccb336788af814 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Sat, 27 Oct 2012 15:21:36 +0200
Subject: irq_work: Fix racy check on work pending flag

Work claiming wants to be SMP-safe.

And by the time we try to claim a work, if it is already executing
concurrently on another CPU, we want to succeed in claiming it and
queue the work again because the other CPU may have missed the data we
wanted to handle in our work if it's about to complete there.

This scenario is summarized below:

        CPU 1                                   CPU 2
        -----                                   -----
        (flags = 0)
        cmpxchg(flags, 0, IRQ_WORK_FLAGS)
        (flags = 3)
        [...]
        xchg(flags, IRQ_WORK_BUSY)
        (flags = 2)
        func()
                                                if (flags & IRQ_WORK_PENDING)
                                                        (not true)
                                                cmpxchg(flags, flags,
                                                        IRQ_WORK_FLAGS)
                                                (flags = 3)
                                                [...]
        cmpxchg(flags, IRQ_WORK_BUSY, 0);
        (fail, pending on CPU 2)

This state machine is synchronized using [cmp]xchg() on the flags.
As such, the early IRQ_WORK_PENDING check in CPU 2 above is racy.
By the time we check it, we may be dealing with a stale value because
we aren't using an atomic accessor. As a result, CPU 2 may "see" that
the work is still pending on another CPU while it may actually be
completing the work function execution already, leaving our data
unprocessed.

To fix this, we start by speculating about the value we wish to find in
work->flags, but we only draw conclusions from the value returned by the
cmpxchg() call, which either claims the work or lets the current owner
handle the pending work for us.

Changelog-heavily-inspired-by: Steven Rostedt
Signed-off-by: Frederic Weisbecker
Acked-by: Steven Rostedt
Cc: Peter Zijlstra
Cc: Ingo Molnar
Cc: Thomas Gleixner
Cc: Andrew Morton
Cc: Paul Gortmaker
Cc: Anish Kumar
---
 kernel/irq_work.c | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

(limited to 'kernel/irq_work.c')

diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 57be1a6cd8da..64eddd59ed83 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -34,15 +34,21 @@ static DEFINE_PER_CPU(struct llist_head, irq_work_list);
  */
 static bool irq_work_claim(struct irq_work *work)
 {
-	unsigned long flags, nflags;
+	unsigned long flags, oflags, nflags;
 
+	/*
+	 * Start with our best wish as a premise but only trust any
+	 * flag value after cmpxchg() result.
+	 */
+	flags = work->flags & ~IRQ_WORK_PENDING;
 	for (;;) {
-		flags = work->flags;
-		if (flags & IRQ_WORK_PENDING)
-			return false;
 		nflags = flags | IRQ_WORK_FLAGS;
-		if (cmpxchg(&work->flags, flags, nflags) == flags)
+		oflags = cmpxchg(&work->flags, flags, nflags);
+		if (oflags == flags)
 			break;
+		if (oflags & IRQ_WORK_PENDING)
+			return false;
+		flags = oflags;
 		cpu_relax();
 	}
--
cgit v1.2.3
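The claimer side after this fix, in the same illustrative C11 model (ours,
not the kernel's code): the initial read is only a guess, and every decision
is based on what the compare-and-swap actually returned. C11's
compare_exchange writes the observed value back into its "expected" argument
on failure, playing the role of the kernel's oflags:

        #include <stdatomic.h>
        #include <stdbool.h>

        #define IRQ_WORK_PENDING 1UL
        #define IRQ_WORK_BUSY    2UL
        #define IRQ_WORK_FLAGS   (IRQ_WORK_PENDING | IRQ_WORK_BUSY)

        /* Claimer side, modeled after the fixed irq_work_claim(). */
        static bool claim_work(atomic_ulong *flags)
        {
                /* Best wish as a premise: assume PENDING is clear. */
                unsigned long expected = atomic_load(flags) & ~IRQ_WORK_PENDING;

                for (;;) {
                        unsigned long observed = expected;

                        /* On failure, 'observed' is overwritten with the real value. */
                        if (atomic_compare_exchange_strong(flags, &observed,
                                                           expected | IRQ_WORK_FLAGS))
                                return true;    /* claimed: safe to enqueue */
                        if (observed & IRQ_WORK_PENDING)
                                return false;   /* pending owner will see our data */
                        expected = observed;    /* retry from a trusted value */
                }
        }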
From 00b42959106a9ca1c2899e591ae4e9a83ad6af05 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Wed, 7 Nov 2012 21:03:07 +0100
Subject: irq_work: Don't stop the tick with pending works

Don't stop the tick if we have pending irq works on the queue,
otherwise, if the arch can't raise self-IPIs, we may not find an
opportunity to execute the pending works for a while.

Signed-off-by: Frederic Weisbecker
Acked-by: Steven Rostedt
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: Andrew Morton
Cc: Paul Gortmaker
---
 include/linux/irq_work.h | 6 ++++++
 kernel/irq_work.c        | 11 +++++++++++
 kernel/time/tick-sched.c | 3 ++-
 3 files changed, 19 insertions(+), 1 deletion(-)

(limited to 'kernel/irq_work.c')

diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 6a9e8f5399e2..a69704f37204 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -20,4 +20,10 @@ bool irq_work_queue(struct irq_work *work);
 void irq_work_run(void);
 void irq_work_sync(struct irq_work *work);
 
+#ifdef CONFIG_IRQ_WORK
+bool irq_work_needs_cpu(void);
+#else
+static bool irq_work_needs_cpu(void) { return false; }
+#endif
+
 #endif /* _LINUX_IRQ_WORK_H */

diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 64eddd59ed83..b3c113a14727 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -99,6 +99,17 @@ bool irq_work_queue(struct irq_work *work)
 }
 EXPORT_SYMBOL_GPL(irq_work_queue);
 
+bool irq_work_needs_cpu(void)
+{
+	struct llist_head *this_list;
+
+	this_list = &__get_cpu_var(irq_work_list);
+	if (llist_empty(this_list))
+		return false;
+
+	return true;
+}
+
 /*
  * Run the irq_work entries on this cpu. Requires to be ran from hardirq
  * context with local IRQs disabled.

diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 9e945aa090ac..f249e8c3e58e 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -20,6 +20,7 @@
 #include <linux/profile.h>
 #include <linux/sched.h>
 #include <linux/module.h>
+#include <linux/irq_work.h>
 
 #include <asm/irq_regs.h>
 
@@ -289,7 +290,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	} while (read_seqretry(&xtime_lock, seq));
 
 	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
-	    arch_needs_cpu(cpu)) {
+	    arch_needs_cpu(cpu) || irq_work_needs_cpu()) {
 		next_jiffies = last_jiffies + 1;
 		delta_jiffies = 1;
 	} else {
--
cgit v1.2.3
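The reason the tick matters here: arch_irq_work_raise() is a weak no-op
unless the architecture overrides it with a real self-IPI, so on such
machines the timer tick is the only guaranteed execution point for queued
work. A minimal stand-alone illustration of that weak-symbol pattern
(GCC/Clang; the message text is ours, not a kernel log):

        #include <stdio.h>

        /*
         * Default, overridable definition: an architecture with a self-IPI
         * links in a strong version of this symbol instead, exactly as the
         * kernel's __weak arch_irq_work_raise() works.
         */
        __attribute__((weak)) void arch_irq_work_raise(void)
        {
                /* No self-IPI: the queued work must wait for the next tick. */
                puts("no self-IPI available, deferring to the timer tick");
        }

        int main(void)
        {
                arch_irq_work_raise();
                return 0;
        }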
From c0e980a4bd7fc5c9b748f2f0209d2a48c0fdf0ab Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Thu, 15 Nov 2012 11:34:21 -0500
Subject: irq_work: Flush work on CPU_DYING

In order not to offline a CPU with pending irq works, flush the queue
from CPU_DYING. The notifier is called by stop_machine on the CPU that
is going down. The code will not be called from irq context (so things
like get_irq_regs() won't work) but I'm not sure what the requirements
are for irq_work in that regard (Peter?). But irqs are disabled and the
CPU is about to go offline. Might as well flush the work.

Signed-off-by: Steven Rostedt
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: Andrew Morton
Cc: Paul Gortmaker
Signed-off-by: Frederic Weisbecker
---
 kernel/irq_work.c | 51 +++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 45 insertions(+), 6 deletions(-)

(limited to 'kernel/irq_work.c')

diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index b3c113a14727..4ed17490f629 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -12,6 +12,8 @@
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
 #include <linux/irqflags.h>
+#include <linux/cpu.h>
+#include <linux/notifier.h>
 #include <asm/processor.h>
 
 /*
@@ -110,11 +112,7 @@ bool irq_work_needs_cpu(void)
 	return true;
 }
 
-/*
- * Run the irq_work entries on this cpu. Requires to be ran from hardirq
- * context with local IRQs disabled.
- */
-void irq_work_run(void)
+static void __irq_work_run(void)
 {
 	struct irq_work *work;
 	struct llist_head *this_list;
@@ -124,7 +122,6 @@ void irq_work_run(void)
 	if (llist_empty(this_list))
 		return;
 
-	BUG_ON(!in_irq());
 	BUG_ON(!irqs_disabled());
 
 	llnode = llist_del_all(this_list);
@@ -149,6 +146,16 @@ void irq_work_run(void)
 		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
 	}
 }
+
+/*
+ * Run the irq_work entries on this cpu. Requires to be ran from hardirq
+ * context with local IRQs disabled.
+ */
+void irq_work_run(void)
+{
+	BUG_ON(!in_irq());
+	__irq_work_run();
+}
 EXPORT_SYMBOL_GPL(irq_work_run);
 
 /*
@@ -163,3 +170,35 @@ void irq_work_sync(struct irq_work *work)
 		cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int irq_work_cpu_notify(struct notifier_block *self,
+			       unsigned long action, void *hcpu)
+{
+	long cpu = (long)hcpu;
+
+	switch (action) {
+	case CPU_DYING:
+		/* Called from stop_machine */
+		if (WARN_ON_ONCE(cpu != smp_processor_id()))
+			break;
+		__irq_work_run();
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpu_notify;
+
+static __init int irq_work_init_cpu_notifier(void)
+{
+	cpu_notify.notifier_call = irq_work_cpu_notify;
+	cpu_notify.priority = 0;
+	register_cpu_notifier(&cpu_notify);
+	return 0;
+}
+device_initcall(irq_work_init_cpu_notifier);
+
+#endif /* CONFIG_HOTPLUG_CPU */
--
cgit v1.2.3

From 8aa2accee41f7045dc904fa41d4475b2f6ffae3e Mon Sep 17 00:00:00 2001
From: Steven Rostedt
Date: Thu, 15 Nov 2012 12:52:44 -0500
Subject: irq_work: Warn if there's still work on cpu_down

If we are in nohz and there's still irq_work to be done when the idle
task is about to go offline, give a nasty warning. Everything should
have been flushed from the CPU_DYING notifier already. Further attempts
to enqueue an irq_work are buggy because irqs are disabled by
__cpu_disable(). The best we can do is to report the issue to the user.

Signed-off-by: Steven Rostedt
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: Andrew Morton
Cc: Paul Gortmaker
Signed-off-by: Frederic Weisbecker
---
 kernel/irq_work.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'kernel/irq_work.c')

diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 4ed17490f629..480f74715ba9 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -109,6 +109,9 @@ bool irq_work_needs_cpu(void)
 	if (llist_empty(this_list))
 		return false;
 
+	/* All work should have been flushed before going offline */
+	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
+
 	return true;
 }
--
cgit v1.2.3
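The flush these two patches rely on is cheap because the per-CPU queue is a
lock-free llist: producers push with a compare-and-swap loop and the consumer
detaches the whole list in one atomic exchange. A rough user-space model of
those two operations (illustrative only; the kernel's llist API differs in
detail):

        #include <stdatomic.h>
        #include <stddef.h>

        struct node {
                struct node *next;
        };

        static _Atomic(struct node *) list_head;

        /* Producer: NMI-safe push, as llist_add() does for irq_work_queue(). */
        static void push(struct node *n)
        {
                struct node *first = atomic_load(&list_head);

                do {
                        n->next = first;
                } while (!atomic_compare_exchange_weak(&list_head, &first, n));
        }

        /*
         * Consumer: detach the whole queue at once, as llist_del_all() does.
         * Entries pushed after this point belong to the next run, which is
         * why a single __irq_work_run() call empties the dying CPU's queue.
         */
        static struct node *pop_all(void)
        {
                return atomic_exchange(&list_head, NULL);
        }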
From bc6679aef673f9dcb8f718528fc3df49ff661af9 Mon Sep 17 00:00:00 2001
From: Frederic Weisbecker
Date: Fri, 19 Oct 2012 16:43:41 -0400
Subject: irq_work: Make self-IPIs optable

On irq work initialization, let the user choose to define it as "lazy"
or not. "Lazy" means that we don't want to send an IPI (provided the
arch supports them anyway) when we enqueue this work, but we rather
prefer to wait for the next timer tick to execute our work if possible.

This is going to be a benefit for non-urgent enqueuers (like printk in
the future) that may prefer not to raise an IPI storm in case of
frequent enqueuing over short periods of time.

Signed-off-by: Frederic Weisbecker
Acked-by: Steven Rostedt
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: Andrew Morton
Cc: Paul Gortmaker
---
 include/linux/irq_work.h | 14 ++++++++++++++
 kernel/irq_work.c        | 47 +++++++++++++++++++++++++++--------------------
 2 files changed, 41 insertions(+), 20 deletions(-)

(limited to 'kernel/irq_work.c')

diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index a69704f37204..b28eb60c8bf6 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -3,6 +3,20 @@
 
 #include <linux/llist.h>
 
+/*
+ * An entry can be in one of four states:
+ *
+ * free	     NULL, 0 -> {claimed}       : free to be used
+ * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
+ * pending   next, 3 -> {busy}          : queued, pending callback
+ * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
+ */
+
+#define IRQ_WORK_PENDING	1UL
+#define IRQ_WORK_BUSY		2UL
+#define IRQ_WORK_FLAGS		3UL
+#define IRQ_WORK_LAZY		4UL /* Doesn't want IPI, wait for tick */
+
 struct irq_work {
 	unsigned long flags;
 	struct llist_node llnode;

diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 480f74715ba9..7f3a59bc8e3d 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -12,24 +12,15 @@
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
 #include <linux/irqflags.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <asm/processor.h>
 
-/*
- * An entry can be in one of four states:
- *
- * free	     NULL, 0 -> {claimed}       : free to be used
- * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
- * pending   next, 3 -> {busy}          : queued, pending callback
- * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
- */
-
-#define IRQ_WORK_PENDING	1UL
-#define IRQ_WORK_BUSY		2UL
-#define IRQ_WORK_FLAGS		3UL
 
 static DEFINE_PER_CPU(struct llist_head, irq_work_list);
+static DEFINE_PER_CPU(int, irq_work_raised);
 
 /*
  * Claim the entry so that no one else will poke at it.
@@ -69,14 +60,19 @@
  */
 static void __irq_work_queue(struct irq_work *work)
 {
-	bool empty;
-
 	preempt_disable();
 
-	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
-	/* The list was empty, raise self-interrupt to start processing. */
-	if (empty)
-		arch_irq_work_raise();
+	llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
+
+	/*
+	 * If the work is not "lazy" or the tick is stopped, raise the irq
+	 * work interrupt (if supported by the arch), otherwise, just wait
+	 * for the next tick.
+	 */
+	if (!(work->flags & IRQ_WORK_LAZY) || tick_nohz_tick_stopped()) {
+		if (!this_cpu_cmpxchg(irq_work_raised, 0, 1))
+			arch_irq_work_raise();
+	}
 
 	preempt_enable();
 }
@@ -117,10 +113,19 @@ bool irq_work_needs_cpu(void)
 
 static void __irq_work_run(void)
 {
+	unsigned long flags;
 	struct irq_work *work;
 	struct llist_head *this_list;
 	struct llist_node *llnode;
 
+	/*
+	 * Reset the "raised" state right before we check the list because
+	 * an NMI may enqueue after we find the list empty from the runner.
+	 */
+	__this_cpu_write(irq_work_raised, 0);
+	barrier();
+
 	this_list = &__get_cpu_var(irq_work_list);
 	if (llist_empty(this_list))
 		return;
@@ -140,13 +145,15 @@ static void __irq_work_run(void)
 		 * to claim that work don't rely on us to handle their data
 		 * while we are in the middle of the func.
 		 */
-		xchg(&work->flags, IRQ_WORK_BUSY);
+		flags = work->flags & ~IRQ_WORK_PENDING;
+		xchg(&work->flags, flags);
+
 		work->func(work);
 		/*
 		 * Clear the BUSY bit and return to the free state if
 		 * no-one else claimed it meanwhile.
 		 */
-		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
+		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
 	}
 }
--
cgit v1.2.3
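A hypothetical usage sketch of the new flag (the structure is initialized by
hand here; this series does not add a dedicated init helper for lazy works,
and the callback and function names below are ours):

        #include <linux/irq_work.h>

        static void my_deferred_report(struct irq_work *work)
        {
                /* Runs from the irq_work interrupt or, being lazy, the next tick. */
        }

        /*
         * IRQ_WORK_LAZY: don't send a self-IPI on enqueue; the running tick
         * will pick the work up, and __irq_work_queue() still raises the
         * interrupt when the tick is stopped.
         */
        static struct irq_work my_work = {
                .flags = IRQ_WORK_LAZY,
                .func  = my_deferred_report,
        };

        void my_frequent_event(void)
        {
                /* Cheap on hot paths: no IPI storm from repeated enqueues. */
                irq_work_queue(&my_work);
        }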
From c02cf5f8ed6137e2b3b2f10e0fca336e06e09ba4 Mon Sep 17 00:00:00 2001
From: anish kumar
Date: Sun, 3 Feb 2013 22:08:23 +0100
Subject: irq_work: Remove return value from the irq_work_queue() function

No one is using the return value of irq_work_queue(), so it is better
to just make it void.

Signed-off-by: anish kumar
Acked-by: Steven Rostedt
[ Fix stale comments, remove now unnecessary __irq_work_queue() intermediate function ]
Signed-off-by: Frederic Weisbecker
Link: http://lkml.kernel.org/r/1359925703-24304-1-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar
---
 include/linux/irq_work.h |  2 +-
 kernel/irq_work.c        | 31 ++++++++++---------------------
 2 files changed, 11 insertions(+), 22 deletions(-)

(limited to 'kernel/irq_work.c')

diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index 6a9e8f5399e2..ce60c084635b 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -16,7 +16,7 @@ void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
 	work->func = func;
 }
 
-bool irq_work_queue(struct irq_work *work);
+void irq_work_queue(struct irq_work *work);
 
 void irq_work_run(void);
 void irq_work_sync(struct irq_work *work);

diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 64eddd59ed83..c9d7478e4889 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -63,12 +63,20 @@ void __weak arch_irq_work_raise(void)
 }
 
 /*
- * Queue the entry and raise the IPI if needed.
+ * Enqueue the irq_work @entry unless it's already pending
+ * somewhere.
+ *
+ * Can be re-enqueued while the callback is still in progress.
  */
-static void __irq_work_queue(struct irq_work *work)
+void irq_work_queue(struct irq_work *work)
 {
 	bool empty;
 
+	/* Only queue if not already pending */
+	if (!irq_work_claim(work))
+		return;
+
+	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
 
 	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
@@ -78,25 +86,6 @@ static void __irq_work_queue(struct irq_work *work)
 
 	preempt_enable();
 }
-
-/*
- * Enqueue the irq_work @entry, returns true on success, failure when the
- * @entry was already enqueued by someone else.
- *
- * Can be re-enqueued while the callback is still in progress.
- */
-bool irq_work_queue(struct irq_work *work)
-{
-	if (!irq_work_claim(work)) {
-		/*
-		 * Already enqueued, can't do!
-		 */
-		return false;
-	}
-
-	__irq_work_queue(work);
-	return true;
-}
 EXPORT_SYMBOL_GPL(irq_work_queue);
--
cgit v1.2.3
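After this change, enqueueing is fire-and-forget: if the work is already
pending, irq_work_claim() fails internally and the single queued instance
handles the data. A typical deferral pattern from NMI-like context might
look as follows (a sketch; the subsystem names are ours):

        #include <linux/irq_work.h>

        static void wakeup_func(struct irq_work *work)
        {
                /* Safe context again: can take locks, wake tasks, etc. */
        }

        static struct irq_work wakeup_work;

        void my_subsys_init(void)
        {
                init_irq_work(&wakeup_work, wakeup_func);
        }

        void my_nmi_handler(void)
        {
                /* No return value to check anymore: just enqueue and go. */
                irq_work_queue(&wakeup_work);
        }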