author | Oleg Nesterov <oleg@tv-sign.ru> | 2007-05-09 02:34:10 -0700
---|---|---
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-09 12:30:52 -0700
commit | f293ea92007419e4f9c52db0cf57af17f45b9f94 (patch) |
tree | 829d06499c1d9004ca530e5f23de43df27d3baa4 /kernel/workqueue.c |
parent | 7097a87afe937a5879528d52880c2d95f089e96c (diff) |
download | linux-f293ea92007419e4f9c52db0cf57af17f45b9f94.tar.bz2 |
workqueue: don't save interrupts in run_workqueue()
work->func() may sleep, so it's a bug to call run_workqueue() with irqs disabled.
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
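
The change is purely about which locking form run_workqueue() uses. spin_lock_irqsave()/spin_unlock_irqrestore() save and later restore the caller's interrupt state, which is only needed when the function might be entered with interrupts already disabled; spin_lock_irq()/spin_unlock_irq() disable and then unconditionally re-enable interrupts. Because run_workqueue() calls work->func(), which may sleep, it can never legitimately run with interrupts disabled, so the simpler form is sufficient. The sketch below illustrates the two patterns outside the workqueue code; the structure and function names are made up for the example, not taken from the kernel.

```c
#include <linux/spinlock.h>

/* Hypothetical queue, only for illustration. */
struct example_queue {
	spinlock_t lock;
};

/* Save/restore form: safe even if the caller already has IRQs disabled,
 * because the previous IRQ state is stashed in 'flags' and put back on unlock.
 */
static void drain_irqsave(struct example_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	/* ... pop entries off the queue ... */
	spin_unlock_irqrestore(&q->lock, flags);
}

/* Plain irq form: assumes the caller runs with IRQs enabled; unlock
 * unconditionally re-enables them.  This is the right choice when the
 * function's callees may sleep, since sleeping with IRQs off is a bug anyway.
 */
static void drain_irq(struct example_queue *q)
{
	spin_lock_irq(&q->lock);
	/* ... pop entries off the queue ... */
	spin_unlock_irq(&q->lock);
}
```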
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 18 |
1 files changed, 8 insertions, 10 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ea422254f8bf..74f3f7825229 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -227,13 +227,7 @@ EXPORT_SYMBOL_GPL(queue_delayed_work_on);
 
 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
-	unsigned long flags;
-
-	/*
-	 * Keep taking off work from the queue until
-	 * done.
-	 */
-	spin_lock_irqsave(&cwq->lock, flags);
+	spin_lock_irq(&cwq->lock);
 	cwq->run_depth++;
 	if (cwq->run_depth > 3) {
 		/* morton gets to eat his hat */
@@ -248,7 +242,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 
 		cwq->current_work = work;
 		list_del_init(cwq->worklist.next);
-		spin_unlock_irqrestore(&cwq->lock, flags);
+		spin_unlock_irq(&cwq->lock);
 
 		BUG_ON(get_wq_data(work) != cwq);
 		if (!test_bit(WORK_STRUCT_NOAUTOREL, work_data_bits(work)))
@@ -266,11 +260,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 			dump_stack();
 		}
 
-		spin_lock_irqsave(&cwq->lock, flags);
+		spin_lock_irq(&cwq->lock);
 		cwq->current_work = NULL;
 	}
 	cwq->run_depth--;
-	spin_unlock_irqrestore(&cwq->lock, flags);
+	spin_unlock_irq(&cwq->lock);
 }
 
 /*
@@ -399,6 +393,8 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
  */
 void fastcall flush_workqueue(struct workqueue_struct *wq)
 {
+	might_sleep();
+
 	if (is_single_threaded(wq))
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
 	else {
@@ -445,6 +441,8 @@ void flush_work(struct workqueue_struct *wq, struct work_struct *work)
 {
 	struct cpu_workqueue_struct *cwq;
 
+	might_sleep();
+
 	cwq = get_wq_data(work);
 	/* Was it ever queued ? */
 	if (!cwq)
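
The might_sleep() calls added to flush_workqueue() and flush_work() are debug annotations: on kernels built with sleep-in-atomic debugging they emit a warning (with a stack trace) if the function is entered from atomic context, so callers that would hit the same class of bug are caught even when no actual sleep happens on that call. A minimal, hypothetical example of the same pattern in a function that may block:

```c
#include <linux/kernel.h>
#include <linux/mutex.h>

/* Hypothetical helper, not part of this patch: a function that may block
 * announces it up front so debug kernels warn if it is ever called while
 * atomic (spinlock held, IRQs disabled, etc.), even on paths that happen
 * not to block.
 */
static void example_flush(struct mutex *lock)
{
	might_sleep();

	mutex_lock(lock);	/* may sleep waiting for the lock */
	/* ... wait for pending items to finish ... */
	mutex_unlock(lock);
}
```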