| field | value | date |
|---|---|---|
| author | James Morris <jmorris@macbook.(none)> | 2009-12-03 12:03:40 +0530 |
| committer | James Morris <jmorris@macbook.(none)> | 2009-12-03 12:03:40 +0530 |
| commit | c84d6efd363a3948eb32ec40d46bab6338580454 | |
| tree | 3ba7ac46e6626fe8ac843834588609eb6ccee5c6 /kernel/workqueue.c | |
| parent | 7539cf4b92be4aecc573ea962135f246a7a33401 | |
| parent | 22763c5cf3690a681551162c15d34d935308c8d7 | |
| download | linux-c84d6efd363a3948eb32ec40d46bab6338580454.tar.bz2 | |
Merge branch 'master' into next
Diffstat (limited to 'kernel/workqueue.c')
| -rw-r--r-- | kernel/workqueue.c | 35 |
1 file changed, 34 insertions(+), 1 deletion(-)
```diff
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index addfe2df93b1..67e526b6ae81 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -640,6 +640,24 @@ int schedule_delayed_work(struct delayed_work *dwork,
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
+ * flush_delayed_work - block until a dwork_struct's callback has terminated
+ * @dwork: the delayed work which is to be flushed
+ *
+ * Any timeout is cancelled, and any pending work is run immediately.
+ */
+void flush_delayed_work(struct delayed_work *dwork)
+{
+        if (del_timer_sync(&dwork->timer)) {
+                struct cpu_workqueue_struct *cwq;
+                cwq = wq_per_cpu(keventd_wq, get_cpu());
+                __queue_work(cwq, &dwork->work);
+                put_cpu();
+        }
+        flush_work(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work);
+
+/**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
  * @dwork: job to be done
@@ -667,6 +685,7 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 int schedule_on_each_cpu(work_func_t func)
 {
         int cpu;
+        int orig = -1;
         struct work_struct *works;
 
         works = alloc_percpu(struct work_struct);
@@ -674,14 +693,28 @@ int schedule_on_each_cpu(work_func_t func)
                 return -ENOMEM;
 
         get_online_cpus();
+
+        /*
+         * When running in keventd don't schedule a work item on
+         * itself. Can just call directly because the work queue is
+         * already bound. This also is faster.
+         */
+        if (current_is_keventd())
+                orig = raw_smp_processor_id();
+
         for_each_online_cpu(cpu) {
                 struct work_struct *work = per_cpu_ptr(works, cpu);
 
                 INIT_WORK(work, func);
-                schedule_work_on(cpu, work);
+                if (cpu != orig)
+                        schedule_work_on(cpu, work);
         }
+        if (orig >= 0)
+                func(per_cpu_ptr(works, orig));
+
         for_each_online_cpu(cpu)
                 flush_work(per_cpu_ptr(works, cpu));
+
         put_online_cpus();
         free_percpu(works);
         return 0;
```
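For context, here is a minimal sketch of how the new flush_delayed_work() helper is meant to be called. The driver, function, and work-item names (cache_writeback, writeback_work, mark_dirty, dev_suspend) are hypothetical; only the workqueue API calls come from the kernel and this patch.

```c
#include <linux/module.h>
#include <linux/workqueue.h>

/* Deferred write-back of some dirty device state (details elided). */
static void cache_writeback(struct work_struct *work)
{
        /* ... push batched updates to the hardware ... */
}

static DECLARE_DELAYED_WORK(writeback_work, cache_writeback);

/* Callers batch updates: the callback runs at most ~1 second later. */
static void mark_dirty(void)
{
        schedule_delayed_work(&writeback_work, HZ);
}

static void dev_suspend(void)
{
        /*
         * Unlike cancel_delayed_work_sync(), this does not discard a
         * pending update: the timer is cancelled, the work is queued
         * immediately, and we block until cache_writeback() returns.
         */
        flush_delayed_work(&writeback_work);
}
```

The design point is the one stated in the kernel-doc above: flush guarantees the callback has run to completion, whereas cancel guarantees it will not run at all.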
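The schedule_on_each_cpu() change matters to callers like the sketch below, modelled loosely on lru_add_drain_all(); drain_local_cache() and drain_all_caches() are hypothetical names invented for illustration.

```c
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

/* Runs once on every online CPU, bound to that CPU by keventd. */
static void drain_local_cache(struct work_struct *unused)
{
        pr_info("draining per-cpu cache on cpu %d\n", smp_processor_id());
}

int drain_all_caches(void)
{
        /*
         * Blocks until drain_local_cache() has completed on every
         * online CPU. Before this patch, a caller that was itself a
         * keventd work item would queue a work on its own per-CPU
         * keventd queue and then wait for it in flush_work() -- a wait
         * that could never finish while the caller occupied the
         * thread. With the patch, the local CPU's callback is invoked
         * directly, which is also faster.
         */
        return schedule_on_each_cpu(drain_local_cache);
}
```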