author    Jens Axboe <jaxboe@fusionio.com>    2011-03-02 11:08:00 -0500
committer Jens Axboe <jaxboe@fusionio.com>    2011-03-10 08:45:54 +0100
commit    3cca6dc1c81e2407928dc4c6105252146fd3924f (patch)
tree      b78b0d93e7c02abdc37e1d5a6204ab6b94d56fd4 /block
parent    53f22956effe1c9e7961b8c6e4362ecca5e460b7 (diff)
download  linux-3cca6dc1c81e2407928dc4c6105252146fd3924f.tar.bz2
block: add API for delaying work/request_fn a little bit
Currently we use plugging for that, but as plugging is going away,
we need an alternative mechanism.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
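For illustration, here is a minimal sketch of how a driver might use the new
call. The names my_dev_request_fn, my_dev_can_queue, my_dev_dispatch and
MY_DELAY_MS are hypothetical, not part of this patch; only blk_delay_queue()
is introduced here. When the hardware is temporarily out of resources, the
request_fn puts the request back and asks the block layer to re-invoke it a
little later, instead of relying on plugging:

    #include <linux/blkdev.h>

    #define MY_DELAY_MS	10	/* hypothetical back-off interval */

    /* Hypothetical request_fn; invoked with q->queue_lock held. */
    static void my_dev_request_fn(struct request_queue *q)
    {
    	struct request *rq;

    	while ((rq = blk_fetch_request(q)) != NULL) {
    		if (!my_dev_can_queue(q->queuedata)) {
    			/*
    			 * Out of resources: requeue and have the
    			 * block layer re-run this request_fn after
    			 * a short delay.
    			 */
    			blk_requeue_request(q, rq);
    			blk_delay_queue(q, MY_DELAY_MS);
    			break;
    		}
    		my_dev_dispatch(q->queuedata, rq);
    	}
    }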
Diffstat (limited to 'block')
-rw-r--r--    block/blk-core.c    29
1 file changed, 29 insertions(+), 0 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 3cc17e6064d6..e958c7a1e462 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -197,6 +197,32 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
}
EXPORT_SYMBOL(blk_dump_rq_flags);
+static void blk_delay_work(struct work_struct *work)
+{
+ struct request_queue *q;
+
+ q = container_of(work, struct request_queue, delay_work.work);
+ spin_lock_irq(q->queue_lock);
+ q->request_fn(q);
+ spin_unlock_irq(q->queue_lock);
+}
+
+/**
+ * blk_delay_queue - restart queueing after defined interval
+ * @q: The &struct request_queue in question
+ * @msecs: Delay in msecs
+ *
+ * Description:
+ * Sometimes queueing needs to be postponed for a little while, to allow
+ * resources to come back. This function will make sure that queueing is
+ * restarted around the specified time.
+ */
+void blk_delay_queue(struct request_queue *q, unsigned long msecs)
+{
+ schedule_delayed_work(&q->delay_work, msecs_to_jiffies(msecs));
+}
+EXPORT_SYMBOL(blk_delay_queue);
+
/*
* "plug" the device if there are no outstanding requests: this will
* force the transfer to start only after we have put all the requests
@@ -363,6 +389,7 @@ EXPORT_SYMBOL(blk_start_queue);
void blk_stop_queue(struct request_queue *q)
{
blk_remove_plug(q);
+ cancel_delayed_work(&q->delay_work);
queue_flag_set(QUEUE_FLAG_STOPPED, q);
}
EXPORT_SYMBOL(blk_stop_queue);
@@ -387,6 +414,7 @@ void blk_sync_queue(struct request_queue *q)
del_timer_sync(&q->timeout);
cancel_work_sync(&q->unplug_work);
throtl_shutdown_timer_wq(q);
+ cancel_delayed_work_sync(&q->delay_work);
}
EXPORT_SYMBOL(blk_sync_queue);
@@ -534,6 +562,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
INIT_LIST_HEAD(&q->flush_queue[1]);
INIT_LIST_HEAD(&q->flush_data_in_flight);
INIT_WORK(&q->unplug_work, blk_unplug_work);
+ INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
kobject_init(&q->kobj, &blk_queue_ktype);
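
The hunks above tie the delayed work into the existing queue lifecycle:
blk_stop_queue() cancels a not-yet-run delay, and blk_sync_queue() waits for
one that is already executing. A hedged teardown sketch, assuming a
hypothetical driver structure (struct my_dev and my_dev_remove are not part
of this patch):

    /* Hypothetical teardown path, using only in-tree block APIs. */
    static void my_dev_remove(struct my_dev *dev)
    {
    	unsigned long flags;

    	/* blk_stop_queue() expects queue_lock held; it also
    	 * cancels a pending (not yet running) delay_work. */
    	spin_lock_irqsave(dev->queue->queue_lock, flags);
    	blk_stop_queue(dev->queue);
    	spin_unlock_irqrestore(dev->queue->queue_lock, flags);

    	/* Wait for timers and work items, including delay_work,
    	 * to finish executing before freeing the queue. */
    	blk_sync_queue(dev->queue);

    	blk_cleanup_queue(dev->queue);
    }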