author		Tejun Heo <tj@kernel.org>	2012-03-05 13:14:58 -0800
committer	Jens Axboe <axboe@kernel.dk>	2012-03-06 21:27:21 +0100
commit		d732580b4eb31553c63744a47d590f770cafb8f0 (patch)
tree		ea4e8e21df1b639603693e6f5fdfc5a620cd8737 /block/blk-core.c
parent		b2fab5acd28ead6f0dd6c3996ba23f0ef1772f15 (diff)
download	linux-d732580b4eb31553c63744a47d590f770cafb8f0.tar.bz2
block: implement blk_queue_bypass_start/end()
Rename and extend elv_quiesce_start/end() to blk_queue_bypass_start/end(), which are exported and support nesting via @q->bypass_depth. Also add blk_queue_bypass() to test bypass state. This will be further extended and used for blkio_group management.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
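The nesting contract is the interesting part: every blk_queue_bypass_start() must be paired with a blk_queue_bypass_end(), and the queue only leaves bypass mode when the outermost caller ends. Below is a minimal userspace model of that behavior; struct mock_queue and the mock_* helpers are hypothetical stand-ins for struct request_queue and the kernel's queue-flag helpers, and only the bypass_depth logic mirrors the patch:

/*
 * Userspace sketch of the nesting semantics added by this patch.
 * Everything here is a mock; it is not kernel code.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct mock_queue {
	int  bypass_depth;	/* models q->bypass_depth */
	bool bypass_flag;	/* models QUEUE_FLAG_BYPASS */
};

static void mock_bypass_start(struct mock_queue *q)
{
	q->bypass_depth++;
	q->bypass_flag = true;		/* set unconditionally, as in the patch */
}

static void mock_bypass_end(struct mock_queue *q)
{
	if (!--q->bypass_depth)
		q->bypass_flag = false;	/* only the outermost end clears it */
	assert(q->bypass_depth >= 0);	/* mirrors the WARN_ON_ONCE() */
}

int main(void)
{
	struct mock_queue q = { 0, false };

	mock_bypass_start(&q);		/* depth 1, bypassing */
	mock_bypass_start(&q);		/* depth 2, nested caller */
	mock_bypass_end(&q);		/* depth 1, still bypassing */
	assert(q.bypass_flag);
	mock_bypass_end(&q);		/* depth 0, bypass ends */
	assert(!q.bypass_flag);
	printf("nested bypass start/end behaves as expected\n");
	return 0;
}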
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c	39
1 file changed, 37 insertions(+), 2 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index fccb25021121..98ddef430093 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -410,6 +410,42 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 }
 
 /**
+ * blk_queue_bypass_start - enter queue bypass mode
+ * @q: queue of interest
+ *
+ * In bypass mode, only the dispatch FIFO queue of @q is used. This
+ * function makes @q enter bypass mode and drains all requests which were
+ * issued before. On return, it's guaranteed that no request has ELVPRIV
+ * set.
+ */
+void blk_queue_bypass_start(struct request_queue *q)
+{
+	spin_lock_irq(q->queue_lock);
+	q->bypass_depth++;
+	queue_flag_set(QUEUE_FLAG_BYPASS, q);
+	spin_unlock_irq(q->queue_lock);
+
+	blk_drain_queue(q, false);
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
+
+/**
+ * blk_queue_bypass_end - leave queue bypass mode
+ * @q: queue of interest
+ *
+ * Leave bypass mode and restore the normal queueing behavior.
+ */
+void blk_queue_bypass_end(struct request_queue *q)
+{
+	spin_lock_irq(q->queue_lock);
+	if (!--q->bypass_depth)
+		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
+	WARN_ON_ONCE(q->bypass_depth < 0);
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
+
+/**
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
  *
@@ -862,8 +898,7 @@ retry:
 	 * Also, lookup icq while holding queue_lock. If it doesn't exist,
 	 * it will be created after releasing queue_lock.
 	 */
-	if (blk_rq_should_init_elevator(bio) &&
-	    !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
+	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
 		rw_flags |= REQ_ELVPRIV;
 		rl->elvpriv++;
 		if (et->icq_cache && ioc)
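The second hunk replaces the open-coded QUEUE_FLAG_ELVSWITCH test in get_request() with blk_queue_bypass(q), the test helper named in the commit message. That helper is added outside this file (in include/linux/blkdev.h), so it does not appear in the diff above; assuming it follows the pattern of the other queue-flag tests there, it would be a one-liner along these lines:

/* Sketch only: presumed shape of the helper added in include/linux/blkdev.h */
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)

With request allocation keyed to the same flag that blk_queue_bypass_start/end() manage, a nested bypass section keeps new requests out of the elevator (no ELVPRIV) until the outermost blk_queue_bypass_end() clears the flag.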