author    Linus Torvalds <torvalds@linux-foundation.org>  2014-02-14 10:45:18 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-02-14 10:45:18 -0800
commit    5e57dc81106b942786f5db8e7ab8788bb9319933 (patch)
tree      4533e01e745bba3614c77200b3fd96dd7af7e04e /block/blk-flush.c
parent    0d25e3691186f5ae6feb0229717a60a5169dc5b2 (diff)
parent    c8123f8c9cb517403b51aa41c3c46ff5e10b2c17 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block IO fixes from Jens Axboe:
 "Second round of updates and fixes for 3.14-rc2. Most of this stuff
  has been queued up for a while. The notable exception is the blk-mq
  changes, which are naturally a bit more in flux still.

  The pull request contains:

   - Two bug fixes for the new immutable vecs, causing crashes with
     raid or swap. From Kent.

   - Various blk-mq tweaks and fixes from Christoph. A fix for
     integrity bio's from Nic.

   - A few bcache fixes from Kent and Darrick Wong.

   - xen-blk{front,back} fixes from David Vrabel, Matt Rushton, Nicolas
     Swenson, and Roger Pau Monne.

   - Fix for a vec miscount with integrity vectors from Martin.

   - Minor annotations or fixes from Masanari Iida and Rashika Kheria.

   - Tweak to null_blk to do more normal FIFO processing of requests
     from Shlomo Pongratz.

   - Elevator switching bypass fix from Tejun.

   - Softlockup in blkdev_issue_discard() fix when !CONFIG_PREEMPT from
     me"

* 'for-linus' of git://git.kernel.dk/linux-block: (31 commits)
  block: add cond_resched() to potentially long running ioctl discard loop
  xen-blkback: init persistent_purge_work work_struct
  blk-mq: pair blk_mq_start_request / blk_mq_requeue_request
  blk-mq: dont assume rq->errors is set when returning an error from ->queue_rq
  block: Fix cloning of discard/write same bios
  block: Fix type mismatch in ssize_t_blk_mq_tag_sysfs_show
  blk-mq: rework flush sequencing logic
  null_blk: use blk_complete_request and blk_mq_complete_request
  virtio_blk: use blk_mq_complete_request
  blk-mq: rework I/O completions
  fs: Add prototype declaration to appropriate header file include/linux/bio.h
  fs: Mark function as static in fs/bio-integrity.c
  block/null_blk: Fix completion processing from LIFO to FIFO
  block: Explicitly handle discard/write same segments
  block: Fix nr_vecs for inline integrity vectors
  blk-mq: Add bio_integrity setup to blk_mq_make_request
  blk-mq: initialize sg_reserved_size
  blk-mq: handle dma_drain_size
  blk-mq: divert __blk_put_request for MQ ops
  blk-mq: support at_head inserations for blk_execute_rq
  ...
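The first commit in that list is easy to picture in isolation: a full-device
discard (mkfs is the classic trigger) loops in blkdev_issue_discard() building
and submitting one discard bio per iteration, and with !CONFIG_PREEMPT nothing
in that loop ever yields, so the softlockup watchdog fires. The fix is a
cond_resched() in the loop body. The fragment below is a sketch of the shape
of that change, not the verbatim patch; submit_one_discard_bio() and the
surrounding variables are invented stand-ins, while cond_resched() is the real
kernel API:

	while (nr_sects) {
		unsigned int len = min(nr_sects, max_discard_sectors);

		/* invented helper: build and submit one discard bio */
		submit_one_discard_bio(bdev, sector, len);
		sector += len;
		nr_sects -= len;

		/*
		 * We can loop here for a long time on a full-device
		 * discard. Be nice and allow ourselves to be scheduled
		 * out, so the loop cannot softlock a CPU when
		 * preemption is disabled.
		 */
		cond_resched();
	}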
Diffstat (limited to 'block/blk-flush.c')
-rw-r--r--  block/blk-flush.c | 101
1 file changed, 37 insertions(+), 64 deletions(-)
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 9288aaf35c21..66e2b697f5db 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -130,20 +130,26 @@ static void blk_flush_restore_request(struct request *rq)
blk_clear_rq_complete(rq);
}
-static void mq_flush_data_run(struct work_struct *work)
+static void mq_flush_run(struct work_struct *work)
{
struct request *rq;
- rq = container_of(work, struct request, mq_flush_data);
+ rq = container_of(work, struct request, mq_flush_work);
memset(&rq->csd, 0, sizeof(rq->csd));
blk_mq_run_request(rq, true, false);
}
-static void blk_mq_flush_data_insert(struct request *rq)
+static bool blk_flush_queue_rq(struct request *rq)
{
- INIT_WORK(&rq->mq_flush_data, mq_flush_data_run);
- kblockd_schedule_work(rq->q, &rq->mq_flush_data);
+ if (rq->q->mq_ops) {
+ INIT_WORK(&rq->mq_flush_work, mq_flush_run);
+ kblockd_schedule_work(rq->q, &rq->mq_flush_work);
+ return false;
+ } else {
+ list_add_tail(&rq->queuelist, &rq->q->queue_head);
+ return true;
+ }
}
/**
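The hunk above is the heart of the cleanup: blk_flush_queue_rq() folds the
blk-mq and legacy dispatch paths into one helper, returning true only when it
placed the request on the legacy queue_head and the caller still has to kick
the queue. On the blk-mq side the request is instead handed to kblockd through
a work_struct embedded in the request itself, and the worker wins the request
back with container_of(). A self-contained sketch of that embedding pattern,
with every name invented and a plain function pointer standing in for the
workqueue machinery:

	#include <stddef.h>
	#include <stdio.h>

	/* trimmed-down container_of, as defined in the kernel's kernel.h */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct work_item {                      /* stand-in for work_struct */
		void (*fn)(struct work_item *w);
	};

	struct request {                        /* stand-in for struct request */
		int tag;
		struct work_item mq_flush_work; /* embedded, as in the patch */
	};

	/* the worker receives only the embedded member... */
	static void flush_run(struct work_item *w)
	{
		/* ...and recovers the enclosing request from it */
		struct request *rq = container_of(w, struct request, mq_flush_work);

		printf("dispatching flush for tag %d\n", rq->tag);
	}

	int main(void)
	{
		struct request rq = { .tag = 42 };

		rq.mq_flush_work.fn = flush_run;
		rq.mq_flush_work.fn(&rq.mq_flush_work); /* what kblockd would do */
		return 0;
	}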
@@ -187,12 +193,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
case REQ_FSEQ_DATA:
list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
- if (q->mq_ops)
- blk_mq_flush_data_insert(rq);
- else {
- list_add(&rq->queuelist, &q->queue_head);
- queued = true;
- }
+ queued = blk_flush_queue_rq(rq);
break;
case REQ_FSEQ_DONE:
@@ -216,9 +217,6 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
}
kicked = blk_kick_flush(q);
- /* blk_mq_run_flush will run queue */
- if (q->mq_ops)
- return queued;
return kicked | queued;
}
@@ -230,10 +228,9 @@ static void flush_end_io(struct request *flush_rq, int error)
struct request *rq, *n;
unsigned long flags = 0;
- if (q->mq_ops) {
- blk_mq_free_request(flush_rq);
+ if (q->mq_ops)
spin_lock_irqsave(&q->mq_flush_lock, flags);
- }
+
running = &q->flush_queue[q->flush_running_idx];
BUG_ON(q->flush_pending_idx == q->flush_running_idx);
@@ -263,49 +260,14 @@ static void flush_end_io(struct request *flush_rq, int error)
* kblockd.
*/
if (queued || q->flush_queue_delayed) {
- if (!q->mq_ops)
- blk_run_queue_async(q);
- else
- /*
- * This can be optimized to only run queues with requests
- * queued if necessary.
- */
- blk_mq_run_queues(q, true);
+ WARN_ON(q->mq_ops);
+ blk_run_queue_async(q);
}
q->flush_queue_delayed = 0;
if (q->mq_ops)
spin_unlock_irqrestore(&q->mq_flush_lock, flags);
}
-static void mq_flush_work(struct work_struct *work)
-{
- struct request_queue *q;
- struct request *rq;
-
- q = container_of(work, struct request_queue, mq_flush_work);
-
- /* We don't need set REQ_FLUSH_SEQ, it's for consistency */
- rq = blk_mq_alloc_request(q, WRITE_FLUSH|REQ_FLUSH_SEQ,
- __GFP_WAIT|GFP_ATOMIC, true);
- rq->cmd_type = REQ_TYPE_FS;
- rq->end_io = flush_end_io;
-
- blk_mq_run_request(rq, true, false);
-}
-
-/*
- * We can't directly use q->flush_rq, because it doesn't have tag and is not in
- * hctx->rqs[]. so we must allocate a new request, since we can't sleep here,
- * so offload the work to workqueue.
- *
- * Note: we assume a flush request finished in any hardware queue will flush
- * the whole disk cache.
- */
-static void mq_run_flush(struct request_queue *q)
-{
- kblockd_schedule_work(q, &q->mq_flush_work);
-}
-
/**
* blk_kick_flush - consider issuing flush request
* @q: request_queue being kicked
@@ -340,19 +302,31 @@ static bool blk_kick_flush(struct request_queue *q)
* different from running_idx, which means flush is in flight.
*/
q->flush_pending_idx ^= 1;
+
if (q->mq_ops) {
- mq_run_flush(q);
- return true;
+ struct blk_mq_ctx *ctx = first_rq->mq_ctx;
+ struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+ blk_mq_rq_init(hctx, q->flush_rq);
+ q->flush_rq->mq_ctx = ctx;
+
+ /*
+ * Reuse the tag value from the first waiting request,
+ * with blk-mq the tag is generated during request
+ * allocation and drivers can rely on it being inside
+ * the range they asked for.
+ */
+ q->flush_rq->tag = first_rq->tag;
+ } else {
+ blk_rq_init(q, q->flush_rq);
}
- blk_rq_init(q, &q->flush_rq);
- q->flush_rq.cmd_type = REQ_TYPE_FS;
- q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
- q->flush_rq.rq_disk = first_rq->rq_disk;
- q->flush_rq.end_io = flush_end_io;
+ q->flush_rq->cmd_type = REQ_TYPE_FS;
+ q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+ q->flush_rq->rq_disk = first_rq->rq_disk;
+ q->flush_rq->end_io = flush_end_io;
- list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
- return true;
+ return blk_flush_queue_rq(q->flush_rq);
}
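The mq branch above deserves a note: q->flush_rq is now preallocated, so it
has no tag of its own, and the patch borrows the tag of the first pending
flush request instead. As the new comment in the hunk says, blk-mq drivers can
rely on every tag lying inside the range they asked for, and the first waiter
already owns such a tag while it sits on the flush list rather than inside the
driver, so reusing it cannot collide with an in-flight request. A toy,
runnable illustration of the borrowing idea, all names invented:

	#include <stdio.h>

	#define NR_TAGS 4                /* range the driver sized itself for */

	struct request { int tag; };

	int main(void)
	{
		struct request waiter   = { .tag = 2 };  /* first pending flush */
		struct request flush_rq = { .tag = -1 }; /* preallocated, tagless */

		/*
		 * Borrow the waiter's tag: it is inside 0..NR_TAGS-1, and
		 * its owner is parked on the flush list, not in the driver,
		 * so no two requests the driver sees ever share a tag.
		 */
		flush_rq.tag = waiter.tag;
		printf("flush_rq goes to the driver with tag %d of %d\n",
		       flush_rq.tag, NR_TAGS);
		return 0;
	}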
static void flush_data_end_io(struct request *rq, int error)
@@ -558,5 +532,4 @@ EXPORT_SYMBOL(blkdev_issue_flush);
void blk_mq_init_flush(struct request_queue *q)
{
spin_lock_init(&q->mq_flush_lock);
- INIT_WORK(&q->mq_flush_work, mq_flush_work);
}
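The one-line deletion in blk_mq_init_flush() is the tail end of the rework:
with q->flush_rq preallocated (note the '.' to '->' flips in blk_kick_flush
above, where the embedded struct became a pointer), the old path that bounced
through mq_flush_work to allocate a request at flush time is gone, and init
has nothing left to set up beyond the lock. A minimal sketch of the
allocate-once pattern the code moved to, with all names invented:

	#include <stdlib.h>

	struct request { int tag; };

	struct request_queue {
		struct request *flush_rq; /* one flush request, allocated once */
	};

	/* pay the allocation cost at init time, never on the flush path */
	static int init_flush(struct request_queue *q)
	{
		q->flush_rq = calloc(1, sizeof(*q->flush_rq));
		return q->flush_rq ? 0 : -1;
	}

	int main(void)
	{
		struct request_queue q;

		if (init_flush(&q))
			return 1;
		free(q.flush_rq);
		return 0;
	}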