author     Christoph Hellwig <hch@lst.de>    2014-05-28 08:08:02 -0600
committer  Jens Axboe <axboe@fb.com>         2014-05-28 08:08:02 -0600
commit     6fca6a611c27f1f0d90fbe1cc3c229dbf8c09e48
tree       d3348f3ab1169db9b5a1fca67a8fd2164152530c /block/blk-flush.c
parent     7738dac4f697ffbd0ed4c4aeb69a714ef9d876da
blk-mq: add helper to insert requests from irq context
Both the cache flush state machine and the SCSI midlayer want to submit
requests from irq context, and the current per-request requeue_work
unfortunately causes corruption due to sharing with the csd field for
flushes. Replace them with a per-request_queue list of requests to
be requeued.
Based on an earlier test by Ming Lei.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reported-by: Ming Lei <tom.leiming@gmail.com>
Tested-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
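
The diffstat below is limited to block/blk-flush.c, so only the callers of the
new helpers are visible; the helper implementations themselves live elsewhere
in the commit. As a rough sketch of the pattern the commit message describes —
a per-request_queue list, protected by a spinlock, that irq-context callers
append to, with a work item draining it later — the helpers might look
something like the following. The helper names match the calls visible in the
diff, but the per-queue field names and the function bodies are illustrative,
not the commit's exact code.

/*
 * Illustrative sketch only -- NOT the commit's exact code.  It shows the
 * shape of the helpers called from blk_flush_queue_rq() below: irq-safe
 * insertion onto a per-queue list, plus a work item that requeues the
 * pending requests from process context.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Trimmed stand-ins for the real structures; the commit adds fields of
 * this shape to struct request_queue (names here are assumptions). */
struct request_queue {
	struct list_head	requeue_list;	/* requests awaiting requeue */
	spinlock_t		requeue_lock;	/* protects requeue_list */
	struct work_struct	requeue_work;	/* drains the list */
};

struct request {
	struct list_head	queuelist;
	struct request_queue	*q;
};

/* Existing block-layer entry points, declared here for the sketch;
 * both appear in the blk-flush.c diff below. */
int kblockd_schedule_work(struct work_struct *work);
void blk_mq_insert_request(struct request *rq, bool at_head,
			   bool run_queue, bool async);

/* Irq-safe: only takes a spinlock with interrupts disabled and links
 * the request into the per-queue list. */
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head)
		list_add(&rq->queuelist, &q->requeue_list);
	else
		list_add_tail(&rq->queuelist, &q->requeue_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);
}

/* Also irq-safe: just schedules the per-queue work item on kblockd. */
void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_schedule_work(&q->requeue_work);
}

/* Process context: splice the pending requests out under the lock,
 * then reinsert each one with the lock dropped. */
static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work);
	struct request *rq, *next;
	unsigned long flags;
	LIST_HEAD(rq_list);

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		list_del_init(&rq->queuelist);
		blk_mq_insert_request(rq, false, true, false);
	}
}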
Diffstat (limited to 'block/blk-flush.c')
 block/blk-flush.c | 16 ++++------------
 1 file changed, 4 insertions(+), 12 deletions(-)
diff --git a/block/blk-flush.c b/block/blk-flush.c
index ec7a224d6733..ef608b35d9be 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -130,21 +130,13 @@ static void blk_flush_restore_request(struct request *rq)
 	blk_clear_rq_complete(rq);
 }
 
-static void mq_flush_run(struct work_struct *work)
-{
-	struct request *rq;
-
-	rq = container_of(work, struct request, requeue_work);
-
-	memset(&rq->csd, 0, sizeof(rq->csd));
-	blk_mq_insert_request(rq, false, true, false);
-}
-
 static bool blk_flush_queue_rq(struct request *rq, bool add_front)
 {
 	if (rq->q->mq_ops) {
-		INIT_WORK(&rq->requeue_work, mq_flush_run);
-		kblockd_schedule_work(&rq->requeue_work);
+		struct request_queue *q = rq->q;
+
+		blk_mq_add_to_requeue_list(rq, add_front);
+		blk_mq_kick_requeue_list(q);
 		return false;
 	} else {
 		if (add_front)
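
Note the design choice visible in the hunk: blk_flush_queue_rq() no longer
touches the per-request work_struct at all. That storage was shared with
rq->csd, which the IPI-based completion path uses, so scheduling requeue work
per request could corrupt whichever field was live — the corruption the commit
message describes. With the per-queue list (as in the sketch above), the
irq-context path is reduced to a locked list insertion plus a work-item kick,
and a single work invocation can requeue a whole batch of pending requests.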