field | value | date |
---|---|---|
author | Jens Axboe <axboe@kernel.dk> | 2020-07-20 15:38:23 -0600 |
committer | Jens Axboe <axboe@kernel.dk> | 2020-07-20 15:38:23 -0600 |
commit | 9caaa66c918c020fd16e84d1c6ebcce9960df1b2 (patch) | |
tree | 578d455a53453a1aead77da399c5134e73224a66 /block/blk-flush.c | |
parent | ba47d845d715a010f7b51f6f89bae32845e6acb7 (diff) | |
parent | ef45fe470e1e5410db4af87abc5d5055427945ac (diff) | |
download | linux-9caaa66c918c020fd16e84d1c6ebcce9960df1b2.tar.bz2 | |
Merge branch 'for-5.9/block' into for-5.9/block-merge
* for-5.9/block: (124 commits)
blk-cgroup: show global disk stats in root cgroup io.stat
blk-cgroup: make iostat functions visible to stat printing
block: improve discard bio alignment in __blkdev_issue_discard()
block: change REQ_OP_ZONE_RESET and REQ_OP_ZONE_RESET_ALL to be odd numbers
block: defer flush request no matter whether we have elevator
block: make blk_timeout_init() static
block: remove retry loop in ioc_release_fn()
block: remove unnecessary ioc nested locking
block: integrate bd_start_claiming into __blkdev_get
block: use bd_prepare_to_claim directly in the loop driver
block: refactor bd_start_claiming
block: simplify the restart case in __blkdev_get
Revert "blk-rq-qos: remove redundant finish_wait to rq_qos_wait."
block: always remove partitions from blk_drop_partitions()
block: relax jiffies rounding for timeouts
blk-mq: remove redundant validation in __blk_mq_end_request()
blk-mq: Remove unnecessary local variable
writeback: remove bdi->congested_fn
writeback: remove struct bdi_writeback_congested
writeback: remove {set,clear}_wb_congested
...
Diffstat (limited to 'block/blk-flush.c')
-rw-r--r-- | block/blk-flush.c | 23 |
1 file changed, 6 insertions(+), 17 deletions(-)
```diff
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 15ae0155ec07..6e1543c10493 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -219,7 +219,6 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 	struct request *rq, *n;
 	unsigned long flags = 0;
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
-	struct blk_mq_hw_ctx *hctx;
 
 	blk_account_io_flush(flush_rq);
 
@@ -235,13 +234,11 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 	if (fq->rq_status != BLK_STS_OK)
 		error = fq->rq_status;
 
-	hctx = flush_rq->mq_hctx;
 	if (!q->elevator) {
-		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
-		flush_rq->tag = -1;
+		flush_rq->tag = BLK_MQ_NO_TAG;
 	} else {
 		blk_mq_put_driver_tag(flush_rq);
-		flush_rq->internal_tag = -1;
+		flush_rq->internal_tag = BLK_MQ_NO_TAG;
 	}
 
 	running = &fq->flush_queue[fq->flush_running_idx];
@@ -286,13 +283,8 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
 		return;
 
-	/* C2 and C3
-	 *
-	 * For blk-mq + scheduling, we can risk having all driver tags
-	 * assigned to empty flushes, and we deadlock if we are expecting
-	 * other requests to make progress. Don't defer for that case.
-	 */
-	if (!list_empty(&fq->flush_data_in_flight) && q->elevator &&
+	/* C2 and C3 */
+	if (!list_empty(&fq->flush_data_in_flight) &&
 	    time_before(jiffies,
 			fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
 		return;
@@ -316,13 +308,10 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	flush_rq->mq_ctx = first_rq->mq_ctx;
 	flush_rq->mq_hctx = first_rq->mq_hctx;
 
-	if (!q->elevator) {
-		fq->orig_rq = first_rq;
+	if (!q->elevator)
 		flush_rq->tag = first_rq->tag;
-		blk_mq_tag_set_rq(flush_rq->mq_hctx, first_rq->tag, flush_rq);
-	} else {
+	else
 		flush_rq->internal_tag = first_rq->internal_tag;
-	}
 
 	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
 	flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
```
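The third hunk corresponds to the "block: defer flush request no matter whether we have elevator" commit listed above: the decision to hold back a pending flush while data requests are in flight no longer depends on `q->elevator`. Below is a minimal userspace sketch of that deferral predicate, written against the hunk shown here; the helper name `defer_flush()`, the stripped-down struct, and the timeout value are illustrative stand-ins, not kernel API.

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the jiffies-based FLUSH_PENDING_TIMEOUT. */
#define FLUSH_PENDING_TIMEOUT 5UL

/* Stripped-down model of the fields blk_kick_flush() consults. */
struct flush_queue_model {
	bool data_in_flight;          /* !list_empty(&fq->flush_data_in_flight) */
	unsigned long pending_since;  /* fq->flush_pending_since */
};

/*
 * Cases C2 and C3: defer issuing the flush while data requests are still
 * in flight and the flush has not been pending for too long.  After this
 * merge the check is made regardless of whether an elevator is attached.
 */
static bool defer_flush(const struct flush_queue_model *fq, unsigned long now)
{
	/* mirrors time_before(now, fq->pending_since + FLUSH_PENDING_TIMEOUT) */
	return fq->data_in_flight &&
	       (long)(fq->pending_since + FLUSH_PENDING_TIMEOUT - now) > 0;
}

int main(void)
{
	struct flush_queue_model fq = { .data_in_flight = true, .pending_since = 100 };

	printf("t=102: defer=%d\n", defer_flush(&fq, 102));  /* within timeout -> 1 */
	printf("t=110: defer=%d\n", defer_flush(&fq, 110));  /* timed out      -> 0 */
	return 0;
}
```

Compiled standalone, the two printf calls show the flush being deferred inside the timeout window and issued once it expires, matching the simplified condition in the hunk.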