author    Hou Tao <houtao1@huawei.com>    2019-05-21 15:59:03 +0800
committer Jens Axboe <axboe@kernel.dk>    2019-09-15 16:02:08 -0600
commit    3d24430694077313c75c6b89f618db09943621e4 (patch)
tree      f8688e775734e33e5616d916287e21f30147eb44
parent    89f3b6d62f2c7c1ed7b2e672be605016d9ff60f2 (diff)
block: make rq sector size accessible for block stats
Currently rq->data_len will be decreased by partial completion or
zeroed by completion, so when blk_stat_add() is invoked, data_len will
be zero and there will never be samples in poll_cb, because
blk_mq_poll_stats_bkt() returns -1 if data_len is zero.

We could move blk_stat_add() back to __blk_mq_complete_request(), but
that would defeat the effort of calling ktime_get_ns() only once.
Instead, reuse the throtl_size field for both block stats and block
throttle, and adjust the logic in blk_mq_poll_stats_bkt() accordingly.

Fixes: 4bc6339a583c ("block: move blk_stat_add() to __blk_mq_end_request()")
Tested-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
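To see why switching the bucket formula from bytes to sectors changes
nothing for non-zero sizes: a sector is 512 = 2^9 bytes, so
ilog2(bytes) - 9 equals ilog2(bytes >> 9), i.e. ilog2(sectors). A
minimal userspace sketch of the before/after formulas; ilog2_() below
is a hypothetical stand-in for the kernel's ilog2() macro, which in
practice also evaluates to -1 for an input of 0 (the kernel documents
ilog2(0) as undefined):

	#include <stdio.h>

	/* Stand-in for the kernel's ilog2(); returns -1 for 0. */
	static int ilog2_(unsigned long v)
	{
		int log = -1;

		while (v) {
			v >>= 1;
			log++;
		}
		return log;
	}

	int main(void)
	{
		unsigned long bytes;
		int ddir = 0;	/* READ */

		/* size 0, then powers of two from one sector (512B) up */
		for (bytes = 0; bytes <= 8192; bytes = bytes ? bytes << 1 : 512) {
			int old_bkt = ddir + 2 * (ilog2_(bytes) - 9);
			int new_bkt = ddir + 2 * ilog2_(bytes >> 9);

			printf("%5lu bytes -> old %3d, new %3d\n",
			       bytes, old_bkt, new_bkt);
		}
		return 0;
	}

Both formulas agree for every non-zero size, and both go negative for
a size of zero, so the -1 "no bucket" sentinel is preserved. The
difference is that stats_sectors, unlike data_len, still holds the
issue-time size at completion, so real samples are no longer discarded.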
Diffstat (limited to 'block')
-rw-r--r--   block/blk-mq.c         11
-rw-r--r--   block/blk-throttle.c    3
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3647776a0f6e..d30fabb583fd 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -44,12 +44,12 @@ static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb);
 
 static int blk_mq_poll_stats_bkt(const struct request *rq)
 {
-	int ddir, bytes, bucket;
+	int ddir, sectors, bucket;
 
 	ddir = rq_data_dir(rq);
-	bytes = blk_rq_bytes(rq);
+	sectors = blk_rq_stats_sectors(rq);
 
-	bucket = ddir + 2*(ilog2(bytes) - 9);
+	bucket = ddir + 2 * ilog2(sectors);
 
 	if (bucket < 0)
 		return -1;
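The hunk ends at the -1 sentinel. For context, the tail of the
function, which this patch leaves untouched, clamps oversized requests
into the highest bucket pair; this is a reconstruction from the
blk-mq.c of that era (where BLK_MQ_POLL_STATS_BKTS is 16, i.e. 8 size
buckets per direction), not part of the patch itself:

	else if (bucket >= BLK_MQ_POLL_STATS_BKTS)
		return ddir + BLK_MQ_POLL_STATS_BKTS - 2;

	return bucket;
}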
@@ -333,6 +333,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	else
 		rq->start_time_ns = 0;
 	rq->io_start_time_ns = 0;
+	rq->stats_sectors = 0;
 	rq->nr_phys_segments = 0;
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 	rq->nr_integrity_segments = 0;
@@ -681,9 +682,7 @@ void blk_mq_start_request(struct request *rq)
 
 	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
 		rq->io_start_time_ns = ktime_get_ns();
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-		rq->throtl_size = blk_rq_sectors(rq);
-#endif
+		rq->stats_sectors = blk_rq_sectors(rq);
 		rq->rq_flags |= RQF_STATS;
 		rq_qos_issue(q, rq);
 	}
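The new reader above, blk_mq_poll_stats_bkt(), and blk_throtl_stat_add()
below both rely on blk_rq_stats_sectors(); its definition lands in
include/linux/blkdev.h, which the 'block'-limited diffstat on this page
does not show. Presumably it is a trivial accessor along these lines (a
sketch inferred from the commit message, not the verbatim upstream hunk):

	/* 512-byte sectors recorded at issue time; unlike the data
	 * length, this is not consumed by (partial) completion. */
	static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
	{
		return rq->stats_sectors;
	}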
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 0445c998c377..18f773e52dfb 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -2248,7 +2248,8 @@ void blk_throtl_stat_add(struct request *rq, u64 time_ns)
 	struct request_queue *q = rq->q;
 	struct throtl_data *td = q->td;
 
-	throtl_track_latency(td, rq->throtl_size, req_op(rq), time_ns >> 10);
+	throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
+			     time_ns >> 10);
 }
 
 void blk_throtl_bio_endio(struct bio *bio)
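Unrelated to the field change but visible in the reflowed call:
time_ns >> 10 converts nanoseconds to roughly microseconds by dividing
by 1024 rather than 1000, presumably trading about 2.3% of precision
for a shift instead of a 64-bit division. A quick standalone check:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t time_ns = 3000000;	/* a 3 ms completion latency */

		/* exact microseconds vs. the shift approximation */
		printf("%llu us exact, %llu us via >> 10\n",
		       (unsigned long long)(time_ns / 1000),
		       (unsigned long long)(time_ns >> 10));
		return 0;
	}

This prints "3000 us exact, 2929 us via >> 10", well within the
tolerance of the latency buckets the throttle code feeds.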