author    Kashyap Desai <kashyap.desai@broadcom.com>    2020-08-19 23:20:28 +0800
committer Jens Axboe <axboe@kernel.dk>    2020-09-03 15:20:47 -0600
commit    b445547ec1bbd3e7bf4b1c142550942f70527d95 (patch)
tree      452adb8f18969cd32966206e046632f1d7ff9b44 /block
parent    f1b49fdc1c64db110aa1315831e5fe0f8599fa56 (diff)
blk-mq, elevator: Count requests per hctx to improve performance
High CPU utilization in "native_queued_spin_lock_slowpath" due to lock contention is possible for the mq-deadline and bfq IO schedulers when nr_hw_queues is more than one. This is because the kblockd workqueue can submit IO from all online CPUs (through blk_mq_run_hw_queues()) even when only one hctx has pending commands.

The elevator callback .has_work for the mq-deadline and bfq schedulers reports pending work if there are any IOs on the request queue, but it does not account for the hctx context. Add a per-hctx 'elevator_queued' count so the elevator is not triggered when there are no requests queued on that hctx.

[jpg: Relocated atomic_dec() in dd_dispatch_request(), update commit message per Kashyap]

Signed-off-by: Kashyap Desai <kashyap.desai@broadcom.com>
Signed-off-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: John Garry <john.garry@huawei.com>
Tested-by: Douglas Gilbert <dgilbert@interlog.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
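For illustration, here is a minimal userspace sketch of the counting pattern this patch applies, using C11 atomics in place of the kernel's atomic_t; the struct and function names are simplified stand-ins for this sketch, not the real blk-mq API:

/*
 * Sketch: a per-hctx counter that lets .has_work bail out early.
 * hw_ctx, insert_request(), complete_request() and has_work() are
 * hypothetical names used only for this illustration.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct hw_ctx {
	atomic_int elevator_queued;	/* requests queued on this hctx */
};

/* insert path: count the request against its hctx */
static void insert_request(struct hw_ctx *hctx)
{
	atomic_fetch_add(&hctx->elevator_queued, 1);
}

/* dispatch/completion path: drop the count when the request leaves */
static void complete_request(struct hw_ctx *hctx)
{
	atomic_fetch_sub(&hctx->elevator_queued, 1);
}

/*
 * .has_work analogue: when this hctx has nothing queued, return
 * false before touching any scheduler state that needs a lock.
 */
static bool has_work(struct hw_ctx *hctx)
{
	if (!atomic_load(&hctx->elevator_queued))
		return false;
	/* ...only now inspect the (lock-protected) scheduler lists... */
	return true;
}

int main(void)
{
	struct hw_ctx hctx = { .elevator_queued = 0 };

	printf("idle: %d\n", has_work(&hctx));		/* 0 */
	insert_request(&hctx);
	printf("after insert: %d\n", has_work(&hctx));	/* 1 */
	complete_request(&hctx);
	printf("after complete: %d\n", has_work(&hctx));/* 0 */
	return 0;
}

The early check is the whole point: an idle hctx returns false without taking the scheduler lock, so blk_mq_run_hw_queues() fanning out across all online CPUs no longer piles up in native_queued_spin_lock_slowpath.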
Diffstat (limited to 'block')
-rw-r--r--	block/bfq-iosched.c	5
-rw-r--r--	block/blk-mq.c		1
-rw-r--r--	block/mq-deadline.c	6
3 files changed, 12 insertions, 0 deletions
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 418ffd4b5552..a251b80d8ee5 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -4640,6 +4640,9 @@ static bool bfq_has_work(struct blk_mq_hw_ctx *hctx)
{
struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
+ if (!atomic_read(&hctx->elevator_queued))
+ return false;
+
/*
* Avoiding lock: a race on bfqd->busy_queues should cause at
* most a call to dispatch for nothing
@@ -5554,6 +5557,7 @@ static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx,
rq = list_first_entry(list, struct request, queuelist);
list_del_init(&rq->queuelist);
bfq_insert_request(hctx, rq, at_head);
+ atomic_inc(&hctx->elevator_queued);
}
}
@@ -5933,6 +5937,7 @@ static void bfq_finish_requeue_request(struct request *rq)
bfq_completed_request(bfqq, bfqd);
bfq_finish_requeue_request_body(bfqq);
+ atomic_dec(&rq->mq_hctx->elevator_queued);
spin_unlock_irqrestore(&bfqd->lock, flags);
} else {
diff --git a/block/blk-mq.c b/block/blk-mq.c
index eff9d987f85b..4abb71459f94 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2660,6 +2660,7 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
goto free_hctx;
atomic_set(&hctx->nr_active, 0);
+ atomic_set(&hctx->elevator_queued, 0);
if (node == NUMA_NO_NODE)
node = set->numa_node;
hctx->numa_node = node;
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index b57470e154c8..800ac902809b 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -386,6 +386,8 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
spin_lock(&dd->lock);
rq = __dd_dispatch_request(dd);
spin_unlock(&dd->lock);
+ if (rq)
+ atomic_dec(&rq->mq_hctx->elevator_queued);
return rq;
}
@@ -533,6 +535,7 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
rq = list_first_entry(list, struct request, queuelist);
list_del_init(&rq->queuelist);
dd_insert_request(hctx, rq, at_head);
+ atomic_inc(&hctx->elevator_queued);
}
spin_unlock(&dd->lock);
}
@@ -579,6 +582,9 @@ static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
struct deadline_data *dd = hctx->queue->elevator->elevator_data;
+ if (!atomic_read(&hctx->elevator_queued))
+ return false;
+
return !list_empty_careful(&dd->dispatch) ||
!list_empty_careful(&dd->fifo_list[0]) ||
!list_empty_careful(&dd->fifo_list[1]);