author    Jens Axboe <axboe@kernel.dk>  2018-10-29 13:13:29 -0600
committer Jens Axboe <axboe@kernel.dk>  2018-11-07 13:44:59 -0700
commit    f31967f0e455d08d3ea1d2f849bf62dafc92dbf4 (patch)
tree      22fbc79b5ba286e2add8f995a8cfa9f211200192 /block/kyber-iosched.c
parent    f9afca4d367b8c915f28d29fcaba7460640403ff (diff)
download  linux-f31967f0e455d08d3ea1d2f849bf62dafc92dbf4.tar.bz2
blk-mq: allow software queue to map to multiple hardware queues
The mapping used to be dependent on just the CPU location, but now it's
a tuple of (type, cpu) instead. This is a prep patch for allowing a
single software queue to map to multiple hardware queues. No functional
changes in this patch.

This changes the software queue count to an unsigned short to save a
bit of space. We can still support 64K-1 CPUs, which should be enough.
Add a check to catch a wrap.

Reviewed-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
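As a concrete illustration of the (type, cpu) idea, here is a minimal
stand-alone sketch. It is not kernel code: the identifiers (queue_type,
sw_queue_ctx, map_sw_to_hw) are hypothetical stand-ins. Each software
queue keeps one hardware-queue index per queue type, the index is an
unsigned short as the message describes, and assignment from a wider
type is checked so it cannot wrap the 16-bit field:

/*
 * Hedged sketch, not the kernel source: all identifiers below are
 * hypothetical stand-ins used only to illustrate the commit message.
 */
#include <limits.h>
#include <stdio.h>

enum queue_type { QTYPE_DEFAULT, QTYPE_READ, QTYPE_POLL, QTYPE_MAX };

/* The mapping key is now the tuple (type, cpu): each software queue
 * context stores one hardware-queue index per queue type. */
struct sw_queue_ctx {
	unsigned short index_hw[QTYPE_MAX];	/* 16 bits: up to 64K-1 queues */
};

/* Assigning from a wider type needs a wrap check, mirroring the
 * "check to catch a wrap" the commit message mentions. */
static int map_sw_to_hw(struct sw_queue_ctx *ctx, enum queue_type type,
			unsigned int hw_index)
{
	if (hw_index > USHRT_MAX) {
		fprintf(stderr, "hw queue index %u wraps 16 bits\n", hw_index);
		return -1;
	}
	ctx->index_hw[type] = (unsigned short)hw_index;
	return 0;
}

int main(void)
{
	struct sw_queue_ctx ctx = { { 0 } };

	/* The same software queue can now point at different hardware
	 * queues depending on the I/O type. */
	map_sw_to_hw(&ctx, QTYPE_DEFAULT, 0);
	map_sw_to_hw(&ctx, QTYPE_READ, 3);
	printf("default -> %u, read -> %u\n",
	       ctx.index_hw[QTYPE_DEFAULT], ctx.index_hw[QTYPE_READ]);
	return 0;
}

This is the same shape the hunks below exercise: ctx->index_hw goes from
a single index to an array indexed by hctx->type.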
Diffstat (limited to 'block/kyber-iosched.c')
-rw-r--r--  block/kyber-iosched.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index 1fd83a91e749..de78e8aa7b0a 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -576,7 +576,7 @@ static bool kyber_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
 {
 	struct kyber_hctx_data *khd = hctx->sched_data;
 	struct blk_mq_ctx *ctx = blk_mq_get_ctx(hctx->queue);
-	struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw];
+	struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
 	unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);
 	struct list_head *rq_list = &kcq->rq_list[sched_domain];
 	bool merged;
@@ -602,7 +602,7 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
 
 	list_for_each_entry_safe(rq, next, rq_list, queuelist) {
 		unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
-		struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw];
+		struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
 		struct list_head *head = &kcq->rq_list[sched_domain];
 
 		spin_lock(&kcq->lock);
@@ -611,7 +611,7 @@ static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
 		else
 			list_move_tail(&rq->queuelist, head);
 		sbitmap_set_bit(&khd->kcq_map[sched_domain],
-				rq->mq_ctx->index_hw);
+				rq->mq_ctx->index_hw[hctx->type]);
 		blk_mq_sched_request_inserted(rq);
 		spin_unlock(&kcq->lock);
 	}