diff options
author | Keith Busch <kbusch@kernel.org> | 2020-04-27 11:54:46 -0700 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2020-05-09 16:18:36 -0600 |
commit | 54b2fcee1db041a83b52b51752dade6090cf952f (patch) | |
tree | b20574005ddc702eb039754bfc74a300014d8fe7 | |
parent | 74943d45eef4db64b1e5c9f7ad1d018576e113c5 (diff) | |
download | linux-54b2fcee1db041a83b52b51752dade6090cf952f.tar.bz2 |
nvme-pci: remove last_sq_tail
The nvme driver does not have enough tags to wrap the queue, and blk-mq
will no longer call commit_rqs() when there are no new submissions to
notify.
Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r-- | drivers/nvme/host/pci.c | 23 |
1 file changed, 4 insertions(+), 19 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index e95c7465c7bd..b945e9a89883 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -173,7 +173,6 @@ struct nvme_queue {
 	u16 q_depth;
 	u16 cq_vector;
 	u16 sq_tail;
-	u16 last_sq_tail;
 	u16 cq_head;
 	u16 qid;
 	u8 cq_phase;
@@ -446,24 +445,11 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
 	return 0;
 }
 
-/*
- * Write sq tail if we are asked to, or if the next command would wrap.
- */
-static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
+static inline void nvme_write_sq_db(struct nvme_queue *nvmeq)
 {
-	if (!write_sq) {
-		u16 next_tail = nvmeq->sq_tail + 1;
-
-		if (next_tail == nvmeq->q_depth)
-			next_tail = 0;
-		if (next_tail != nvmeq->last_sq_tail)
-			return;
-	}
-
 	if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
 			nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
 		writel(nvmeq->sq_tail, nvmeq->q_db);
-	nvmeq->last_sq_tail = nvmeq->sq_tail;
 }
 
 /**
@@ -480,7 +466,8 @@ static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
 	       cmd, sizeof(*cmd));
 	if (++nvmeq->sq_tail == nvmeq->q_depth)
 		nvmeq->sq_tail = 0;
-	nvme_write_sq_db(nvmeq, write_sq);
+	if (write_sq)
+		nvme_write_sq_db(nvmeq);
 	spin_unlock(&nvmeq->sq_lock);
 }
 
@@ -489,8 +476,7 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
 	struct nvme_queue *nvmeq = hctx->driver_data;
 
 	spin_lock(&nvmeq->sq_lock);
-	if (nvmeq->sq_tail != nvmeq->last_sq_tail)
-		nvme_write_sq_db(nvmeq, true);
+	nvme_write_sq_db(nvmeq);
 	spin_unlock(&nvmeq->sq_lock);
 }
 
@@ -1494,7 +1480,6 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
 	struct nvme_dev *dev = nvmeq->dev;
 
 	nvmeq->sq_tail = 0;
-	nvmeq->last_sq_tail = 0;
 	nvmeq->cq_head = 0;
 	nvmeq->cq_phase = 1;
 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];