author		Jens Axboe <axboe@kernel.dk>	2021-10-29 14:34:11 -0600
committer	Jens Axboe <axboe@kernel.dk>	2021-12-16 10:54:35 -0700
commit		62451a2b2e7ea17c4a547ada6a5deebf8787a27a (patch)
tree		cd1778bf96ab27561b538f06ef0b5a81cb7f6b92 /drivers/nvme
parent		3233b94cf842984ea7e208d5be1ad2f2af02d495 (diff)
download	linux-62451a2b2e7ea17c4a547ada6a5deebf8787a27a.tar.bz2
nvme: separate command prep and issue
Add an nvme_prep_rq() helper to set up a command, and adapt nvme_queue_rq()
to use this helper.
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers/nvme')
-rw-r--r--	drivers/nvme/host/pci.c	63
1 file changed, 36 insertions(+), 27 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 2009f8c047a2..081abbe04f29 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -903,55 +903,32 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
 	return BLK_STS_OK;
 }
 
-/*
- * NOTE: ns is NULL when called on the admin queue.
- */
-static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
-			 const struct blk_mq_queue_data *bd)
+static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
 {
-	struct nvme_ns *ns = hctx->queue->queuedata;
-	struct nvme_queue *nvmeq = hctx->driver_data;
-	struct nvme_dev *dev = nvmeq->dev;
-	struct request *req = bd->rq;
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-	struct nvme_command *cmnd = &iod->cmd;
 	blk_status_t ret;
 
 	iod->aborted = 0;
 	iod->npages = -1;
 	iod->nents = 0;
 
-	/*
-	 * We should not need to do this, but we're still using this to
-	 * ensure we can drain requests on a dying queue.
-	 */
-	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
-		return BLK_STS_IOERR;
-
-	if (!nvme_check_ready(&dev->ctrl, req, true))
-		return nvme_fail_nonready_command(&dev->ctrl, req);
-
-	ret = nvme_setup_cmd(ns, req);
+	ret = nvme_setup_cmd(req->q->queuedata, req);
 	if (ret)
 		return ret;
 
 	if (blk_rq_nr_phys_segments(req)) {
-		ret = nvme_map_data(dev, req, cmnd);
+		ret = nvme_map_data(dev, req, &iod->cmd);
 		if (ret)
 			goto out_free_cmd;
 	}
 
 	if (blk_integrity_rq(req)) {
-		ret = nvme_map_metadata(dev, req, cmnd);
+		ret = nvme_map_metadata(dev, req, &iod->cmd);
 		if (ret)
 			goto out_unmap_data;
 	}
 
 	blk_mq_start_request(req);
-	spin_lock(&nvmeq->sq_lock);
-	nvme_sq_copy_cmd(nvmeq, &iod->cmd);
-	nvme_write_sq_db(nvmeq, bd->last);
-	spin_unlock(&nvmeq->sq_lock);
 	return BLK_STS_OK;
 out_unmap_data:
 	nvme_unmap_data(dev, req);
@@ -960,6 +937,38 @@ out_free_cmd:
 	return ret;
 }
 
+/*
+ * NOTE: ns is NULL when called on the admin queue.
+ */
+static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
+			 const struct blk_mq_queue_data *bd)
+{
+	struct nvme_queue *nvmeq = hctx->driver_data;
+	struct nvme_dev *dev = nvmeq->dev;
+	struct request *req = bd->rq;
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	blk_status_t ret;
+
+	/*
+	 * We should not need to do this, but we're still using this to
+	 * ensure we can drain requests on a dying queue.
+	 */
+	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
+		return BLK_STS_IOERR;
+
+	if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
+		return nvme_fail_nonready_command(&dev->ctrl, req);
+
+	ret = nvme_prep_rq(dev, req);
+	if (unlikely(ret))
+		return ret;
+	spin_lock(&nvmeq->sq_lock);
+	nvme_sq_copy_cmd(nvmeq, &iod->cmd);
+	nvme_write_sq_db(nvmeq, bd->last);
+	spin_unlock(&nvmeq->sq_lock);
+	return BLK_STS_OK;
+}
+
 static __always_inline void nvme_pci_unmap_rq(struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
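
As a minimal, self-contained sketch of the control flow this patch leaves behind — prep (command setup and data mapping) separated from issue (queue checks, SQ copy, doorbell) — the toy C program below models the split outside the kernel. The types and functions (fake_req, fake_prep_rq, fake_queue_rq) are illustrative stand-ins, not the driver's structures; only the shape of the calls mirrors the patch above.

/*
 * Illustrative model of the prep/issue split, not the real driver code:
 * the real nvme_prep_rq()/nvme_queue_rq() operate on struct request and
 * the NVMe submission queue; here both are reduced to stubs so the
 * control flow is visible in isolation.
 */
#include <stdbool.h>
#include <stdio.h>

enum blk_status { BLK_STS_OK = 0, BLK_STS_IOERR };	/* stand-in values */

struct fake_req {
	int nr_segments;	/* models blk_rq_nr_phys_segments() */
	bool queue_enabled;	/* models the NVMEQ_ENABLED bit */
};

/* Prep: build the command and map data, but do not touch the SQ. */
static enum blk_status fake_prep_rq(struct fake_req *req)
{
	if (req->nr_segments < 0)
		return BLK_STS_IOERR;	/* stand-in for nvme_map_data() failing */
	return BLK_STS_OK;
}

/* Issue: queue-level checks, then prep, then copy to the SQ under the lock. */
static enum blk_status fake_queue_rq(struct fake_req *req, bool last)
{
	enum blk_status ret;

	if (!req->queue_enabled)	/* mirrors the NVMEQ_ENABLED test */
		return BLK_STS_IOERR;

	ret = fake_prep_rq(req);
	if (ret)
		return ret;

	/* in the driver: spin_lock(&nvmeq->sq_lock); copy cmd; ring doorbell */
	printf("submitted (%d segs), doorbell %s\n",
	       req->nr_segments, last ? "written" : "deferred");
	return BLK_STS_OK;
}

int main(void)
{
	struct fake_req req = { .nr_segments = 2, .queue_enabled = true };

	return fake_queue_rq(&req, true) == BLK_STS_OK ? 0 : 1;
}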