author    | Jens Axboe <axboe@kernel.dk>        | 2018-05-17 18:31:51 +0200
committer | Keith Busch <keith.busch@intel.com> | 2018-05-18 14:41:36 -0600
commit    | 1ab0cd6966fc4a7e9dfbd7c6eda917ae9c977f42
tree      | 76baa40276578d79e113b75f8f3d2bcb3bbe6640
parent    | 5cb525c8315f1dd9232b59cd1cf1e0f19ff1a5df
nvme-pci: split the nvme queue lock into submission and completion locks
With completion handling moved out from under the queue lock by the parent
commit, splitting the single per-queue lock is now feasible. We protect the
submission queue ring with ->sq_lock, and the completion side with ->cq_lock.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Christoph Hellwig <hch@lst.de>
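
For context, here is a minimal user-space sketch of the pattern this patch
adopts; all names (demo_queue, demo_submit, demo_complete) are hypothetical,
and pthread mutexes stand in for the kernel's spinlock_t. It illustrates the
split only, not the driver code: the submission path and the completion path
each take their own lock, and the completion lock is pushed onto its own
cache line, as the patch does with ____cacheline_aligned_in_smp.

#include <pthread.h>
#include <stdint.h>

#define QUEUE_DEPTH 64

struct demo_queue {
	/* guards the submission ring and its tail pointer */
	pthread_mutex_t sq_lock;
	uint32_t sq_cmds[QUEUE_DEPTH];
	uint16_t sq_tail;

	/*
	 * Separate lock for the completion side, on its own cache line
	 * so the submit and complete paths do not bounce one line
	 * between CPUs (the kernel patch uses
	 * ____cacheline_aligned_in_smp for the same reason).
	 */
	pthread_mutex_t cq_lock __attribute__((aligned(64)));
	uint32_t cqes[QUEUE_DEPTH];
	uint16_t cq_head;
};

static void demo_queue_init(struct demo_queue *q)
{
	/* mirrors the two spin_lock_init() calls in nvme_alloc_queue() */
	pthread_mutex_init(&q->sq_lock, NULL);
	pthread_mutex_init(&q->cq_lock, NULL);
	q->sq_tail = 0;
	q->cq_head = 0;
}

static void demo_submit(struct demo_queue *q, uint32_t cmd)
{
	pthread_mutex_lock(&q->sq_lock);
	q->sq_cmds[q->sq_tail] = cmd;
	q->sq_tail = (uint16_t)((q->sq_tail + 1) % QUEUE_DEPTH);
	/* a real driver would write sq_tail to the doorbell here */
	pthread_mutex_unlock(&q->sq_lock);
}

static void demo_complete(struct demo_queue *q)
{
	pthread_mutex_lock(&q->cq_lock);
	/* a real driver would reap CQEs by phase bit here */
	q->cq_head = (uint16_t)((q->cq_head + 1) % QUEUE_DEPTH);
	pthread_mutex_unlock(&q->cq_lock);
}

Because demo_submit() and demo_complete() never touch the same lock, a CPU
posting commands never serializes against the CPU reaping completions.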
drivers/nvme/host/pci.c | 44 +++++++++++++++++++++++---------------------
1 file changed, 23 insertions(+), 21 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 7fbb6f94b561..1b49b694a57a 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -147,9 +147,10 @@ static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
 struct nvme_queue {
 	struct device *q_dmadev;
 	struct nvme_dev *dev;
-	spinlock_t q_lock;
+	spinlock_t sq_lock;
 	struct nvme_command *sq_cmds;
 	struct nvme_command __iomem *sq_cmds_io;
+	spinlock_t cq_lock ____cacheline_aligned_in_smp;
 	volatile struct nvme_completion *cqes;
 	struct blk_mq_tags **tags;
 	dma_addr_t sq_dma_addr;
@@ -894,9 +895,9 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	blk_mq_start_request(req);
 
-	spin_lock_irq(&nvmeq->q_lock);
+	spin_lock_irq(&nvmeq->sq_lock);
 	__nvme_submit_cmd(nvmeq, &cmnd);
-	spin_unlock_irq(&nvmeq->q_lock);
+	spin_unlock_irq(&nvmeq->sq_lock);
 	return BLK_STS_OK;
 out_cleanup_iod:
 	nvme_free_iod(dev, req);
@@ -1000,9 +1001,9 @@ static irqreturn_t nvme_irq(int irq, void *data)
 	struct nvme_queue *nvmeq = data;
 	u16 start, end;
 
-	spin_lock(&nvmeq->q_lock);
+	spin_lock(&nvmeq->cq_lock);
 	nvme_process_cq(nvmeq, &start, &end, -1);
-	spin_unlock(&nvmeq->q_lock);
+	spin_unlock(&nvmeq->cq_lock);
 
 	if (start == end)
 		return IRQ_NONE;
@@ -1026,9 +1027,9 @@ static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
 	if (!nvme_cqe_pending(nvmeq))
 		return 0;
 
-	spin_lock_irq(&nvmeq->q_lock);
+	spin_lock_irq(&nvmeq->cq_lock);
 	found = nvme_process_cq(nvmeq, &start, &end, tag);
-	spin_unlock_irq(&nvmeq->q_lock);
+	spin_unlock_irq(&nvmeq->cq_lock);
 
 	nvme_complete_cqes(nvmeq, start, end);
 	return found;
@@ -1051,9 +1052,9 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
 	c.common.opcode = nvme_admin_async_event;
 	c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
 
-	spin_lock_irq(&nvmeq->q_lock);
+	spin_lock_irq(&nvmeq->sq_lock);
 	__nvme_submit_cmd(nvmeq, &c);
-	spin_unlock_irq(&nvmeq->q_lock);
+	spin_unlock_irq(&nvmeq->sq_lock);
 }
 
 static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
@@ -1310,15 +1311,15 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 {
 	int vector;
 
-	spin_lock_irq(&nvmeq->q_lock);
+	spin_lock_irq(&nvmeq->cq_lock);
 	if (nvmeq->cq_vector == -1) {
-		spin_unlock_irq(&nvmeq->q_lock);
+		spin_unlock_irq(&nvmeq->cq_lock);
 		return 1;
 	}
 	vector = nvmeq->cq_vector;
 	nvmeq->dev->online_queues--;
 	nvmeq->cq_vector = -1;
-	spin_unlock_irq(&nvmeq->q_lock);
+	spin_unlock_irq(&nvmeq->cq_lock);
 
 	/*
 	 * Ensure that nvme_queue_rq() sees it ->cq_vector == -1 without
@@ -1344,9 +1345,9 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 	else
 		nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
 
-	spin_lock_irq(&nvmeq->q_lock);
+	spin_lock_irq(&nvmeq->cq_lock);
 	nvme_process_cq(nvmeq, &start, &end, -1);
-	spin_unlock_irq(&nvmeq->q_lock);
+	spin_unlock_irq(&nvmeq->cq_lock);
 
 	nvme_complete_cqes(nvmeq, start, end);
 }
@@ -1406,7 +1407,8 @@ static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth)
 
 	nvmeq->q_dmadev = dev->dev;
 	nvmeq->dev = dev;
-	spin_lock_init(&nvmeq->q_lock);
+	spin_lock_init(&nvmeq->sq_lock);
+	spin_lock_init(&nvmeq->cq_lock);
 	nvmeq->cq_head = 0;
 	nvmeq->cq_phase = 1;
 	nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
@@ -1442,7 +1444,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
 {
 	struct nvme_dev *dev = nvmeq->dev;
 
-	spin_lock_irq(&nvmeq->q_lock);
+	spin_lock_irq(&nvmeq->cq_lock);
 	nvmeq->sq_tail = 0;
 	nvmeq->cq_head = 0;
 	nvmeq->cq_phase = 1;
@@ -1450,7 +1452,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
 	memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
 	nvme_dbbuf_init(dev, nvmeq, qid);
 	dev->online_queues++;
-	spin_unlock_irq(&nvmeq->q_lock);
+	spin_unlock_irq(&nvmeq->cq_lock);
 }
 
 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
@@ -2001,14 +2003,14 @@ static void nvme_del_cq_end(struct request *req, blk_status_t error)
 	unsigned long flags;
 
 	/*
-	 * We might be called with the AQ q_lock held
-	 * and the I/O queue q_lock should always
+	 * We might be called with the AQ cq_lock held
+	 * and the I/O queue cq_lock should always
 	 * nest inside the AQ one.
 	 */
-	spin_lock_irqsave_nested(&nvmeq->q_lock, flags,
+	spin_lock_irqsave_nested(&nvmeq->cq_lock, flags,
 				 SINGLE_DEPTH_NESTING);
 	nvme_process_cq(nvmeq, &start, &end, -1);
-	spin_unlock_irqrestore(&nvmeq->q_lock, flags);
+	spin_unlock_irqrestore(&nvmeq->cq_lock, flags);
 
 	nvme_complete_cqes(nvmeq, start, end);
 }
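
The last hunk deserves a note: during queue deletion, nvme_del_cq_end() can
run while the admin queue's cq_lock is already held, and both locks belong to
the same lockdep class (they are the same member of struct nvme_queue), so a
plain spin_lock() there would trigger a false-positive deadlock report. The
patch keeps the existing spin_lock_irqsave_nested() annotation and only
renames the lock. A sketch of the pattern follows; adminq and ioq are
hypothetical struct nvme_queue pointers for illustration, not driver code.

	/*
	 * SINGLE_DEPTH_NESTING tells lockdep that exactly one level of
	 * same-class nesting is intended here, with the I/O queue lock
	 * always taken inside the admin queue lock, never the reverse.
	 */
	unsigned long flags;

	spin_lock_irq(&adminq->cq_lock);	/* outer: admin queue */
	spin_lock_irqsave_nested(&ioq->cq_lock, flags,
				 SINGLE_DEPTH_NESTING);	/* inner: I/O queue */
	/* ... reap the I/O queue's completions ... */
	spin_unlock_irqrestore(&ioq->cq_lock, flags);
	spin_unlock_irq(&adminq->cq_lock);

Keeping the ordering one-directional is what makes the annotation safe: if
two CPUs could nest the locks in opposite orders, lockdep's warning would be
a real deadlock, not a false positive.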