author	Sagi Grimberg <sagi@grimberg.me>	2017-06-18 17:28:10 +0300
committer	Jens Axboe <axboe@kernel.dk>	2017-06-28 08:14:13 -0600
commit	442e19b7ccb25337be7bfff96df94c38c037ee9f (patch)
tree	592702bd98fadae1685eaf5c23f70aac73c04e32 /drivers/nvme
parent	920d13a884c0595451658a7b48af8ac16918628f (diff)
download	linux-442e19b7ccb25337be7bfff96df94c38c037ee9f.tar.bz2
nvme-pci: open-code polling logic in nvme_poll
Given that the code is simple enough, open-coding it seems better than passing a tag by reference at each call site; it also lets us get rid of __nvme_process_cq.

Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
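To make the rationale concrete, here is a small standalone userspace sketch (not driver code; the fake_cq/fake_cqe types and helpers are invented for illustration). It contrasts the removed pattern, where the tag is passed by reference and overwritten with -1 as a "found" sentinel, with the open-coded loop that simply returns whether the tag was seen, mirroring the shape of the new __nvme_poll in the diff further down.

/*
 * Standalone illustration only: contrasts the old "tag passed by
 * reference, set to -1 as a sentinel" pattern with the open-coded
 * loop that returns a plain "found" flag. All types and helpers
 * here are made up; they are not the nvme driver's.
 */
#include <stdio.h>
#include <stdbool.h>

struct fake_cqe { int command_id; };

/* A tiny fake completion queue: ids[0..n) are pending completions. */
struct fake_cq { int ids[8]; int n; int head; };

static bool read_cqe(struct fake_cq *q, struct fake_cqe *cqe)
{
	if (q->head == q->n)
		return false;
	cqe->command_id = q->ids[q->head++];
	return true;
}

static void handle_cqe(struct fake_cqe *cqe)
{
	printf("completed command %d\n", cqe->command_id);
}

/* Old style: the tag is passed by reference and overwritten as a flag. */
static void process_cq_old(struct fake_cq *q, int *tag)
{
	struct fake_cqe cqe;

	while (read_cqe(q, &cqe)) {
		handle_cqe(&cqe);
		if (tag && *tag == cqe.command_id) {
			*tag = -1;	/* sentinel: caller must check for -1 */
			break;
		}
	}
}

/* New style: open-coded loop with a plain "found" return value. */
static int poll_new(struct fake_cq *q, int tag)
{
	struct fake_cqe cqe;
	int found = 0;

	while (read_cqe(q, &cqe)) {
		handle_cqe(&cqe);
		if (tag == cqe.command_id) {
			found = 1;
			break;
		}
	}
	return found;
}

int main(void)
{
	struct fake_cq a = { .ids = { 3, 7, 9 }, .n = 3 };
	struct fake_cq b = { .ids = { 3, 7, 9 }, .n = 3 };
	int tag = 7;

	process_cq_old(&a, &tag);
	printf("old style: found = %d\n", tag == -1);
	printf("new style: found = %d\n", poll_new(&b, 7));
	return 0;
}

The second form keeps the completion scan local to the poll path and drops the in/out parameter, which is the simplification the patch applies to __nvme_poll.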
Diffstat (limited to 'drivers/nvme')
-rw-r--r--	drivers/nvme/host/pci.c	40
1 file changed, 21 insertions, 19 deletions
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d309b6c90511..2a9ee769ce9e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -785,7 +785,7 @@ static inline bool nvme_read_cqe(struct nvme_queue *nvmeq,
 	return false;
 }
 
-static void __nvme_process_cq(struct nvme_queue *nvmeq, int *tag)
+static void nvme_process_cq(struct nvme_queue *nvmeq)
 {
 	struct nvme_completion cqe;
 	int consumed = 0;
@@ -793,11 +793,6 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, int *tag)
 	while (nvme_read_cqe(nvmeq, &cqe)) {
 		nvme_handle_cqe(nvmeq, &cqe);
 		consumed++;
-
-		if (tag && *tag == cqe.command_id) {
-			*tag = -1;
-			break;
-		}
 	}
 
 	if (consumed) {
@@ -806,11 +801,6 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, int *tag)
 	}
 }
 
-static void nvme_process_cq(struct nvme_queue *nvmeq)
-{
-	__nvme_process_cq(nvmeq, NULL);
-}
-
 static irqreturn_t nvme_irq(int irq, void *data)
 {
 	irqreturn_t result;
@@ -833,16 +823,28 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
 
 static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
 {
-	if (nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase)) {
-		spin_lock_irq(&nvmeq->q_lock);
-		__nvme_process_cq(nvmeq, &tag);
-		spin_unlock_irq(&nvmeq->q_lock);
+	struct nvme_completion cqe;
+	int found = 0, consumed = 0;
 
-		if (tag == -1)
-			return 1;
-	}
+	if (!nvme_cqe_valid(nvmeq, nvmeq->cq_head, nvmeq->cq_phase))
+		return 0;
 
-	return 0;
+	spin_lock_irq(&nvmeq->q_lock);
+	while (nvme_read_cqe(nvmeq, &cqe)) {
+		nvme_handle_cqe(nvmeq, &cqe);
+		consumed++;
+
+		if (tag == cqe.command_id) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (consumed)
+		nvme_ring_cq_doorbell(nvmeq);
+	spin_unlock_irq(&nvmeq->q_lock);
+
+	return found;
 }
 
 static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
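For context only, and not part of this patch: the blk-mq poll entry point whose declaration closes the hunk above presumably just resolves the nvme_queue from the hardware context and forwards to __nvme_poll. A sketch, assumed from the surrounding pci.c of that era rather than taken from this diff:

static int nvme_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	/* sketch of the caller; assumed, not part of this patch */
	struct nvme_queue *nvmeq = hctx->driver_data;

	return __nvme_poll(nvmeq, tag);
}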