author     Christoph Hellwig <hch@lst.de>   2015-10-16 07:58:37 +0200
committer  Jens Axboe <axboe@fb.com>        2015-12-01 10:59:39 -0700
commit     69d2b571746d1c3fa10b7a0aa00859b296a98d12 (patch)
tree       e971387fe713c6e54689d93e686903b45b92e29d /drivers/nvme
parent     1c63dc66580d4bbb6d2b75bf184b5aa105ba5bdb (diff)
download   linux-69d2b571746d1c3fa10b7a0aa00859b296a98d12.tar.bz2
nvme: simplify nvme_setup_prps calling convention
Pass back a true/false value instead of the length, which needs a compare
with the bytes in the request, and drop the pointless gfp_t argument.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
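In effect, the helper's contract changes from "number of bytes mapped" to a plain success flag. A minimal sketch of the two prototypes, before and after this patch; struct nvme_dev and struct nvme_iod are the driver's existing types, and nothing here goes beyond what the diff below shows:

/* Before: returns the number of bytes mapped, which the caller had to
 * compare against blk_rq_bytes(req); the gfp_t argument selected the
 * allocation flags for the PRP list pages. */
static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
		int total_len, gfp_t gfp);

/* After: returns true when all PRP entries were set up, false when a
 * PRP list allocation failed; dma_pool_alloc() is now called with
 * GFP_ATOMIC directly, matching what the only caller passed anyway. */
static bool nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
		int total_len);

Hard-coding GFP_ATOMIC is consistent with the call site in nvme_queue_rq(), the I/O submission path, where sleeping allocations are not an option.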
Diffstat (limited to 'drivers/nvme')
-rw-r--r--   drivers/nvme/host/pci.c   22
1 file changed, 10 insertions(+), 12 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8a564f4ecf99..75970fdeeecb 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -709,9 +709,8 @@ release_iod:
blk_mq_complete_request(req, error);
}
-/* length is in bytes. gfp flags indicates whether we may sleep. */
-static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
- int total_len, gfp_t gfp)
+static bool nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
+ int total_len)
{
struct dma_pool *pool;
int length = total_len;
@@ -727,7 +726,7 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
length -= (page_size - offset);
if (length <= 0)
- return total_len;
+ return true;
dma_len -= (page_size - offset);
if (dma_len) {
@@ -740,7 +739,7 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
if (length <= page_size) {
iod->first_dma = dma_addr;
- return total_len;
+ return true;
}
nprps = DIV_ROUND_UP(length, page_size);
@@ -752,11 +751,11 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
iod->npages = 1;
}
- prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list) {
iod->first_dma = dma_addr;
iod->npages = -1;
- return (total_len - length) + page_size;
+ return false;
}
list[0] = prp_list;
iod->first_dma = prp_dma;
@@ -764,9 +763,9 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
for (;;) {
if (i == page_size >> 3) {
__le64 *old_prp_list = prp_list;
- prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+ prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list)
- return total_len - length;
+ return false;
list[iod->npages++] = prp_list;
prp_list[0] = old_prp_list[i - 1];
old_prp_list[i - 1] = cpu_to_le64(prp_dma);
@@ -786,7 +785,7 @@ static int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod,
dma_len = sg_dma_len(sg);
}
- return total_len;
+ return true;
}
static void nvme_submit_priv(struct nvme_queue *nvmeq, struct request *req,
@@ -952,8 +951,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
if (!dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir))
goto retry_cmd;
- if (blk_rq_bytes(req) !=
- nvme_setup_prps(dev, iod, blk_rq_bytes(req), GFP_ATOMIC)) {
+ if (!nvme_setup_prps(dev, iod, blk_rq_bytes(req))) {
dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
goto retry_cmd;
}