author    Nisheeth Bhat <nisheeth.bhat@intel.com>        2011-09-29 10:10:10 -0400
committer Matthew Wilcox <matthew.r.wilcox@intel.com>    2011-11-04 15:53:04 -0400
commit    0d1bc9125890426b52ca2de6abedd32e31722e5c
tree      c62cff35187c87c8a2b1f6dafdfc577cba94c137 /drivers/block/nvme.c
parent    bc5fc7e4b22ca855902aba02b28c96f09b446407
Fix calculation of number of pages in a PRP List
The existing calculation underestimated the number of pages required, as it did
not take into account the pointer at the end of each page.  The replacement
calculation may overestimate the number of pages required if the last page in
the PRP List is entirely full.  By using ->npages as a counter as we fill in
the pages, we ensure that we don't try to free a page that was never allocated.

Signed-off-by: Nisheeth Bhat <nisheeth.bhat@intel.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
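For concreteness, here is a minimal userspace sketch of the arithmetic (this is
not driver code; it assumes a 4 KiB PAGE_SIZE and 8-byte PRP entries, and the
sample nprps values are illustrative only):

/*
 * Each PRP List page reserves its final 8-byte slot as a pointer to the
 * next list page, so only PAGE_SIZE - 8 bytes of it hold data entries
 * (511 per page with a 4 KiB page).
 */
#include <stdio.h>

#define PAGE_SIZE		4096
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static void show(int nprps)
{
	/* Old formula: ignores the chain pointer at the end of each page. */
	int old_npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE);
	/* New formula: only PAGE_SIZE - 8 bytes per page carry PRP entries. */
	int new_npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);

	printf("nprps=%4d  old=%d  new=%d\n", nprps, old_npages, new_npages);
}

int main(void)
{
	show(512);	/* old=1, new=2: the old count is one page short */
	show(1023);	/* old=2, new=3: the new count overestimates by one
			 * page when the last list page is entirely full */
	return 0;
}

The occasional overestimate is harmless because ->npages now counts pages as
they are actually allocated, so teardown never touches a list page that was
never handed out.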
Diffstat (limited to 'drivers/block/nvme.c')
 drivers/block/nvme.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index b77894a75855..3afdc750aaa8 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -265,7 +265,7 @@ static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
}
struct nvme_prps {
- int npages;
+ int npages; /* 0 means small pool in use */
dma_addr_t first_dma;
__le64 *list[0];
};
@@ -347,7 +347,7 @@ static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
int offset = offset_in_page(dma_addr);
__le64 *prp_list;
dma_addr_t prp_dma;
- int nprps, npages, i, prp_page;
+ int nprps, npages, i;
struct nvme_prps *prps = NULL;
cmd->prp1 = cpu_to_le64(dma_addr);
@@ -370,20 +370,20 @@ static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
}
nprps = DIV_ROUND_UP(length, PAGE_SIZE);
- npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE);
+ npages = DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
prps = kmalloc(sizeof(*prps) + sizeof(__le64 *) * npages, gfp);
if (!prps) {
cmd->prp2 = cpu_to_le64(dma_addr);
*len = (*len - length) + PAGE_SIZE;
return prps;
}
- prp_page = 0;
+
if (nprps <= (256 / 8)) {
pool = dev->prp_small_pool;
prps->npages = 0;
} else {
pool = dev->prp_page_pool;
- prps->npages = npages;
+ prps->npages = 1;
}
prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
@@ -393,7 +393,7 @@ static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
kfree(prps);
return NULL;
}
- prps->list[prp_page++] = prp_list;
+ prps->list[0] = prp_list;
prps->first_dma = prp_dma;
cmd->prp2 = cpu_to_le64(prp_dma);
i = 0;
@@ -405,7 +405,7 @@ static struct nvme_prps *nvme_setup_prps(struct nvme_dev *dev,
*len = (*len - length);
return prps;
}
- prps->list[prp_page++] = prp_list;
+ prps->list[prps->npages++] = prp_list;
prp_list[0] = old_prp_list[i - 1];
old_prp_list[i - 1] = cpu_to_le64(prp_dma);
i = 1;
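For context on why ->npages is kept as a running count: the free path has to
release exactly the list pages that were chained in.  Below is a hedged sketch
of such a teardown; the helper name and loop details are assumptions for
illustration, and only the nvme_prps fields (npages, first_dma, list[]) and the
two DMA pools come from the hunks above.

/* Hypothetical sketch, not the patched driver's actual free routine. */
static void free_prp_list_sketch(struct nvme_dev *dev, struct nvme_prps *prps)
{
	const int last_entry = PAGE_SIZE / 8 - 1;	/* chain-pointer slot */
	dma_addr_t dma;
	int i;

	if (!prps)
		return;
	dma = prps->first_dma;

	if (prps->npages == 0) {
		/* npages == 0 now means the small pool was used. */
		dma_pool_free(dev->prp_small_pool, prps->list[0], dma);
	} else {
		for (i = 0; i < prps->npages; i++) {
			__le64 *page = prps->list[i];
			/* Read the chain pointer before freeing the page. */
			dma_addr_t next = le64_to_cpu(page[last_entry]);

			dma_pool_free(dev->prp_page_pool, page, dma);
			dma = next;
		}
	}
	kfree(prps);
}

Because the patch increments ->npages only after a successful dma_pool_alloc(),
a loop like the one above can trust it as an exact count and never frees a page
that was never allocated.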