From 41d34865b24c6a0b594b0a69bfe9ea56dff5abcd Mon Sep 17 00:00:00 2001
From: Shiraz Saleem
Date: Thu, 28 Mar 2019 11:49:45 -0500
Subject: RDMA/mthca: Use correct sizing on buffers holding page DMA addresses

The buffer that holds the page DMA addresses is sized off umem->nmap.
This can potentially cause out-of-bounds accesses on the PBL array when
iterating the umem DMA-mapped SGL. This is because if umem pages are
combined, umem->nmap can be much lower than the number of system pages
in umem.

Use ib_umem_num_pages() to size this buffer.

Signed-off-by: Shiraz Saleem
Signed-off-by: Jason Gunthorpe
---
 drivers/infiniband/hw/mthca/mthca_provider.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index d063d7a37762..35c3119726bb 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -914,7 +914,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		goto err;
 	}
 
-	n = mr->umem->nmap;
+	n = ib_umem_num_pages(mr->umem);
 
 	mr->mtt = mthca_alloc_mtt(dev, n);
 	if (IS_ERR(mr->mtt)) {
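Why the one-liner matters: umem->nmap counts entries in the DMA-mapped
SGL, which shrinks whenever the DMA layer coalesces physically contiguous
pages, while the PBL must hold one address per system page covered by the
registration. The userspace C sketch below (my own illustration, not the
kernel helper itself; the num_pages() function, the fixed PAGE_SHIFT, and
the example numbers are assumptions) shows the page-count arithmetic that
ib_umem_num_pages() performs over the registered virtual range:

/*
 * Minimal sketch, assuming 4 KiB pages: count every system page spanned
 * by [addr, addr + length), independent of how many SGL entries remain
 * after DMA mapping merges contiguous pages.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Hypothetical stand-in for ib_umem_num_pages(): pages covering a range. */
static unsigned long num_pages(uint64_t addr, uint64_t length)
{
	uint64_t start = addr & ~(PAGE_SIZE - 1);            /* round down */
	uint64_t end = (addr + length + PAGE_SIZE - 1) &
		       ~(PAGE_SIZE - 1);                     /* round up */

	return (end - start) >> PAGE_SHIFT;
}

int main(void)
{
	/*
	 * A page-aligned 64 KiB registration spans 16 system pages. If the
	 * DMA layer merges them into, say, 2 contiguous SGL entries, nmap
	 * would be 2, and a PBL sized off nmap would overflow once all 16
	 * page addresses are written into it.
	 */
	printf("pages = %lu\n", num_pages(0x1000, 64 * 1024));
	return 0;
}

Sizing off the virtual range is safe no matter how aggressively the DMA
layer merges entries; sizing off nmap is only correct in the degenerate
case where no merging occurs at all.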