author     Guy Shapiro <guysh@mellanox.com>    2015-04-15 18:17:56 +0300
committer  Doug Ledford <dledford@redhat.com>  2015-05-05 09:18:02 -0400
commit     c1d383b5785b1e0fb5fb862864712a7208219e6a (patch)
tree       ac3eb2c7467890e8665508c28b5b5c517b5a0e83 /drivers/infiniband
parent     5b6b8fe64053b2649660ded2f3c5be25ebddbfdb (diff)
IB/core: dma map/unmap locking optimizations
Currently, while mapping or unmapping pages for ODP, the umem mutex is locked and unlocked once for each page. Such a lock/unlock operation takes a few tens to hundreds of nanoseconds, which adds up to a significant cost when mapping or unmapping a few MBs of memory. To avoid this, take the mutex only once per operation rather than once per page.

Signed-off-by: Guy Shapiro <guysh@mellanox.com>
Acked-by: Shachar Raindel <raindel@mellanox.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
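The change described above is the classic "hoist the lock out of the loop" pattern. As a rough illustration only, here is a minimal user-space sketch using pthread mutexes as a stand-in for the kernel mutex; the struct, field names, and map_one_page() helper are hypothetical simplifications, not the kernel's actual types or functions.

    /* Sketch: pay the lock/unlock cost once per operation, not once per page. */
    #include <pthread.h>
    #include <stddef.h>

    struct odp_ctx {
            pthread_mutex_t umem_mutex;
            void **page_list;
    };

    /* Hypothetical per-page work; caller must hold umem_mutex. */
    static int map_one_page(struct odp_ctx *ctx, size_t idx, void *page)
    {
            ctx->page_list[idx] = page;
            return 0;
    }

    /* Before: lock and unlock inside the loop, once per page. */
    static int map_pages_lock_per_page(struct odp_ctx *ctx, void **pages, size_t n)
    {
            int ret = 0;

            for (size_t i = 0; i < n && ret == 0; ++i) {
                    pthread_mutex_lock(&ctx->umem_mutex);
                    ret = map_one_page(ctx, i, pages[i]);
                    pthread_mutex_unlock(&ctx->umem_mutex);
            }
            return ret;
    }

    /* After: take the lock once around the whole loop, as the patch does. */
    static int map_pages_lock_once(struct odp_ctx *ctx, void **pages, size_t n)
    {
            int ret = 0;

            pthread_mutex_lock(&ctx->umem_mutex);
            for (size_t i = 0; i < n && ret == 0; ++i)
                    ret = map_one_page(ctx, i, pages[i]);
            pthread_mutex_unlock(&ctx->umem_mutex);
            return ret;
    }

    int main(void)
    {
            void *pages[4] = { 0 };
            void *slots[4] = { 0 };
            struct odp_ctx ctx = { PTHREAD_MUTEX_INITIALIZER, slots };

            map_pages_lock_per_page(&ctx, pages, 4);    /* old pattern */
            return map_pages_lock_once(&ctx, pages, 4); /* new pattern */
    }

The tradeoff is longer lock hold times in exchange for fewer lock acquisitions; for a loop over thousands of pages, the per-page lock/unlock overhead dominates, which is what the commit message measures in tens to hundreds of nanoseconds per page.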
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/umem_odp.c  9
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 8b8cc6fa0ab0..aba47398880d 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -446,7 +446,6 @@ static int ib_umem_odp_map_dma_single_page(
int remove_existing_mapping = 0;
int ret = 0;
- mutex_lock(&umem->odp_data->umem_mutex);
/*
* Note: we avoid writing if seq is different from the initial seq, to
* handle case of a racing notifier. This check also allows us to bail
@@ -479,8 +478,6 @@ static int ib_umem_odp_map_dma_single_page(
}
out:
- mutex_unlock(&umem->odp_data->umem_mutex);
-
/* On Demand Paging - avoid pinning the page */
if (umem->context->invalidate_range || !stored_page)
put_page(page);
@@ -586,6 +583,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
user_virt += npages << PAGE_SHIFT;
+ mutex_lock(&umem->odp_data->umem_mutex);
for (j = 0; j < npages; ++j) {
ret = ib_umem_odp_map_dma_single_page(
umem, k, base_virt_addr, local_page_list[j],
@@ -594,6 +592,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
break;
k++;
}
+ mutex_unlock(&umem->odp_data->umem_mutex);
if (ret < 0) {
/* Release left over pages when handling errors. */
@@ -633,9 +632,9 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
* faults from completion. We might be racing with other
* invalidations, so we must make sure we free each page only
* once. */
+ mutex_lock(&umem->odp_data->umem_mutex);
for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
- mutex_lock(&umem->odp_data->umem_mutex);
if (umem->odp_data->page_list[idx]) {
struct page *page = umem->odp_data->page_list[idx];
struct page *head_page = compound_head(page);
@@ -663,7 +662,7 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
umem->odp_data->page_list[idx] = NULL;
umem->odp_data->dma_list[idx] = 0;
}
- mutex_unlock(&umem->odp_data->umem_mutex);
}
+ mutex_unlock(&umem->odp_data->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);