author	Shiraz Saleem <shiraz.saleem@intel.com>	2019-05-06 08:53:33 -0500
committer	Jason Gunthorpe <jgg@mellanox.com>	2019-05-06 13:08:11 -0300
commit	a808273a495c657e33281b181fd7fcc2bb28f662 (patch)
tree	4da75f55f429a761ae4e0bb9bf3126fbe8f996b2 /include
parent	4a35339958f16d42a4ca06a8da9d4b5ab39ee8ea (diff)
download	linux-a808273a495c657e33281b181fd7fcc2bb28f662.tar.bz2
RDMA/verbs: Add a DMA iterator to return aligned contiguous memory blocks
This helper iterates over a DMA-mapped SGL and returns contiguous memory
blocks aligned to a HW supported page size.

Suggested-by: Jason Gunthorpe <jgg@ziepe.ca>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
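For orientation only (not part of this commit): a driver consuming this API
might walk an ib_umem's DMA-mapped SGL roughly as in the sketch below, using
the best HW supported page size it has computed. The helper name
hypothetical_fill_pbl and its pbl argument are made up for illustration; the
umem fields assume the ib_umem layout of this kernel (sg_head/nmap).

/* Hypothetical driver sketch, not from this commit: record each block's
 * aligned DMA address in a driver page/buffer list. Assumes
 * <rdma/ib_verbs.h> and <rdma/ib_umem.h>.
 */
static void hypothetical_fill_pbl(struct ib_umem *umem, u64 *pbl,
				  unsigned long pgsz)
{
	struct ib_block_iter biter;
	int i = 0;

	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, pgsz)
		pbl[i++] = rdma_block_iter_dma_address(&biter);
}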
Diffstat (limited to 'include')
-rw-r--r--  include/rdma/ib_verbs.h  47
1 file changed, 47 insertions, 0 deletions
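The two __rdma_block_iter_* helpers declared in the hunks below are
implemented in drivers/infiniband/core/verbs.c, which this diffstat (limited
to 'include') does not show. The following is a simplified sketch of the
iteration logic, consistent with the field comments in struct ib_block_iter
but not the verbatim kernel code:

/* Simplified sketch of the out-of-tree-of-this-diffstat helpers. */
void __rdma_block_iter_start(struct ib_block_iter *biter,
			     struct scatterlist *sglist,
			     unsigned int nents, unsigned long pgsz)
{
	memset(biter, 0, sizeof(*biter));
	biter->__sg = sglist;
	biter->__sg_nents = nents;
	/* pgsz is the best HW supported page size; remember its order */
	biter->__pg_bit = __fls(pgsz);
}

bool __rdma_block_iter_next(struct ib_block_iter *biter)
{
	unsigned int block_offset;

	if (!biter->__sg || !biter->__sg_nents)
		return false;

	/* Unaligned DMA address of the current block */
	biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;

	/* Advance past this block to the next aligned boundary */
	block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
	biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;

	/* Move to the next SG entry once this one is consumed */
	if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
		biter->__sg_advance = 0;
		biter->__sg = sg_next(biter->__sg);
		biter->__sg_nents--;
	}

	return true;
}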
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 03b07ec6a34b..deb67b21ccb9 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2726,6 +2726,21 @@ struct ib_client {
u8 no_kverbs_req:1;
};
+/*
+ * IB block DMA iterator
+ *
+ * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
+ * to a HW supported page size.
+ */
+struct ib_block_iter {
+ /* internal states */
+ struct scatterlist *__sg; /* sg holding the current aligned block */
+ dma_addr_t __dma_addr; /* unaligned DMA address of this block */
+ unsigned int __sg_nents; /* number of SG entries */
+ unsigned int __sg_advance; /* number of bytes to advance in sg in next step */
+ unsigned int __pg_bit; /* alignment of current block */
+};
+
struct ib_device *_ib_alloc_device(size_t size);
#define ib_alloc_device(drv_struct, member) \
container_of(_ib_alloc_device(sizeof(struct drv_struct) + \
@@ -2746,6 +2761,38 @@ void ib_unregister_device_queued(struct ib_device *ib_dev);
int ib_register_client (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);
+void __rdma_block_iter_start(struct ib_block_iter *biter,
+ struct scatterlist *sglist,
+ unsigned int nents,
+ unsigned long pgsz);
+bool __rdma_block_iter_next(struct ib_block_iter *biter);
+
+/**
+ * rdma_block_iter_dma_address - get the aligned dma address of the current
+ * block held by the block iterator.
+ * @biter: block iterator holding the memory block
+ */
+static inline dma_addr_t
+rdma_block_iter_dma_address(struct ib_block_iter *biter)
+{
+ return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
+}
+
+/**
+ * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
+ * @sglist: sglist to iterate over
+ * @biter: block iterator holding the memory block
+ * @nents: maximum number of sg entries to iterate over
+ * @pgsz: best HW supported page size to use
+ *
+ * Callers may use rdma_block_iter_dma_address() to get each
+ * block's aligned DMA address.
+ */
+#define rdma_for_each_block(sglist, biter, nents, pgsz) \
+ for (__rdma_block_iter_start(biter, sglist, nents, \
+ pgsz); \
+ __rdma_block_iter_next(biter);)
+
/**
* ib_get_client_data - Get IB client context
* @device:Device to get context for