author     Jianxin Xiong <jianxin.xiong@intel.com>    2020-10-14 09:16:01 -0700
committer  Daniel Vetter <daniel.vetter@ffwll.ch>     2020-10-15 23:15:43 +0200
commit     ac80cd17a615e472e3dcff0a5446a58540e64e6d (patch)
tree       44d8ef5a2d724ef5e835c3344717d21a36265a49
parent     73b62cdb93b68d7e2c1d373c6a411bc00c53e702 (diff)
dma-buf: Clarify that dma-buf sg lists are page aligned
The dma-buf API has been used under the assumption that the sg lists
returned from dma_buf_map_attachment() are fully page aligned. Lots of
things can otherwise break all over the place. Clarify this in the
documentation and add a check when DMA API debug is enabled.

Signed-off-by: Jianxin Xiong <jianxin.xiong@intel.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/1602692161-107096-1-git-send-email-jianxin.xiong@intel.com
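For context, here is a minimal importer-side sketch of why the guarantee matters (the example_count_pages() helper is hypothetical and not part of this patch; error handling is abbreviated). Because every DMA address and length in the returned sg list is PAGE_SIZE aligned, an importer can count whole pages directly from the DMA lengths:

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical importer: map a dma-buf and count the pages it spans. */
static long example_count_pages(struct device *dev, struct dma_buf *dmabuf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct scatterlist *sg;
        long npages = 0;
        int i;

        attach = dma_buf_attach(dmabuf, dev);
        if (IS_ERR(attach))
                return PTR_ERR(attach);

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sgt)) {
                dma_buf_detach(dmabuf, attach);
                return PTR_ERR(sgt);
        }

        /*
         * The DMA addresses and lengths are PAGE_SIZE aligned, so the
         * shift below never drops a partial entry.
         */
        for_each_sgtable_dma_sg(sgt, sg, i)
                npages += sg_dma_len(sg) >> PAGE_SHIFT;

        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
        dma_buf_detach(dmabuf, attach);
        return npages;
}

Without the alignment guarantee, the shift by PAGE_SHIFT would silently truncate any entry whose length is not a whole number of pages.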
-rw-r--r--  drivers/dma-buf/dma-buf.c  21
-rw-r--r--  include/linux/dma-buf.h     3
2 files changed, 23 insertions(+), 1 deletion(-)
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index a6ba4d598f0e..9db211a2b6cb 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -849,6 +849,9 @@ EXPORT_SYMBOL_GPL(dma_buf_unpin);
* Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
* on error. May return -EINTR if it is interrupted by a signal.
*
+ * On success, the DMA addresses and lengths in the returned scatterlist are
+ * PAGE_SIZE aligned.
+ *
* A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
* the underlying backing storage is pinned for as long as a mapping exists,
* therefore users/importers should not hold onto a mapping for undue amounts of
@@ -902,6 +905,24 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
attach->dir = direction;
}
+#ifdef CONFIG_DMA_API_DEBUG
+	{
+		struct scatterlist *sg;
+		u64 addr;
+		int len;
+		int i;
+
+		for_each_sgtable_dma_sg(sg_table, sg, i) {
+			addr = sg_dma_address(sg);
+			len = sg_dma_len(sg);
+			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
+				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
+					 __func__, addr, len);
+			}
+		}
+	}
+#endif /* CONFIG_DMA_API_DEBUG */
+
return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
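Note on using the new check: it is compiled in only with CONFIG_DMA_API_DEBUG=y, and because it reports violations with pr_debug(), the message normally appears only when dynamic debug is enabled for this file (for example, assuming a kernel built with CONFIG_DYNAMIC_DEBUG, by writing 'file dma-buf.c +p' to /sys/kernel/debug/dynamic_debug/control) or when DEBUG is defined for the translation unit.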
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index cf77cc15f4ba..03875eaed51a 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -146,7 +146,8 @@ struct dma_buf_ops {
*
* A &sg_table scatter list of the backing storage of the DMA buffer,
* already mapped into the device address space of the &device attached
- * with the provided &dma_buf_attachment.
+ * with the provided &dma_buf_attachment. The addresses and lengths in
+ * the scatter list are PAGE_SIZE aligned.
*
* On failure, returns a negative error value wrapped into a pointer.
* May also return -EINTR when a signal was received while being