From 23f88e0a7e9f084e91a40cc90a15968fb1e5d506 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 20 May 2019 09:29:28 +0200
Subject: iommu/dma: Use for_each_sg in iommu_dma_alloc

arch_dma_prep_coherent can handle physically contiguous ranges larger
than PAGE_SIZE just fine, which means we don't need a page-based
iterator.

Signed-off-by: Christoph Hellwig
Reviewed-by: Robin Murphy
Signed-off-by: Joerg Roedel
---
 drivers/iommu/dma-iommu.c | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index aac12433ffef..9b7f120d7381 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -606,15 +606,11 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
 		goto out_free_iova;
 
 	if (!(prot & IOMMU_CACHE)) {
-		struct sg_mapping_iter miter;
-		/*
-		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
-		 * sufficient here, so skip it by using the "wrong" direction.
-		 */
-		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
-		while (sg_miter_next(&miter))
-			arch_dma_prep_coherent(miter.page, PAGE_SIZE);
-		sg_miter_stop(&miter);
+		struct scatterlist *sg;
+		int i;
+
+		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
+			arch_dma_prep_coherent(sg_page(sg), sg->length);
 	}
 
 	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
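
The pattern the patch lands on is worth seeing in isolation. The sketch
below is not part of the commit; it is a minimal, self-contained
illustration of the for_each_sg() idiom, assuming a kernel build
environment of the same era (where arch_dma_prep_coherent() is declared
in <linux/dma-noncoherent.h>). The flush_discontig_buffer() name is
hypothetical, used only for illustration.

	#include <linux/scatterlist.h>
	#include <linux/dma-noncoherent.h>

	/*
	 * Hypothetical helper: flush a freshly allocated, possibly
	 * discontiguous buffer before it is mapped uncached.  Each
	 * scatterlist entry describes one physically contiguous range
	 * that may span many pages, and arch_dma_prep_coherent() takes
	 * an arbitrary length, so one call per entry suffices -- no
	 * per-page sg_mapping_iter walk is needed.
	 */
	static void flush_discontig_buffer(struct sg_table *sgt)
	{
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

Besides being shorter, iterating per entry rather than per page means
fewer calls into the arch code when entries are larger than PAGE_SIZE;
the old loop also never needed sg_miter's kmap-based page access here,
which is what the deleted "wrong direction" workaround was sidestepping.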