author    | Robin Murphy <robin.murphy@arm.com> | 2019-05-20 09:29:37 +0200
committer | Joerg Roedel <jroedel@suse.de>      | 2019-05-27 17:31:11 +0200
commit    | 072bebc0691737a6b44c648b4c59501a0cd25357 (patch)
tree      | 6b7c9c89cf1985a850fbb9d15c7eb37e19e7354c
parent    | bcf4b9c4c2ee0f00d9e273b19419416a20cce9a4 (diff)
download  | linux-072bebc0691737a6b44c648b4c59501a0cd25357.tar.bz2
iommu/dma: Refactor iommu_dma_alloc
Shuffle around the self-contained atomic and non-contiguous cases to
return early and get out of the way of the CMA case that we're about to
work on next.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
[hch: slight changes to the code flow]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r-- | drivers/iommu/dma-iommu.c | 60
1 file changed, 30 insertions(+), 30 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index a288b3d366ae..4134f13b5529 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -973,14 +973,19 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 {
 	bool coherent = dev_is_dma_coherent(dev);
 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
+	pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
 	size_t iosize = size;
+	struct page *page;
 	void *addr;
 
 	size = PAGE_ALIGN(size);
 	gfp |= __GFP_ZERO;
 
+	if (gfpflags_allow_blocking(gfp) &&
+	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
+		return iommu_dma_alloc_remap(dev, iosize, handle, gfp, attrs);
+
 	if (!gfpflags_allow_blocking(gfp)) {
-		struct page *page;
 		/*
 		 * In atomic context we can't remap anything, so we'll only
 		 * get the virtually contiguous buffer we need by way of a
@@ -1002,39 +1007,34 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 				__free_pages(page, get_order(size));
 			else
 				dma_free_from_pool(addr, size);
-			addr = NULL;
-		}
-	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
-		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
-		struct page *page;
-
-		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
-				get_order(size), gfp & __GFP_NOWARN);
-		if (!page)
 			return NULL;
-
-		*handle = __iommu_dma_map(dev, page_to_phys(page), iosize, ioprot);
-		if (*handle == DMA_MAPPING_ERROR) {
-			dma_release_from_contiguous(dev, page,
-					size >> PAGE_SHIFT);
-			return NULL;
-		}
-		addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
-				prot,
-				__builtin_return_address(0));
-		if (addr) {
-			if (!coherent)
-				arch_dma_prep_coherent(page, iosize);
-			memset(addr, 0, size);
-		} else {
-			__iommu_dma_unmap(dev, *handle, iosize);
-			dma_release_from_contiguous(dev, page,
-					size >> PAGE_SHIFT);
 		}
-	} else {
-		addr = iommu_dma_alloc_remap(dev, iosize, handle, gfp, attrs);
+		return addr;
 	}
+
+	page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+			get_order(size), gfp & __GFP_NOWARN);
+	if (!page)
+		return NULL;
+
+	*handle = __iommu_dma_map(dev, page_to_phys(page), iosize, ioprot);
+	if (*handle == DMA_MAPPING_ERROR)
+		goto out_free_pages;
+
+	addr = dma_common_contiguous_remap(page, size, VM_USERMAP, prot,
+			__builtin_return_address(0));
+	if (!addr)
+		goto out_unmap;
+
+	if (!coherent)
+		arch_dma_prep_coherent(page, iosize);
+	memset(addr, 0, size);
 	return addr;
+out_unmap:
+	__iommu_dma_unmap(dev, *handle, iosize);
+out_free_pages:
+	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+	return NULL;
 }
 
 static int __iommu_dma_mmap_pfn(struct vm_area_struct *vma,
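The restructuring follows the usual kernel early-return idiom: the self-contained cases (the remap and atomic paths) exit as soon as they are finished, and the remaining contiguous path unwinds failures through fall-through goto labels in reverse order of acquisition. Below is a minimal, compilable sketch of the same shape; the helper names are hypothetical stand-ins, not kernel APIs.

#include <stdlib.h>
#include <string.h>

#define SZ 64

/* Trivial stubs so the sketch compiles; they stand in for the three
 * steps of the real contiguous path (CMA allocation, IOMMU mapping,
 * kernel remap), each of which can fail in the real code. */
static void *grab_pages(void)      { return malloc(SZ); }
static long map_pages(void *p)     { return p ? 0 : -1; }
static void *remap_pages(void *p)  { return p; }
static void unmap_pages(long h)    { (void)h; }
static void release_pages(void *p) { free(p); }

static void *alloc_sketch(int atomic)
{
	void *pages, *addr;
	long handle;

	/* Self-contained case returns early, out of the way. */
	if (atomic)
		return malloc(SZ);

	pages = grab_pages();
	if (!pages)
		return NULL;

	handle = map_pages(pages);
	if (handle < 0)
		goto out_free_pages;

	addr = remap_pages(pages);
	if (!addr)
		goto out_unmap;

	memset(addr, 0, SZ);
	return addr;

	/* Labels fall through in reverse order of acquisition, so each
	 * failure point releases exactly what was already set up. */
out_unmap:
	unmap_pages(handle);
out_free_pages:
	release_pages(pages);
	return NULL;
}

The payoff, visible in the even 30/30 diffstat, is a success path that reads straight down at one level of indentation, while the error paths share one unwind sequence instead of duplicating the dma_release_from_contiguous() calls in each branch.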