author     Christoph Hellwig <hch@lst.de>    2018-09-30 16:13:33 -0700
committer  Christoph Hellwig <hch@lst.de>    2018-10-19 08:48:28 +0200
commit     fafadcd16595c1df82df399f62421718ec9bf70a (patch)
tree       c13da3c6e387183a5edd19349ed195c0699ac116 /arch/arm64/mm/dma-mapping.c
parent     c4dae366925f929749b2a26efa53b561904a9a4f (diff)
download   linux-fafadcd16595c1df82df399f62421718ec9bf70a.tar.bz2
swiotlb: don't dip into swiotlb pool for coherent allocations
All architectures that support swiotlb also have a zone that backs up
these less than full addressing allocations (usually ZONE_DMA32).

Because of that it is rather pointless to fall back to the global swiotlb
buffer if the normal dma direct allocation failed - the only thing this
will do is to eat up bounce buffers that would be more useful to serve
streaming mappings.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
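For context, the zone fallback the commit message relies on can be sketched as
below. This is an illustrative, simplified reconstruction, not the actual
dma_direct_alloc_pages() implementation: fits_coherent_mask() and
alloc_addressable_pages() are made-up names for this sketch, and the
addressability check glosses over the phys-to-DMA address translation the real
code performs. It only shows why retrying from a more restricted zone makes a
swiotlb bounce-buffer fallback unnecessary for coherent allocations.

#include <linux/device.h>
#include <linux/gfp.h>

/* Hypothetical helper: can the device reach this page with its coherent mask?
 * (Simplified: real code compares DMA addresses, not raw physical addresses.) */
static bool fits_coherent_mask(struct device *dev, struct page *page,
			       size_t size)
{
	phys_addr_t phys = page_to_phys(page);

	return phys + size - 1 <= dev->coherent_dma_mask;
}

/* Illustrative zone-retry loop: if the first allocation is out of reach,
 * retry from ZONE_DMA32, then ZONE_DMA, instead of dipping into swiotlb. */
static struct page *alloc_addressable_pages(struct device *dev, size_t size,
					    gfp_t gfp)
{
	unsigned int order = get_order(size);
	struct page *page;

again:
	page = alloc_pages(gfp, order);
	if (page && !fits_coherent_mask(dev, page, size)) {
		__free_pages(page, order);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}
		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
		/* No suitable zone left: fail the coherent allocation rather
		 * than eat bounce buffers needed for streaming mappings. */
	}
	return page;
}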
Diffstat (limited to 'arch/arm64/mm/dma-mapping.c')
-rw-r--r--  arch/arm64/mm/dma-mapping.c  6
1 file changed, 3 insertions, 3 deletions
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 8d91b927e09e..eee6cfcfde9e 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -112,7 +112,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 		return addr;
 	}
 
-	ptr = swiotlb_alloc(dev, size, dma_handle, flags, attrs);
+	ptr = dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
 	if (!ptr)
 		goto no_mem;
@@ -133,7 +133,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
 	return coherent_ptr;
 
 no_map:
-	swiotlb_free(dev, size, ptr, *dma_handle, attrs);
+	dma_direct_free_pages(dev, size, ptr, *dma_handle, attrs);
 no_mem:
 	return NULL;
 }
@@ -151,7 +151,7 @@ static void __dma_free(struct device *dev, size_t size,
 			return;
 		vunmap(vaddr);
 	}
-	swiotlb_free(dev, size, swiotlb_addr, dma_handle, attrs);
+	dma_direct_free_pages(dev, size, swiotlb_addr, dma_handle, attrs);
 }
 
 static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,