-rw-r--r--  arch/arm64/mm/dma-mapping.c  |  4
-rw-r--r--  drivers/iommu/dma-iommu.c    | 60
-rw-r--r--  include/linux/dma-iommu.h    |  4
3 files changed, 43 insertions(+), 25 deletions(-)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 5d36907f9b12..41d19a0fc9c0 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -562,8 +562,8 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
struct page **pages;
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
- pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle,
- flush_page);
+ pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
+ handle, flush_page);
if (!pages)
return NULL;
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 99432999b52f..ea5a9ebf0f78 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -190,11 +190,15 @@ static void __iommu_dma_free_pages(struct page **pages, int count)
kvfree(pages);
}
-static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
+static struct page **__iommu_dma_alloc_pages(unsigned int count,
+ unsigned long order_mask, gfp_t gfp)
{
struct page **pages;
unsigned int i = 0, array_size = count * sizeof(*pages);
- unsigned int order = MAX_ORDER;
+
+ order_mask &= (2U << MAX_ORDER) - 1;
+ if (!order_mask)
+ return NULL;
if (array_size <= PAGE_SIZE)
pages = kzalloc(array_size, GFP_KERNEL);
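As a quick sanity check of the new clamp: (2U << MAX_ORDER) - 1 keeps exactly the bits for orders 0..MAX_ORDER, so stray higher bits in the caller's size bitmap are simply ignored. A small userspace sketch with made-up values (MAX_ORDER and the sample mask are illustrative, not taken from any particular config):

#include <stdio.h>

#define MAX_ORDER 11	/* illustrative; the kernel value is arch/config dependent */

int main(void)
{
	/* orders 0, 4 and 9, plus a deliberately out-of-range order 20 */
	unsigned long order_mask = (1UL << 0) | (1UL << 4) | (1UL << 9) | (1UL << 20);

	/* same clamp as __iommu_dma_alloc_pages(): keep orders 0..MAX_ORDER only */
	order_mask &= (2UL << MAX_ORDER) - 1;

	printf("usable orders:");
	for (unsigned int order = 0; order <= MAX_ORDER; order++)
		if (order_mask & (1UL << order))
			printf(" %u", order);
	printf("\n");	/* prints: usable orders: 0 4 9 */
	return 0;
}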
@@ -208,36 +212,38 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
while (count) {
struct page *page = NULL;
- int j;
+ unsigned int order_size;
/*
* Higher-order allocations are a convenience rather
* than a necessity, hence using __GFP_NORETRY until
- * falling back to single-page allocations.
+ * falling back to minimum-order allocations.
*/
- for (order = min_t(unsigned int, order, __fls(count));
- order > 0; order--) {
- page = alloc_pages(gfp | __GFP_NORETRY, order);
+ for (order_mask &= (2U << __fls(count)) - 1;
+ order_mask; order_mask &= ~order_size) {
+ unsigned int order = __fls(order_mask);
+
+ order_size = 1U << order;
+ page = alloc_pages((order_mask - order_size) ?
+ gfp | __GFP_NORETRY : gfp, order);
if (!page)
continue;
- if (PageCompound(page)) {
- if (!split_huge_page(page))
- break;
- __free_pages(page, order);
- } else {
+ if (!order)
+ break;
+ if (!PageCompound(page)) {
split_page(page, order);
break;
+ } else if (!split_huge_page(page)) {
+ break;
}
+ __free_pages(page, order);
}
- if (!page)
- page = alloc_page(gfp);
if (!page) {
__iommu_dma_free_pages(pages, i);
return NULL;
}
- j = 1 << order;
- count -= j;
- while (j--)
+ count -= order_size;
+ while (order_size--)
pages[i++] = page++;
}
return pages;
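To make the control flow of the rewritten loop easier to follow, here is a hedged userspace model of just the order-selection logic: allocation is faked as always succeeding and __fls() is replaced with a compiler builtin, so only the "which order gets tried next" part is reproduced. The mask and count are arbitrary example values.

#include <stdio.h>

/* stand-in for the kernel's __fls(): index of the highest set bit */
static unsigned int fls_index(unsigned long x)
{
	return (sizeof(unsigned long) * 8 - 1) - __builtin_clzl(x);
}

int main(void)
{
	/* hypothetical order mask: 4K, 64K and 2M chunks on a 4K-page system */
	unsigned long alloc_orders = (1UL << 0) | (1UL << 4) | (1UL << 9);
	unsigned int count = 21;	/* pages still needed; arbitrary example */

	while (count) {
		/* as in the kernel loop: never pick an order bigger than what is
		 * left to allocate, then take the largest order still in the mask */
		unsigned long order_mask = alloc_orders & ((2UL << fls_index(count)) - 1);
		unsigned int order = fls_index(order_mask);
		unsigned int order_size = 1U << order;

		printf("would try order %u (%u pages) with %u pages left\n",
		       order, order_size, count);

		count -= order_size;	/* pretend the allocation succeeded */
	}
	return 0;
}

For count = 21 this prints one order-4 (64K) attempt followed by five order-0 (single page) attempts; in the real code a failed higher-order attempt additionally clears that order from the mask and falls through to the next smaller one.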
@@ -267,6 +273,7 @@ void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
* attached to an iommu_dma_domain
* @size: Size of buffer in bytes
* @gfp: Allocation flags
+ * @attrs: DMA attributes for this allocation
* @prot: IOMMU mapping flags
* @handle: Out argument for allocated DMA handle
* @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
@@ -278,8 +285,8 @@ void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
* Return: Array of struct page pointers describing the buffer,
* or NULL on failure.
*/
-struct page **iommu_dma_alloc(struct device *dev, size_t size,
- gfp_t gfp, int prot, dma_addr_t *handle,
+struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
+ struct dma_attrs *attrs, int prot, dma_addr_t *handle,
void (*flush_page)(struct device *, const void *, phys_addr_t))
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
@@ -288,11 +295,22 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size,
struct page **pages;
struct sg_table sgt;
dma_addr_t dma_addr;
- unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
*handle = DMA_ERROR_CODE;
- pages = __iommu_dma_alloc_pages(count, gfp);
+ min_size = alloc_sizes & -alloc_sizes;
+ if (min_size < PAGE_SIZE) {
+ min_size = PAGE_SIZE;
+ alloc_sizes |= PAGE_SIZE;
+ } else {
+ size = ALIGN(size, min_size);
+ }
+ if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs))
+ alloc_sizes = min_size;
+
+ count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
if (!pages)
return NULL;
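The sizing changes above reduce to a little bit arithmetic on the domain's pgsize_bitmap: the smallest supported IOMMU page becomes min_size, the request is rounded up to it when it exceeds PAGE_SIZE, and DMA_ATTR_ALLOC_SINGLE_PAGES collapses the candidate set to that minimum. A userspace sketch of the same arithmetic, with PAGE_SIZE, the bitmap and the request size as illustrative stand-ins (dma_get_attr() is modelled as a plain bool):

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE	4096UL
#define PAGE_SHIFT	12
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* hypothetical IOMMU page sizes: 4K | 64K | 2M */
	unsigned long alloc_sizes = 0x1000UL | 0x10000UL | 0x200000UL;
	bool want_single_pages = false;	/* stands in for DMA_ATTR_ALLOC_SINGLE_PAGES */
	size_t size = 70000;		/* requested buffer size, arbitrary */

	unsigned long min_size = alloc_sizes & -alloc_sizes;	/* smallest IOMMU page */
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (want_single_pages)
		alloc_sizes = min_size;

	unsigned int count = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;

	printf("min_size=%lu bytes, order mask=%#lx, %u CPU pages to allocate\n",
	       min_size, alloc_sizes >> PAGE_SHIFT, count);
	return 0;
}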
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index fc481037478a..8443bbb5c071 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -38,8 +38,8 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);
* These implement the bulk of the relevant DMA mapping callbacks, but require
* the arch code to take care of attributes and cache maintenance
*/
-struct page **iommu_dma_alloc(struct device *dev, size_t size,
- gfp_t gfp, int prot, dma_addr_t *handle,
+struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
+ struct dma_attrs *attrs, int prot, dma_addr_t *handle,
void (*flush_page)(struct device *, const void *, phys_addr_t));
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
dma_addr_t *handle);
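Finally, a hedged sketch of how a driver sitting on top of this path might opt in to single-page allocations through the struct dma_attrs interface used at this point in time; the helper name, device pointer and buffer size are placeholders rather than part of the patch:

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

static void *alloc_streaming_buffer(struct device *dev, size_t size,
				    dma_addr_t *dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);

	/* Ask the IOMMU DMA layer to skip higher-order allocations: the buffer
	 * is only ever accessed through the IOMMU, so physical contiguity buys
	 * nothing and the allocation is cheaper under memory fragmentation. */
	dma_set_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, &attrs);

	return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL, &attrs);
}

A buffer obtained this way would later be released with dma_free_attrs() passing the same attrs; whether the attribute is worth setting is the driver's call, since devices that could map the buffer with larger IOMMU pages give up that optimisation in exchange for a more reliable allocation.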