Diffstat (limited to 'drivers')
-rw-r--r--   drivers/iommu/dma-iommu.c   18
-rw-r--r--   drivers/iommu/iommu.c       10
2 files changed, 13 insertions(+), 15 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index c80056f6c9f9..f659395e7959 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -380,18 +380,6 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
return iova_reserve_iommu_regions(dev, domain);
}
-static int iommu_dma_deferred_attach(struct device *dev,
- struct iommu_domain *domain)
-{
- const struct iommu_ops *ops = domain->ops;
-
- if (unlikely(ops->is_attach_deferred &&
- ops->is_attach_deferred(domain, dev)))
- return iommu_attach_device(domain, dev);
-
- return 0;
-}
-
/**
* dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
* page flags.
@@ -535,7 +523,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
dma_addr_t iova;
if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
- iommu_dma_deferred_attach(dev, domain))
+ iommu_deferred_attach(dev, domain))
return DMA_MAPPING_ERROR;
size = iova_align(iovad, size + iova_off);
@@ -694,7 +682,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
*dma_handle = DMA_MAPPING_ERROR;
if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
- iommu_dma_deferred_attach(dev, domain))
+ iommu_deferred_attach(dev, domain))
return NULL;
min_size = alloc_sizes & -alloc_sizes;
@@ -978,7 +966,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
int i;
if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
- iommu_dma_deferred_attach(dev, domain))
+ iommu_deferred_attach(dev, domain))
return 0;
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
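Note: the static_branch_unlikely(&iommu_deferred_attach_enabled) checks at the call sites above are unchanged by this patch; only the helper they call moves into the IOMMU core. For context, the static key itself is expected to be flipped once at init time when deferred attach is actually wanted (e.g. in a kdump kernel). A rough sketch of that existing init path in dma-iommu.c, shown here for orientation and not part of this patch:

static int __init iommu_dma_init(void)
{
	/* Deferred attach is only of interest to a kdump kernel. */
	if (is_kdump_kernel())
		static_branch_enable(&iommu_deferred_attach_enabled);

	return iova_cache_get();
}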
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 91f7871f5a37..fd76e2f579fe 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1980,6 +1980,16 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
+int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
+{
+ const struct iommu_ops *ops = domain->ops;
+
+ if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev))
+ return __iommu_attach_device(domain, dev);
+
+ return 0;
+}
+
/*
* Check flags and other user provided data for valid combinations. We also
* make sure no reserved fields or unused flags are set. This is to ensure
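Note: with iommu_deferred_attach() now defined in iommu.c but called from dma-iommu.c, a declaration outside drivers/ is also needed; it does not appear above because the diffstat is limited to 'drivers'. A minimal sketch of the include/linux/iommu.h side, assuming the usual CONFIG_IOMMU_API split (the -ENODEV stub is an assumption, not shown in this patch):

#ifdef CONFIG_IOMMU_API
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);
#else
static inline int iommu_deferred_attach(struct device *dev,
					struct iommu_domain *domain)
{
	/* Without the IOMMU API there is nothing to attach to. */
	return -ENODEV;
}
#endif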