 drivers/iommu/dma-iommu.c | 1 +
 drivers/iommu/iova.c      | 7 +++++++
 include/linux/iommu.h     | 8 +++++++-
 3 files changed, 15 insertions(+), 1 deletion(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index afaa1f9b5935..1eacbbdf601c 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -481,6 +481,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
 	dma_addr -= iova_off;
 	size = iova_align(iovad, size + iova_off);
 	iommu_iotlb_gather_init(&iotlb_gather);
+	iotlb_gather.queued = cookie->fq_domain;
 
 	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
 	WARN_ON(unmapped != size);
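For context: the assignment relies on C's implicit pointer-to-bool conversion, so @queued ends up true exactly when the domain uses a flush queue. The point of publishing that through the gather is that a driver's ->iotlb_sync() callback can then skip the synchronous invalidation when the flush is deferred. A minimal sketch of such a consumer follows; the foo_* names are hypothetical and not part of this patch:

/* Hypothetical hardware invalidation helper, assumed for this sketch */
static void foo_flush_range(struct iommu_domain *domain, unsigned long iova,
			    size_t size);

/*
 * Illustrative ->iotlb_sync() callback: when the gather is marked as
 * queued, the synchronous per-range invalidation can be skipped, since
 * the flush queue will flush the whole domain later anyway.
 */
static void foo_iommu_iotlb_sync(struct iommu_domain *domain,
				 struct iommu_iotlb_gather *gather)
{
	if (gather->queued)
		return;

	/* @end is inclusive, hence the +1 for the length */
	foo_flush_range(domain, gather->start,
			gather->end - gather->start + 1);
}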
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index b6cf5f16123b..2ad73fb2e94e 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -637,6 +637,13 @@ void queue_iova(struct iova_domain *iovad,
 	unsigned long flags;
 	unsigned idx;
 
+	/*
+	 * Order against the IOMMU driver's pagetable update from unmapping
+	 * @pte, to guarantee that iova_domain_flush() observes that if called
+	 * from a different CPU before we release the lock below.
+	 */
+	smp_wmb();
+
 	spin_lock_irqsave(&fq->lock, flags);
 
 	/*
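The barrier above is the write half of a publish/observe pairing: the pagetable stores from the unmap must be visible before the IOVA is published to the flush queue, so that whichever CPU performs the flush cannot invalidate the IOTLB while a stale PTE is still observable. A rough user-space analogue of that pattern, using C11 release/acquire in place of the kernel barriers (illustration only, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>

static int pte = 1;		/* stands in for the pagetable entry */
static atomic_bool published;	/* stands in for the fq ring update  */

/* unmap/queue side, cf. queue_iova() */
static void unmap_and_queue(void)
{
	pte = 0;	/* tear down the mapping...          */
	atomic_store_explicit(&published, true,
			      memory_order_release);	/* ...then publish */
}

/* flush side on another CPU, cf. iova_domain_flush() */
static bool flush_side(void)
{
	if (!atomic_load_explicit(&published, memory_order_acquire))
		return false;
	/* acquire pairs with release: the cleared PTE is guaranteed visible */
	return pte == 0;
}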
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 141779d76035..f7679f6684b1 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -161,16 +161,22 @@ enum iommu_dev_features {
  * @start: IOVA representing the start of the range to be flushed
  * @end: IOVA representing the end of the range to be flushed (inclusive)
  * @pgsize: The interval at which to perform the flush
+ * @freelist: Removed pages to free after sync
+ * @queued: Indicates that the flush will be queued
  *
  * This structure is intended to be updated by multiple calls to the
  * ->unmap() function in struct iommu_ops before eventually being passed
- * into ->iotlb_sync().
+ * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
+ * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
+ * them. @queued is set to indicate when ->iotlb_flush_all() will be called
+ * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
  */
 struct iommu_iotlb_gather {
 	unsigned long		start;
 	unsigned long		end;
 	size_t			pgsize;
 	struct page		*freelist;
+	bool			queued;
 };
 
 /**
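To illustrate the @freelist side of the comment above: a driver's ->unmap() callback cannot free a just-unhooked pagetable page immediately, because the IOTLB walk cache may still reference it, so it parks the page on the gather instead. A sketch of that pattern follows; the foo_* helpers are hypothetical, though iommu_iotlb_gather_add_page() is the real helper:

/* Hypothetical helper, assumed to unhook PTEs and hand back a dead table page */
static struct page *foo_clear_ptes(struct iommu_domain *domain,
				   unsigned long iova, size_t size);

/*
 * Illustrative ->unmap() callback: a just-unhooked pagetable page cannot
 * be freed yet, since the IOTLB walk cache may still reference it, so it
 * is chained onto @freelist via page->freelist until after the flush.
 */
static size_t foo_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			      size_t size, struct iommu_iotlb_gather *gather)
{
	struct page *table = foo_clear_ptes(domain, iova, size);

	if (table) {
		table->freelist = gather->freelist;
		gather->freelist = table;
	}

	iommu_iotlb_gather_add_page(domain, gather, iova, size);
	return size;
}

In the queued case the pages have to survive until the deferred flush; that is handled by the caller rather than the driver, with dma-iommu passing gather.freelist through queue_iova() so the pages are only freed once the deferred invalidation has run.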