| author | Joerg Roedel <jroedel@suse.de> | 2022-03-04 16:46:19 +0100 |
|---|---|---|
| committer | Joerg Roedel <jroedel@suse.de> | 2022-03-04 16:46:19 +0100 |
| commit | 067e95fc34f017c66e1711f3a08098d59ad4da1a (patch) | |
| tree | 36109969308a599ea75bd9da1de739464b524167 /drivers/iommu | |
| parent | f266c11bce79ea793a741e3da6dfe4d6007df8df (diff) | |
| parent | 5b61343b50590fb04a3f6be2cdc4868091757262 (diff) | |
| download | linux-067e95fc34f017c66e1711f3a08098d59ad4da1a.tar.bz2 | |
Merge branch 'core' into x86/vt-d
Diffstat (limited to 'drivers/iommu')
29 files changed, 305 insertions, 1385 deletions
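
The bulk of this merge is mechanical: the 'core' branch splits `struct iommu_ops` into device-scoped callbacks and a new per-domain `struct iommu_domain_ops`, which each driver now publishes through a `default_domain_ops` pointer; `__iommu_domain_alloc()` then inherits those ops into the domain. Below is a minimal userspace sketch of that pattern, using trimmed-down mirrors of the kernel structs — the `demo_*` names and struct bodies are illustrative only, not the real kernel API:

```c
#include <stdio.h>

struct iommu_domain;

/* Per-domain callbacks, split out of the old monolithic ops struct. */
struct iommu_domain_ops {
	int  (*attach_dev)(struct iommu_domain *domain, void *dev);
	void (*free)(struct iommu_domain *domain);
};

/* Device-scoped ops keep allocation plus a pointer to the domain ops. */
struct iommu_ops {
	struct iommu_domain *(*domain_alloc)(unsigned int type);
	unsigned long pgsize_bitmap;
	const struct iommu_domain_ops *default_domain_ops;
};

struct iommu_domain {
	const struct iommu_domain_ops *ops;
	unsigned long pgsize_bitmap;
};

static int demo_attach_dev(struct iommu_domain *domain, void *dev)
{
	(void)domain; (void)dev;
	return 0;
}

static void demo_domain_free(struct iommu_domain *domain)
{
	(void)domain;
}

static struct iommu_domain demo_domain;

static struct iommu_domain *demo_domain_alloc(unsigned int type)
{
	(void)type;
	return &demo_domain;
}

/* Driver-side pattern: a compound-literal default_domain_ops member. */
static const struct iommu_ops demo_iommu_ops = {
	.domain_alloc = demo_domain_alloc,
	.pgsize_bitmap = ~0UL,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = demo_attach_dev,
		.free = demo_domain_free,
	}
};

int main(void)
{
	/* Core-side pattern from __iommu_domain_alloc(): inherit the ops. */
	struct iommu_domain *domain = demo_iommu_ops.domain_alloc(0);

	domain->pgsize_bitmap = demo_iommu_ops.pgsize_bitmap;
	if (!domain->ops)
		domain->ops = demo_iommu_ops.default_domain_ops;

	printf("attach: %d\n", domain->ops->attach_dev(domain, NULL));
	domain->ops->free(domain);
	return 0;
}
```

The payoff is visible in every driver hunk below: `attach_dev`, `map`/`unmap`, the IOTLB callbacks, and `free` move verbatim into the compound-literal `default_domain_ops`, while probe/release and the other device-scoped callbacks stay in `iommu_ops`.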
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index 416815a525d6..3b2f06b7aca6 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -116,8 +116,7 @@ void amd_iommu_domain_clr_pt_root(struct protection_domain *domain) extern bool translation_pre_enabled(struct amd_iommu *iommu); -extern bool amd_iommu_is_attach_deferred(struct iommu_domain *domain, - struct device *dev); +extern bool amd_iommu_is_attach_deferred(struct device *dev); extern int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line); diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 461f1844ed1f..2bed113c4e6d 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2215,8 +2215,7 @@ static void amd_iommu_get_resv_regions(struct device *dev, list_add_tail(®ion->list, head); } -bool amd_iommu_is_attach_deferred(struct iommu_domain *domain, - struct device *dev) +bool amd_iommu_is_attach_deferred(struct device *dev) { struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev); @@ -2269,13 +2268,6 @@ static int amd_iommu_def_domain_type(struct device *dev) const struct iommu_ops amd_iommu_ops = { .capable = amd_iommu_capable, .domain_alloc = amd_iommu_domain_alloc, - .domain_free = amd_iommu_domain_free, - .attach_dev = amd_iommu_attach_device, - .detach_dev = amd_iommu_detach_device, - .map = amd_iommu_map, - .iotlb_sync_map = amd_iommu_iotlb_sync_map, - .unmap = amd_iommu_unmap, - .iova_to_phys = amd_iommu_iova_to_phys, .probe_device = amd_iommu_probe_device, .release_device = amd_iommu_release_device, .probe_finalize = amd_iommu_probe_finalize, @@ -2284,9 +2276,18 @@ const struct iommu_ops amd_iommu_ops = { .put_resv_regions = generic_iommu_put_resv_regions, .is_attach_deferred = amd_iommu_is_attach_deferred, .pgsize_bitmap = AMD_IOMMU_PGSIZES, - .flush_iotlb_all = amd_iommu_flush_iotlb_all, - .iotlb_sync = amd_iommu_iotlb_sync, .def_domain_type = amd_iommu_def_domain_type, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = amd_iommu_attach_device, + .detach_dev = amd_iommu_detach_device, + .map = amd_iommu_map, + .unmap = amd_iommu_unmap, + .iotlb_sync_map = amd_iommu_iotlb_sync_map, + .iova_to_phys = amd_iommu_iova_to_phys, + .flush_iotlb_all = amd_iommu_flush_iotlb_all, + .iotlb_sync = amd_iommu_iotlb_sync, + .free = amd_iommu_domain_free, + } }; /***************************************************************************** diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c index 58da08cc3d01..7c94ec05d289 100644 --- a/drivers/iommu/amd/iommu_v2.c +++ b/drivers/iommu/amd/iommu_v2.c @@ -537,7 +537,7 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data) ret = NOTIFY_DONE; /* In kdump kernel pci dev is not initialized yet -> send INVALID */ - if (amd_iommu_is_attach_deferred(NULL, &pdev->dev)) { + if (amd_iommu_is_attach_deferred(&pdev->dev)) { amd_iommu_complete_ppr(pdev, iommu_fault->pasid, PPR_INVALID, tag); goto out; diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c index 565ef5598811..decafb07ad08 100644 --- a/drivers/iommu/apple-dart.c +++ b/drivers/iommu/apple-dart.c @@ -765,15 +765,6 @@ static void apple_dart_get_resv_regions(struct device *dev, static const struct iommu_ops apple_dart_iommu_ops = { .domain_alloc = apple_dart_domain_alloc, - .domain_free = apple_dart_domain_free, - .attach_dev = apple_dart_attach_dev, - .detach_dev = apple_dart_detach_dev, - .map_pages = apple_dart_map_pages, - .unmap_pages = 
apple_dart_unmap_pages, - .flush_iotlb_all = apple_dart_flush_iotlb_all, - .iotlb_sync = apple_dart_iotlb_sync, - .iotlb_sync_map = apple_dart_iotlb_sync_map, - .iova_to_phys = apple_dart_iova_to_phys, .probe_device = apple_dart_probe_device, .release_device = apple_dart_release_device, .device_group = apple_dart_device_group, @@ -782,6 +773,17 @@ static const struct iommu_ops apple_dart_iommu_ops = { .get_resv_regions = apple_dart_get_resv_regions, .put_resv_regions = generic_iommu_put_resv_regions, .pgsize_bitmap = -1UL, /* Restricted during dart probe */ + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = apple_dart_attach_dev, + .detach_dev = apple_dart_detach_dev, + .map_pages = apple_dart_map_pages, + .unmap_pages = apple_dart_unmap_pages, + .flush_iotlb_all = apple_dart_flush_iotlb_all, + .iotlb_sync = apple_dart_iotlb_sync, + .iotlb_sync_map = apple_dart_iotlb_sync_map, + .iova_to_phys = apple_dart_iova_to_phys, + .free = apple_dart_domain_free, + } }; static irqreturn_t apple_dart_irq(int irq, void *dev) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 6dc6d8b6b368..fd49282c03a3 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -2841,17 +2841,9 @@ static int arm_smmu_dev_disable_feature(struct device *dev, static struct iommu_ops arm_smmu_ops = { .capable = arm_smmu_capable, .domain_alloc = arm_smmu_domain_alloc, - .domain_free = arm_smmu_domain_free, - .attach_dev = arm_smmu_attach_dev, - .map_pages = arm_smmu_map_pages, - .unmap_pages = arm_smmu_unmap_pages, - .flush_iotlb_all = arm_smmu_flush_iotlb_all, - .iotlb_sync = arm_smmu_iotlb_sync, - .iova_to_phys = arm_smmu_iova_to_phys, .probe_device = arm_smmu_probe_device, .release_device = arm_smmu_release_device, .device_group = arm_smmu_device_group, - .enable_nesting = arm_smmu_enable_nesting, .of_xlate = arm_smmu_of_xlate, .get_resv_regions = arm_smmu_get_resv_regions, .put_resv_regions = generic_iommu_put_resv_regions, @@ -2865,6 +2857,16 @@ static struct iommu_ops arm_smmu_ops = { .page_response = arm_smmu_page_response, .pgsize_bitmap = -1UL, /* Restricted during device attach */ .owner = THIS_MODULE, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = arm_smmu_attach_dev, + .map_pages = arm_smmu_map_pages, + .unmap_pages = arm_smmu_unmap_pages, + .flush_iotlb_all = arm_smmu_flush_iotlb_all, + .iotlb_sync = arm_smmu_iotlb_sync, + .iova_to_phys = arm_smmu_iova_to_phys, + .enable_nesting = arm_smmu_enable_nesting, + .free = arm_smmu_domain_free, + } }; /* Probing and initialisation functions */ diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 4bc75c4ce402..41321fb47152 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -1583,25 +1583,27 @@ static int arm_smmu_def_domain_type(struct device *dev) static struct iommu_ops arm_smmu_ops = { .capable = arm_smmu_capable, .domain_alloc = arm_smmu_domain_alloc, - .domain_free = arm_smmu_domain_free, - .attach_dev = arm_smmu_attach_dev, - .map_pages = arm_smmu_map_pages, - .unmap_pages = arm_smmu_unmap_pages, - .flush_iotlb_all = arm_smmu_flush_iotlb_all, - .iotlb_sync = arm_smmu_iotlb_sync, - .iova_to_phys = arm_smmu_iova_to_phys, .probe_device = arm_smmu_probe_device, .release_device = arm_smmu_release_device, .probe_finalize = arm_smmu_probe_finalize, .device_group = arm_smmu_device_group, - .enable_nesting = 
arm_smmu_enable_nesting, - .set_pgtable_quirks = arm_smmu_set_pgtable_quirks, .of_xlate = arm_smmu_of_xlate, .get_resv_regions = arm_smmu_get_resv_regions, .put_resv_regions = generic_iommu_put_resv_regions, .def_domain_type = arm_smmu_def_domain_type, .pgsize_bitmap = -1UL, /* Restricted during device attach */ .owner = THIS_MODULE, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = arm_smmu_attach_dev, + .map_pages = arm_smmu_map_pages, + .unmap_pages = arm_smmu_unmap_pages, + .flush_iotlb_all = arm_smmu_flush_iotlb_all, + .iotlb_sync = arm_smmu_iotlb_sync, + .iova_to_phys = arm_smmu_iova_to_phys, + .enable_nesting = arm_smmu_enable_nesting, + .set_pgtable_quirks = arm_smmu_set_pgtable_quirks, + .free = arm_smmu_domain_free, + } }; static void arm_smmu_device_reset(struct arm_smmu_device *smmu) diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c index b91874cb6cf3..ed659de20661 100644 --- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c +++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c @@ -590,19 +590,21 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) static const struct iommu_ops qcom_iommu_ops = { .capable = qcom_iommu_capable, .domain_alloc = qcom_iommu_domain_alloc, - .domain_free = qcom_iommu_domain_free, - .attach_dev = qcom_iommu_attach_dev, - .detach_dev = qcom_iommu_detach_dev, - .map = qcom_iommu_map, - .unmap = qcom_iommu_unmap, - .flush_iotlb_all = qcom_iommu_flush_iotlb_all, - .iotlb_sync = qcom_iommu_iotlb_sync, - .iova_to_phys = qcom_iommu_iova_to_phys, .probe_device = qcom_iommu_probe_device, .release_device = qcom_iommu_release_device, .device_group = generic_device_group, .of_xlate = qcom_iommu_of_xlate, .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = qcom_iommu_attach_dev, + .detach_dev = qcom_iommu_detach_dev, + .map = qcom_iommu_map, + .unmap = qcom_iommu_unmap, + .flush_iotlb_all = qcom_iommu_flush_iotlb_all, + .iotlb_sync = qcom_iommu_iotlb_sync, + .iova_to_phys = qcom_iommu_iova_to_phys, + .free = qcom_iommu_domain_free, + } }; static int qcom_iommu_sec_ptbl_init(struct device *dev) diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index d85d54f2b549..b22034975301 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -525,6 +525,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, struct iommu_dma_cookie *cookie = domain->iova_cookie; unsigned long order, base_pfn; struct iova_domain *iovad; + int ret; if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) return -EINVAL; @@ -559,6 +560,9 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, } init_iova_domain(iovad, 1UL << order, base_pfn); + ret = iova_domain_init_rcaches(iovad); + if (ret) + return ret; /* If the FQ fails we can simply fall back to strict mode */ if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain)) diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 939ffa768986..71f2018e23fe 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -1309,17 +1309,19 @@ static int exynos_iommu_of_xlate(struct device *dev, static const struct iommu_ops exynos_iommu_ops = { .domain_alloc = exynos_iommu_domain_alloc, - .domain_free = exynos_iommu_domain_free, - .attach_dev = exynos_iommu_attach_device, - .detach_dev = exynos_iommu_detach_device, - .map = exynos_iommu_map, - .unmap = 
exynos_iommu_unmap, - .iova_to_phys = exynos_iommu_iova_to_phys, .device_group = generic_device_group, .probe_device = exynos_iommu_probe_device, .release_device = exynos_iommu_release_device, .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE, .of_xlate = exynos_iommu_of_xlate, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = exynos_iommu_attach_device, + .detach_dev = exynos_iommu_detach_device, + .map = exynos_iommu_map, + .unmap = exynos_iommu_unmap, + .iova_to_phys = exynos_iommu_iova_to_phys, + .free = exynos_iommu_domain_free, + } }; static int __init exynos_iommu_init(void) diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index a47f47307109..69a4a62dc3b9 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -453,13 +453,15 @@ static void fsl_pamu_release_device(struct device *dev) static const struct iommu_ops fsl_pamu_ops = { .capable = fsl_pamu_capable, .domain_alloc = fsl_pamu_domain_alloc, - .domain_free = fsl_pamu_domain_free, - .attach_dev = fsl_pamu_attach_device, - .detach_dev = fsl_pamu_detach_device, - .iova_to_phys = fsl_pamu_iova_to_phys, .probe_device = fsl_pamu_probe_device, .release_device = fsl_pamu_release_device, .device_group = fsl_pamu_device_group, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = fsl_pamu_attach_device, + .detach_dev = fsl_pamu_detach_device, + .iova_to_phys = fsl_pamu_iova_to_phys, + .free = fsl_pamu_domain_free, + } }; int __init pamu_domain_init(void) diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c index 62e23ff3c987..db7a0ca73626 100644 --- a/drivers/iommu/intel/debugfs.c +++ b/drivers/iommu/intel/debugfs.c @@ -351,8 +351,7 @@ static int show_device_domain_translation(struct device *dev, void *data) if (!domain) return 0; - seq_printf(m, "Device %s with pasid %d @0x%llx\n", - dev_name(dev), domain->default_pasid, + seq_printf(m, "Device %s @0x%llx\n", dev_name(dev), (u64)virt_to_phys(domain->pgd)); seq_puts(m, "IOVA_PFN\t\tPML5E\t\t\tPML4E\t\t\tPDPE\t\t\tPDE\t\t\tPTE\n"); diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 92fea3fbbb11..b549172e88ef 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -1573,18 +1573,6 @@ static void domain_update_iotlb(struct dmar_domain *domain) break; } - if (!has_iotlb_device) { - struct subdev_domain_info *sinfo; - - list_for_each_entry(sinfo, &domain->subdevices, link_domain) { - info = get_domain_info(sinfo->pdev); - if (info && info->ats_enabled) { - has_iotlb_device = true; - break; - } - } - } - domain->has_iotlb_device = has_iotlb_device; } @@ -1682,7 +1670,6 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain, { unsigned long flags; struct device_domain_info *info; - struct subdev_domain_info *sinfo; if (!domain->has_iotlb_device) return; @@ -1691,27 +1678,9 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain, list_for_each_entry(info, &domain->devices, link) __iommu_flush_dev_iotlb(info, addr, mask); - list_for_each_entry(sinfo, &domain->subdevices, link_domain) { - info = get_domain_info(sinfo->pdev); - __iommu_flush_dev_iotlb(info, addr, mask); - } spin_unlock_irqrestore(&device_domain_lock, flags); } -static void domain_flush_piotlb(struct intel_iommu *iommu, - struct dmar_domain *domain, - u64 addr, unsigned long npages, bool ih) -{ - u16 did = domain->iommu_did[iommu->seq_id]; - - if (domain->default_pasid) - qi_flush_piotlb(iommu, did, domain->default_pasid, - addr, npages, 
ih); - - if (!list_empty(&domain->devices)) - qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih); -} - static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, struct dmar_domain *domain, unsigned long pfn, unsigned int pages, @@ -1727,7 +1696,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu, ih = 1 << 6; if (domain_use_first_level(domain)) { - domain_flush_piotlb(iommu, domain, addr, pages, ih); + qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, pages, ih); } else { /* * Fallback to domain selective flush if no PSI support or @@ -1776,7 +1745,7 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain) u16 did = dmar_domain->iommu_did[iommu->seq_id]; if (domain_use_first_level(dmar_domain)) - domain_flush_piotlb(iommu, dmar_domain, 0, -1, 0); + qi_flush_piotlb(iommu, did, PASID_RID2PASID, 0, -1, 0); else iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); @@ -1983,7 +1952,6 @@ static struct dmar_domain *alloc_domain(unsigned int type) domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL; domain->has_iotlb_device = false; INIT_LIST_HEAD(&domain->devices); - INIT_LIST_HEAD(&domain->subdevices); return domain; } @@ -2676,8 +2644,6 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, info->domain = domain; info->iommu = iommu; info->pasid_table = NULL; - info->auxd_enabled = 0; - INIT_LIST_HEAD(&info->subdevices); if (dev && dev_is_pci(dev)) { struct pci_dev *pdev = to_pci_dev(info->dev); @@ -4637,183 +4603,6 @@ static void intel_iommu_domain_free(struct iommu_domain *domain) domain_exit(to_dmar_domain(domain)); } -/* - * Check whether a @domain could be attached to the @dev through the - * aux-domain attach/detach APIs. - */ -static inline bool -is_aux_domain(struct device *dev, struct iommu_domain *domain) -{ - struct device_domain_info *info = get_domain_info(dev); - - return info && info->auxd_enabled && - domain->type == IOMMU_DOMAIN_UNMANAGED; -} - -static inline struct subdev_domain_info * -lookup_subdev_info(struct dmar_domain *domain, struct device *dev) -{ - struct subdev_domain_info *sinfo; - - if (!list_empty(&domain->subdevices)) { - list_for_each_entry(sinfo, &domain->subdevices, link_domain) { - if (sinfo->pdev == dev) - return sinfo; - } - } - - return NULL; -} - -static int auxiliary_link_device(struct dmar_domain *domain, - struct device *dev) -{ - struct device_domain_info *info = get_domain_info(dev); - struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev); - - assert_spin_locked(&device_domain_lock); - if (WARN_ON(!info)) - return -EINVAL; - - if (!sinfo) { - sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC); - if (!sinfo) - return -ENOMEM; - sinfo->domain = domain; - sinfo->pdev = dev; - list_add(&sinfo->link_phys, &info->subdevices); - list_add(&sinfo->link_domain, &domain->subdevices); - } - - return ++sinfo->users; -} - -static int auxiliary_unlink_device(struct dmar_domain *domain, - struct device *dev) -{ - struct device_domain_info *info = get_domain_info(dev); - struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev); - int ret; - - assert_spin_locked(&device_domain_lock); - if (WARN_ON(!info || !sinfo || sinfo->users <= 0)) - return -EINVAL; - - ret = --sinfo->users; - if (!ret) { - list_del(&sinfo->link_phys); - list_del(&sinfo->link_domain); - kfree(sinfo); - } - - return ret; -} - -static int aux_domain_add_dev(struct dmar_domain *domain, - struct device *dev) -{ - int ret; - unsigned long flags; - struct intel_iommu *iommu; - - iommu = device_to_iommu(dev, NULL, NULL); 
- if (!iommu) - return -ENODEV; - - if (domain->default_pasid <= 0) { - u32 pasid; - - /* No private data needed for the default pasid */ - pasid = ioasid_alloc(NULL, PASID_MIN, - pci_max_pasids(to_pci_dev(dev)) - 1, - NULL); - if (pasid == INVALID_IOASID) { - pr_err("Can't allocate default pasid\n"); - return -ENODEV; - } - domain->default_pasid = pasid; - } - - spin_lock_irqsave(&device_domain_lock, flags); - ret = auxiliary_link_device(domain, dev); - if (ret <= 0) - goto link_failed; - - /* - * Subdevices from the same physical device can be attached to the - * same domain. For such cases, only the first subdevice attachment - * needs to go through the full steps in this function. So if ret > - * 1, just goto out. - */ - if (ret > 1) - goto out; - - /* - * iommu->lock must be held to attach domain to iommu and setup the - * pasid entry for second level translation. - */ - spin_lock(&iommu->lock); - ret = domain_attach_iommu(domain, iommu); - if (ret) - goto attach_failed; - - /* Setup the PASID entry for mediated devices: */ - if (domain_use_first_level(domain)) - ret = domain_setup_first_level(iommu, domain, dev, - domain->default_pasid); - else - ret = intel_pasid_setup_second_level(iommu, domain, dev, - domain->default_pasid); - if (ret) - goto table_failed; - - spin_unlock(&iommu->lock); -out: - spin_unlock_irqrestore(&device_domain_lock, flags); - - return 0; - -table_failed: - domain_detach_iommu(domain, iommu); -attach_failed: - spin_unlock(&iommu->lock); - auxiliary_unlink_device(domain, dev); -link_failed: - spin_unlock_irqrestore(&device_domain_lock, flags); - if (list_empty(&domain->subdevices) && domain->default_pasid > 0) - ioasid_put(domain->default_pasid); - - return ret; -} - -static void aux_domain_remove_dev(struct dmar_domain *domain, - struct device *dev) -{ - struct device_domain_info *info; - struct intel_iommu *iommu; - unsigned long flags; - - if (!is_aux_domain(dev, &domain->domain)) - return; - - spin_lock_irqsave(&device_domain_lock, flags); - info = get_domain_info(dev); - iommu = info->iommu; - - if (!auxiliary_unlink_device(domain, dev)) { - spin_lock(&iommu->lock); - intel_pasid_tear_down_entry(iommu, dev, - domain->default_pasid, false); - domain_detach_iommu(domain, iommu); - spin_unlock(&iommu->lock); - } - - spin_unlock_irqrestore(&device_domain_lock, flags); - - if (list_empty(&domain->subdevices) && domain->default_pasid > 0) - ioasid_put(domain->default_pasid); -} - static int prepare_domain_attach_device(struct iommu_domain *domain, struct device *dev) { @@ -4825,13 +4614,6 @@ static int prepare_domain_attach_device(struct iommu_domain *domain, if (!iommu) return -ENODEV; - if ((dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE) && - !ecap_nest(iommu->ecap)) { - dev_err(dev, "%s: iommu not support nested translation\n", - iommu->name); - return -EINVAL; - } - /* check if this iommu agaw is sufficient for max mapped address */ addr_width = agaw_to_width(iommu->agaw); if (addr_width > cap_mgaw(iommu->cap)) @@ -4873,9 +4655,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, return -EPERM; } - if (is_aux_domain(dev, domain)) - return -EPERM; - /* normally dev is not mapped */ if (unlikely(domain_context_mapped(dev))) { struct dmar_domain *old_domain; @@ -4892,212 +4671,12 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, return domain_add_dev_info(to_dmar_domain(domain), dev); } -static int intel_iommu_aux_attach_device(struct iommu_domain *domain, - struct device *dev) -{ - int ret; - - if (!is_aux_domain(dev, 
domain)) - return -EPERM; - - ret = prepare_domain_attach_device(domain, dev); - if (ret) - return ret; - - return aux_domain_add_dev(to_dmar_domain(domain), dev); -} - static void intel_iommu_detach_device(struct iommu_domain *domain, struct device *dev) { dmar_remove_one_dev_info(dev); } -static void intel_iommu_aux_detach_device(struct iommu_domain *domain, - struct device *dev) -{ - aux_domain_remove_dev(to_dmar_domain(domain), dev); -} - -#ifdef CONFIG_INTEL_IOMMU_SVM -/* - * 2D array for converting and sanitizing IOMMU generic TLB granularity to - * VT-d granularity. Invalidation is typically included in the unmap operation - * as a result of DMA or VFIO unmap. However, for assigned devices guest - * owns the first level page tables. Invalidations of translation caches in the - * guest are trapped and passed down to the host. - * - * vIOMMU in the guest will only expose first level page tables, therefore - * we do not support IOTLB granularity for request without PASID (second level). - * - * For example, to find the VT-d granularity encoding for IOTLB - * type and page selective granularity within PASID: - * X: indexed by iommu cache type - * Y: indexed by enum iommu_inv_granularity - * [IOMMU_CACHE_INV_TYPE_IOTLB][IOMMU_INV_GRANU_ADDR] - */ - -static const int -inv_type_granu_table[IOMMU_CACHE_INV_TYPE_NR][IOMMU_INV_GRANU_NR] = { - /* - * PASID based IOTLB invalidation: PASID selective (per PASID), - * page selective (address granularity) - */ - {-EINVAL, QI_GRAN_NONG_PASID, QI_GRAN_PSI_PASID}, - /* PASID based dev TLBs */ - {-EINVAL, -EINVAL, QI_DEV_IOTLB_GRAN_PASID_SEL}, - /* PASID cache */ - {-EINVAL, -EINVAL, -EINVAL} -}; - -static inline int to_vtd_granularity(int type, int granu) -{ - return inv_type_granu_table[type][granu]; -} - -static inline u64 to_vtd_size(u64 granu_size, u64 nr_granules) -{ - u64 nr_pages = (granu_size * nr_granules) >> VTD_PAGE_SHIFT; - - /* VT-d size is encoded as 2^size of 4K pages, 0 for 4k, 9 for 2MB, etc. - * IOMMU cache invalidate API passes granu_size in bytes, and number of - * granu size in contiguous memory. 
- */ - return order_base_2(nr_pages); -} - -static int -intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev, - struct iommu_cache_invalidate_info *inv_info) -{ - struct dmar_domain *dmar_domain = to_dmar_domain(domain); - struct device_domain_info *info; - struct intel_iommu *iommu; - unsigned long flags; - int cache_type; - u8 bus, devfn; - u16 did, sid; - int ret = 0; - u64 size = 0; - - if (!inv_info || !dmar_domain) - return -EINVAL; - - if (!dev || !dev_is_pci(dev)) - return -ENODEV; - - iommu = device_to_iommu(dev, &bus, &devfn); - if (!iommu) - return -ENODEV; - - if (!(dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE)) - return -EINVAL; - - spin_lock_irqsave(&device_domain_lock, flags); - spin_lock(&iommu->lock); - info = get_domain_info(dev); - if (!info) { - ret = -EINVAL; - goto out_unlock; - } - did = dmar_domain->iommu_did[iommu->seq_id]; - sid = PCI_DEVID(bus, devfn); - - /* Size is only valid in address selective invalidation */ - if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) - size = to_vtd_size(inv_info->granu.addr_info.granule_size, - inv_info->granu.addr_info.nb_granules); - - for_each_set_bit(cache_type, - (unsigned long *)&inv_info->cache, - IOMMU_CACHE_INV_TYPE_NR) { - int granu = 0; - u64 pasid = 0; - u64 addr = 0; - - granu = to_vtd_granularity(cache_type, inv_info->granularity); - if (granu == -EINVAL) { - pr_err_ratelimited("Invalid cache type and granu combination %d/%d\n", - cache_type, inv_info->granularity); - break; - } - - /* - * PASID is stored in different locations based on the - * granularity. - */ - if (inv_info->granularity == IOMMU_INV_GRANU_PASID && - (inv_info->granu.pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID)) - pasid = inv_info->granu.pasid_info.pasid; - else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR && - (inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID)) - pasid = inv_info->granu.addr_info.pasid; - - switch (BIT(cache_type)) { - case IOMMU_CACHE_INV_TYPE_IOTLB: - /* HW will ignore LSB bits based on address mask */ - if (inv_info->granularity == IOMMU_INV_GRANU_ADDR && - size && - (inv_info->granu.addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) { - pr_err_ratelimited("User address not aligned, 0x%llx, size order %llu\n", - inv_info->granu.addr_info.addr, size); - } - - /* - * If granu is PASID-selective, address is ignored. - * We use npages = -1 to indicate that. - */ - qi_flush_piotlb(iommu, did, pasid, - mm_to_dma_pfn(inv_info->granu.addr_info.addr), - (granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size, - inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF); - - if (!info->ats_enabled) - break; - /* - * Always flush device IOTLB if ATS is enabled. vIOMMU - * in the guest may assume IOTLB flush is inclusive, - * which is more efficient. - */ - fallthrough; - case IOMMU_CACHE_INV_TYPE_DEV_IOTLB: - /* - * PASID based device TLB invalidation does not support - * IOMMU_INV_GRANU_PASID granularity but only supports - * IOMMU_INV_GRANU_ADDR. - * The equivalent of that is we set the size to be the - * entire range of 64 bit. User only provides PASID info - * without address info. So we set addr to 0. 
- */ - if (inv_info->granularity == IOMMU_INV_GRANU_PASID) { - size = 64 - VTD_PAGE_SHIFT; - addr = 0; - } else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) { - addr = inv_info->granu.addr_info.addr; - } - - if (info->ats_enabled) - qi_flush_dev_iotlb_pasid(iommu, sid, - info->pfsid, pasid, - info->ats_qdep, addr, - size); - else - pr_warn_ratelimited("Passdown device IOTLB flush w/o ATS!\n"); - break; - default: - dev_err_ratelimited(dev, "Unsupported IOMMU invalidation type %d\n", - cache_type); - ret = -EINVAL; - } - } -out_unlock: - spin_unlock(&iommu->lock); - spin_unlock_irqrestore(&device_domain_lock, flags); - - return ret; -} -#endif - static int intel_iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t hpa, size_t size, int iommu_prot, gfp_t gfp) @@ -5391,46 +4970,6 @@ static struct iommu_group *intel_iommu_device_group(struct device *dev) return generic_device_group(dev); } -static int intel_iommu_enable_auxd(struct device *dev) -{ - struct device_domain_info *info; - struct intel_iommu *iommu; - unsigned long flags; - int ret; - - iommu = device_to_iommu(dev, NULL, NULL); - if (!iommu || dmar_disabled) - return -EINVAL; - - if (!sm_supported(iommu) || !pasid_supported(iommu)) - return -EINVAL; - - ret = intel_iommu_enable_pasid(iommu, dev); - if (ret) - return -ENODEV; - - spin_lock_irqsave(&device_domain_lock, flags); - info = get_domain_info(dev); - info->auxd_enabled = 1; - spin_unlock_irqrestore(&device_domain_lock, flags); - - return 0; -} - -static int intel_iommu_disable_auxd(struct device *dev) -{ - struct device_domain_info *info; - unsigned long flags; - - spin_lock_irqsave(&device_domain_lock, flags); - info = get_domain_info(dev); - if (!WARN_ON(!info)) - info->auxd_enabled = 0; - spin_unlock_irqrestore(&device_domain_lock, flags); - - return 0; -} - static int intel_iommu_enable_sva(struct device *dev) { struct device_domain_info *info = get_domain_info(dev); @@ -5487,9 +5026,6 @@ static int intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat) { switch (feat) { - case IOMMU_DEV_FEAT_AUX: - return intel_iommu_enable_auxd(dev); - case IOMMU_DEV_FEAT_IOPF: return intel_iommu_enable_iopf(dev); @@ -5505,9 +5041,6 @@ static int intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat) { switch (feat) { - case IOMMU_DEV_FEAT_AUX: - return intel_iommu_disable_auxd(dev); - case IOMMU_DEV_FEAT_IOPF: return 0; @@ -5519,50 +5052,11 @@ intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat) } } -static bool -intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat) -{ - struct device_domain_info *info = get_domain_info(dev); - - if (feat == IOMMU_DEV_FEAT_AUX) - return scalable_mode_support() && info && info->auxd_enabled; - - return false; -} - -static int -intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) -{ - struct dmar_domain *dmar_domain = to_dmar_domain(domain); - - return dmar_domain->default_pasid > 0 ? 
- dmar_domain->default_pasid : -EINVAL; -} - -static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain, - struct device *dev) +static bool intel_iommu_is_attach_deferred(struct device *dev) { return attach_deferred(dev); } -static int -intel_iommu_enable_nesting(struct iommu_domain *domain) -{ - struct dmar_domain *dmar_domain = to_dmar_domain(domain); - unsigned long flags; - int ret = -ENODEV; - - spin_lock_irqsave(&device_domain_lock, flags); - if (list_empty(&dmar_domain->devices)) { - dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE; - dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL; - ret = 0; - } - spin_unlock_irqrestore(&device_domain_lock, flags); - - return ret; -} - /* * Check that the device does not live on an external facing PCI port that is * marked as untrusted. Such devices should not be able to apply quirks and @@ -5598,40 +5092,34 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain, const struct iommu_ops intel_iommu_ops = { .capable = intel_iommu_capable, .domain_alloc = intel_iommu_domain_alloc, - .domain_free = intel_iommu_domain_free, - .enable_nesting = intel_iommu_enable_nesting, - .attach_dev = intel_iommu_attach_device, - .detach_dev = intel_iommu_detach_device, - .aux_attach_dev = intel_iommu_aux_attach_device, - .aux_detach_dev = intel_iommu_aux_detach_device, - .aux_get_pasid = intel_iommu_aux_get_pasid, - .map_pages = intel_iommu_map_pages, - .unmap_pages = intel_iommu_unmap_pages, - .iotlb_sync_map = intel_iommu_iotlb_sync_map, - .flush_iotlb_all = intel_flush_iotlb_all, - .iotlb_sync = intel_iommu_tlb_sync, - .iova_to_phys = intel_iommu_iova_to_phys, .probe_device = intel_iommu_probe_device, .probe_finalize = intel_iommu_probe_finalize, .release_device = intel_iommu_release_device, .get_resv_regions = intel_iommu_get_resv_regions, .put_resv_regions = generic_iommu_put_resv_regions, .device_group = intel_iommu_device_group, - .dev_feat_enabled = intel_iommu_dev_feat_enabled, .dev_enable_feat = intel_iommu_dev_enable_feat, .dev_disable_feat = intel_iommu_dev_disable_feat, .is_attach_deferred = intel_iommu_is_attach_deferred, .def_domain_type = device_def_domain_type, .pgsize_bitmap = SZ_4K, #ifdef CONFIG_INTEL_IOMMU_SVM - .cache_invalidate = intel_iommu_sva_invalidate, - .sva_bind_gpasid = intel_svm_bind_gpasid, - .sva_unbind_gpasid = intel_svm_unbind_gpasid, .sva_bind = intel_svm_bind, .sva_unbind = intel_svm_unbind, .sva_get_pasid = intel_svm_get_pasid, .page_response = intel_svm_page_response, #endif + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = intel_iommu_attach_device, + .detach_dev = intel_iommu_detach_device, + .map_pages = intel_iommu_map_pages, + .unmap_pages = intel_iommu_unmap_pages, + .iotlb_sync_map = intel_iommu_iotlb_sync_map, + .flush_iotlb_all = intel_flush_iotlb_all, + .iotlb_sync = intel_iommu_tlb_sync, + .iova_to_phys = intel_iommu_iova_to_phys, + .free = intel_iommu_domain_free, + } }; static void quirk_iommu_igfx(struct pci_dev *dev) diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index 07c390aed1fe..10fb82ea467d 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -762,164 +762,3 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu, return 0; } - -static int -intel_pasid_setup_bind_data(struct intel_iommu *iommu, struct pasid_entry *pte, - struct iommu_gpasid_bind_data_vtd *pasid_data) -{ - /* - * Not all guest PASID table entry fields are passed down during bind, - * here we only set up the ones that are dependent 
on guest settings. - * Execution related bits such as NXE, SMEP are not supported. - * Other fields, such as snoop related, are set based on host needs - * regardless of guest settings. - */ - if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_SRE) { - if (!ecap_srs(iommu->ecap)) { - pr_err_ratelimited("No supervisor request support on %s\n", - iommu->name); - return -EINVAL; - } - pasid_set_sre(pte); - /* Enable write protect WP if guest requested */ - if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_WPE) - pasid_set_wpe(pte); - } - - if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_EAFE) { - if (!ecap_eafs(iommu->ecap)) { - pr_err_ratelimited("No extended access flag support on %s\n", - iommu->name); - return -EINVAL; - } - pasid_set_eafe(pte); - } - - /* - * Memory type is only applicable to devices inside processor coherent - * domain. Will add MTS support once coherent devices are available. - */ - if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_MTS_MASK) { - pr_warn_ratelimited("No memory type support %s\n", - iommu->name); - return -EINVAL; - } - - return 0; -} - -/** - * intel_pasid_setup_nested() - Set up PASID entry for nested translation. - * This could be used for guest shared virtual address. In this case, the - * first level page tables are used for GVA-GPA translation in the guest, - * second level page tables are used for GPA-HPA translation. - * - * @iommu: IOMMU which the device belong to - * @dev: Device to be set up for translation - * @gpgd: FLPTPTR: First Level Page translation pointer in GPA - * @pasid: PASID to be programmed in the device PASID table - * @pasid_data: Additional PASID info from the guest bind request - * @domain: Domain info for setting up second level page tables - * @addr_width: Address width of the first level (guest) - */ -int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev, - pgd_t *gpgd, u32 pasid, - struct iommu_gpasid_bind_data_vtd *pasid_data, - struct dmar_domain *domain, int addr_width) -{ - struct pasid_entry *pte; - struct dma_pte *pgd; - int ret = 0; - u64 pgd_val; - int agaw; - u16 did; - - if (!ecap_nest(iommu->ecap)) { - pr_err_ratelimited("IOMMU: %s: No nested translation support\n", - iommu->name); - return -EINVAL; - } - - if (!(domain->flags & DOMAIN_FLAG_NESTING_MODE)) { - pr_err_ratelimited("Domain is not in nesting mode, %x\n", - domain->flags); - return -EINVAL; - } - - pte = intel_pasid_get_entry(dev, pasid); - if (WARN_ON(!pte)) - return -EINVAL; - - /* - * Caller must ensure PASID entry is not in use, i.e. not bind the - * same PASID to the same device twice. - */ - if (pasid_pte_is_present(pte)) - return -EBUSY; - - pasid_clear_entry(pte); - - /* Sanity checking performed by caller to make sure address - * width matching in two dimensions: - * 1. CPU vs. IOMMU - * 2. Guest vs. Host. 
- */ - switch (addr_width) { -#ifdef CONFIG_X86 - case ADDR_WIDTH_5LEVEL: - if (!cpu_feature_enabled(X86_FEATURE_LA57) || - !cap_5lp_support(iommu->cap)) { - dev_err_ratelimited(dev, - "5-level paging not supported\n"); - return -EINVAL; - } - - pasid_set_flpm(pte, 1); - break; -#endif - case ADDR_WIDTH_4LEVEL: - pasid_set_flpm(pte, 0); - break; - default: - dev_err_ratelimited(dev, "Invalid guest address width %d\n", - addr_width); - return -EINVAL; - } - - /* First level PGD is in GPA, must be supported by the second level */ - if ((uintptr_t)gpgd > domain->max_addr) { - dev_err_ratelimited(dev, - "Guest PGD %lx not supported, max %llx\n", - (uintptr_t)gpgd, domain->max_addr); - return -EINVAL; - } - pasid_set_flptr(pte, (uintptr_t)gpgd); - - ret = intel_pasid_setup_bind_data(iommu, pte, pasid_data); - if (ret) - return ret; - - /* Setup the second level based on the given domain */ - pgd = domain->pgd; - - agaw = iommu_skip_agaw(domain, iommu, &pgd); - if (agaw < 0) { - dev_err_ratelimited(dev, "Invalid domain page table\n"); - return -EINVAL; - } - pgd_val = virt_to_phys(pgd); - pasid_set_slptr(pte, pgd_val); - pasid_set_fault_enable(pte); - - did = domain->iommu_did[iommu->seq_id]; - pasid_set_domain_id(pte, did); - - pasid_set_address_width(pte, agaw); - pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap)); - - pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED); - pasid_set_present(pte); - pasid_flush_caches(iommu, pte, pasid, did); - - return ret; -} diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h index d5552e2c160d..ab4408c824a5 100644 --- a/drivers/iommu/intel/pasid.h +++ b/drivers/iommu/intel/pasid.h @@ -118,10 +118,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu, int intel_pasid_setup_pass_through(struct intel_iommu *iommu, struct dmar_domain *domain, struct device *dev, u32 pasid); -int intel_pasid_setup_nested(struct intel_iommu *iommu, - struct device *dev, pgd_t *pgd, u32 pasid, - struct iommu_gpasid_bind_data_vtd *pasid_data, - struct dmar_domain *domain, int addr_width); void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev, u32 pasid, bool fault_ignore); diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index 5b5d69b04fcc..d04c83dd3a58 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -318,193 +318,6 @@ out: return 0; } -int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev, - struct iommu_gpasid_bind_data *data) -{ - struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); - struct intel_svm_dev *sdev = NULL; - struct dmar_domain *dmar_domain; - struct device_domain_info *info; - struct intel_svm *svm = NULL; - unsigned long iflags; - int ret = 0; - - if (WARN_ON(!iommu) || !data) - return -EINVAL; - - if (data->format != IOMMU_PASID_FORMAT_INTEL_VTD) - return -EINVAL; - - /* IOMMU core ensures argsz is more than the start of the union */ - if (data->argsz < offsetofend(struct iommu_gpasid_bind_data, vendor.vtd)) - return -EINVAL; - - /* Make sure no undefined flags are used in vendor data */ - if (data->vendor.vtd.flags & ~(IOMMU_SVA_VTD_GPASID_LAST - 1)) - return -EINVAL; - - if (!dev_is_pci(dev)) - return -ENOTSUPP; - - /* VT-d supports devices with full 20 bit PASIDs only */ - if (pci_max_pasids(to_pci_dev(dev)) != PASID_MAX) - return -EINVAL; - - /* - * We only check host PASID range, we have no knowledge to check - * guest PASID range. 
- */ - if (data->hpasid <= 0 || data->hpasid >= PASID_MAX) - return -EINVAL; - - info = get_domain_info(dev); - if (!info) - return -EINVAL; - - dmar_domain = to_dmar_domain(domain); - - mutex_lock(&pasid_mutex); - ret = pasid_to_svm_sdev(dev, data->hpasid, &svm, &sdev); - if (ret) - goto out; - - if (sdev) { - /* - * Do not allow multiple bindings of the same device-PASID since - * there is only one SL page tables per PASID. We may revisit - * once sharing PGD across domains are supported. - */ - dev_warn_ratelimited(dev, "Already bound with PASID %u\n", - svm->pasid); - ret = -EBUSY; - goto out; - } - - if (!svm) { - /* We come here when PASID has never been bond to a device. */ - svm = kzalloc(sizeof(*svm), GFP_KERNEL); - if (!svm) { - ret = -ENOMEM; - goto out; - } - /* REVISIT: upper layer/VFIO can track host process that bind - * the PASID. ioasid_set = mm might be sufficient for vfio to - * check pasid VMM ownership. We can drop the following line - * once VFIO and IOASID set check is in place. - */ - svm->mm = get_task_mm(current); - svm->pasid = data->hpasid; - if (data->flags & IOMMU_SVA_GPASID_VAL) { - svm->gpasid = data->gpasid; - svm->flags |= SVM_FLAG_GUEST_PASID; - } - pasid_private_add(data->hpasid, svm); - INIT_LIST_HEAD_RCU(&svm->devs); - mmput(svm->mm); - } - sdev = kzalloc(sizeof(*sdev), GFP_KERNEL); - if (!sdev) { - ret = -ENOMEM; - goto out; - } - sdev->dev = dev; - sdev->sid = PCI_DEVID(info->bus, info->devfn); - sdev->iommu = iommu; - - /* Only count users if device has aux domains */ - if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX)) - sdev->users = 1; - - /* Set up device context entry for PASID if not enabled already */ - ret = intel_iommu_enable_pasid(iommu, sdev->dev); - if (ret) { - dev_err_ratelimited(dev, "Failed to enable PASID capability\n"); - kfree(sdev); - goto out; - } - - /* - * PASID table is per device for better security. Therefore, for - * each bind of a new device even with an existing PASID, we need to - * call the nested mode setup function here. - */ - spin_lock_irqsave(&iommu->lock, iflags); - ret = intel_pasid_setup_nested(iommu, dev, - (pgd_t *)(uintptr_t)data->gpgd, - data->hpasid, &data->vendor.vtd, dmar_domain, - data->addr_width); - spin_unlock_irqrestore(&iommu->lock, iflags); - if (ret) { - dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n", - data->hpasid, ret); - /* - * PASID entry should be in cleared state if nested mode - * set up failed. So we only need to clear IOASID tracking - * data such that free call will succeed. 
- */ - kfree(sdev); - goto out; - } - - svm->flags |= SVM_FLAG_GUEST_MODE; - - init_rcu_head(&sdev->rcu); - list_add_rcu(&sdev->list, &svm->devs); - out: - if (!IS_ERR_OR_NULL(svm) && list_empty(&svm->devs)) { - pasid_private_remove(data->hpasid); - kfree(svm); - } - - mutex_unlock(&pasid_mutex); - return ret; -} - -int intel_svm_unbind_gpasid(struct device *dev, u32 pasid) -{ - struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); - struct intel_svm_dev *sdev; - struct intel_svm *svm; - int ret; - - if (WARN_ON(!iommu)) - return -EINVAL; - - mutex_lock(&pasid_mutex); - ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev); - if (ret) - goto out; - - if (sdev) { - if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX)) - sdev->users--; - if (!sdev->users) { - list_del_rcu(&sdev->list); - intel_pasid_tear_down_entry(iommu, dev, - svm->pasid, false); - intel_svm_drain_prq(dev, svm->pasid); - kfree_rcu(sdev, rcu); - - if (list_empty(&svm->devs)) { - /* - * We do not free the IOASID here in that - * IOMMU driver did not allocate it. - * Unlike native SVM, IOASID for guest use was - * allocated prior to the bind call. - * In any case, if the free call comes before - * the unbind, IOMMU driver will get notified - * and perform cleanup. - */ - pasid_private_remove(pasid); - kfree(svm); - } - } - } -out: - mutex_unlock(&pasid_mutex); - return ret; -} - static int intel_svm_alloc_pasid(struct device *dev, struct mm_struct *mm, unsigned int flags) { @@ -1126,28 +939,6 @@ int intel_svm_page_response(struct device *dev, } /* - * For responses from userspace, need to make sure that the - * pasid has been bound to its mm. - */ - if (svm->flags & SVM_FLAG_GUEST_MODE) { - struct mm_struct *mm; - - mm = get_task_mm(current); - if (!mm) { - ret = -EINVAL; - goto out; - } - - if (mm != svm->mm) { - ret = -ENODEV; - mmput(mm); - goto out; - } - - mmput(mm); - } - - /* * Per VT-d spec. v3.0 ch7.7, system software must respond * with page group response if private data is present (PDP) * or last page in group (LPIG) bit is set. 
This is an diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 107dcf5938d6..f2c45b85b9fc 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -323,13 +323,14 @@ err_out: void iommu_release_device(struct device *dev) { - const struct iommu_ops *ops = dev->bus->iommu_ops; + const struct iommu_ops *ops; if (!dev->iommu) return; iommu_device_unlink(dev->iommu->iommu_dev, dev); + ops = dev_iommu_ops(dev); ops->release_device(dev); iommu_group_remove_device(dev); @@ -790,9 +791,6 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group, dma_addr_t start, end, addr; size_t map_size = 0; - if (domain->ops->apply_resv_region) - domain->ops->apply_resv_region(dev, domain, entry); - start = ALIGN(entry->start, pg_size); end = ALIGN(entry->start + entry->length, pg_size); @@ -833,11 +831,12 @@ out: return ret; } -static bool iommu_is_attach_deferred(struct iommu_domain *domain, - struct device *dev) +static bool iommu_is_attach_deferred(struct device *dev) { - if (domain->ops->is_attach_deferred) - return domain->ops->is_attach_deferred(domain, dev); + const struct iommu_ops *ops = dev_iommu_ops(dev); + + if (ops->is_attach_deferred) + return ops->is_attach_deferred(dev); return false; } @@ -894,7 +893,7 @@ rename: mutex_lock(&group->mutex); list_add_tail(&device->list, &group->devices); - if (group->domain && !iommu_is_attach_deferred(group->domain, dev)) + if (group->domain && !iommu_is_attach_deferred(dev)) ret = __iommu_attach_device(group->domain, dev); mutex_unlock(&group->mutex); if (ret) @@ -1255,10 +1254,10 @@ int iommu_page_response(struct device *dev, struct iommu_fault_event *evt; struct iommu_fault_page_request *prm; struct dev_iommu *param = dev->iommu; + const struct iommu_ops *ops = dev_iommu_ops(dev); bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID; - struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - if (!domain || !domain->ops->page_response) + if (!ops->page_response) return -ENODEV; if (!param || !param->fault_param) @@ -1299,7 +1298,7 @@ int iommu_page_response(struct device *dev, msg->pasid = 0; } - ret = domain->ops->page_response(dev, evt, msg); + ret = ops->page_response(dev, evt, msg); list_del(&evt->list); kfree(evt); break; @@ -1524,7 +1523,7 @@ EXPORT_SYMBOL_GPL(fsl_mc_device_group); static int iommu_get_def_domain_type(struct device *dev) { - const struct iommu_ops *ops = dev->bus->iommu_ops; + const struct iommu_ops *ops = dev_iommu_ops(dev); if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted) return IOMMU_DOMAIN_DMA; @@ -1583,7 +1582,7 @@ static int iommu_alloc_default_domain(struct iommu_group *group, */ static struct iommu_group *iommu_group_get_for_dev(struct device *dev) { - const struct iommu_ops *ops = dev->bus->iommu_ops; + const struct iommu_ops *ops = dev_iommu_ops(dev); struct iommu_group *group; int ret; @@ -1591,9 +1590,6 @@ static struct iommu_group *iommu_group_get_for_dev(struct device *dev) if (group) return group; - if (!ops) - return ERR_PTR(-EINVAL); - group = ops->device_group(dev); if (WARN_ON_ONCE(group == NULL)) return ERR_PTR(-EINVAL); @@ -1748,7 +1744,7 @@ static int iommu_group_do_dma_attach(struct device *dev, void *data) struct iommu_domain *domain = data; int ret = 0; - if (!iommu_is_attach_deferred(domain, dev)) + if (!iommu_is_attach_deferred(dev)) ret = __iommu_attach_device(domain, dev); return ret; @@ -1762,10 +1758,10 @@ static int __iommu_group_dma_attach(struct iommu_group *group) static int iommu_group_do_probe_finalize(struct device *dev, void *data) { - struct 
iommu_domain *domain = data; + const struct iommu_ops *ops = dev_iommu_ops(dev); - if (domain->ops->probe_finalize) - domain->ops->probe_finalize(dev); + if (ops->probe_finalize) + ops->probe_finalize(dev); return 0; } @@ -1954,10 +1950,11 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, if (!domain) return NULL; - domain->ops = bus->iommu_ops; domain->type = type; /* Assume all sizes by default; the driver may override this later */ - domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap; + domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap; + if (!domain->ops) + domain->ops = bus->iommu_ops->default_domain_ops; if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) { iommu_domain_free(domain); @@ -1975,7 +1972,7 @@ EXPORT_SYMBOL_GPL(iommu_domain_alloc); void iommu_domain_free(struct iommu_domain *domain) { iommu_put_dma_cookie(domain); - domain->ops->domain_free(domain); + domain->ops->free(domain); } EXPORT_SYMBOL_GPL(iommu_domain_free); @@ -2023,228 +2020,16 @@ EXPORT_SYMBOL_GPL(iommu_attach_device); int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain) { - const struct iommu_ops *ops = domain->ops; - - if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev)) + if (iommu_is_attach_deferred(dev)) return __iommu_attach_device(domain, dev); return 0; } -/* - * Check flags and other user provided data for valid combinations. We also - * make sure no reserved fields or unused flags are set. This is to ensure - * not breaking userspace in the future when these fields or flags are used. - */ -static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info) -{ - u32 mask; - int i; - - if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1) - return -EINVAL; - - mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1; - if (info->cache & ~mask) - return -EINVAL; - - if (info->granularity >= IOMMU_INV_GRANU_NR) - return -EINVAL; - - switch (info->granularity) { - case IOMMU_INV_GRANU_ADDR: - if (info->cache & IOMMU_CACHE_INV_TYPE_PASID) - return -EINVAL; - - mask = IOMMU_INV_ADDR_FLAGS_PASID | - IOMMU_INV_ADDR_FLAGS_ARCHID | - IOMMU_INV_ADDR_FLAGS_LEAF; - - if (info->granu.addr_info.flags & ~mask) - return -EINVAL; - break; - case IOMMU_INV_GRANU_PASID: - mask = IOMMU_INV_PASID_FLAGS_PASID | - IOMMU_INV_PASID_FLAGS_ARCHID; - if (info->granu.pasid_info.flags & ~mask) - return -EINVAL; - - break; - case IOMMU_INV_GRANU_DOMAIN: - if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB) - return -EINVAL; - break; - default: - return -EINVAL; - } - - /* Check reserved padding fields */ - for (i = 0; i < sizeof(info->padding); i++) { - if (info->padding[i]) - return -EINVAL; - } - - return 0; -} - -int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev, - void __user *uinfo) -{ - struct iommu_cache_invalidate_info inv_info = { 0 }; - u32 minsz; - int ret; - - if (unlikely(!domain->ops->cache_invalidate)) - return -ENODEV; - - /* - * No new spaces can be added before the variable sized union, the - * minimum size is the offset to the union. 
- */ - minsz = offsetof(struct iommu_cache_invalidate_info, granu); - - /* Copy minsz from user to get flags and argsz */ - if (copy_from_user(&inv_info, uinfo, minsz)) - return -EFAULT; - - /* Fields before the variable size union are mandatory */ - if (inv_info.argsz < minsz) - return -EINVAL; - - /* PASID and address granu require additional info beyond minsz */ - if (inv_info.granularity == IOMMU_INV_GRANU_PASID && - inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info)) - return -EINVAL; - - if (inv_info.granularity == IOMMU_INV_GRANU_ADDR && - inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info)) - return -EINVAL; - - /* - * User might be using a newer UAPI header which has a larger data - * size, we shall support the existing flags within the current - * size. Copy the remaining user data _after_ minsz but not more - * than the current kernel supported size. - */ - if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz, - min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz)) - return -EFAULT; - - /* Now the argsz is validated, check the content */ - ret = iommu_check_cache_invl_data(&inv_info); - if (ret) - return ret; - - return domain->ops->cache_invalidate(domain, dev, &inv_info); -} -EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate); - -static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data) -{ - u64 mask; - int i; - - if (data->version != IOMMU_GPASID_BIND_VERSION_1) - return -EINVAL; - - /* Check the range of supported formats */ - if (data->format >= IOMMU_PASID_FORMAT_LAST) - return -EINVAL; - - /* Check all flags */ - mask = IOMMU_SVA_GPASID_VAL; - if (data->flags & ~mask) - return -EINVAL; - - /* Check reserved padding fields */ - for (i = 0; i < sizeof(data->padding); i++) { - if (data->padding[i]) - return -EINVAL; - } - - return 0; -} - -static int iommu_sva_prepare_bind_data(void __user *udata, - struct iommu_gpasid_bind_data *data) -{ - u32 minsz; - - /* - * No new spaces can be added before the variable sized union, the - * minimum size is the offset to the union. - */ - minsz = offsetof(struct iommu_gpasid_bind_data, vendor); - - /* Copy minsz from user to get flags and argsz */ - if (copy_from_user(data, udata, minsz)) - return -EFAULT; - - /* Fields before the variable size union are mandatory */ - if (data->argsz < minsz) - return -EINVAL; - /* - * User might be using a newer UAPI header, we shall let IOMMU vendor - * driver decide on what size it needs. Since the guest PASID bind data - * can be vendor specific, larger argsz could be the result of extension - * for one vendor but it should not affect another vendor. 
- * Copy the remaining user data _after_ minsz - */ - if (copy_from_user((void *)data + minsz, udata + minsz, - min_t(u32, data->argsz, sizeof(*data)) - minsz)) - return -EFAULT; - - return iommu_check_bind_data(data); -} - -int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev, - void __user *udata) -{ - struct iommu_gpasid_bind_data data = { 0 }; - int ret; - - if (unlikely(!domain->ops->sva_bind_gpasid)) - return -ENODEV; - - ret = iommu_sva_prepare_bind_data(udata, &data); - if (ret) - return ret; - - return domain->ops->sva_bind_gpasid(domain, dev, &data); -} -EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid); - -int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, - ioasid_t pasid) -{ - if (unlikely(!domain->ops->sva_unbind_gpasid)) - return -ENODEV; - - return domain->ops->sva_unbind_gpasid(dev, pasid); -} -EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid); - -int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev, - void __user *udata) -{ - struct iommu_gpasid_bind_data data = { 0 }; - int ret; - - if (unlikely(!domain->ops->sva_bind_gpasid)) - return -ENODEV; - - ret = iommu_sva_prepare_bind_data(udata, &data); - if (ret) - return ret; - - return iommu_sva_unbind_gpasid(domain, dev, data.hpasid); -} -EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid); - static void __iommu_detach_device(struct iommu_domain *domain, struct device *dev) { - if (iommu_is_attach_deferred(domain, dev)) + if (iommu_is_attach_deferred(dev)) return; if (unlikely(domain->ops->detach_dev == NULL)) @@ -2458,7 +2243,7 @@ static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp, size_t *mapped) { - const struct iommu_ops *ops = domain->ops; + const struct iommu_domain_ops *ops = domain->ops; size_t pgsize, count; int ret; @@ -2481,7 +2266,7 @@ static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova, static int __iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { - const struct iommu_ops *ops = domain->ops; + const struct iommu_domain_ops *ops = domain->ops; unsigned long orig_iova = iova; unsigned int min_pagesz; size_t orig_size = size; @@ -2541,7 +2326,7 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova, static int _iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { - const struct iommu_ops *ops = domain->ops; + const struct iommu_domain_ops *ops = domain->ops; int ret; ret = __iommu_map(domain, iova, paddr, size, prot, gfp); @@ -2570,7 +2355,7 @@ static size_t __iommu_unmap_pages(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather) { - const struct iommu_ops *ops = domain->ops; + const struct iommu_domain_ops *ops = domain->ops; size_t pgsize, count; pgsize = iommu_pgsize(domain, iova, iova, size, &count); @@ -2583,7 +2368,7 @@ static size_t __iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather) { - const struct iommu_ops *ops = domain->ops; + const struct iommu_domain_ops *ops = domain->ops; size_t unmapped_page, unmapped = 0; unsigned long orig_iova = iova; unsigned int min_pagesz; @@ -2659,7 +2444,7 @@ static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot, gfp_t gfp) { - const struct iommu_ops *ops = domain->ops; + 
 	size_t len = 0, mapped = 0;
 	phys_addr_t start;
 	unsigned int i = 0;
@@ -2792,17 +2577,17 @@ EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);
 
 void iommu_get_resv_regions(struct device *dev, struct list_head *list)
 {
-	const struct iommu_ops *ops = dev->bus->iommu_ops;
+	const struct iommu_ops *ops = dev_iommu_ops(dev);
 
-	if (ops && ops->get_resv_regions)
+	if (ops->get_resv_regions)
 		ops->get_resv_regions(dev, list);
 }
 
 void iommu_put_resv_regions(struct device *dev, struct list_head *list)
 {
-	const struct iommu_ops *ops = dev->bus->iommu_ops;
+	const struct iommu_ops *ops = dev_iommu_ops(dev);
 
-	if (ops && ops->put_resv_regions)
+	if (ops->put_resv_regions)
 		ops->put_resv_regions(dev, list);
 }
 
@@ -2959,8 +2744,6 @@ EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
 
 /*
  * The device drivers should do the necessary cleanups before calling this.
- * For example, before disabling the aux-domain feature, the device driver
- * should detach all aux-domains. Otherwise, this will return -EBUSY.
  */
 int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
 {
@@ -2988,50 +2771,6 @@ bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
 }
 EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
 
-/*
- * Aux-domain specific attach/detach.
- *
- * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
- * true. Also, as long as domains are attached to a device through this
- * interface, any tries to call iommu_attach_device() should fail
- * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
- * This should make us safe against a device being attached to a guest as a
- * whole while there are still pasid users on it (aux and sva).
- */
-int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
-{
-	int ret = -ENODEV;
-
-	if (domain->ops->aux_attach_dev)
-		ret = domain->ops->aux_attach_dev(domain, dev);
-
-	if (!ret)
-		trace_attach_device_to_domain(dev);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
-
-void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
-{
-	if (domain->ops->aux_detach_dev) {
-		domain->ops->aux_detach_dev(domain, dev);
-		trace_detach_device_from_domain(dev);
-	}
-}
-EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
-
-int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
-{
-	int ret = -ENODEV;
-
-	if (domain->ops->aux_get_pasid)
-		ret = domain->ops->aux_get_pasid(domain, dev);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
-
 /**
  * iommu_sva_bind_device() - Bind a process address space to a device
  * @dev: the device
@@ -3053,9 +2792,9 @@ iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
 {
 	struct iommu_group *group;
 	struct iommu_sva *handle = ERR_PTR(-EINVAL);
-	const struct iommu_ops *ops = dev->bus->iommu_ops;
+	const struct iommu_ops *ops = dev_iommu_ops(dev);
 
-	if (!ops || !ops->sva_bind)
+	if (!ops->sva_bind)
 		return ERR_PTR(-ENODEV);
 
 	group = iommu_group_get(dev);
@@ -3096,9 +2835,9 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
 {
 	struct iommu_group *group;
 	struct device *dev = handle->dev;
-	const struct iommu_ops *ops = dev->bus->iommu_ops;
+	const struct iommu_ops *ops = dev_iommu_ops(dev);
 
-	if (!ops || !ops->sva_unbind)
+	if (!ops->sva_unbind)
 		return;
 
 	group = iommu_group_get(dev);
@@ -3115,9 +2854,9 @@ EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
 
 u32 iommu_sva_get_pasid(struct iommu_sva *handle)
 {
-	const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
+	const struct iommu_ops *ops = dev_iommu_ops(handle->dev);
 
-	if (!ops || !ops->sva_get_pasid)
+	if (!ops->sva_get_pasid)
 		return IOMMU_PASID_INVALID;
 
 	return ops->sva_get_pasid(handle);
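Context for the iommu.c hunks above: the merged 'core' branch splits the old monolithic iommu_ops into device/instance-level ops (now looked up per device via dev_iommu_ops() instead of dev->bus->iommu_ops) and per-domain ops reached through domain->ops, which becomes a struct iommu_domain_ops pointer. A minimal sketch of the two lookup paths, assuming the include/linux/iommu.h definitions from this merge; the example_* helpers are illustrative, not part of the patch:

#include <linux/iommu.h>

/* Domain-level path: map/unmap/flush/iova_to_phys live in domain->ops. */
static phys_addr_t example_iova_to_phys(struct iommu_domain *domain,
					dma_addr_t iova)
{
	const struct iommu_domain_ops *ops = domain->ops;

	return ops->iova_to_phys ? ops->iova_to_phys(domain, iova) : 0;
}

/* Device-level path: probe/SVA/resv-region ops are resolved per device. */
static bool example_supports_sva(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	return ops->sva_bind != NULL;
}

Note the dropped NULL checks in the hunks above: dev_iommu_ops() is only legal on a device that has completed IOMMU probe, so the returned ops pointer is never NULL, unlike the old bus lookup.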
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index b28c9435b898..db77aa675145 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -15,13 +15,14 @@
 /* The anchor node sits above the top of the usable address space */
 #define IOVA_ANCHOR	~0UL
 
+#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */
+
 static bool iova_rcache_insert(struct iova_domain *iovad,
			       unsigned long pfn,
			       unsigned long size);
 static unsigned long iova_rcache_get(struct iova_domain *iovad,
				     unsigned long size,
				     unsigned long limit_pfn);
-static void init_iova_rcaches(struct iova_domain *iovad);
 static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
 static void free_iova_rcaches(struct iova_domain *iovad);
 
@@ -64,8 +65,6 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
 	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
 	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
-	cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, &iovad->cpuhp_dead);
-	init_iova_rcaches(iovad);
 }
 EXPORT_SYMBOL_GPL(init_iova_domain);
 
@@ -95,10 +94,11 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 
 	cached_iova = to_iova(iovad->cached32_node);
 	if (free == cached_iova ||
	    (free->pfn_hi < iovad->dma_32bit_pfn &&
-	     free->pfn_lo >= cached_iova->pfn_lo)) {
+	     free->pfn_lo >= cached_iova->pfn_lo))
 		iovad->cached32_node = rb_next(&free->node);
+
+	if (free->pfn_lo < iovad->dma_32bit_pfn)
 		iovad->max32_alloc_size = iovad->dma_32bit_pfn;
-	}
 
 	cached_iova = to_iova(iovad->cached_node);
 	if (free->pfn_lo >= cached_iova->pfn_lo)
@@ -488,6 +488,13 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
 }
 EXPORT_SYMBOL_GPL(free_iova_fast);
 
+static void iova_domain_free_rcaches(struct iova_domain *iovad)
+{
+	cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
+					    &iovad->cpuhp_dead);
+	free_iova_rcaches(iovad);
+}
+
 /**
  * put_iova_domain - destroys the iova domain
  * @iovad: - iova domain in question.
@@ -497,9 +504,9 @@ void put_iova_domain(struct iova_domain *iovad)
 {
 	struct iova *iova, *tmp;
 
-	cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
-					    &iovad->cpuhp_dead);
-	free_iova_rcaches(iovad);
+	if (iovad->rcaches)
+		iova_domain_free_rcaches(iovad);
+
 	rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
 		free_iova_mem(iova);
 }
@@ -608,6 +615,7 @@ EXPORT_SYMBOL_GPL(reserve_iova);
  */
 #define IOVA_MAG_SIZE 128
+#define MAX_GLOBAL_MAGS 32	/* magazines per bin */
 
 struct iova_magazine {
 	unsigned long size;
@@ -620,6 +628,13 @@ struct iova_cpu_rcache {
 	struct iova_magazine *prev;
 };
 
+struct iova_rcache {
+	spinlock_t lock;
+	unsigned long depot_size;
+	struct iova_magazine *depot[MAX_GLOBAL_MAGS];
+	struct iova_cpu_rcache __percpu *cpu_rcaches;
+};
+
 static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
 {
 	return kzalloc(sizeof(struct iova_magazine), flags);
@@ -693,28 +708,54 @@ static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
 	mag->pfns[mag->size++] = pfn;
 }
 
-static void init_iova_rcaches(struct iova_domain *iovad)
+int iova_domain_init_rcaches(struct iova_domain *iovad)
 {
-	struct iova_cpu_rcache *cpu_rcache;
-	struct iova_rcache *rcache;
 	unsigned int cpu;
-	int i;
+	int i, ret;
+
+	iovad->rcaches = kcalloc(IOVA_RANGE_CACHE_MAX_SIZE,
+				 sizeof(struct iova_rcache),
+				 GFP_KERNEL);
+	if (!iovad->rcaches)
+		return -ENOMEM;
 
 	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+		struct iova_cpu_rcache *cpu_rcache;
+		struct iova_rcache *rcache;
+
 		rcache = &iovad->rcaches[i];
 		spin_lock_init(&rcache->lock);
 		rcache->depot_size = 0;
-		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
-		if (WARN_ON(!rcache->cpu_rcaches))
-			continue;
+		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache),
+						     cache_line_size());
+		if (!rcache->cpu_rcaches) {
+			ret = -ENOMEM;
+			goto out_err;
+		}
 		for_each_possible_cpu(cpu) {
 			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
+
 			spin_lock_init(&cpu_rcache->lock);
 			cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
 			cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
+			if (!cpu_rcache->loaded || !cpu_rcache->prev) {
+				ret = -ENOMEM;
+				goto out_err;
+			}
 		}
 	}
+
+	ret = cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
+					       &iovad->cpuhp_dead);
+	if (ret)
+		goto out_err;
+	return 0;
+
+out_err:
+	free_iova_rcaches(iovad);
+	return ret;
 }
+EXPORT_SYMBOL_GPL(iova_domain_init_rcaches);
 
 /*
  * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
@@ -831,7 +872,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
 {
 	unsigned int log_size = order_base_2(size);
 
-	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
+	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE || !iovad->rcaches)
 		return 0;
 
 	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
@@ -849,6 +890,8 @@ static void free_iova_rcaches(struct iova_domain *iovad)
 
 	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
 		rcache = &iovad->rcaches[i];
+		if (!rcache->cpu_rcaches)
+			break;
 		for_each_possible_cpu(cpu) {
 			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
 			iova_magazine_free(cpu_rcache->loaded);
@@ -858,6 +901,9 @@ static void free_iova_rcaches(struct iova_domain *iovad)
 		for (j = 0; j < rcache->depot_size; ++j)
 			iova_magazine_free(rcache->depot[j]);
 	}
+
+	kfree(iovad->rcaches);
+	iovad->rcaches = NULL;
 }
 
 /*
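The iova.c hunks above move rcache setup out of init_iova_domain() and behind an explicit, failable iova_domain_init_rcaches(); put_iova_domain() only tears the caches down when they were actually set up. A hypothetical caller under those assumptions (the granule and start-pfn values are illustrative only):

#include <linux/iova.h>
#include <linux/sizes.h>

static int example_iovad_setup(struct iova_domain *iovad)
{
	int ret;

	/* Step 1: cannot fail any more, no allocations happen here. */
	init_iova_domain(iovad, SZ_4K, 1);

	/* Step 2: may fail with -ENOMEM, unlike the old implicit init. */
	ret = iova_domain_init_rcaches(iovad);
	if (ret)
		put_iova_domain(iovad);	/* safe: rcaches is NULL on failure */

	return ret;
}

The NULL checks added to iova_rcache_get() and put_iova_domain() are what make step 2 optional: a user that never calls iova_domain_init_rcaches() simply runs uncached.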
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index ca752bdc710f..00d5f7a33499 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -868,14 +868,6 @@ static struct iommu_group *ipmmu_find_group(struct device *dev)
 
 static const struct iommu_ops ipmmu_ops = {
 	.domain_alloc = ipmmu_domain_alloc,
-	.domain_free = ipmmu_domain_free,
-	.attach_dev = ipmmu_attach_device,
-	.detach_dev = ipmmu_detach_device,
-	.map = ipmmu_map,
-	.unmap = ipmmu_unmap,
-	.flush_iotlb_all = ipmmu_flush_iotlb_all,
-	.iotlb_sync = ipmmu_iotlb_sync,
-	.iova_to_phys = ipmmu_iova_to_phys,
 	.probe_device = ipmmu_probe_device,
 	.release_device = ipmmu_release_device,
 	.probe_finalize = ipmmu_probe_finalize,
@@ -883,6 +875,16 @@ static const struct iommu_ops ipmmu_ops = {
			? generic_device_group : ipmmu_find_group,
 	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
 	.of_xlate = ipmmu_of_xlate,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = ipmmu_attach_device,
+		.detach_dev = ipmmu_detach_device,
+		.map = ipmmu_map,
+		.unmap = ipmmu_unmap,
+		.flush_iotlb_all = ipmmu_flush_iotlb_all,
+		.iotlb_sync = ipmmu_iotlb_sync,
+		.iova_to_phys = ipmmu_iova_to_phys,
+		.free = ipmmu_domain_free,
+	}
 };
 
 /* -----------------------------------------------------------------------------
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 3a38352b603f..4f441f1b750d 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -558,11 +558,6 @@ fail:
 	return ret;
 }
 
-static bool msm_iommu_capable(enum iommu_cap cap)
-{
-	return false;
-}
-
 static void print_ctx_regs(void __iomem *base, int ctx)
 {
 	unsigned int fsr = GET_FSR(base, ctx);
@@ -672,27 +667,28 @@ fail:
 }
 
 static struct iommu_ops msm_iommu_ops = {
-	.capable = msm_iommu_capable,
 	.domain_alloc = msm_iommu_domain_alloc,
-	.domain_free = msm_iommu_domain_free,
-	.attach_dev = msm_iommu_attach_dev,
-	.detach_dev = msm_iommu_detach_dev,
-	.map = msm_iommu_map,
-	.unmap = msm_iommu_unmap,
-	/*
-	 * Nothing is needed here, the barrier to guarantee
-	 * completion of the tlb sync operation is implicitly
-	 * taken care when the iommu client does a writel before
-	 * kick starting the other master.
-	 */
-	.iotlb_sync = NULL,
-	.iotlb_sync_map = msm_iommu_sync_map,
-	.iova_to_phys = msm_iommu_iova_to_phys,
 	.probe_device = msm_iommu_probe_device,
 	.release_device = msm_iommu_release_device,
 	.device_group = generic_device_group,
 	.pgsize_bitmap = MSM_IOMMU_PGSIZES,
 	.of_xlate = qcom_iommu_of_xlate,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = msm_iommu_attach_dev,
+		.detach_dev = msm_iommu_detach_dev,
+		.map = msm_iommu_map,
+		.unmap = msm_iommu_unmap,
+		/*
+		 * Nothing is needed here, the barrier to guarantee
+		 * completion of the tlb sync operation is implicitly
+		 * taken care when the iommu client does a writel before
+		 * kick starting the other master.
+		 */
+		.iotlb_sync = NULL,
+		.iotlb_sync_map = msm_iommu_sync_map,
+		.iova_to_phys = msm_iommu_iova_to_phys,
+		.free = msm_iommu_domain_free,
+	}
 };
 
 static int msm_iommu_probe(struct platform_device *pdev)
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 25b834104790..884c288a8f63 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -658,15 +658,6 @@ static void mtk_iommu_get_resv_regions(struct device *dev,
 
 static const struct iommu_ops mtk_iommu_ops = {
 	.domain_alloc = mtk_iommu_domain_alloc,
-	.domain_free = mtk_iommu_domain_free,
-	.attach_dev = mtk_iommu_attach_device,
-	.detach_dev = mtk_iommu_detach_device,
-	.map = mtk_iommu_map,
-	.unmap = mtk_iommu_unmap,
-	.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
-	.iotlb_sync = mtk_iommu_iotlb_sync,
-	.iotlb_sync_map = mtk_iommu_sync_map,
-	.iova_to_phys = mtk_iommu_iova_to_phys,
 	.probe_device = mtk_iommu_probe_device,
 	.release_device = mtk_iommu_release_device,
 	.device_group = mtk_iommu_device_group,
@@ -675,6 +666,17 @@ static const struct iommu_ops mtk_iommu_ops = {
 	.put_resv_regions = generic_iommu_put_resv_regions,
 	.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
 	.owner = THIS_MODULE,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = mtk_iommu_attach_device,
+		.detach_dev = mtk_iommu_detach_device,
+		.map = mtk_iommu_map,
+		.unmap = mtk_iommu_unmap,
+		.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
+		.iotlb_sync = mtk_iommu_iotlb_sync,
+		.iotlb_sync_map = mtk_iommu_sync_map,
+		.iova_to_phys = mtk_iommu_iova_to_phys,
+		.free = mtk_iommu_domain_free,
+	}
 };
 
 static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index be22fcf988ce..293ef7e1a861 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -514,12 +514,6 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
 
 static const struct iommu_ops mtk_iommu_ops = {
 	.domain_alloc = mtk_iommu_domain_alloc,
-	.domain_free = mtk_iommu_domain_free,
-	.attach_dev = mtk_iommu_attach_device,
-	.detach_dev = mtk_iommu_detach_device,
-	.map = mtk_iommu_map,
-	.unmap = mtk_iommu_unmap,
-	.iova_to_phys = mtk_iommu_iova_to_phys,
 	.probe_device = mtk_iommu_probe_device,
 	.probe_finalize = mtk_iommu_probe_finalize,
 	.release_device = mtk_iommu_release_device,
@@ -527,6 +521,14 @@ static const struct iommu_ops mtk_iommu_ops = {
 	.device_group = generic_device_group,
 	.pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT,
 	.owner = THIS_MODULE,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = mtk_iommu_attach_device,
+		.detach_dev = mtk_iommu_detach_device,
+		.map = mtk_iommu_map,
+		.unmap = mtk_iommu_unmap,
+		.iova_to_phys = mtk_iommu_iova_to_phys,
+		.free = mtk_iommu_domain_free,
+	}
 };
 
 static const struct of_device_id mtk_iommu_of_ids[] = {
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 980e4af3f06b..4aab631ef517 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1734,16 +1734,18 @@ static struct iommu_group *omap_iommu_device_group(struct device *dev)
 
 static const struct iommu_ops omap_iommu_ops = {
 	.domain_alloc = omap_iommu_domain_alloc,
-	.domain_free = omap_iommu_domain_free,
-	.attach_dev = omap_iommu_attach_dev,
-	.detach_dev = omap_iommu_detach_dev,
-	.map = omap_iommu_map,
-	.unmap = omap_iommu_unmap,
-	.iova_to_phys = omap_iommu_iova_to_phys,
 	.probe_device = omap_iommu_probe_device,
 	.release_device = omap_iommu_release_device,
 	.device_group = omap_iommu_device_group,
 	.pgsize_bitmap = OMAP_IOMMU_PGSIZES,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = omap_iommu_attach_dev,
+		.detach_dev = omap_iommu_detach_dev,
+		.map = omap_iommu_map,
+		.unmap = omap_iommu_unmap,
+		.iova_to_phys = omap_iommu_iova_to_phys,
+		.free = omap_iommu_domain_free,
+	}
 };
 
 static int __init omap_iommu_init(void)
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 7f23ad61c094..2ae97a84d41b 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1187,17 +1187,19 @@ static int rk_iommu_of_xlate(struct device *dev,
 
 static const struct iommu_ops rk_iommu_ops = {
 	.domain_alloc = rk_iommu_domain_alloc,
-	.domain_free = rk_iommu_domain_free,
-	.attach_dev = rk_iommu_attach_device,
-	.detach_dev = rk_iommu_detach_device,
-	.map = rk_iommu_map,
-	.unmap = rk_iommu_unmap,
 	.probe_device = rk_iommu_probe_device,
 	.release_device = rk_iommu_release_device,
-	.iova_to_phys = rk_iommu_iova_to_phys,
 	.device_group = rk_iommu_device_group,
 	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
 	.of_xlate = rk_iommu_of_xlate,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = rk_iommu_attach_device,
+		.detach_dev = rk_iommu_detach_device,
+		.map = rk_iommu_map,
+		.unmap = rk_iommu_unmap,
+		.iova_to_phys = rk_iommu_iova_to_phys,
+		.free = rk_iommu_domain_free,
+	}
 };
 
 static int rk_iommu_probe(struct platform_device *pdev)
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 50860ebdd087..3833e86c6e7b 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -363,16 +363,18 @@ void zpci_destroy_iommu(struct zpci_dev *zdev)
 
 static const struct iommu_ops s390_iommu_ops = {
 	.capable = s390_iommu_capable,
 	.domain_alloc = s390_domain_alloc,
-	.domain_free = s390_domain_free,
-	.attach_dev = s390_iommu_attach_device,
-	.detach_dev = s390_iommu_detach_device,
-	.map = s390_iommu_map,
-	.unmap = s390_iommu_unmap,
-	.iova_to_phys = s390_iommu_iova_to_phys,
 	.probe_device = s390_iommu_probe_device,
 	.release_device = s390_iommu_release_device,
 	.device_group = generic_device_group,
 	.pgsize_bitmap = S390_IOMMU_PGSIZES,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = s390_iommu_attach_device,
+		.detach_dev = s390_iommu_detach_device,
+		.map = s390_iommu_map,
+		.unmap = s390_iommu_unmap,
+		.iova_to_phys = s390_iommu_iova_to_phys,
+		.free = s390_domain_free,
+	}
 };
 
 static int __init s390_iommu_init(void)
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index 27ac818b0354..bd409bab6286 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -416,20 +416,22 @@ static int sprd_iommu_of_xlate(struct device *dev,
			       struct of_phandle_args *args)
 
 static const struct iommu_ops sprd_iommu_ops = {
 	.domain_alloc = sprd_iommu_domain_alloc,
-	.domain_free = sprd_iommu_domain_free,
-	.attach_dev = sprd_iommu_attach_device,
-	.detach_dev = sprd_iommu_detach_device,
-	.map = sprd_iommu_map,
-	.unmap = sprd_iommu_unmap,
-	.iotlb_sync_map = sprd_iommu_sync_map,
-	.iotlb_sync = sprd_iommu_sync,
-	.iova_to_phys = sprd_iommu_iova_to_phys,
 	.probe_device = sprd_iommu_probe_device,
 	.release_device = sprd_iommu_release_device,
 	.device_group = sprd_iommu_device_group,
 	.of_xlate = sprd_iommu_of_xlate,
 	.pgsize_bitmap = ~0UL << SPRD_IOMMU_PAGE_SHIFT,
 	.owner = THIS_MODULE,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = sprd_iommu_attach_device,
+		.detach_dev = sprd_iommu_detach_device,
+		.map = sprd_iommu_map,
+		.unmap = sprd_iommu_unmap,
+		.iotlb_sync_map = sprd_iommu_sync_map,
+		.iotlb_sync = sprd_iommu_sync,
+		.iova_to_phys = sprd_iommu_iova_to_phys,
+		.free = sprd_iommu_domain_free,
+	}
 };
 
 static const struct of_device_id sprd_iommu_of_match[] = {
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index 92997021e188..c54ab477b8fd 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -760,19 +760,21 @@ static int sun50i_iommu_of_xlate(struct device *dev,
 
 static const struct iommu_ops sun50i_iommu_ops = {
 	.pgsize_bitmap = SZ_4K,
-	.attach_dev = sun50i_iommu_attach_device,
-	.detach_dev = sun50i_iommu_detach_device,
 	.device_group = sun50i_iommu_device_group,
 	.domain_alloc = sun50i_iommu_domain_alloc,
-	.domain_free = sun50i_iommu_domain_free,
-	.flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
-	.iotlb_sync = sun50i_iommu_iotlb_sync,
-	.iova_to_phys = sun50i_iommu_iova_to_phys,
-	.map = sun50i_iommu_map,
 	.of_xlate = sun50i_iommu_of_xlate,
 	.probe_device = sun50i_iommu_probe_device,
 	.release_device = sun50i_iommu_release_device,
-	.unmap = sun50i_iommu_unmap,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = sun50i_iommu_attach_device,
+		.detach_dev = sun50i_iommu_detach_device,
+		.flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
+		.iotlb_sync = sun50i_iommu_iotlb_sync,
+		.iova_to_phys = sun50i_iommu_iova_to_phys,
+		.map = sun50i_iommu_map,
+		.unmap = sun50i_iommu_unmap,
+		.free = sun50i_iommu_domain_free,
+	}
 };
 
 static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 6a358f92c7e5..a6700a40a6f8 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -238,11 +238,6 @@ static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
 	return pte & GART_PAGE_MASK;
 }
 
-static bool gart_iommu_capable(enum iommu_cap cap)
-{
-	return false;
-}
-
 static struct iommu_device *gart_iommu_probe_device(struct device *dev)
 {
 	if (!dev_iommu_fwspec_get(dev))
@@ -276,21 +271,22 @@ static void gart_iommu_sync(struct iommu_domain *domain,
 }
 
 static const struct iommu_ops gart_iommu_ops = {
-	.capable = gart_iommu_capable,
 	.domain_alloc = gart_iommu_domain_alloc,
-	.domain_free = gart_iommu_domain_free,
-	.attach_dev = gart_iommu_attach_dev,
-	.detach_dev = gart_iommu_detach_dev,
 	.probe_device = gart_iommu_probe_device,
 	.release_device = gart_iommu_release_device,
 	.device_group = generic_device_group,
-	.map = gart_iommu_map,
-	.unmap = gart_iommu_unmap,
-	.iova_to_phys = gart_iommu_iova_to_phys,
 	.pgsize_bitmap = GART_IOMMU_PGSIZES,
 	.of_xlate = gart_iommu_of_xlate,
-	.iotlb_sync_map = gart_iommu_sync_map,
-	.iotlb_sync = gart_iommu_sync,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = gart_iommu_attach_dev,
+		.detach_dev = gart_iommu_detach_dev,
+		.map = gart_iommu_map,
+		.unmap = gart_iommu_unmap,
+		.iova_to_phys = gart_iommu_iova_to_phys,
+		.iotlb_sync_map = gart_iommu_sync_map,
+		.iotlb_sync = gart_iommu_sync,
+		.free = gart_iommu_domain_free,
+	}
 };
 
 int tegra_gart_suspend(struct gart_device *gart)
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index e900e3c46903..44c3716e16ee 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -272,11 +272,6 @@ static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
 	clear_bit(id, smmu->asids);
 }
 
-static bool tegra_smmu_capable(enum iommu_cap cap)
-{
-	return false;
-}
-
 static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
 {
 	struct tegra_smmu_as *as;
@@ -967,19 +962,20 @@ static int tegra_smmu_of_xlate(struct device *dev,
 }
 
 static const struct iommu_ops tegra_smmu_ops = {
-	.capable = tegra_smmu_capable,
 	.domain_alloc = tegra_smmu_domain_alloc,
-	.domain_free = tegra_smmu_domain_free,
-	.attach_dev = tegra_smmu_attach_dev,
-	.detach_dev = tegra_smmu_detach_dev,
 	.probe_device = tegra_smmu_probe_device,
 	.release_device = tegra_smmu_release_device,
 	.device_group = tegra_smmu_device_group,
-	.map = tegra_smmu_map,
-	.unmap = tegra_smmu_unmap,
-	.iova_to_phys = tegra_smmu_iova_to_phys,
 	.of_xlate = tegra_smmu_of_xlate,
 	.pgsize_bitmap = SZ_4K,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = tegra_smmu_attach_dev,
+		.detach_dev = tegra_smmu_detach_dev,
+		.map = tegra_smmu_map,
+		.unmap = tegra_smmu_unmap,
+		.iova_to_phys = tegra_smmu_iova_to_phys,
+		.free = tegra_smmu_domain_free,
+	}
 };
 
 static void tegra_smmu_ahb_enable(void)
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index f2aa34f57454..25be4b822aa0 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -1008,12 +1008,6 @@ static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 
 static struct iommu_ops viommu_ops = {
 	.domain_alloc = viommu_domain_alloc,
-	.domain_free = viommu_domain_free,
-	.attach_dev = viommu_attach_dev,
-	.map = viommu_map,
-	.unmap = viommu_unmap,
-	.iova_to_phys = viommu_iova_to_phys,
-	.iotlb_sync = viommu_iotlb_sync,
 	.probe_device = viommu_probe_device,
 	.probe_finalize = viommu_probe_finalize,
 	.release_device = viommu_release_device,
@@ -1022,6 +1016,14 @@ static struct iommu_ops viommu_ops = {
 	.put_resv_regions = generic_iommu_put_resv_regions,
 	.of_xlate = viommu_of_xlate,
 	.owner = THIS_MODULE,
+	.default_domain_ops = &(const struct iommu_domain_ops) {
+		.attach_dev = viommu_attach_dev,
+		.map = viommu_map,
+		.unmap = viommu_unmap,
+		.iova_to_phys = viommu_iova_to_phys,
+		.iotlb_sync = viommu_iotlb_sync,
+		.free = viommu_domain_free,
+	}
 };
 
 static int viommu_init_vqs(struct viommu_dev *viommu)
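Every driver conversion in this merge follows the same shape: instance-level callbacks stay in iommu_ops, domain-level callbacks move into a compound-literal default_domain_ops table (with domain_free renamed to .free), which the core installs as domain->ops when the domain is allocated. A condensed sketch of that shape with placeholder names; foo_* is not a real driver, and the callback bodies are omitted (prototypes only):

#include <linux/iommu.h>
#include <linux/sizes.h>

/* Placeholder callbacks; a real driver supplies static definitions. */
struct iommu_domain *foo_domain_alloc(unsigned int type);
struct iommu_device *foo_probe_device(struct device *dev);
void foo_release_device(struct device *dev);
int foo_attach_dev(struct iommu_domain *domain, struct device *dev);
int foo_map(struct iommu_domain *domain, unsigned long iova,
	    phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
size_t foo_unmap(struct iommu_domain *domain, unsigned long iova,
		 size_t size, struct iommu_iotlb_gather *gather);
phys_addr_t foo_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
void foo_domain_free(struct iommu_domain *domain);

static const struct iommu_ops foo_iommu_ops = {
	/* Instance-level ops stay at the top level. */
	.domain_alloc = foo_domain_alloc,
	.probe_device = foo_probe_device,
	.release_device = foo_release_device,
	.device_group = generic_device_group,
	.pgsize_bitmap = SZ_4K,
	/* Domain-level ops move here; .domain_free becomes .free. */
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev = foo_attach_dev,
		.map = foo_map,
		.unmap = foo_unmap,
		.iova_to_phys = foo_iova_to_phys,
		.free = foo_domain_free,
	}
};

The compound literal keeps the conversion mechanical, and leaving default_domain_ops as a pointer leaves room for drivers to hand out different iommu_domain_ops per domain type later on.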