author | Linus Torvalds <torvalds@linux-foundation.org> | 2020-04-08 11:00:00 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-04-08 11:00:00 -0700
commit | 0906d8b975ff713cfb55328e4f3bf6de5967415e (patch)
tree | 213d2262bcd109a93c7ff25009af8b8945c90f52
parent | 0339eb95403fb4664219be344a9399a3fdf1fae1 (diff)
parent | ff68eb23308e6538ec7864c83d39540f423bbe90 (diff)
download | linux-0906d8b975ff713cfb55328e4f3bf6de5967415e.tar.bz2
Merge tag 'iommu-updates-v5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull iommu updates from Joerg Roedel:
- ARM-SMMU support for the TLB range invalidation command in SMMUv3.2
- ARM-SMMU introduction of command batching helpers to batch up CD and
ATC invalidation
- ARM-SMMU support for PCI PASID, along with necessary PCI symbol
exports
- Introduce a generic (actually rename an existing) IOMMU related
pointer in struct device and reduce the IOMMU related pointers in
struct device
- Some fixes for the OMAP IOMMU driver to make it build on 64bit
architectures
- Various smaller fixes and improvements
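
For orientation, the headline refactor here collapses the two IOMMU pointers in struct device (iommu_fwspec and iommu_param) into a single dev->iommu. A minimal sketch of the resulting structure and accessors, condensed from the include/linux/iommu.h hunks in the diff below:

    /* One per-device anchor for all generic IOMMU data: the fwspec and
     * the driver-private pointer that used to live in fwspec->iommu_priv
     * both hang off dev->iommu now. */
    struct dev_iommu {
        struct mutex lock;
        struct iommu_fault_param *fault_param;  /* fault reporting data */
        struct iommu_fwspec      *fwspec;       /* firmware-supplied properties */
        void                     *priv;         /* IOMMU driver private data */
    };

    static inline void *dev_iommu_priv_get(struct device *dev)
    {
        return dev->iommu->priv;
    }

    static inline void dev_iommu_priv_set(struct device *dev, void *priv)
    {
        dev->iommu->priv = priv;
    }

Most of the per-driver churn in the commit list that follows is a mechanical conversion to these accessors.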
* tag 'iommu-updates-v5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (39 commits)
iommu: Move fwspec->iommu_priv to struct dev_iommu
iommu/virtio: Use accessor functions for iommu private data
iommu/qcom: Use accessor functions for iommu private data
iommu/mediatek: Use accessor functions for iommu private data
iommu/renesas: Use accessor functions for iommu private data
iommu/arm-smmu: Use accessor functions for iommu private data
iommu/arm-smmu: Refactor master_cfg/fwspec usage
iommu/arm-smmu-v3: Use accessor functions for iommu private data
iommu: Introduce accessors for iommu private data
iommu/arm-smmu: Fix uninitilized variable warning
iommu: Move iommu_fwspec to struct dev_iommu
iommu: Rename struct iommu_param to dev_iommu
iommu/tegra-gart: Remove direct access of dev->iommu_fwspec
drm/msm/mdp5: Remove direct access of dev->iommu_fwspec
ACPI/IORT: Remove direct access of dev->iommu_fwspec
iommu: Define dev_iommu_fwspec_get() for !CONFIG_IOMMU_API
iommu/virtio: Reject IOMMU page granule larger than PAGE_SIZE
iommu/virtio: Fix freeing of incomplete domains
iommu/virtio: Fix sparse warning
iommu/vt-d: Add build dependency on IOASID
...
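
The command batching helpers mentioned above replace one-command-plus-sync sequences with a queue-then-sync pattern. A sketch of the call pattern, condensed from the arm_smmu_sync_cd() hunk in the diff below (smmu, master and cmd as in that function):

    struct arm_smmu_cmdq_batch cmds = {};

    list_for_each_entry(master, &smmu_domain->devices, domain_head) {
        for (i = 0; i < master->num_sids; i++) {
            cmd.cfgi.sid = master->sids[i];
            /* queues the command; flushes without a sync if the batch is full */
            arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
        }
    }
    /* a single CMD_SYNC covers everything queued above */
    arm_smmu_cmdq_batch_submit(smmu, &cmds);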
-rw-r--r-- | MAINTAINERS | 1
-rw-r--r-- | drivers/acpi/arm64/iort.c | 6
-rw-r--r-- | drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 2
-rw-r--r-- | drivers/iommu/Kconfig | 21
-rw-r--r-- | drivers/iommu/amd_iommu_types.h | 2
-rw-r--r-- | drivers/iommu/arm-smmu-v3.c | 214
-rw-r--r-- | drivers/iommu/arm-smmu.c | 55
-rw-r--r-- | drivers/iommu/intel-iommu.c | 3
-rw-r--r-- | drivers/iommu/intel-svm.c | 9
-rw-r--r-- | drivers/iommu/iommu.c | 46
-rw-r--r-- | drivers/iommu/ipmmu-vmsa.c | 7
-rw-r--r-- | drivers/iommu/mtk_iommu.c | 13
-rw-r--r-- | drivers/iommu/mtk_iommu_v1.c | 14
-rw-r--r-- | drivers/iommu/omap-iommu.c | 10
-rw-r--r-- | drivers/iommu/omap-iopgtable.h | 3
-rw-r--r-- | drivers/iommu/qcom_iommu.c | 63
-rw-r--r-- | drivers/iommu/tegra-gart.c | 2
-rw-r--r-- | drivers/iommu/virtio-iommu.c | 42
-rw-r--r-- | drivers/pci/ats.c | 4
-rw-r--r-- | include/linux/device.h | 9
-rw-r--r-- | include/linux/iommu.h | 35
-rw-r--r-- | include/uapi/linux/virtio_iommu.h | 12
22 files changed, 386 insertions, 187 deletions
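
Before the patch body, one detail of the SMMUv3.2 range invalidation is worth spelling out: each TLBI command covers num * 2^scale granules, where the NUM field holds num - 1 (at most CMDQ_TLBI_RANGE_NUM_MAX = 31) and TG/TTL describe the granule. A worked sketch of the decomposition loop, condensed from the arm_smmu_tlb_inv_range() hunk below (tg is log2 of the leaf page size):

    while (num_pages) {
        /* largest power-of-2 factor of the remaining page count */
        unsigned long scale = __ffs(num_pages);
        /* how many 2^scale chunks fit, capped at 31 */
        unsigned long num = (num_pages >> scale) & CMDQ_TLBI_RANGE_NUM_MAX;

        /* one command invalidates num << (scale + tg) bytes at iova */
        iova += num << (scale + tg);
        /* clear the bits just consumed and go round again */
        num_pages -= num << scale;
    }

For example, 48 pages decompose in a single pass (scale = 4, num = 3), whereas 255 pages take two commands (31 pages at scale 0, then 7 chunks at scale 5).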
diff --git a/MAINTAINERS b/MAINTAINERS
index 2a266746f955..5058ce605a04 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1444,6 +1444,7 @@ M: Will Deacon <will@kernel.org>
 R: Robin Murphy <robin.murphy@arm.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
+F: Documentation/devicetree/bindings/iommu/arm,smmu*
 F: drivers/iommu/arm-smmu*
 F: drivers/iommu/io-pgtable-arm.c
 F: drivers/iommu/io-pgtable-arm-v7s.c
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index ed3d2d1a7ae9..7d04424189df 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1015,6 +1015,7 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
     return ops;
 if (dev_is_pci(dev)) {
+    struct iommu_fwspec *fwspec;
     struct pci_bus *bus = to_pci_dev(dev)->bus;
     struct iort_pci_alias_info info = { .dev = dev };
@@ -1027,8 +1028,9 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
     err = pci_for_each_dma_alias(to_pci_dev(dev), iort_pci_iommu_init, &info);
-    if (!err && iort_pci_rc_supports_ats(node))
-        dev->iommu_fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
+    fwspec = dev_iommu_fwspec_get(dev);
+    if (fwspec && iort_pci_rc_supports_ats(node))
+        fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
 } else {
     int i = 0;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 6650f478b226..47b989834af1 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -633,7 +633,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
 if (config->platform.iommu) {
     iommu_dev = &pdev->dev;
-    if (!iommu_dev->iommu_fwspec)
+    if (!dev_iommu_fwspec_get(iommu_dev))
         iommu_dev = iommu_dev->parent;
     aspace = msm_gem_address_space_create(iommu_dev,
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index d2fade984999..58b4a4dbfc78 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -188,6 +188,7 @@ config INTEL_IOMMU
     select NEED_DMA_MAP_STATE
     select DMAR_TABLE
     select SWIOTLB
+    select IOASID
     help
       DMA remapping (DMAR) devices support enables independent address
       translations for Direct Memory Access (DMA) from devices.
@@ -273,7 +274,7 @@ config IRQ_REMAP
 # OMAP IOMMU support
 config OMAP_IOMMU
     bool "OMAP IOMMU Support"
-    depends on ARM && MMU
+    depends on ARM && MMU || (COMPILE_TEST && (ARM || ARM64 || IA64 || SPARC))
     depends on ARCH_OMAP2PLUS || COMPILE_TEST
     select IOMMU_API
     ---help---
@@ -291,7 +292,7 @@ config OMAP_IOMMU_DEBUG
 config ROCKCHIP_IOMMU
     bool "Rockchip IOMMU Support"
-    depends on ARM || ARM64
+    depends on ARM || ARM64 || (COMPILE_TEST && (ARM64 || IA64 || SPARC))
     depends on ARCH_ROCKCHIP || COMPILE_TEST
     select IOMMU_API
     select ARM_DMA_USE_IOMMU
@@ -325,7 +326,7 @@ config TEGRA_IOMMU_SMMU
 config EXYNOS_IOMMU
     bool "Exynos IOMMU Support"
-    depends on ARCH_EXYNOS && MMU
+    depends on ARCH_EXYNOS && MMU || (COMPILE_TEST && (ARM || ARM64 || IA64 || SPARC))
     depends on !CPU_BIG_ENDIAN # revisit driver if we can enable big-endian ptes
     select IOMMU_API
     select ARM_DMA_USE_IOMMU
@@ -361,7 +362,7 @@ config IPMMU_VMSA
 config SPAPR_TCE_IOMMU
     bool "sPAPR TCE IOMMU Support"
-    depends on PPC_POWERNV || PPC_PSERIES
+    depends on PPC_POWERNV || PPC_PSERIES || (PPC && COMPILE_TEST)
     select IOMMU_API
     help
       Enables bits of IOMMU API required by VFIO. The iommu_ops
@@ -370,7 +371,7 @@
 # ARM IOMMU support
 config ARM_SMMU
     tristate "ARM Ltd. System MMU (SMMU) Support"
-    depends on (ARM64 || ARM) && MMU
+    depends on (ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64)) && MMU
     select IOMMU_API
     select IOMMU_IO_PGTABLE_LPAE
     select ARM_DMA_USE_IOMMU if ARM
@@ -440,7 +441,7 @@ config S390_IOMMU
 config S390_CCW_IOMMU
     bool "S390 CCW IOMMU Support"
-    depends on S390 && CCW
+    depends on S390 && CCW || COMPILE_TEST
     select IOMMU_API
     help
       Enables bits of IOMMU API required by VFIO. The iommu_ops
@@ -448,7 +449,7 @@
 config S390_AP_IOMMU
     bool "S390 AP IOMMU Support"
-    depends on S390 && ZCRYPT
+    depends on S390 && ZCRYPT || COMPILE_TEST
     select IOMMU_API
     help
       Enables bits of IOMMU API required by VFIO. The iommu_ops
@@ -456,7 +457,7 @@
 config MTK_IOMMU
     bool "MTK IOMMU Support"
-    depends on ARM || ARM64
+    depends on ARM || ARM64 || COMPILE_TEST
     depends on ARCH_MEDIATEK || COMPILE_TEST
     select ARM_DMA_USE_IOMMU
     select IOMMU_API
@@ -506,8 +507,8 @@ config HYPERV_IOMMU
       guests to run with x2APIC mode enabled.
 config VIRTIO_IOMMU
-    bool "Virtio IOMMU driver"
-    depends on VIRTIO=y
+    tristate "Virtio IOMMU driver"
+    depends on VIRTIO
     depends on ARM64
     select IOMMU_API
     select INTERVAL_TREE
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index f8d01d6b00da..ca8c4522045b 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -348,7 +348,7 @@
 #define DTE_GCR3_VAL_A(x)	(((x) >> 12) & 0x00007ULL)
 #define DTE_GCR3_VAL_B(x)	(((x) >> 15) & 0x0ffffULL)
-#define DTE_GCR3_VAL_C(x)	(((x) >> 31) & 0xfffffULL)
+#define DTE_GCR3_VAL_C(x)	(((x) >> 31) & 0x1fffffULL)
 #define DTE_GCR3_INDEX_A	0
 #define DTE_GCR3_INDEX_B	1
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index aa3ac2a03807..82508730feb7 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -69,6 +69,9 @@
 #define IDR1_SSIDSIZE		GENMASK(10, 6)
 #define IDR1_SIDSIZE		GENMASK(5, 0)
+#define ARM_SMMU_IDR3		0xc
+#define IDR3_RIL		(1 << 10)
+
 #define ARM_SMMU_IDR5		0x14
 #define IDR5_STALL_MAX		GENMASK(31, 16)
 #define IDR5_GRAN64K		(1 << 6)
@@ -346,9 +349,14 @@
 #define CMDQ_CFGI_1_LEAF	(1UL << 0)
 #define CMDQ_CFGI_1_RANGE	GENMASK_ULL(4, 0)
+#define CMDQ_TLBI_0_NUM		GENMASK_ULL(16, 12)
+#define CMDQ_TLBI_RANGE_NUM_MAX	31
+#define CMDQ_TLBI_0_SCALE	GENMASK_ULL(24, 20)
 #define CMDQ_TLBI_0_VMID	GENMASK_ULL(47, 32)
 #define CMDQ_TLBI_0_ASID	GENMASK_ULL(63, 48)
 #define CMDQ_TLBI_1_LEAF	(1UL << 0)
+#define CMDQ_TLBI_1_TTL		GENMASK_ULL(9, 8)
+#define CMDQ_TLBI_1_TG		GENMASK_ULL(11, 10)
 #define CMDQ_TLBI_1_VA_MASK	GENMASK_ULL(63, 12)
 #define CMDQ_TLBI_1_IPA_MASK	GENMASK_ULL(51, 12)
@@ -473,9 +481,13 @@ struct arm_smmu_cmdq_ent {
 #define CMDQ_OP_TLBI_S2_IPA	0x2a
 #define CMDQ_OP_TLBI_NSNH_ALL	0x30
     struct {
+        u8	num;
+        u8	scale;
         u16	asid;
         u16	vmid;
         bool	leaf;
+        u8	ttl;
+        u8	tg;
         u64	addr;
     } tlbi;
@@ -548,6 +560,11 @@ struct arm_smmu_cmdq {
     atomic_t	lock;
 };
+struct arm_smmu_cmdq_batch {
+    u64	cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS];
+    int	num;
+};
+
 struct arm_smmu_evtq {
     struct arm_smmu_queue	q;
     u32			max_stalls;
 };
@@ -627,6 +644,7 @@ struct arm_smmu_device {
 #define ARM_SMMU_FEAT_HYP		(1 << 12)
 #define ARM_SMMU_FEAT_STALL_FORCE	(1 << 13)
 #define ARM_SMMU_FEAT_VAX		(1 << 14)
+#define ARM_SMMU_FEAT_RANGE_INV		(1 << 15)
     u32	features;
 #define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)
@@ -895,14 +913,22 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
     cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
     break;
 case CMDQ_OP_TLBI_NH_VA:
+    cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num);
+    cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale);
     cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
     cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
     cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
+    cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl);
+    cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg);
     cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
     break;
 case CMDQ_OP_TLBI_S2_IPA:
+    cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num);
+    cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale);
     cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
     cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
+    cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl);
+    cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg);
     cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
     break;
 case CMDQ_OP_TLBI_NH_ASID:
@@ -1482,6 +1508,24 @@ static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
     return arm_smmu_cmdq_issue_cmdlist(smmu, NULL, 0, true);
 }
+static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
+                                    struct arm_smmu_cmdq_batch *cmds,
+                                    struct arm_smmu_cmdq_ent *cmd)
+{
+    if (cmds->num == CMDQ_BATCH_ENTRIES) {
+        arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false);
+        cmds->num = 0;
+    }
+    arm_smmu_cmdq_build_cmd(&cmds->cmds[cmds->num * CMDQ_ENT_DWORDS], cmd);
+    cmds->num++;
+}
+
+static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
+                                      struct arm_smmu_cmdq_batch *cmds)
+{
+    return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
+}
+
 /* Context descriptor manipulation functions */
 static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
                              int ssid, bool leaf)
@@ -1489,6 +1533,7 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
     size_t i;
     unsigned long flags;
     struct arm_smmu_master *master;
+    struct arm_smmu_cmdq_batch cmds = {};
     struct arm_smmu_device *smmu = smmu_domain->smmu;
     struct arm_smmu_cmdq_ent cmd = {
         .opcode = CMDQ_OP_CFGI_CD,
@@ -1502,12 +1547,12 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
     list_for_each_entry(master, &smmu_domain->devices, domain_head) {
         for (i = 0; i < master->num_sids; i++) {
             cmd.cfgi.sid = master->sids[i];
-            arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+            arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
         }
     }
     spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
-    arm_smmu_cmdq_issue_sync(smmu);
+    arm_smmu_cmdq_batch_submit(smmu, &cmds);
 }
 static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu,
@@ -1531,6 +1576,7 @@ static void arm_smmu_write_cd_l1_desc(__le64 *dst,
     u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) |
               CTXDESC_L1_DESC_V;
+    /* See comment in arm_smmu_write_ctx_desc() */
     WRITE_ONCE(*dst, cpu_to_le64(val));
 }
@@ -1726,7 +1772,8 @@ arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
     val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, desc->span);
     val |= desc->l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK;
-    *dst = cpu_to_le64(val);
+    /* See comment in arm_smmu_write_ctx_desc() */
+    WRITE_ONCE(*dst, cpu_to_le64(val));
 }
 static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
@@ -2132,17 +2179,16 @@ arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size,
     cmd->atc.size = log2_span;
 }
-static int arm_smmu_atc_inv_master(struct arm_smmu_master *master,
-                                   struct arm_smmu_cmdq_ent *cmd)
+static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
 {
     int i;
+    struct arm_smmu_cmdq_ent cmd;
-    if (!master->ats_enabled)
-        return 0;
+    arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
     for (i = 0; i < master->num_sids; i++) {
-        cmd->atc.sid = master->sids[i];
-        arm_smmu_cmdq_issue_cmd(master->smmu, cmd);
+        cmd.atc.sid = master->sids[i];
+        arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
     }
     return arm_smmu_cmdq_issue_sync(master->smmu);
@@ -2151,10 +2197,11 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master,
 static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
                                    int ssid, unsigned long iova, size_t size)
 {
-    int ret = 0;
+    int i;
     unsigned long flags;
     struct arm_smmu_cmdq_ent cmd;
     struct arm_smmu_master *master;
+    struct arm_smmu_cmdq_batch cmds = {};
     if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
         return 0;
@@ -2179,11 +2226,18 @@ static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
     arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
     spin_lock_irqsave(&smmu_domain->devices_lock, flags);
-    list_for_each_entry(master, &smmu_domain->devices, domain_head)
-        ret |= arm_smmu_atc_inv_master(master, &cmd);
+    list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+        if (!master->ats_enabled)
+            continue;
+
+        for (i = 0; i < master->num_sids; i++) {
+            cmd.atc.sid = master->sids[i];
+            arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd);
+        }
+    }
     spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
-    return ret ? -ETIMEDOUT : 0;
+    return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds);
 }
 /* IO_PGTABLE API */
@@ -2218,10 +2272,10 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
                                    size_t granule, bool leaf,
                                    struct arm_smmu_domain *smmu_domain)
 {
-    u64 cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS];
     struct arm_smmu_device *smmu = smmu_domain->smmu;
-    unsigned long start = iova, end = iova + size;
-    int i = 0;
+    unsigned long start = iova, end = iova + size, num_pages = 0, tg = 0;
+    size_t inv_range = granule;
+    struct arm_smmu_cmdq_batch cmds = {};
     struct arm_smmu_cmdq_ent cmd = {
         .tlbi = {
             .leaf = leaf,
@@ -2239,19 +2293,50 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
         cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
     }
+    if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
+        /* Get the leaf page size */
+        tg = __ffs(smmu_domain->domain.pgsize_bitmap);
+
+        /* Convert page size of 12,14,16 (log2) to 1,2,3 */
+        cmd.tlbi.tg = (tg - 10) / 2;
+
+        /* Determine what level the granule is at */
+        cmd.tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
+
+        num_pages = size >> tg;
+    }
+
     while (iova < end) {
-        if (i == CMDQ_BATCH_ENTRIES) {
-            arm_smmu_cmdq_issue_cmdlist(smmu, cmds, i, false);
-            i = 0;
+        if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
+            /*
+             * On each iteration of the loop, the range is 5 bits
+             * worth of the aligned size remaining.
+             * The range in pages is:
+             *
+             * range = (num_pages & (0x1f << __ffs(num_pages)))
+             */
+            unsigned long scale, num;
+
+            /* Determine the power of 2 multiple number of pages */
+            scale = __ffs(num_pages);
+            cmd.tlbi.scale = scale;
+
+            /* Determine how many chunks of 2^scale size we have */
+            num = (num_pages >> scale) & CMDQ_TLBI_RANGE_NUM_MAX;
+            cmd.tlbi.num = num - 1;
+
+            /* range is num * 2^scale * pgsize */
+            inv_range = num << (scale + tg);
+
+            /* Clear out the lower order bits for the next iteration */
+            num_pages -= num << scale;
         }
         cmd.tlbi.addr = iova;
-        arm_smmu_cmdq_build_cmd(&cmds[i * CMDQ_ENT_DWORDS], &cmd);
-        iova += granule;
-        i++;
+        arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
+        iova += inv_range;
     }
-
-    arm_smmu_cmdq_issue_cmdlist(smmu, cmds, i, true);
+    arm_smmu_cmdq_batch_submit(smmu, &cmds);
     /*
      * Unfortunately, this can't be leaf-only since we may have
@@ -2611,7 +2696,6 @@ static void arm_smmu_enable_ats(struct arm_smmu_master *master)
 static void arm_smmu_disable_ats(struct arm_smmu_master *master)
 {
-    struct arm_smmu_cmdq_ent cmd;
     struct arm_smmu_domain *smmu_domain = master->domain;
     if (!master->ats_enabled)
@@ -2623,11 +2707,57 @@ static void arm_smmu_disable_ats(struct arm_smmu_master *master)
      * ATC invalidation via the SMMU.
      */
     wmb();
-    arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
-    arm_smmu_atc_inv_master(master, &cmd);
+    arm_smmu_atc_inv_master(master);
     atomic_dec(&smmu_domain->nr_ats_masters);
 }
+static int arm_smmu_enable_pasid(struct arm_smmu_master *master)
+{
+    int ret;
+    int features;
+    int num_pasids;
+    struct pci_dev *pdev;
+
+    if (!dev_is_pci(master->dev))
+        return -ENODEV;
+
+    pdev = to_pci_dev(master->dev);
+
+    features = pci_pasid_features(pdev);
+    if (features < 0)
+        return features;
+
+    num_pasids = pci_max_pasids(pdev);
+    if (num_pasids <= 0)
+        return num_pasids;
+
+    ret = pci_enable_pasid(pdev, features);
+    if (ret) {
+        dev_err(&pdev->dev, "Failed to enable PASID\n");
+        return ret;
+    }
+
+    master->ssid_bits = min_t(u8, ilog2(num_pasids),
+                              master->smmu->ssid_bits);
+    return 0;
+}
+
+static void arm_smmu_disable_pasid(struct arm_smmu_master *master)
+{
+    struct pci_dev *pdev;
+
+    if (!dev_is_pci(master->dev))
+        return;
+
+    pdev = to_pci_dev(master->dev);
+
+    if (!pdev->pasid_enabled)
+        return;
+
+    master->ssid_bits = 0;
+    pci_disable_pasid(pdev);
+}
+
 static void arm_smmu_detach_dev(struct arm_smmu_master *master)
 {
     unsigned long flags;
@@ -2659,7 +2789,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
     if (!fwspec)
         return -ENOENT;
-    master = fwspec->iommu_priv;
+    master = dev_iommu_priv_get(dev);
     smmu = master->smmu;
     arm_smmu_detach_dev(master);
@@ -2795,7 +2925,7 @@ static int arm_smmu_add_device(struct device *dev)
     if (!fwspec || fwspec->ops != &arm_smmu_ops)
         return -ENODEV;
-    if (WARN_ON_ONCE(fwspec->iommu_priv))
+    if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
         return -EBUSY;
     smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
@@ -2810,7 +2940,7 @@ static int arm_smmu_add_device(struct device *dev)
     master->smmu = smmu;
     master->sids = fwspec->ids;
     master->num_sids = fwspec->num_ids;
-    fwspec->iommu_priv = master;
+    dev_iommu_priv_set(dev, master);
     /* Check the SIDs are in range of the SMMU and our stream table */
     for (i = 0; i < master->num_sids; i++) {
@@ -2831,13 +2961,23 @@ static int arm_smmu_add_device(struct device *dev)
     master->ssid_bits = min(smmu->ssid_bits, fwspec->num_pasid_bits);
+    /*
+     * Note that PASID must be enabled before, and disabled after ATS:
+     * PCI Express Base 4.0r1.0 - 10.5.1.3 ATS Control Register
+     *
+     * Behavior is undefined if this bit is Set and the value of the PASID
+     * Enable, Execute Requested Enable, or Privileged Mode Requested bits
+     * are changed.
+     */
+    arm_smmu_enable_pasid(master);
+
     if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB))
         master->ssid_bits = min_t(u8, master->ssid_bits,
                                   CTXDESC_LINEAR_CDMAX);
     ret = iommu_device_link(&smmu->iommu, dev);
     if (ret)
-        goto err_free_master;
+        goto err_disable_pasid;
     group = iommu_group_get_for_dev(dev);
     if (IS_ERR(group)) {
@@ -2850,9 +2990,11 @@ static int arm_smmu_add_device(struct device *dev)
 err_unlink:
     iommu_device_unlink(&smmu->iommu, dev);
+err_disable_pasid:
+    arm_smmu_disable_pasid(master);
 err_free_master:
     kfree(master);
-    fwspec->iommu_priv = NULL;
+    dev_iommu_priv_set(dev, NULL);
     return ret;
 }
@@ -2865,11 +3007,12 @@ static void arm_smmu_remove_device(struct device *dev)
     if (!fwspec || fwspec->ops != &arm_smmu_ops)
         return;
-    master = fwspec->iommu_priv;
+    master = dev_iommu_priv_get(dev);
     smmu = master->smmu;
     arm_smmu_detach_dev(master);
     iommu_group_remove_device(dev);
     iommu_device_unlink(&smmu->iommu, dev);
+    arm_smmu_disable_pasid(master);
     kfree(master);
     iommu_fwspec_free(dev);
 }
@@ -3700,6 +3843,11 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
     if (smmu->sid_bits <= STRTAB_SPLIT)
         smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;
+    /* IDR3 */
+    reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3);
+    if (FIELD_GET(IDR3_RIL, reg))
+        smmu->features |= ARM_SMMU_FEAT_RANGE_INV;
+
     /* IDR5 */
     reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 16c4b87af42b..a6a5796e9c41 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -98,12 +98,10 @@ struct arm_smmu_master_cfg {
     s16	smendx[];
 };
 #define INVALID_SMENDX		-1
-#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
-#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
-#define fwspec_smendx(fw, i) \
-    (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
-#define for_each_cfg_sme(fw, i, idx) \
-    for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
+#define cfg_smendx(cfg, fw, i) \
+    (i >= fw->num_ids ? INVALID_SMENDX : cfg->smendx[i])
+#define for_each_cfg_sme(cfg, fw, i, idx) \
+    for (i = 0; idx = cfg_smendx(cfg, fw, i), i < fw->num_ids; ++i)
 static bool using_legacy_binding, using_generic_binding;
@@ -1061,7 +1059,7 @@ static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
 static int arm_smmu_master_alloc_smes(struct device *dev)
 {
     struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-    struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
+    struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
     struct arm_smmu_device *smmu = cfg->smmu;
     struct arm_smmu_smr *smrs = smmu->smrs;
     struct iommu_group *group;
@@ -1069,7 +1067,7 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
     mutex_lock(&smmu->stream_map_mutex);
     /* Figure out a viable stream map entry allocation */
-    for_each_cfg_sme(fwspec, i, idx) {
+    for_each_cfg_sme(cfg, fwspec, i, idx) {
         u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
         u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
@@ -1100,7 +1098,7 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
     iommu_group_put(group);
     /* It worked! Now, poke the actual hardware */
-    for_each_cfg_sme(fwspec, i, idx) {
+    for_each_cfg_sme(cfg, fwspec, i, idx) {
         arm_smmu_write_sme(smmu, idx);
         smmu->s2crs[idx].group = group;
     }
@@ -1117,14 +1115,14 @@ out_err:
     return ret;
 }
-static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
+static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg,
+                                      struct iommu_fwspec *fwspec)
 {
-    struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
-    struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
+    struct arm_smmu_device *smmu = cfg->smmu;
     int i, idx;
     mutex_lock(&smmu->stream_map_mutex);
-    for_each_cfg_sme(fwspec, i, idx) {
+    for_each_cfg_sme(cfg, fwspec, i, idx) {
         if (arm_smmu_free_sme(smmu, idx))
             arm_smmu_write_sme(smmu, idx);
         cfg->smendx[i] = INVALID_SMENDX;
@@ -1133,6 +1131,7 @@ static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
 }
 static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
+                                      struct arm_smmu_master_cfg *cfg,
                                       struct iommu_fwspec *fwspec)
 {
     struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -1146,7 +1145,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
     else
         type = S2CR_TYPE_TRANS;
-    for_each_cfg_sme(fwspec, i, idx) {
+    for_each_cfg_sme(cfg, fwspec, i, idx) {
         if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
             continue;
@@ -1160,10 +1159,11 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
-    int ret;
+    struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
     struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+    struct arm_smmu_master_cfg *cfg;
     struct arm_smmu_device *smmu;
-    struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+    int ret;
     if (!fwspec || fwspec->ops != &arm_smmu_ops) {
         dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
@@ -1177,10 +1177,11 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
      * domains, just say no (but more politely than by dereferencing NULL).
      * This should be at least a WARN_ON once that's sorted.
      */
-    if (!fwspec->iommu_priv)
+    cfg = dev_iommu_priv_get(dev);
+    if (!cfg)
         return -ENODEV;
-    smmu = fwspec_smmu(fwspec);
+    smmu = cfg->smmu;
     ret = arm_smmu_rpm_get(smmu);
     if (ret < 0)
@@ -1204,7 +1205,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
     }
     /* Looks ok, so add the device to the domain */
-    ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
+    ret = arm_smmu_domain_add_master(smmu_domain, cfg, fwspec);
     /*
      * Setup an autosuspend delay to avoid bouncing runpm state.
@@ -1383,7 +1384,7 @@ struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
 static int arm_smmu_add_device(struct device *dev)
 {
-    struct arm_smmu_device *smmu;
+    struct arm_smmu_device *smmu = NULL;
     struct arm_smmu_master_cfg *cfg;
     struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
     int i, ret;
@@ -1429,7 +1430,7 @@ static int arm_smmu_add_device(struct device *dev)
         goto out_free;
     cfg->smmu = smmu;
-    fwspec->iommu_priv = cfg;
+    dev_iommu_priv_set(dev, cfg);
     while (i--)
         cfg->smendx[i] = INVALID_SMENDX;
@@ -1467,7 +1468,7 @@ static void arm_smmu_remove_device(struct device *dev)
     if (!fwspec || fwspec->ops != &arm_smmu_ops)
         return;
-    cfg = fwspec->iommu_priv;
+    cfg = dev_iommu_priv_get(dev);
     smmu = cfg->smmu;
     ret = arm_smmu_rpm_get(smmu);
@@ -1475,23 +1476,25 @@ static void arm_smmu_remove_device(struct device *dev)
         return;
     iommu_device_unlink(&smmu->iommu, dev);
-    arm_smmu_master_free_smes(fwspec);
+    arm_smmu_master_free_smes(cfg, fwspec);
     arm_smmu_rpm_put(smmu);
+    dev_iommu_priv_set(dev, NULL);
     iommu_group_remove_device(dev);
-    kfree(fwspec->iommu_priv);
+    kfree(cfg);
     iommu_fwspec_free(dev);
 }
 static struct iommu_group *arm_smmu_device_group(struct device *dev)
 {
+    struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
     struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-    struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
+    struct arm_smmu_device *smmu = cfg->smmu;
     struct iommu_group *group = NULL;
     int i, idx;
-    for_each_cfg_sme(fwspec, i, idx) {
+    for_each_cfg_sme(cfg, fwspec, i, idx) {
         if (group && smmu->s2crs[idx].group &&
             group != smmu->s2crs[idx].group)
             return ERR_PTR(-EINVAL);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 4be549478691..ef0a5246700e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4501,7 +4501,8 @@ static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
     struct dmar_atsr_unit *atsru;
     struct acpi_dmar_atsr *tmp;
-    list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
+    list_for_each_entry_rcu(atsru, &dmar_atsr_units, list,
+                            dmar_rcu_check()) {
         tmp = (struct acpi_dmar_atsr *)atsru->hdr;
         if (atsr->segment != tmp->segment)
             continue;
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index d7f2a5358900..2998418f0a38 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -531,7 +531,7 @@ struct page_req_dsc {
     u64 priv_data[2];
 };
-#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10)
+#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
 static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
 {
@@ -611,14 +611,15 @@ static irqreturn_t prq_event_thread(int irq, void *d)
      * any faults on kernel addresses. */
     if (!svm->mm)
         goto bad_req;
-    /* If the mm is already defunct, don't handle faults. */
-    if (!mmget_not_zero(svm->mm))
-        goto bad_req;
     /* If address is not canonical, return invalid response */
     if (!is_canonical_address(address))
         goto bad_req;
+    /* If the mm is already defunct, don't handle faults. */
+    if (!mmget_not_zero(svm->mm))
+        goto bad_req;
+
     down_read(&svm->mm->mmap_sem);
     vma = find_extend_vma(svm->mm, address);
     if (!vma || address < vma->vm_start)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 3e3528436e0b..2b471419e26c 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -152,9 +152,9 @@ void iommu_device_unregister(struct iommu_device *iommu)
 }
 EXPORT_SYMBOL_GPL(iommu_device_unregister);
-static struct iommu_param *iommu_get_dev_param(struct device *dev)
+static struct dev_iommu *dev_iommu_get(struct device *dev)
 {
-    struct iommu_param *param = dev->iommu_param;
+    struct dev_iommu *param = dev->iommu;
     if (param)
         return param;
@@ -164,14 +164,14 @@
     return NULL;
     mutex_init(&param->lock);
-    dev->iommu_param = param;
+    dev->iommu = param;
     return param;
 }
-static void iommu_free_dev_param(struct device *dev)
+static void dev_iommu_free(struct device *dev)
 {
-    kfree(dev->iommu_param);
-    dev->iommu_param = NULL;
+    kfree(dev->iommu);
+    dev->iommu = NULL;
 }
 int iommu_probe_device(struct device *dev)
@@ -183,7 +183,7 @@
     if (!ops)
         return -EINVAL;
-    if (!iommu_get_dev_param(dev))
+    if (!dev_iommu_get(dev))
         return -ENOMEM;
     if (!try_module_get(ops->owner)) {
@@ -200,7 +200,7 @@
 err_module_put:
     module_put(ops->owner);
 err_free_dev_param:
-    iommu_free_dev_param(dev);
+    dev_iommu_free(dev);
     return ret;
 }
@@ -211,9 +211,9 @@ void iommu_release_device(struct device *dev)
     if (dev->iommu_group)
         ops->remove_device(dev);
-    if (dev->iommu_param) {
+    if (dev->iommu) {
         module_put(ops->owner);
-        iommu_free_dev_param(dev);
+        dev_iommu_free(dev);
     }
 }
@@ -972,7 +972,7 @@ int iommu_register_device_fault_handler(struct device *dev,
                                         iommu_dev_fault_handler_t handler,
                                         void *data)
 {
-    struct iommu_param *param = dev->iommu_param;
+    struct dev_iommu *param = dev->iommu;
     int ret = 0;
     if (!param)
@@ -1015,7 +1015,7 @@ EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
 */
 int iommu_unregister_device_fault_handler(struct device *dev)
 {
-    struct iommu_param *param = dev->iommu_param;
+    struct dev_iommu *param = dev->iommu;
     int ret = 0;
     if (!param)
@@ -1055,7 +1055,7 @@ EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
 */
 int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
 {
-    struct iommu_param *param = dev->iommu_param;
+    struct dev_iommu *param = dev->iommu;
     struct iommu_fault_event *evt_pending = NULL;
     struct iommu_fault_param *fparam;
     int ret = 0;
@@ -1104,7 +1104,7 @@ int iommu_page_response(struct device *dev,
     int ret = -EINVAL;
     struct iommu_fault_event *evt;
     struct iommu_fault_page_request *prm;
-    struct iommu_param *param = dev->iommu_param;
+    struct dev_iommu *param = dev->iommu;
     struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
     if (!domain || !domain->ops->page_response)
@@ -2405,7 +2405,11 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
     if (fwspec)
         return ops == fwspec->ops ? 0 : -EINVAL;
-    fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
+    if (!dev_iommu_get(dev))
+        return -ENOMEM;
+
+    /* Preallocate for the overwhelmingly common case of 1 ID */
+    fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
     if (!fwspec)
         return -ENOMEM;
@@ -2432,15 +2436,15 @@ EXPORT_SYMBOL_GPL(iommu_fwspec_free);
 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
 {
     struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-    size_t size;
-    int i;
+    int i, new_num;
     if (!fwspec)
         return -EINVAL;
-    size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
-    if (size > sizeof(*fwspec)) {
-        fwspec = krealloc(fwspec, size, GFP_KERNEL);
+    new_num = fwspec->num_ids + num_ids;
+    if (new_num > 1) {
+        fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
+                          GFP_KERNEL);
         if (!fwspec)
             return -ENOMEM;
@@ -2450,7 +2454,7 @@ int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
     for (i = 0; i < num_ids; i++)
         fwspec->ids[fwspec->num_ids + i] = ids[i];
-    fwspec->num_ids += num_ids;
+    fwspec->num_ids = new_num;
     return 0;
 }
 EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index ecb3f9464dd5..310cf09feea3 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -89,9 +89,7 @@ static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
 static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
 {
-    struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-
-    return fwspec ? fwspec->iommu_priv : NULL;
+    return dev_iommu_priv_get(dev);
 }
 #define TLB_LOOP_TIMEOUT	100	/* 100us */
@@ -727,14 +725,13 @@ static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
 static int ipmmu_init_platform_device(struct device *dev,
                                       struct of_phandle_args *args)
 {
-    struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
     struct platform_device *ipmmu_pdev;
     ipmmu_pdev = of_find_device_by_node(args->np);
     if (!ipmmu_pdev)
         return -ENODEV;
-    fwspec->iommu_priv = platform_get_drvdata(ipmmu_pdev);
+    dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev));
     return 0;
 }
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 95945f467c03..5f4d6df59cf6 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -358,8 +358,8 @@ static void mtk_iommu_domain_free(struct iommu_domain *domain)
 static int mtk_iommu_attach_device(struct iommu_domain *domain,
                                    struct device *dev)
 {
+    struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
     struct mtk_iommu_domain *dom = to_mtk_domain(domain);
-    struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;
     if (!data)
         return -ENODEV;
@@ -378,7 +378,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
 static void mtk_iommu_detach_device(struct iommu_domain *domain,
                                     struct device *dev)
 {
-    struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;
+    struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
     if (!data)
         return;
@@ -450,7 +450,7 @@ static int mtk_iommu_add_device(struct device *dev)
     if (!fwspec || fwspec->ops != &mtk_iommu_ops)
         return -ENODEV; /* Not a iommu client device */
-    data = fwspec->iommu_priv;
+    data = dev_iommu_priv_get(dev);
     iommu_device_link(&data->iommu, dev);
     group = iommu_group_get_for_dev(dev);
@@ -469,7 +469,7 @@ static void mtk_iommu_remove_device(struct device *dev)
     if (!fwspec || fwspec->ops != &mtk_iommu_ops)
         return;
-    data = fwspec->iommu_priv;
+    data = dev_iommu_priv_get(dev);
     iommu_device_unlink(&data->iommu, dev);
     iommu_group_remove_device(dev);
@@ -496,7 +496,6 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev)
 static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 {
-    struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
     struct platform_device *m4updev;
     if (args->args_count != 1) {
@@ -505,13 +504,13 @@ static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
         return -EINVAL;
     }
-    if (!fwspec->iommu_priv) {
+    if (!dev_iommu_priv_get(dev)) {
         /* Get the m4u device */
         m4updev = of_find_device_by_node(args->np);
         if (WARN_ON(!m4updev))
             return -EINVAL;
-        fwspec->iommu_priv = platform_get_drvdata(m4updev);
+        dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
     }
     return iommu_fwspec_add_ids(dev, args->args, 1);
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index e93b94ecac45..a31be05601c9 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -263,8 +263,8 @@ static void mtk_iommu_domain_free(struct iommu_domain *domain)
 static int mtk_iommu_attach_device(struct iommu_domain *domain,
                                    struct device *dev)
 {
+    struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
     struct mtk_iommu_domain *dom = to_mtk_domain(domain);
-    struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;
     int ret;
     if (!data)
@@ -286,7 +286,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
 static void mtk_iommu_detach_device(struct iommu_domain *domain,
                                     struct device *dev)
 {
-    struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;
+    struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
     if (!data)
         return;
@@ -387,20 +387,20 @@ static int mtk_iommu_create_mapping(struct device *dev,
         return -EINVAL;
     }
-    if (!fwspec->iommu_priv) {
+    if (!dev_iommu_priv_get(dev)) {
         /* Get the m4u device */
         m4updev = of_find_device_by_node(args->np);
         if (WARN_ON(!m4updev))
             return -EINVAL;
-        fwspec->iommu_priv = platform_get_drvdata(m4updev);
+        dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
     }
     ret = iommu_fwspec_add_ids(dev, args->args, 1);
     if (ret)
         return ret;
-    data = fwspec->iommu_priv;
+    data = dev_iommu_priv_get(dev);
     m4udev = data->dev;
     mtk_mapping = m4udev->archdata.iommu;
     if (!mtk_mapping) {
@@ -459,7 +459,7 @@ static int mtk_iommu_add_device(struct device *dev)
     if (err)
         return err;
-    data = fwspec->iommu_priv;
+    data = dev_iommu_priv_get(dev);
     mtk_mapping = data->dev->archdata.iommu;
     err = arm_iommu_attach_device(dev, mtk_mapping);
     if (err) {
@@ -478,7 +478,7 @@ static void mtk_iommu_remove_device(struct device *dev)
     if (!fwspec || fwspec->ops != &mtk_iommu_ops)
         return;
-    data = fwspec->iommu_priv;
+    data = dev_iommu_priv_get(dev);
     iommu_device_unlink(&data->iommu, dev);
     iommu_group_remove_device(dev);
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index be551cc34be4..887fefcb03b4 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -167,7 +167,7 @@ static int omap2_iommu_enable(struct omap_iommu *obj)
 {
     u32 l, pa;
-    if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
+    if (!obj->iopgd || !IS_ALIGNED((unsigned long)obj->iopgd, SZ_16K))
         return -EINVAL;
     pa = virt_to_phys(obj->iopgd);
@@ -434,7 +434,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
     bytes = iopgsz_to_bytes(cr.cam & 3);
     if ((start <= da) && (da < start + bytes)) {
-        dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
+        dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
                 __func__, start, da, bytes);
         iotlb_load_cr(obj, &cr);
         iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
@@ -1352,11 +1352,11 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
     omap_pgsz = bytes_to_iopgsz(bytes);
     if (omap_pgsz < 0) {
-        dev_err(dev, "invalid size to map: %d\n", bytes);
+        dev_err(dev, "invalid size to map: %zu\n", bytes);
         return -EINVAL;
     }
-    dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%x\n", da, &pa, bytes);
+    dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);
     iotlb_init_entry(&e, da, pa, omap_pgsz);
@@ -1393,7 +1393,7 @@ static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
     size_t bytes = 0;
     int i;
-    dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size);
+    dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);
     iommu = omap_domain->iommus;
     for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
diff --git a/drivers/iommu/omap-iopgtable.h b/drivers/iommu/omap-iopgtable.h
index 1a4adb59a859..51d74002cc30 100644
--- a/drivers/iommu/omap-iopgtable.h
+++ b/drivers/iommu/omap-iopgtable.h
@@ -63,7 +63,8 @@
 *
 * va to pa translation
 */
-static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
+static inline phys_addr_t omap_iommu_translate(unsigned long d, dma_addr_t va,
+                                               dma_addr_t mask)
 {
     return (d & mask) | (va & (~mask));
 }
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index 4328da0b0a9f..0e2a96467767 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -48,7 +48,7 @@ struct qcom_iommu_dev {
     void __iomem	*local_base;
     u32		 sec_id;
    u8		 num_ctxs;
-    struct qcom_iommu_ctx	*ctxs[0];   /* indexed by asid-1 */
+    struct qcom_iommu_ctx	*ctxs[];   /* indexed by asid-1 */
 };
 struct qcom_iommu_ctx {
@@ -74,16 +74,19 @@ static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
 static const struct iommu_ops qcom_iommu_ops;
-static struct qcom_iommu_dev * to_iommu(struct iommu_fwspec *fwspec)
+static struct qcom_iommu_dev * to_iommu(struct device *dev)
 {
+    struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
     if (!fwspec || fwspec->ops != &qcom_iommu_ops)
         return NULL;
-    return fwspec->iommu_priv;
+
+    return dev_iommu_priv_get(dev);
 }
-static struct qcom_iommu_ctx * to_ctx(struct iommu_fwspec *fwspec, unsigned asid)
+static struct qcom_iommu_ctx * to_ctx(struct device *dev, unsigned asid)
 {
-    struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
+    struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
     if (!qcom_iommu)
         return NULL;
     return qcom_iommu->ctxs[asid - 1];
@@ -115,11 +118,14 @@ iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
 static void qcom_iommu_tlb_sync(void *cookie)
 {
-    struct iommu_fwspec *fwspec = cookie;
+    struct iommu_fwspec *fwspec;
+    struct device *dev = cookie;
     unsigned i;
+    fwspec = dev_iommu_fwspec_get(dev);
+
     for (i = 0; i < fwspec->num_ids; i++) {
-        struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+        struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
         unsigned int val, ret;
         iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);
@@ -133,11 +139,14 @@ static void qcom_iommu_tlb_sync(void *cookie)
 static void qcom_iommu_tlb_inv_context(void *cookie)
 {
-    struct iommu_fwspec *fwspec = cookie;
+    struct device *dev = cookie;
+    struct iommu_fwspec *fwspec;
     unsigned i;
+    fwspec = dev_iommu_fwspec_get(dev);
+
     for (i = 0; i < fwspec->num_ids; i++) {
-        struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+        struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
         iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
     }
@@ -147,13 +156,16 @@
 static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
                                             size_t granule, bool leaf, void *cookie)
 {
-    struct iommu_fwspec *fwspec = cookie;
+    struct device *dev = cookie;
+    struct iommu_fwspec *fwspec;
     unsigned i, reg;
     reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
+    fwspec = dev_iommu_fwspec_get(dev);
+
     for (i = 0; i < fwspec->num_ids; i++) {
-        struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+        struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
         size_t s = size;
         iova = (iova >> 12) << 12;
@@ -222,9 +234,10 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev)
 static int qcom_iommu_init_domain(struct iommu_domain *domain,
                                   struct qcom_iommu_dev *qcom_iommu,
-                                  struct iommu_fwspec *fwspec)
+                                  struct device *dev)
 {
     struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+    struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
     struct io_pgtable_ops *pgtbl_ops;
     struct io_pgtable_cfg pgtbl_cfg;
     int i, ret = 0;
@@ -243,7 +256,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
     };
     qcom_domain->iommu = qcom_iommu;
-    pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, fwspec);
+    pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, dev);
     if (!pgtbl_ops) {
         dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
         ret = -ENOMEM;
@@ -256,7 +269,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
     domain->geometry.force_aperture = true;
     for (i = 0; i < fwspec->num_ids; i++) {
-        struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+        struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
         if (!ctx->secure_init) {
             ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
@@ -363,8 +376,7 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain)
 static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
-    struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-    struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
+    struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
     struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
     int ret;
@@ -375,7 +387,7 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
     /* Ensure that the domain is finalized */
     pm_runtime_get_sync(qcom_iommu->dev);
-    ret = qcom_iommu_init_domain(domain, qcom_iommu, fwspec);
+    ret = qcom_iommu_init_domain(domain, qcom_iommu, dev);
     pm_runtime_put_sync(qcom_iommu->dev);
     if (ret < 0)
         return ret;
@@ -397,9 +409,9 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
 {
-    struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-    struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
     struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+    struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+    struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
     unsigned i;
     if (WARN_ON(!qcom_domain->iommu))
@@ -407,7 +419,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
     pm_runtime_get_sync(qcom_iommu->dev);
     for (i = 0; i < fwspec->num_ids; i++) {
-        struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+        struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
         /* Disable the context bank: */
         iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
@@ -514,7 +526,7 @@ static bool qcom_iommu_capable(enum iommu_cap cap)
 static int qcom_iommu_add_device(struct device *dev)
 {
-    struct qcom_iommu_dev *qcom_iommu = to_iommu(dev_iommu_fwspec_get(dev));
+    struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
     struct iommu_group *group;
     struct device_link *link;
@@ -545,7 +557,7 @@ static int qcom_iommu_add_device(struct device *dev)
 static void qcom_iommu_remove_device(struct device *dev)
 {
-    struct qcom_iommu_dev *qcom_iommu = to_iommu(dev_iommu_fwspec_get(dev));
+    struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
     if (!qcom_iommu)
         return;
@@ -557,7 +569,6 @@ static void qcom_iommu_remove_device(struct device *dev)
 static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 {
-    struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
     struct qcom_iommu_dev *qcom_iommu;
     struct platform_device *iommu_pdev;
     unsigned asid = args->args[0];
@@ -583,14 +594,14 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
         WARN_ON(asid > qcom_iommu->num_ctxs))
         return -EINVAL;
-    if (!fwspec->iommu_priv) {
-        fwspec->iommu_priv = qcom_iommu;
+    if (!dev_iommu_priv_get(dev)) {
+        dev_iommu_priv_set(dev, qcom_iommu);
     } else {
         /* make sure devices iommus dt node isn't referring to
          * multiple different iommu devices.  Multiple context
          * banks are ok, but multiple devices are not: */
-        if (WARN_ON(qcom_iommu != fwspec->iommu_priv))
+        if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev)))
             return -EINVAL;
     }
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 3fb7ba72507d..db6559e8336f 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -247,7 +247,7 @@ static int gart_iommu_add_device(struct device *dev)
 {
     struct iommu_group *group;
-    if (!dev->iommu_fwspec)
+    if (!dev_iommu_fwspec_get(dev))
         return -ENODEV;
     group = iommu_group_get_for_dev(dev);
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index cce329d71fba..d5cac4f46ca5 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -466,7 +466,7 @@ static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
     struct virtio_iommu_req_probe *probe;
     struct virtio_iommu_probe_property *prop;
     struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-    struct viommu_endpoint *vdev = fwspec->iommu_priv;
+    struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
     if (!fwspec->num_ids)
         return -EINVAL;
@@ -607,24 +607,36 @@ static struct iommu_domain *viommu_domain_alloc(unsigned type)
     return &vdomain->domain;
 }
-static int viommu_domain_finalise(struct viommu_dev *viommu,
+static int viommu_domain_finalise(struct viommu_endpoint *vdev,
                                   struct iommu_domain *domain)
 {
     int ret;
+    unsigned long viommu_page_size;
+    struct viommu_dev *viommu = vdev->viommu;
     struct viommu_domain *vdomain = to_viommu_domain(domain);
-    vdomain->viommu		= viommu;
-    vdomain->map_flags	= viommu->map_flags;
+    viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
+    if (viommu_page_size > PAGE_SIZE) {
+        dev_err(vdev->dev,
+                "granule 0x%lx larger than system page size 0x%lx\n",
+                viommu_page_size, PAGE_SIZE);
+        return -EINVAL;
+    }
+
+    ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
+                          viommu->last_domain, GFP_KERNEL);
+    if (ret < 0)
+        return ret;
+
+    vdomain->id		= (unsigned int)ret;
     domain->pgsize_bitmap	= viommu->pgsize_bitmap;
     domain->geometry	= viommu->geometry;
-    ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
-                          viommu->last_domain, GFP_KERNEL);
-    if (ret >= 0)
-        vdomain->id = (unsigned int)ret;
+    vdomain->map_flags	= viommu->map_flags;
+    vdomain->viommu		= viommu;
-    return ret > 0 ? 0 : ret;
+    return 0;
 }
 static void viommu_domain_free(struct iommu_domain *domain)
@@ -648,7 +660,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
     int ret = 0;
     struct virtio_iommu_req_attach req;
     struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-    struct viommu_endpoint *vdev = fwspec->iommu_priv;
+    struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
     struct viommu_domain *vdomain = to_viommu_domain(domain);
     mutex_lock(&vdomain->mutex);
@@ -657,7 +669,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
      * Properly initialize the domain now that we know which viommu
      * owns it.
      */
-    ret = viommu_domain_finalise(vdev->viommu, domain);
+    ret = viommu_domain_finalise(vdev, domain);
 } else if (vdomain->viommu != vdev->viommu) {
     dev_err(dev, "cannot attach to foreign vIOMMU\n");
     ret = -EXDEV;
@@ -807,8 +819,7 @@ static void viommu_iotlb_sync(struct iommu_domain *domain,
 static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
 {
     struct iommu_resv_region *entry, *new_entry, *msi = NULL;
-    struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-    struct viommu_endpoint *vdev = fwspec->iommu_priv;
+    struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
     int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
     list_for_each_entry(entry, &vdev->resv_regions, list) {
@@ -876,7 +887,7 @@ static int viommu_add_device(struct device *dev)
     vdev->dev = dev;
     vdev->viommu = viommu;
     INIT_LIST_HEAD(&vdev->resv_regions);
-    fwspec->iommu_priv = vdev;
+    dev_iommu_priv_set(dev, vdev);
     if (viommu->probe_size) {
         /* Get additional information for this endpoint */
@@ -920,7 +931,7 @@ static void viommu_remove_device(struct device *dev)
     if (!fwspec || fwspec->ops != &viommu_ops)
         return;
-    vdev = fwspec->iommu_priv;
+    vdev = dev_iommu_priv_get(dev);
     iommu_group_remove_device(dev);
     iommu_device_unlink(&vdev->viommu->iommu, dev);
@@ -1082,7 +1093,6 @@ static int viommu_probe(struct virtio_device *vdev)
 #ifdef CONFIG_PCI
     if (pci_bus_type.iommu_ops != &viommu_ops) {
-        pci_request_acs();
         ret = bus_set_iommu(&pci_bus_type, &viommu_ops);
         if (ret)
             goto err_unregister;
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 3ef0bb281e7c..390e92f2d8d1 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -366,6 +366,7 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
     return 0;
 }
+EXPORT_SYMBOL_GPL(pci_enable_pasid);
 /**
 * pci_disable_pasid - Disable the PASID capability
@@ -390,6 +391,7 @@ void pci_disable_pasid(struct pci_dev *pdev)
     pdev->pasid_enabled = 0;
 }
+EXPORT_SYMBOL_GPL(pci_disable_pasid);
 /**
 * pci_restore_pasid_state - Restore PASID capabilities
@@ -441,6 +443,7 @@ int pci_pasid_features(struct pci_dev *pdev)
     return supported;
 }
+EXPORT_SYMBOL_GPL(pci_pasid_features);
 #define PASID_NUMBER_SHIFT	8
 #define PASID_NUMBER_MASK	(0x1f << PASID_NUMBER_SHIFT)
@@ -469,4 +472,5 @@ int pci_max_pasids(struct pci_dev *pdev)
     return (1 << supported);
 }
+EXPORT_SYMBOL_GPL(pci_max_pasids);
 #endif /* CONFIG_PCI_PASID */
diff --git a/include/linux/device.h b/include/linux/device.h
index 1311f276f533..ac8e37cd716a 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -42,9 +42,8 @@ struct device_node;
 struct fwnode_handle;
 struct iommu_ops;
 struct iommu_group;
-struct iommu_fwspec;
 struct dev_pin_info;
-struct iommu_param;
+struct dev_iommu;
 /**
 * struct subsys_interface - interfaces to device functions
@@ -513,8 +512,7 @@ struct dev_links_info {
 *		gone away. This should be set by the allocator of the
 *		device (i.e. the bus driver that discovered the device).
 * @iommu_group: IOMMU group the device belongs to.
-* @iommu_fwspec: IOMMU-specific properties supplied by firmware.
-* @iommu_param: Per device generic IOMMU runtime data
+* @iommu:	Per device generic IOMMU runtime data
 *
 * @offline_disabled: If set, the device is permanently online.
 * @offline:	Set after successful invocation of bus type's .offline().
@@ -613,8 +611,7 @@ struct device {
     void	(*release)(struct device *dev);
     struct iommu_group	*iommu_group;
-    struct iommu_fwspec	*iommu_fwspec;
-    struct iommu_param	*iommu_param;
+    struct dev_iommu	*iommu;
     bool			offline_disabled:1;
     bool			offline:1;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index d1b5f4d98569..7ef8b0bda695 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -365,17 +365,20 @@ struct iommu_fault_param {
 };
 /**
- * struct iommu_param - collection of per-device IOMMU data
+ * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @fault_param: IOMMU detected device fault reporting data
+ * @fwspec:	 IOMMU fwspec data
+ * @priv:	 IOMMU Driver private data
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
- *	struct iommu_fwspec	*iommu_fwspec;
 */
-struct iommu_param {
+struct dev_iommu {
     struct mutex lock;
-    struct iommu_fault_param *fault_param;
+    struct iommu_fault_param	*fault_param;
+    struct iommu_fwspec		*fwspec;
+    void				*priv;
 };
 int iommu_device_register(struct iommu_device *iommu);
@@ -588,11 +591,10 @@ struct iommu_group *fsl_mc_device_group(struct device *dev);
 struct iommu_fwspec {
     const struct iommu_ops	*ops;
     struct fwnode_handle	*iommu_fwnode;
-    void			*iommu_priv;
     u32			flags;
     u32			num_pasid_bits;
     unsigned int		num_ids;
-    u32			ids[1];
+    u32			ids[];
 };
 /* ATS is supported */
@@ -614,13 +616,26 @@ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
 static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
 {
-    return dev->iommu_fwspec;
+    if (dev->iommu)
+        return dev->iommu->fwspec;
+    else
+        return NULL;
 }
 static inline void dev_iommu_fwspec_set(struct device *dev,
                                         struct iommu_fwspec *fwspec)
 {
-    dev->iommu_fwspec = fwspec;
+    dev->iommu->fwspec = fwspec;
+}
+
+static inline void *dev_iommu_priv_get(struct device *dev)
+{
+    return dev->iommu->priv;
+}
+
+static inline void dev_iommu_priv_set(struct device *dev, void *priv)
+{
+    dev->iommu->priv = priv;
 }
 int iommu_probe_device(struct device *dev);
@@ -1073,6 +1088,10 @@ static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
     return -ENODEV;
 }
+static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
+{
+    return NULL;
+}
 #endif /* CONFIG_IOMMU_API */
 #ifdef CONFIG_IOMMU_DEBUGFS
diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h
index 237e36a280cb..48e3c29223b5 100644
--- a/include/uapi/linux/virtio_iommu.h
+++ b/include/uapi/linux/virtio_iommu.h
@@ -18,24 +18,24 @@
 #define VIRTIO_IOMMU_F_MMIO	5
 struct virtio_iommu_range_64 {
-    __le64		start;
-    __le64		end;
+    __u64		start;
+    __u64		end;
 };
 struct virtio_iommu_range_32 {
-    __le32		start;
-    __le32		end;
+    __u32		start;
+    __u32		end;
 };
 struct virtio_iommu_config {
     /* Supported page sizes */
-    __le64		page_size_mask;
+    __u64		page_size_mask;
     /* Supported IOVA range */
     struct virtio_iommu_range_64	input_range;
     /* Max domain ID size */
     struct virtio_iommu_range_32	domain_range;
     /* Probe buffer size */
-    __le32		probe_size;
+    __u32		probe_size;
 };
 /* Request types */
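
Finally, the iommu_fwspec reshuffle above also converts the trailing ids[1] member into a C99 flexible array. A condensed sketch of the resulting allocation pattern, taken from the include/linux/iommu.h and drivers/iommu/iommu.c hunks:

    struct iommu_fwspec {
        const struct iommu_ops  *ops;
        struct fwnode_handle    *iommu_fwnode;
        u32                     flags;
        u32                     num_pasid_bits;
        unsigned int            num_ids;
        u32                     ids[];  /* was: u32 ids[1]; */
    };

    /* Preallocate for the overwhelmingly common case of one ID ... */
    fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);

    /* ... and grow with struct_size() only when a device adds more IDs. */
    fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num), GFP_KERNEL);

This replaces the old offsetof()-based size computation and sizes the allocation exactly, with no over-read of a phantom first element.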