Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/arm64/iort.c            |   2
-rw-r--r--  drivers/dma/pl330.c                  |   5
-rw-r--r--  drivers/iommu/Kconfig                |   3
-rw-r--r--  drivers/iommu/amd_iommu.c            |  72
-rw-r--r--  drivers/iommu/amd_iommu_init.c       |  11
-rw-r--r--  drivers/iommu/amd_iommu_types.h      |   4
-rw-r--r--  drivers/iommu/arm-smmu-v3.c          |  90
-rw-r--r--  drivers/iommu/arm-smmu.c             | 135
-rw-r--r--  drivers/iommu/dma-iommu.c            | 183
-rw-r--r--  drivers/iommu/dmar.c                 |  20
-rw-r--r--  drivers/iommu/exynos-iommu.c         |  55
-rw-r--r--  drivers/iommu/intel-iommu.c          | 116
-rw-r--r--  drivers/iommu/io-pgtable-arm-v7s.c   |   6
-rw-r--r--  drivers/iommu/io-pgtable-arm.c       |   5
-rw-r--r--  drivers/iommu/iommu-sysfs.c          |  61
-rw-r--r--  drivers/iommu/iommu.c                | 285
-rw-r--r--  drivers/iommu/iova.c                 |  23
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c           |   2
-rw-r--r--  drivers/iommu/msm_iommu.c            |  73
-rw-r--r--  drivers/iommu/msm_iommu.h            |   3
-rw-r--r--  drivers/iommu/mtk_iommu.c            |  27
-rw-r--r--  drivers/iommu/mtk_iommu.h            |   2
-rw-r--r--  drivers/iommu/of_iommu.c             |   4
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c     |   1
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c      |  40
25 files changed, 930 insertions, 298 deletions
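The hunks below follow two recurring patterns: drivers are converted from the old iommu_device_create()/struct device sysfs handle to the common struct iommu_device registration helpers, and the get_dm_regions/put_dm_regions callbacks are replaced by the generic reserved-region interface. As an illustrative sketch only (not part of the patch), the following shows what each converted driver ends up doing, using a hypothetical "foo" driver and only helpers introduced or touched in this series (iommu_device_sysfs_add(), iommu_device_set_ops(), iommu_device_set_fwnode(), iommu_device_register(), iommu_alloc_resv_region()); the real ops table and error unwinding are omitted.

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/list.h>

/* Hypothetical per-instance driver data; only the iommu handle matters here. */
struct foo_iommu {
	struct device		*dev;
	struct iommu_device	iommu;	/* handle for the IOMMU core code */
};

static int foo_iommu_register(struct foo_iommu *foo,
			      const struct iommu_ops *ops)
{
	int ret;

	/* Create the /sys/class/iommu/ entry for this instance */
	ret = iommu_device_sysfs_add(&foo->iommu, foo->dev, NULL,
				     "foo-iommu.%s", dev_name(foo->dev));
	if (ret)
		return ret;

	/* Hand the ops and firmware node to the IOMMU core */
	iommu_device_set_ops(&foo->iommu, ops);
	iommu_device_set_fwnode(&foo->iommu, foo->dev->fwnode);

	/* Add the instance to the core's global list of IOMMUs */
	return iommu_device_register(&foo->iommu);
}

/*
 * Reserved-region callback in the style the SMMU drivers gain below:
 * report a software-managed MSI window so the core can expose it via
 * the group's reserved_regions file and keep DMA mappings out of it.
 * The 0x8000000/0x100000 values mirror MSI_IOVA_BASE/MSI_IOVA_LENGTH
 * from the patch.
 */
static void foo_iommu_get_resv_regions(struct device *dev,
				       struct list_head *head)
{
	struct iommu_resv_region *region;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	region = iommu_alloc_resv_region(0x8000000, 0x100000,
					 prot, IOMMU_RESV_MSI);
	if (!region)
		return;

	list_add_tail(&region->list, head);
}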
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index e0d2e6e6e40c..3752521c62ab 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -536,7 +536,7 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev, if (!iort_fwnode) return NULL; - ops = iommu_get_instance(iort_fwnode); + ops = iommu_ops_from_fwnode(iort_fwnode); if (!ops) return NULL; diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 7539f73df9e0..f37f4978dabb 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -1859,9 +1859,10 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330) * Alloc MicroCode buffer for 'chans' Channel threads. * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN) */ - pl330->mcode_cpu = dma_alloc_coherent(pl330->ddma.dev, + pl330->mcode_cpu = dma_alloc_attrs(pl330->ddma.dev, chans * pl330->mcbufsz, - &pl330->mcode_bus, GFP_KERNEL); + &pl330->mcode_bus, GFP_KERNEL, + DMA_ATTR_PRIVILEGED); if (!pl330->mcode_cpu) { dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n", __func__, __LINE__); diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 8ee54d71c7eb..37e204f3d9be 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -352,9 +352,6 @@ config MTK_IOMMU_V1 select IOMMU_API select MEMORY select MTK_SMI - select COMMON_CLK_MT2701_MMSYS - select COMMON_CLK_MT2701_IMGSYS - select COMMON_CLK_MT2701_VDECSYS help Support for the M4U on certain Mediatek SoCs. M4U generation 1 HW is Multimedia Memory Managememt Unit. This option enables remapping of diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 3ef0f42984f2..1b5b8c5361c5 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -112,7 +112,7 @@ static struct timer_list queue_timer; * Domain for untranslated devices - only allocated * if iommu=pt passed on kernel cmd line. 
*/ -static const struct iommu_ops amd_iommu_ops; +const struct iommu_ops amd_iommu_ops; static ATOMIC_NOTIFIER_HEAD(ppr_notifier); int amd_iommu_max_glx_val = -1; @@ -445,6 +445,7 @@ static void init_iommu_group(struct device *dev) static int iommu_init_device(struct device *dev) { struct iommu_dev_data *dev_data; + struct amd_iommu *iommu; int devid; if (dev->archdata.iommu) @@ -454,6 +455,8 @@ static int iommu_init_device(struct device *dev) if (devid < 0) return devid; + iommu = amd_iommu_rlookup_table[devid]; + dev_data = find_dev_data(devid); if (!dev_data) return -ENOMEM; @@ -469,8 +472,7 @@ static int iommu_init_device(struct device *dev) dev->archdata.iommu = dev_data; - iommu_device_link(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev, - dev); + iommu_device_link(&iommu->iommu, dev); return 0; } @@ -495,13 +497,16 @@ static void iommu_ignore_device(struct device *dev) static void iommu_uninit_device(struct device *dev) { - int devid; struct iommu_dev_data *dev_data; + struct amd_iommu *iommu; + int devid; devid = get_device_id(dev); if (devid < 0) return; + iommu = amd_iommu_rlookup_table[devid]; + dev_data = search_dev_data(devid); if (!dev_data) return; @@ -509,8 +514,7 @@ static void iommu_uninit_device(struct device *dev) if (dev_data->domain) detach_device(dev); - iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev, - dev); + iommu_device_unlink(&iommu->iommu, dev); iommu_group_remove_device(dev); @@ -3161,9 +3165,10 @@ static bool amd_iommu_capable(enum iommu_cap cap) return false; } -static void amd_iommu_get_dm_regions(struct device *dev, - struct list_head *head) +static void amd_iommu_get_resv_regions(struct device *dev, + struct list_head *head) { + struct iommu_resv_region *region; struct unity_map_entry *entry; int devid; @@ -3172,41 +3177,56 @@ static void amd_iommu_get_dm_regions(struct device *dev, return; list_for_each_entry(entry, &amd_iommu_unity_map, list) { - struct iommu_dm_region *region; + size_t length; + int prot = 0; if (devid < entry->devid_start || devid > entry->devid_end) continue; - region = kzalloc(sizeof(*region), GFP_KERNEL); + length = entry->address_end - entry->address_start; + if (entry->prot & IOMMU_PROT_IR) + prot |= IOMMU_READ; + if (entry->prot & IOMMU_PROT_IW) + prot |= IOMMU_WRITE; + + region = iommu_alloc_resv_region(entry->address_start, + length, prot, + IOMMU_RESV_DIRECT); if (!region) { pr_err("Out of memory allocating dm-regions for %s\n", dev_name(dev)); return; } - - region->start = entry->address_start; - region->length = entry->address_end - entry->address_start; - if (entry->prot & IOMMU_PROT_IR) - region->prot |= IOMMU_READ; - if (entry->prot & IOMMU_PROT_IW) - region->prot |= IOMMU_WRITE; - list_add_tail(®ion->list, head); } + + region = iommu_alloc_resv_region(MSI_RANGE_START, + MSI_RANGE_END - MSI_RANGE_START + 1, + 0, IOMMU_RESV_RESERVED); + if (!region) + return; + list_add_tail(®ion->list, head); + + region = iommu_alloc_resv_region(HT_RANGE_START, + HT_RANGE_END - HT_RANGE_START + 1, + 0, IOMMU_RESV_RESERVED); + if (!region) + return; + list_add_tail(®ion->list, head); } -static void amd_iommu_put_dm_regions(struct device *dev, +static void amd_iommu_put_resv_regions(struct device *dev, struct list_head *head) { - struct iommu_dm_region *entry, *next; + struct iommu_resv_region *entry, *next; list_for_each_entry_safe(entry, next, head, list) kfree(entry); } -static void amd_iommu_apply_dm_region(struct device *dev, +static void amd_iommu_apply_resv_region(struct device *dev, struct iommu_domain 
*domain, - struct iommu_dm_region *region) + struct iommu_resv_region *region) { struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain)); unsigned long start, end; @@ -3217,7 +3237,7 @@ static void amd_iommu_apply_dm_region(struct device *dev, WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL); } -static const struct iommu_ops amd_iommu_ops = { +const struct iommu_ops amd_iommu_ops = { .capable = amd_iommu_capable, .domain_alloc = amd_iommu_domain_alloc, .domain_free = amd_iommu_domain_free, @@ -3230,9 +3250,9 @@ static const struct iommu_ops amd_iommu_ops = { .add_device = amd_iommu_add_device, .remove_device = amd_iommu_remove_device, .device_group = amd_iommu_device_group, - .get_dm_regions = amd_iommu_get_dm_regions, - .put_dm_regions = amd_iommu_put_dm_regions, - .apply_dm_region = amd_iommu_apply_dm_region, + .get_resv_regions = amd_iommu_get_resv_regions, + .put_resv_regions = amd_iommu_put_resv_regions, + .apply_resv_region = amd_iommu_apply_resv_region, .pgsize_bitmap = AMD_IOMMU_PGSIZES, }; diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 6799cf9713f7..04cdac7ab3e3 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -94,6 +94,8 @@ * out of it. */ +extern const struct iommu_ops amd_iommu_ops; + /* * structure describing one IOMMU in the ACPI table. Typically followed by one * or more ivhd_entrys. @@ -1635,9 +1637,10 @@ static int iommu_init_pci(struct amd_iommu *iommu) amd_iommu_erratum_746_workaround(iommu); amd_iommu_ats_write_check_workaround(iommu); - iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu, - amd_iommu_groups, "ivhd%d", - iommu->index); + iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev, + amd_iommu_groups, "ivhd%d", iommu->index); + iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops); + iommu_device_register(&iommu->iommu); return pci_enable_device(iommu->dev); } @@ -2230,7 +2233,7 @@ static int __init early_amd_iommu_init(void) */ ret = check_ivrs_checksum(ivrs_base); if (ret) - return ret; + goto out; amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base); DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type); diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 0d91785ebdc3..af00f381a7b1 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -535,8 +535,8 @@ struct amd_iommu { /* if one, we need to send a completion wait command */ bool need_sync; - /* IOMMU sysfs device */ - struct device *iommu_dev; + /* Handle for IOMMU core code */ + struct iommu_device iommu; /* * We can't rely on the BIOS to restore all values on reinit, so we diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 4d6ec444a9d6..5806a6acc94e 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -269,9 +269,6 @@ #define STRTAB_STE_1_SHCFG_INCOMING 1UL #define STRTAB_STE_1_SHCFG_SHIFT 44 -#define STRTAB_STE_1_PRIVCFG_UNPRIV 2UL -#define STRTAB_STE_1_PRIVCFG_SHIFT 48 - #define STRTAB_STE_2_S2VMID_SHIFT 0 #define STRTAB_STE_2_S2VMID_MASK 0xffffUL #define STRTAB_STE_2_VTCR_SHIFT 32 @@ -412,6 +409,9 @@ /* High-level queue structures */ #define ARM_SMMU_POLL_TIMEOUT_US 100 +#define MSI_IOVA_BASE 0x8000000 +#define MSI_IOVA_LENGTH 0x100000 + static bool disable_bypass; module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO); MODULE_PARM_DESC(disable_bypass, @@ -616,6 +616,9 @@ struct arm_smmu_device { unsigned int sid_bits; struct 
arm_smmu_strtab_cfg strtab_cfg; + + /* IOMMU core code handle */ + struct iommu_device iommu; }; /* SMMU private data for each master */ @@ -1042,13 +1045,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, } } - /* Nuke the existing Config, as we're going to rewrite it */ - val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT); - - if (ste->valid) - val |= STRTAB_STE_0_V; - else - val &= ~STRTAB_STE_0_V; + /* Nuke the existing STE_0 value, as we're going to rewrite it */ + val = ste->valid ? STRTAB_STE_0_V : 0; if (ste->bypass) { val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT @@ -1073,9 +1071,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, #ifdef CONFIG_PCI_ATS STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT | #endif - STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT | - STRTAB_STE_1_PRIVCFG_UNPRIV << - STRTAB_STE_1_PRIVCFG_SHIFT); + STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT); if (smmu->features & ARM_SMMU_FEAT_STALLS) dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD); @@ -1083,7 +1079,6 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK << STRTAB_STE_0_S1CTXPTR_SHIFT) | STRTAB_STE_0_CFG_S1_TRANS; - } if (ste->s2_cfg) { @@ -1372,8 +1367,6 @@ static bool arm_smmu_capable(enum iommu_cap cap) switch (cap) { case IOMMU_CAP_CACHE_COHERENCY: return true; - case IOMMU_CAP_INTR_REMAP: - return true; /* MSIs are just memory writes */ case IOMMU_CAP_NOEXEC: return true; default: @@ -1795,8 +1788,10 @@ static int arm_smmu_add_device(struct device *dev) } group = iommu_group_get_for_dev(dev); - if (!IS_ERR(group)) + if (!IS_ERR(group)) { iommu_group_put(group); + iommu_device_link(&smmu->iommu, dev); + } return PTR_ERR_OR_ZERO(group); } @@ -1805,14 +1800,17 @@ static void arm_smmu_remove_device(struct device *dev) { struct iommu_fwspec *fwspec = dev->iommu_fwspec; struct arm_smmu_master_data *master; + struct arm_smmu_device *smmu; if (!fwspec || fwspec->ops != &arm_smmu_ops) return; master = fwspec->iommu_priv; + smmu = master->smmu; if (master && master->ste.valid) arm_smmu_detach_dev(dev); iommu_group_remove_device(dev); + iommu_device_unlink(&smmu->iommu, dev); kfree(master); iommu_fwspec_free(dev); } @@ -1883,6 +1881,29 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args) return iommu_fwspec_add_ids(dev, args->args, 1); } +static void arm_smmu_get_resv_regions(struct device *dev, + struct list_head *head) +{ + struct iommu_resv_region *region; + int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; + + region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, + prot, IOMMU_RESV_MSI); + if (!region) + return; + + list_add_tail(®ion->list, head); +} + +static void arm_smmu_put_resv_regions(struct device *dev, + struct list_head *head) +{ + struct iommu_resv_region *entry, *next; + + list_for_each_entry_safe(entry, next, head, list) + kfree(entry); +} + static struct iommu_ops arm_smmu_ops = { .capable = arm_smmu_capable, .domain_alloc = arm_smmu_domain_alloc, @@ -1898,6 +1919,8 @@ static struct iommu_ops arm_smmu_ops = { .domain_get_attr = arm_smmu_domain_get_attr, .domain_set_attr = arm_smmu_domain_set_attr, .of_xlate = arm_smmu_of_xlate, + .get_resv_regions = arm_smmu_get_resv_regions, + .put_resv_regions = arm_smmu_put_resv_regions, .pgsize_bitmap = -1UL, /* Restricted during device attach */ }; @@ -1983,17 +2006,9 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) u32 size, 
l1size; struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; - /* - * If we can resolve everything with a single L2 table, then we - * just need a single L1 descriptor. Otherwise, calculate the L1 - * size, capped to the SIDSIZE. - */ - if (smmu->sid_bits < STRTAB_SPLIT) { - size = 0; - } else { - size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3); - size = min(size, smmu->sid_bits - STRTAB_SPLIT); - } + /* Calculate the L1 size, capped to the SIDSIZE. */ + size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3); + size = min(size, smmu->sid_bits - STRTAB_SPLIT); cfg->num_l1_ents = 1 << size; size += STRTAB_SPLIT; @@ -2504,6 +2519,13 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK; smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK; + /* + * If the SMMU supports fewer bits than would fill a single L2 stream + * table, use a linear table instead. + */ + if (smmu->sid_bits <= STRTAB_SPLIT) + smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB; + /* IDR5 */ reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5); @@ -2613,6 +2635,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev) { int irq, ret; struct resource *res; + resource_size_t ioaddr; struct arm_smmu_device *smmu; struct device *dev = &pdev->dev; bool bypass; @@ -2630,6 +2653,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev) dev_err(dev, "MMIO region too small (%pr)\n", res); return -EINVAL; } + ioaddr = res->start; smmu->base = devm_ioremap_resource(dev, res); if (IS_ERR(smmu->base)) @@ -2682,7 +2706,15 @@ static int arm_smmu_device_probe(struct platform_device *pdev) return ret; /* And we're up. Go go go! */ - iommu_register_instance(dev->fwnode, &arm_smmu_ops); + ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, + "smmu3.%pa", &ioaddr); + if (ret) + return ret; + + iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops); + iommu_device_set_fwnode(&smmu->iommu, dev->fwnode); + + ret = iommu_device_register(&smmu->iommu); #ifdef CONFIG_PCI if (pci_bus_type.iommu_ops != &arm_smmu_ops) { diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index a60cded8a6ed..abf6496843a6 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -24,6 +24,7 @@ * - v7/v8 long-descriptor format * - Non-secure access to the SMMU * - Context fault reporting + * - Extended Stream ID (16 bit) */ #define pr_fmt(fmt) "arm-smmu: " fmt @@ -87,6 +88,7 @@ #define sCR0_CLIENTPD (1 << 0) #define sCR0_GFRE (1 << 1) #define sCR0_GFIE (1 << 2) +#define sCR0_EXIDENABLE (1 << 3) #define sCR0_GCFGFRE (1 << 4) #define sCR0_GCFGFIE (1 << 5) #define sCR0_USFCFG (1 << 10) @@ -126,6 +128,7 @@ #define ID0_NUMIRPT_MASK 0xff #define ID0_NUMSIDB_SHIFT 9 #define ID0_NUMSIDB_MASK 0xf +#define ID0_EXIDS (1 << 8) #define ID0_NUMSMRG_SHIFT 0 #define ID0_NUMSMRG_MASK 0xff @@ -169,6 +172,7 @@ #define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2)) #define S2CR_CBNDX_SHIFT 0 #define S2CR_CBNDX_MASK 0xff +#define S2CR_EXIDVALID (1 << 10) #define S2CR_TYPE_SHIFT 16 #define S2CR_TYPE_MASK 0x3 enum arm_smmu_s2cr_type { @@ -260,6 +264,7 @@ enum arm_smmu_s2cr_privcfg { #define TTBCR2_SEP_SHIFT 15 #define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT) +#define TTBCR2_AS (1 << 4) #define TTBRn_ASID_SHIFT 48 @@ -281,6 +286,9 @@ enum arm_smmu_s2cr_privcfg { #define FSYNR0_WNR (1 << 4) +#define MSI_IOVA_BASE 0x8000000 +#define MSI_IOVA_LENGTH 0x100000 + static int force_stage; module_param(force_stage, int, S_IRUGO); MODULE_PARM_DESC(force_stage, @@ -351,6 
+359,7 @@ struct arm_smmu_device { #define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9) #define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10) #define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11) +#define ARM_SMMU_FEAT_EXIDS (1 << 12) u32 features; #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0) @@ -380,6 +389,9 @@ struct arm_smmu_device { unsigned int *irqs; u32 cavium_id_base; /* Specific to Cavium */ + + /* IOMMU core code handle */ + struct iommu_device iommu; }; enum arm_smmu_context_fmt { @@ -778,6 +790,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr; reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; reg2 |= TTBCR2_SEP_UPSTREAM; + if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) + reg2 |= TTBCR2_AS; } if (smmu->version > ARM_SMMU_V1) writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2); @@ -1048,7 +1062,7 @@ static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx) struct arm_smmu_smr *smr = smmu->smrs + idx; u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT; - if (smr->valid) + if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid) reg |= SMR_VALID; writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx)); } @@ -1060,6 +1074,9 @@ static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx) (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT | (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT; + if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs && + smmu->smrs[idx].valid) + reg |= S2CR_EXIDVALID; writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx)); } @@ -1070,6 +1087,34 @@ static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx) arm_smmu_write_smr(smmu, idx); } +/* + * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function + * should be called after sCR0 is written. + */ +static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu) +{ + void __iomem *gr0_base = ARM_SMMU_GR0(smmu); + u32 smr; + + if (!smmu->smrs) + return; + + /* + * SMR.ID bits may not be preserved if the corresponding MASK + * bits are set, so check each one separately. We can reject + * masters later if they try to claim IDs outside these masks. + */ + smr = smmu->streamid_mask << SMR_ID_SHIFT; + writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); + smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); + smmu->streamid_mask = smr >> SMR_ID_SHIFT; + + smr = smmu->streamid_mask << SMR_MASK_SHIFT; + writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); + smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); + smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT; +} + static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask) { struct arm_smmu_smr *smrs = smmu->smrs; @@ -1214,7 +1259,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, continue; s2cr[idx].type = type; - s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV; + s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT; s2cr[idx].cbndx = cbndx; arm_smmu_write_s2cr(smmu, idx); } @@ -1371,8 +1416,6 @@ static bool arm_smmu_capable(enum iommu_cap cap) * requests. 
*/ return true; - case IOMMU_CAP_INTR_REMAP: - return true; /* MSIs are just memory writes */ case IOMMU_CAP_NOEXEC: return true; default: @@ -1444,6 +1487,8 @@ static int arm_smmu_add_device(struct device *dev) if (ret) goto out_free; + iommu_device_link(&smmu->iommu, dev); + return 0; out_free: @@ -1456,10 +1501,17 @@ out_free: static void arm_smmu_remove_device(struct device *dev) { struct iommu_fwspec *fwspec = dev->iommu_fwspec; + struct arm_smmu_master_cfg *cfg; + struct arm_smmu_device *smmu; + if (!fwspec || fwspec->ops != &arm_smmu_ops) return; + cfg = fwspec->iommu_priv; + smmu = cfg->smmu; + + iommu_device_unlink(&smmu->iommu, dev); arm_smmu_master_free_smes(fwspec); iommu_group_remove_device(dev); kfree(fwspec->iommu_priv); @@ -1549,6 +1601,29 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args) return iommu_fwspec_add_ids(dev, &fwid, 1); } +static void arm_smmu_get_resv_regions(struct device *dev, + struct list_head *head) +{ + struct iommu_resv_region *region; + int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; + + region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, + prot, IOMMU_RESV_MSI); + if (!region) + return; + + list_add_tail(®ion->list, head); +} + +static void arm_smmu_put_resv_regions(struct device *dev, + struct list_head *head) +{ + struct iommu_resv_region *entry, *next; + + list_for_each_entry_safe(entry, next, head, list) + kfree(entry); +} + static struct iommu_ops arm_smmu_ops = { .capable = arm_smmu_capable, .domain_alloc = arm_smmu_domain_alloc, @@ -1564,6 +1639,8 @@ static struct iommu_ops arm_smmu_ops = { .domain_get_attr = arm_smmu_domain_get_attr, .domain_set_attr = arm_smmu_domain_set_attr, .of_xlate = arm_smmu_of_xlate, + .get_resv_regions = arm_smmu_get_resv_regions, + .put_resv_regions = arm_smmu_put_resv_regions, .pgsize_bitmap = -1UL, /* Restricted during device attach */ }; @@ -1648,6 +1725,9 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) if (smmu->features & ARM_SMMU_FEAT_VMID16) reg |= sCR0_VMID16EN; + if (smmu->features & ARM_SMMU_FEAT_EXIDS) + reg |= sCR0_EXIDENABLE; + /* Push the button */ __arm_smmu_tlb_sync(smmu); writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); @@ -1735,11 +1815,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) "\t(IDR0.CTTW overridden by FW configuration)\n"); /* Max. number of entries we have for stream matching/indexing */ - size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK); + if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) { + smmu->features |= ARM_SMMU_FEAT_EXIDS; + size = 1 << 16; + } else { + size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK); + } smmu->streamid_mask = size - 1; if (id & ID0_SMS) { - u32 smr; - smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH; size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK; if (size == 0) { @@ -1748,21 +1831,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) return -ENODEV; } - /* - * SMR.ID bits may not be preserved if the corresponding MASK - * bits are set, so check each one separately. We can reject - * masters later if they try to claim IDs outside these masks. 
- */ - smr = smmu->streamid_mask << SMR_ID_SHIFT; - writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); - smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); - smmu->streamid_mask = smr >> SMR_ID_SHIFT; - - smr = smmu->streamid_mask << SMR_MASK_SHIFT; - writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0)); - smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0)); - smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT; - /* Zero-initialised to mark as invalid */ smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs), GFP_KERNEL); @@ -1770,8 +1838,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) return -ENOMEM; dev_notice(smmu->dev, - "\tstream matching with %lu register groups, mask 0x%x", - size, smmu->smr_mask_mask); + "\tstream matching with %lu register groups", size); } /* s2cr->type == 0 means translation, so initialise explicitly */ smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs), @@ -2011,6 +2078,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev, static int arm_smmu_device_probe(struct platform_device *pdev) { struct resource *res; + resource_size_t ioaddr; struct arm_smmu_device *smmu; struct device *dev = &pdev->dev; int num_irqs, i, err; @@ -2031,6 +2099,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev) return err; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ioaddr = res->start; smmu->base = devm_ioremap_resource(dev, res); if (IS_ERR(smmu->base)) return PTR_ERR(smmu->base); @@ -2091,9 +2160,25 @@ static int arm_smmu_device_probe(struct platform_device *pdev) } } - iommu_register_instance(dev->fwnode, &arm_smmu_ops); + err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL, + "smmu.%pa", &ioaddr); + if (err) { + dev_err(dev, "Failed to register iommu in sysfs\n"); + return err; + } + + iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops); + iommu_device_set_fwnode(&smmu->iommu, dev->fwnode); + + err = iommu_device_register(&smmu->iommu); + if (err) { + dev_err(dev, "Failed to register iommu\n"); + return err; + } + platform_set_drvdata(pdev, smmu); arm_smmu_device_reset(smmu); + arm_smmu_test_smr_masks(smmu); /* Oh, for a proper bus abstraction */ if (!iommu_present(&platform_bus_type)) diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 2db0d641cf45..48d36ce59efb 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -37,15 +37,50 @@ struct iommu_dma_msi_page { phys_addr_t phys; }; +enum iommu_dma_cookie_type { + IOMMU_DMA_IOVA_COOKIE, + IOMMU_DMA_MSI_COOKIE, +}; + struct iommu_dma_cookie { - struct iova_domain iovad; - struct list_head msi_page_list; - spinlock_t msi_lock; + enum iommu_dma_cookie_type type; + union { + /* Full allocator for IOMMU_DMA_IOVA_COOKIE */ + struct iova_domain iovad; + /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */ + dma_addr_t msi_iova; + }; + struct list_head msi_page_list; + spinlock_t msi_lock; }; +static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie) +{ + if (cookie->type == IOMMU_DMA_IOVA_COOKIE) + return cookie->iovad.granule; + return PAGE_SIZE; +} + static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain) { - return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad; + struct iommu_dma_cookie *cookie = domain->iova_cookie; + + if (cookie->type == IOMMU_DMA_IOVA_COOKIE) + return &cookie->iovad; + return NULL; +} + +static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type) +{ + struct iommu_dma_cookie *cookie; + + cookie = 
kzalloc(sizeof(*cookie), GFP_KERNEL); + if (cookie) { + spin_lock_init(&cookie->msi_lock); + INIT_LIST_HEAD(&cookie->msi_page_list); + cookie->type = type; + } + return cookie; } int iommu_dma_init(void) @@ -62,25 +97,53 @@ int iommu_dma_init(void) */ int iommu_get_dma_cookie(struct iommu_domain *domain) { + if (domain->iova_cookie) + return -EEXIST; + + domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE); + if (!domain->iova_cookie) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL(iommu_get_dma_cookie); + +/** + * iommu_get_msi_cookie - Acquire just MSI remapping resources + * @domain: IOMMU domain to prepare + * @base: Start address of IOVA region for MSI mappings + * + * Users who manage their own IOVA allocation and do not want DMA API support, + * but would still like to take advantage of automatic MSI remapping, can use + * this to initialise their own domain appropriately. Users should reserve a + * contiguous IOVA region, starting at @base, large enough to accommodate the + * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address + * used by the devices attached to @domain. + */ +int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base) +{ struct iommu_dma_cookie *cookie; + if (domain->type != IOMMU_DOMAIN_UNMANAGED) + return -EINVAL; + if (domain->iova_cookie) return -EEXIST; - cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); + cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE); if (!cookie) return -ENOMEM; - spin_lock_init(&cookie->msi_lock); - INIT_LIST_HEAD(&cookie->msi_page_list); + cookie->msi_iova = base; domain->iova_cookie = cookie; return 0; } -EXPORT_SYMBOL(iommu_get_dma_cookie); +EXPORT_SYMBOL(iommu_get_msi_cookie); /** * iommu_put_dma_cookie - Release a domain's DMA mapping resources - * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() + * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or + * iommu_get_msi_cookie() * * IOMMU drivers should normally call this from their domain_free callback. */ @@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain) if (!cookie) return; - if (cookie->iovad.granule) + if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) put_iova_domain(&cookie->iovad); list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) { @@ -137,11 +200,13 @@ static void iova_reserve_pci_windows(struct pci_dev *dev, int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size, struct device *dev) { - struct iova_domain *iovad = cookie_iovad(domain); + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; unsigned long order, base_pfn, end_pfn; + bool pci = dev && dev_is_pci(dev); - if (!iovad) - return -ENODEV; + if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) + return -EINVAL; /* Use the smallest supported page size for IOVA granularity */ order = __ffs(domain->pgsize_bitmap); @@ -161,19 +226,31 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, end_pfn = min_t(unsigned long, end_pfn, domain->geometry.aperture_end >> order); } + /* + * PCI devices may have larger DMA masks, but still prefer allocating + * within a 32-bit mask to avoid DAC addressing. Such limitations don't + * apply to the typical platform device, so for those we may as well + * leave the cache limit at the top of their range to save an rb_last() + * traversal on every allocation. 
+ */ + if (pci) + end_pfn &= DMA_BIT_MASK(32) >> order; - /* All we can safely do with an existing domain is enlarge it */ + /* start_pfn is always nonzero for an already-initialised domain */ if (iovad->start_pfn) { if (1UL << order != iovad->granule || - base_pfn != iovad->start_pfn || - end_pfn < iovad->dma_32bit_pfn) { + base_pfn != iovad->start_pfn) { pr_warn("Incompatible range for DMA domain\n"); return -EFAULT; } - iovad->dma_32bit_pfn = end_pfn; + /* + * If we have devices with different DMA masks, move the free + * area cache limit down for the benefit of the smaller one. + */ + iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn); } else { init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn); - if (dev && dev_is_pci(dev)) + if (pci) iova_reserve_pci_windows(to_pci_dev(dev), iovad); } return 0; @@ -181,16 +258,22 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, EXPORT_SYMBOL(iommu_dma_init_domain); /** - * dma_direction_to_prot - Translate DMA API directions to IOMMU API page flags + * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API + * page flags. * @dir: Direction of DMA transfer * @coherent: Is the DMA master cache-coherent? + * @attrs: DMA attributes for the mapping * * Return: corresponding IOMMU API page protection flags */ -int dma_direction_to_prot(enum dma_data_direction dir, bool coherent) +int dma_info_to_prot(enum dma_data_direction dir, bool coherent, + unsigned long attrs) { int prot = coherent ? IOMMU_CACHE : 0; + if (attrs & DMA_ATTR_PRIVILEGED) + prot |= IOMMU_PRIV; + switch (dir) { case DMA_BIDIRECTIONAL: return prot | IOMMU_READ | IOMMU_WRITE; @@ -204,19 +287,28 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent) } static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size, - dma_addr_t dma_limit) + dma_addr_t dma_limit, struct device *dev) { struct iova_domain *iovad = cookie_iovad(domain); unsigned long shift = iova_shift(iovad); unsigned long length = iova_align(iovad, size) >> shift; + struct iova *iova = NULL; if (domain->geometry.force_aperture) dma_limit = min(dma_limit, domain->geometry.aperture_end); + + /* Try to get PCI devices a SAC address */ + if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev)) + iova = alloc_iova(iovad, length, DMA_BIT_MASK(32) >> shift, + true); /* * Enforce size-alignment to be safe - there could perhaps be an * attribute to control this per-device, or at least per-domain... 
*/ - return alloc_iova(iovad, length, dma_limit >> shift, true); + if (!iova) + iova = alloc_iova(iovad, length, dma_limit >> shift, true); + + return iova; } /* The IOVA allocator knows what we mapped, so just unmap whatever that was */ @@ -369,7 +461,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp, if (!pages) return NULL; - iova = __alloc_iova(domain, size, dev->coherent_dma_mask); + iova = __alloc_iova(domain, size, dev->coherent_dma_mask, dev); if (!iova) goto out_free_pages; @@ -440,7 +532,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, struct iova_domain *iovad = cookie_iovad(domain); size_t iova_off = iova_offset(iovad, phys); size_t len = iova_align(iovad, size + iova_off); - struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev)); + struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev), dev); if (!iova) return DMA_ERROR_CODE; @@ -598,7 +690,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, prev = s; } - iova = __alloc_iova(domain, iova_len, dma_get_mask(dev)); + iova = __alloc_iova(domain, iova_len, dma_get_mask(dev), dev); if (!iova) goto out_restore_sg; @@ -633,7 +725,7 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, size_t size, enum dma_data_direction dir, unsigned long attrs) { return __iommu_dma_map(dev, phys, size, - dma_direction_to_prot(dir, false) | IOMMU_MMIO); + dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO); } void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, @@ -642,16 +734,6 @@ void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle); } -int iommu_dma_supported(struct device *dev, u64 mask) -{ - /* - * 'Special' IOMMUs which don't have the same addressing capability - * as the CPU will have to wait until we have some way to query that - * before they'll be able to use this framework. 
- */ - return 1; -} - int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return dma_addr == DMA_ERROR_CODE; @@ -662,11 +744,12 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, { struct iommu_dma_cookie *cookie = domain->iova_cookie; struct iommu_dma_msi_page *msi_page; - struct iova_domain *iovad = &cookie->iovad; + struct iova_domain *iovad = cookie_iovad(domain); struct iova *iova; int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; + size_t size = cookie_msi_granule(cookie); - msi_addr &= ~(phys_addr_t)iova_mask(iovad); + msi_addr &= ~(phys_addr_t)(size - 1); list_for_each_entry(msi_page, &cookie->msi_page_list, list) if (msi_page->phys == msi_addr) return msi_page; @@ -675,13 +758,18 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, if (!msi_page) return NULL; - iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev)); - if (!iova) - goto out_free_page; - msi_page->phys = msi_addr; - msi_page->iova = iova_dma_addr(iovad, iova); - if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot)) + if (iovad) { + iova = __alloc_iova(domain, size, dma_get_mask(dev), dev); + if (!iova) + goto out_free_page; + msi_page->iova = iova_dma_addr(iovad, iova); + } else { + msi_page->iova = cookie->msi_iova; + cookie->msi_iova += size; + } + + if (iommu_map(domain, msi_page->iova, msi_addr, size, prot)) goto out_free_iova; INIT_LIST_HEAD(&msi_page->list); @@ -689,7 +777,10 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, return msi_page; out_free_iova: - __free_iova(iovad, iova); + if (iovad) + __free_iova(iovad, iova); + else + cookie->msi_iova -= size; out_free_page: kfree(msi_page); return NULL; @@ -730,7 +821,7 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg) msg->data = ~0U; } else { msg->address_hi = upper_32_bits(msi_page->iova); - msg->address_lo &= iova_mask(&cookie->iovad); + msg->address_lo &= cookie_msi_granule(cookie) - 1; msg->address_lo += lower_32_bits(msi_page->iova); } } diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 8ccbd7023194..d9c0decfc91a 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c @@ -74,6 +74,8 @@ static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)]; static int alloc_iommu(struct dmar_drhd_unit *drhd); static void free_iommu(struct intel_iommu *iommu); +extern const struct iommu_ops intel_iommu_ops; + static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) { /* @@ -1078,14 +1080,17 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) raw_spin_lock_init(&iommu->register_lock); if (intel_iommu_enabled) { - iommu->iommu_dev = iommu_device_create(NULL, iommu, - intel_iommu_groups, - "%s", iommu->name); + err = iommu_device_sysfs_add(&iommu->iommu, NULL, + intel_iommu_groups, + "%s", iommu->name); + if (err) + goto err_unmap; - if (IS_ERR(iommu->iommu_dev)) { - err = PTR_ERR(iommu->iommu_dev); + iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops); + + err = iommu_device_register(&iommu->iommu); + if (err) goto err_unmap; - } } drhd->iommu = iommu; @@ -1103,7 +1108,8 @@ error: static void free_iommu(struct intel_iommu *iommu) { - iommu_device_destroy(iommu->iommu_dev); + iommu_device_sysfs_remove(&iommu->iommu); + iommu_device_unregister(&iommu->iommu); if (iommu->irq) { if (iommu->pr_irq) { diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 57ba0d3091ea..a7e0821c9967 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -276,6 
+276,8 @@ struct sysmmu_drvdata { struct list_head owner_node; /* node for owner controllers list */ phys_addr_t pgtable; /* assigned page table structure */ unsigned int version; /* our version */ + + struct iommu_device iommu; /* IOMMU core handle */ }; static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom) @@ -381,13 +383,14 @@ static void show_fault_information(struct sysmmu_drvdata *data, { sysmmu_pte_t *ent; - dev_err(data->sysmmu, "%s FAULT occurred at %#x (page table base: %pa)\n", - finfo->name, fault_addr, &data->pgtable); + dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n", + dev_name(data->master), finfo->name, fault_addr); + dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable); ent = section_entry(phys_to_virt(data->pgtable), fault_addr); - dev_err(data->sysmmu, "\tLv1 entry: %#x\n", *ent); + dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent); if (lv1ent_page(ent)) { ent = page_entry(ent, fault_addr); - dev_err(data->sysmmu, "\t Lv2 entry: %#x\n", *ent); + dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent); } } @@ -611,6 +614,18 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev) data->sysmmu = dev; spin_lock_init(&data->lock); + ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL, + dev_name(data->sysmmu)); + if (ret) + return ret; + + iommu_device_set_ops(&data->iommu, &exynos_iommu_ops); + iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode); + + ret = iommu_device_register(&data->iommu); + if (ret) + return ret; + platform_set_drvdata(pdev, data); __sysmmu_get_version(data); @@ -628,8 +643,6 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev) pm_runtime_enable(dev); - of_iommu_set_ops(dev->of_node, &exynos_iommu_ops); - return 0; } @@ -743,6 +756,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type) DMA_TO_DEVICE); /* For mapping page table entries we rely on dma == phys */ BUG_ON(handle != virt_to_phys(domain->pgtable)); + if (dma_mapping_error(dma_dev, handle)) + goto err_lv2ent; spin_lock_init(&domain->lock); spin_lock_init(&domain->pgtablelock); @@ -754,6 +769,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type) return &domain->domain; +err_lv2ent: + free_pages((unsigned long)domain->lv2entcnt, 1); err_counter: free_pages((unsigned long)domain->pgtable, 2); err_dma_cookie: @@ -897,6 +914,7 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain, } if (lv1ent_fault(sent)) { + dma_addr_t handle; sysmmu_pte_t *pent; bool need_flush_flpd_cache = lv1ent_zero(sent); @@ -908,7 +926,12 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain, update_pte(sent, mk_lv1ent_page(virt_to_phys(pent))); kmemleak_ignore(pent); *pgcounter = NUM_LV2ENTRIES; - dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE); + handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, handle)) { + kmem_cache_free(lv2table_kmem_cache, pent); + return ERR_PTR(-EADDRINUSE); + } /* * If pre-fetched SLPD is a faulty SLPD in zero_l2_table, @@ -1231,9 +1254,21 @@ static int exynos_iommu_add_device(struct device *dev) static void exynos_iommu_remove_device(struct device *dev) { + struct exynos_iommu_owner *owner = dev->archdata.iommu; + if (!has_sysmmu(dev)) return; + if (owner->domain) { + struct iommu_group *group = iommu_group_get(dev); + + if (group) { + WARN_ON(owner->domain != + iommu_group_default_domain(group)); + exynos_iommu_detach_device(owner->domain, dev); + 
iommu_group_put(group); + } + } iommu_group_remove_device(dev); } @@ -1242,7 +1277,7 @@ static int exynos_iommu_of_xlate(struct device *dev, { struct exynos_iommu_owner *owner = dev->archdata.iommu; struct platform_device *sysmmu = of_find_device_by_node(spec->np); - struct sysmmu_drvdata *data; + struct sysmmu_drvdata *data, *entry; if (!sysmmu) return -ENODEV; @@ -1261,6 +1296,10 @@ static int exynos_iommu_of_xlate(struct device *dev, dev->archdata.iommu = owner; } + list_for_each_entry(entry, &owner->controllers, owner_node) + if (entry == data) + return 0; + list_add_tail(&data->owner_node, &owner->controllers); data->master = dev; diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 8a185250ae5a..f5e02f8e7371 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -440,6 +440,7 @@ struct dmar_rmrr_unit { u64 end_address; /* reserved end address */ struct dmar_dev_scope *devices; /* target devices */ int devices_cnt; /* target device count */ + struct iommu_resv_region *resv; /* reserved region handle */ }; struct dmar_atsr_unit { @@ -547,7 +548,7 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); static DEFINE_SPINLOCK(device_domain_lock); static LIST_HEAD(device_domain_list); -static const struct iommu_ops intel_iommu_ops; +const struct iommu_ops intel_iommu_ops; static bool translation_pre_enabled(struct intel_iommu *iommu) { @@ -1144,7 +1145,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level, if (!dma_pte_present(pte) || dma_pte_superpage(pte)) goto next; - level_pfn = pfn & level_mask(level - 1); + level_pfn = pfn & level_mask(level); level_pte = phys_to_virt(dma_pte_addr(pte)); if (level > 2) @@ -3325,13 +3326,14 @@ static int __init init_dmars(void) iommu_identity_mapping |= IDENTMAP_GFX; #endif + check_tylersburg_isoch(); + if (iommu_identity_mapping) { ret = si_domain_init(hw_pass_through); if (ret) goto free_iommu; } - check_tylersburg_isoch(); /* * If we copied translations from a previous kernel in the kdump @@ -4246,27 +4248,40 @@ static inline void init_iommu_pm_ops(void) {} int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg) { struct acpi_dmar_reserved_memory *rmrr; + int prot = DMA_PTE_READ|DMA_PTE_WRITE; struct dmar_rmrr_unit *rmrru; + size_t length; rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL); if (!rmrru) - return -ENOMEM; + goto out; rmrru->hdr = header; rmrr = (struct acpi_dmar_reserved_memory *)header; rmrru->base_address = rmrr->base_address; rmrru->end_address = rmrr->end_address; + + length = rmrr->end_address - rmrr->base_address + 1; + rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot, + IOMMU_RESV_DIRECT); + if (!rmrru->resv) + goto free_rmrru; + rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1), ((void *)rmrr) + rmrr->header.length, &rmrru->devices_cnt); - if (rmrru->devices_cnt && rmrru->devices == NULL) { - kfree(rmrru); - return -ENOMEM; - } + if (rmrru->devices_cnt && rmrru->devices == NULL) + goto free_all; list_add(&rmrru->list, &dmar_rmrr_units); return 0; +free_all: + kfree(rmrru->resv); +free_rmrru: + kfree(rmrru); +out: + return -ENOMEM; } static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr) @@ -4480,6 +4495,7 @@ static void intel_iommu_free_dmars(void) list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) { list_del(&rmrru->list); dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt); + kfree(rmrru->resv); kfree(rmrru); } @@ -4853,10 +4869,13 @@ int __init intel_iommu_init(void) init_iommu_pm_ops(); 
- for_each_active_iommu(iommu, drhd) - iommu->iommu_dev = iommu_device_create(NULL, iommu, - intel_iommu_groups, - "%s", iommu->name); + for_each_active_iommu(iommu, drhd) { + iommu_device_sysfs_add(&iommu->iommu, NULL, + intel_iommu_groups, + "%s", iommu->name); + iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops); + iommu_device_register(&iommu->iommu); + } bus_set_iommu(&pci_bus_type, &intel_iommu_ops); bus_register_notifier(&pci_bus_type, &device_nb); @@ -5178,7 +5197,7 @@ static int intel_iommu_add_device(struct device *dev) if (!iommu) return -ENODEV; - iommu_device_link(iommu->iommu_dev, dev); + iommu_device_link(&iommu->iommu, dev); group = iommu_group_get_for_dev(dev); @@ -5200,7 +5219,46 @@ static void intel_iommu_remove_device(struct device *dev) iommu_group_remove_device(dev); - iommu_device_unlink(iommu->iommu_dev, dev); + iommu_device_unlink(&iommu->iommu, dev); +} + +static void intel_iommu_get_resv_regions(struct device *device, + struct list_head *head) +{ + struct iommu_resv_region *reg; + struct dmar_rmrr_unit *rmrr; + struct device *i_dev; + int i; + + rcu_read_lock(); + for_each_rmrr_units(rmrr) { + for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, + i, i_dev) { + if (i_dev != device) + continue; + + list_add_tail(&rmrr->resv->list, head); + } + } + rcu_read_unlock(); + + reg = iommu_alloc_resv_region(IOAPIC_RANGE_START, + IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1, + 0, IOMMU_RESV_RESERVED); + if (!reg) + return; + list_add_tail(®->list, head); +} + +static void intel_iommu_put_resv_regions(struct device *dev, + struct list_head *head) +{ + struct iommu_resv_region *entry, *next; + + list_for_each_entry_safe(entry, next, head, list) { + if (entry->type == IOMMU_RESV_RESERVED) + kfree(entry); + } } #ifdef CONFIG_INTEL_IOMMU_SVM @@ -5332,20 +5390,22 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev) } #endif /* CONFIG_INTEL_IOMMU_SVM */ -static const struct iommu_ops intel_iommu_ops = { - .capable = intel_iommu_capable, - .domain_alloc = intel_iommu_domain_alloc, - .domain_free = intel_iommu_domain_free, - .attach_dev = intel_iommu_attach_device, - .detach_dev = intel_iommu_detach_device, - .map = intel_iommu_map, - .unmap = intel_iommu_unmap, - .map_sg = default_iommu_map_sg, - .iova_to_phys = intel_iommu_iova_to_phys, - .add_device = intel_iommu_add_device, - .remove_device = intel_iommu_remove_device, - .device_group = pci_device_group, - .pgsize_bitmap = INTEL_IOMMU_PGSIZES, +const struct iommu_ops intel_iommu_ops = { + .capable = intel_iommu_capable, + .domain_alloc = intel_iommu_domain_alloc, + .domain_free = intel_iommu_domain_free, + .attach_dev = intel_iommu_attach_device, + .detach_dev = intel_iommu_detach_device, + .map = intel_iommu_map, + .unmap = intel_iommu_unmap, + .map_sg = default_iommu_map_sg, + .iova_to_phys = intel_iommu_iova_to_phys, + .add_device = intel_iommu_add_device, + .remove_device = intel_iommu_remove_device, + .get_resv_regions = intel_iommu_get_resv_regions, + .put_resv_regions = intel_iommu_put_resv_regions, + .device_group = pci_device_group, + .pgsize_bitmap = INTEL_IOMMU_PGSIZES, }; static void quirk_iommu_g4x_gfx(struct pci_dev *dev) diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 0769276c0537..1c049e2e12bf 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -265,7 +265,9 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl, if (!(prot & IOMMU_MMIO)) pte |= ARM_V7S_ATTR_TEX(1); if (ap) { - pte |= ARM_V7S_PTE_AF | 
ARM_V7S_PTE_AP_UNPRIV; + pte |= ARM_V7S_PTE_AF; + if (!(prot & IOMMU_PRIV)) + pte |= ARM_V7S_PTE_AP_UNPRIV; if (!(prot & IOMMU_WRITE)) pte |= ARM_V7S_PTE_AP_RDONLY; } @@ -288,6 +290,8 @@ static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl) if (!(attr & ARM_V7S_PTE_AP_RDONLY)) prot |= IOMMU_WRITE; + if (!(attr & ARM_V7S_PTE_AP_UNPRIV)) + prot |= IOMMU_PRIV; if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0) prot |= IOMMU_MMIO; else if (pte & ARM_V7S_ATTR_C) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index a40ce3406fef..feacc54bec68 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -350,11 +350,14 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, if (data->iop.fmt == ARM_64_LPAE_S1 || data->iop.fmt == ARM_32_LPAE_S1) { - pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG; + pte = ARM_LPAE_PTE_nG; if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) pte |= ARM_LPAE_PTE_AP_RDONLY; + if (!(prot & IOMMU_PRIV)) + pte |= ARM_LPAE_PTE_AP_UNPRIV; + if (prot & IOMMU_MMIO) pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV << ARM_LPAE_PTE_ATTRINDX_SHIFT); diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c index 39b2d9127dbf..c58351ed61c1 100644 --- a/drivers/iommu/iommu-sysfs.c +++ b/drivers/iommu/iommu-sysfs.c @@ -50,85 +50,76 @@ static int __init iommu_dev_init(void) postcore_initcall(iommu_dev_init); /* - * Create an IOMMU device and return a pointer to it. IOMMU specific - * attributes can be provided as an attribute group, allowing a unique - * namespace per IOMMU type. + * Init the struct device for the IOMMU. IOMMU specific attributes can + * be provided as an attribute group, allowing a unique namespace per + * IOMMU type. */ -struct device *iommu_device_create(struct device *parent, void *drvdata, - const struct attribute_group **groups, - const char *fmt, ...) +int iommu_device_sysfs_add(struct iommu_device *iommu, + struct device *parent, + const struct attribute_group **groups, + const char *fmt, ...) { - struct device *dev; va_list vargs; int ret; - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) - return ERR_PTR(-ENOMEM); + device_initialize(&iommu->dev); - device_initialize(dev); - - dev->class = &iommu_class; - dev->parent = parent; - dev->groups = groups; - dev_set_drvdata(dev, drvdata); + iommu->dev.class = &iommu_class; + iommu->dev.parent = parent; + iommu->dev.groups = groups; va_start(vargs, fmt); - ret = kobject_set_name_vargs(&dev->kobj, fmt, vargs); + ret = kobject_set_name_vargs(&iommu->dev.kobj, fmt, vargs); va_end(vargs); if (ret) goto error; - ret = device_add(dev); + ret = device_add(&iommu->dev); if (ret) goto error; - return dev; + return 0; error: - put_device(dev); - return ERR_PTR(ret); + put_device(&iommu->dev); + return ret; } -void iommu_device_destroy(struct device *dev) +void iommu_device_sysfs_remove(struct iommu_device *iommu) { - if (!dev || IS_ERR(dev)) - return; - - device_unregister(dev); + device_unregister(&iommu->dev); } - /* * IOMMU drivers can indicate a device is managed by a given IOMMU using * this interface. A link to the device will be created in the "devices" * directory of the IOMMU device in sysfs and an "iommu" link will be * created under the linked device, pointing back at the IOMMU device. 
*/ -int iommu_device_link(struct device *dev, struct device *link) +int iommu_device_link(struct iommu_device *iommu, struct device *link) { int ret; - if (!dev || IS_ERR(dev)) + if (!iommu || IS_ERR(iommu)) return -ENODEV; - ret = sysfs_add_link_to_group(&dev->kobj, "devices", + ret = sysfs_add_link_to_group(&iommu->dev.kobj, "devices", &link->kobj, dev_name(link)); if (ret) return ret; - ret = sysfs_create_link_nowarn(&link->kobj, &dev->kobj, "iommu"); + ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev.kobj, "iommu"); if (ret) - sysfs_remove_link_from_group(&dev->kobj, "devices", + sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", dev_name(link)); return ret; } -void iommu_device_unlink(struct device *dev, struct device *link) +void iommu_device_unlink(struct iommu_device *iommu, struct device *link) { - if (!dev || IS_ERR(dev)) + if (!iommu || IS_ERR(iommu)) return; sysfs_remove_link(&link->kobj, "iommu"); - sysfs_remove_link_from_group(&dev->kobj, "devices", dev_name(link)); + sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", dev_name(link)); } diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index dbe7f653bb7c..8ea14f41a979 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -55,7 +55,7 @@ struct iommu_group { struct iommu_domain *domain; }; -struct iommu_device { +struct group_device { struct list_head list; struct device *dev; char *name; @@ -68,6 +68,12 @@ struct iommu_group_attribute { const char *buf, size_t count); }; +static const char * const iommu_group_resv_type_string[] = { + [IOMMU_RESV_DIRECT] = "direct", + [IOMMU_RESV_RESERVED] = "reserved", + [IOMMU_RESV_MSI] = "msi", +}; + #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ struct iommu_group_attribute iommu_group_attr_##_name = \ __ATTR(_name, _mode, _show, _store) @@ -77,6 +83,25 @@ struct iommu_group_attribute iommu_group_attr_##_name = \ #define to_iommu_group(_kobj) \ container_of(_kobj, struct iommu_group, kobj) +static LIST_HEAD(iommu_device_list); +static DEFINE_SPINLOCK(iommu_device_lock); + +int iommu_device_register(struct iommu_device *iommu) +{ + spin_lock(&iommu_device_lock); + list_add_tail(&iommu->list, &iommu_device_list); + spin_unlock(&iommu_device_lock); + + return 0; +} + +void iommu_device_unregister(struct iommu_device *iommu) +{ + spin_lock(&iommu_device_lock); + list_del(&iommu->list); + spin_unlock(&iommu_device_lock); +} + static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus, unsigned type); static int __iommu_attach_device(struct iommu_domain *domain, @@ -133,8 +158,131 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf) return sprintf(buf, "%s\n", group->name); } +/** + * iommu_insert_resv_region - Insert a new region in the + * list of reserved regions. + * @new: new region to insert + * @regions: list of regions + * + * The new element is sorted by address with respect to the other + * regions of the same type. In case it overlaps with another + * region of the same type, regions are merged. In case it + * overlaps with another region of different type, regions are + * not merged. 
+ */ +static int iommu_insert_resv_region(struct iommu_resv_region *new, + struct list_head *regions) +{ + struct iommu_resv_region *region; + phys_addr_t start = new->start; + phys_addr_t end = new->start + new->length - 1; + struct list_head *pos = regions->next; + + while (pos != regions) { + struct iommu_resv_region *entry = + list_entry(pos, struct iommu_resv_region, list); + phys_addr_t a = entry->start; + phys_addr_t b = entry->start + entry->length - 1; + int type = entry->type; + + if (end < a) { + goto insert; + } else if (start > b) { + pos = pos->next; + } else if ((start >= a) && (end <= b)) { + if (new->type == type) + goto done; + else + pos = pos->next; + } else { + if (new->type == type) { + phys_addr_t new_start = min(a, start); + phys_addr_t new_end = max(b, end); + + list_del(&entry->list); + entry->start = new_start; + entry->length = new_end - new_start + 1; + iommu_insert_resv_region(entry, regions); + } else { + pos = pos->next; + } + } + } +insert: + region = iommu_alloc_resv_region(new->start, new->length, + new->prot, new->type); + if (!region) + return -ENOMEM; + + list_add_tail(®ion->list, pos); +done: + return 0; +} + +static int +iommu_insert_device_resv_regions(struct list_head *dev_resv_regions, + struct list_head *group_resv_regions) +{ + struct iommu_resv_region *entry; + int ret = 0; + + list_for_each_entry(entry, dev_resv_regions, list) { + ret = iommu_insert_resv_region(entry, group_resv_regions); + if (ret) + break; + } + return ret; +} + +int iommu_get_group_resv_regions(struct iommu_group *group, + struct list_head *head) +{ + struct group_device *device; + int ret = 0; + + mutex_lock(&group->mutex); + list_for_each_entry(device, &group->devices, list) { + struct list_head dev_resv_regions; + + INIT_LIST_HEAD(&dev_resv_regions); + iommu_get_resv_regions(device->dev, &dev_resv_regions); + ret = iommu_insert_device_resv_regions(&dev_resv_regions, head); + iommu_put_resv_regions(device->dev, &dev_resv_regions); + if (ret) + break; + } + mutex_unlock(&group->mutex); + return ret; +} +EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions); + +static ssize_t iommu_group_show_resv_regions(struct iommu_group *group, + char *buf) +{ + struct iommu_resv_region *region, *next; + struct list_head group_resv_regions; + char *str = buf; + + INIT_LIST_HEAD(&group_resv_regions); + iommu_get_group_resv_regions(group, &group_resv_regions); + + list_for_each_entry_safe(region, next, &group_resv_regions, list) { + str += sprintf(str, "0x%016llx 0x%016llx %s\n", + (long long int)region->start, + (long long int)(region->start + + region->length - 1), + iommu_group_resv_type_string[region->type]); + kfree(region); + } + + return (str - buf); +} + static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL); +static IOMMU_GROUP_ATTR(reserved_regions, 0444, + iommu_group_show_resv_regions, NULL); + static void iommu_group_release(struct kobject *kobj) { struct iommu_group *group = to_iommu_group(kobj); @@ -212,6 +360,11 @@ struct iommu_group *iommu_group_alloc(void) */ kobject_put(&group->kobj); + ret = iommu_group_create_file(group, + &iommu_group_attr_reserved_regions); + if (ret) + return ERR_PTR(ret); + pr_debug("Allocated group %d\n", group->id); return group; @@ -318,7 +471,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group, struct device *dev) { struct iommu_domain *domain = group->default_domain; - struct iommu_dm_region *entry; + struct iommu_resv_region *entry; struct list_head mappings; unsigned long pg_size; int ret = 0; @@ -331,18 
+484,21 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group, pg_size = 1UL << __ffs(domain->pgsize_bitmap); INIT_LIST_HEAD(&mappings); - iommu_get_dm_regions(dev, &mappings); + iommu_get_resv_regions(dev, &mappings); /* We need to consider overlapping regions for different devices */ list_for_each_entry(entry, &mappings, list) { dma_addr_t start, end, addr; - if (domain->ops->apply_dm_region) - domain->ops->apply_dm_region(dev, domain, entry); + if (domain->ops->apply_resv_region) + domain->ops->apply_resv_region(dev, domain, entry); start = ALIGN(entry->start, pg_size); end = ALIGN(entry->start + entry->length, pg_size); + if (entry->type != IOMMU_RESV_DIRECT) + continue; + for (addr = start; addr < end; addr += pg_size) { phys_addr_t phys_addr; @@ -358,7 +514,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group, } out: - iommu_put_dm_regions(dev, &mappings); + iommu_put_resv_regions(dev, &mappings); return ret; } @@ -374,7 +530,7 @@ out: int iommu_group_add_device(struct iommu_group *group, struct device *dev) { int ret, i = 0; - struct iommu_device *device; + struct group_device *device; device = kzalloc(sizeof(*device), GFP_KERNEL); if (!device) @@ -383,36 +539,30 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev) device->dev = dev; ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); - if (ret) { - kfree(device); - return ret; - } + if (ret) + goto err_free_device; device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj)); rename: if (!device->name) { - sysfs_remove_link(&dev->kobj, "iommu_group"); - kfree(device); - return -ENOMEM; + ret = -ENOMEM; + goto err_remove_link; } ret = sysfs_create_link_nowarn(group->devices_kobj, &dev->kobj, device->name); if (ret) { - kfree(device->name); if (ret == -EEXIST && i >= 0) { /* * Account for the slim chance of collision * and append an instance to the name. */ + kfree(device->name); device->name = kasprintf(GFP_KERNEL, "%s.%d", kobject_name(&dev->kobj), i++); goto rename; } - - sysfs_remove_link(&dev->kobj, "iommu_group"); - kfree(device); - return ret; + goto err_free_name; } kobject_get(group->devices_kobj); @@ -424,8 +574,10 @@ rename: mutex_lock(&group->mutex); list_add_tail(&device->list, &group->devices); if (group->domain) - __iommu_attach_device(group->domain, dev); + ret = __iommu_attach_device(group->domain, dev); mutex_unlock(&group->mutex); + if (ret) + goto err_put_group; /* Notify any listeners about change to group. 
*/ blocking_notifier_call_chain(&group->notifier, @@ -436,6 +588,21 @@ rename: pr_info("Adding device %s to group %d\n", dev_name(dev), group->id); return 0; + +err_put_group: + mutex_lock(&group->mutex); + list_del(&device->list); + mutex_unlock(&group->mutex); + dev->iommu_group = NULL; + kobject_put(group->devices_kobj); +err_free_name: + kfree(device->name); +err_remove_link: + sysfs_remove_link(&dev->kobj, "iommu_group"); +err_free_device: + kfree(device); + pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret); + return ret; } EXPORT_SYMBOL_GPL(iommu_group_add_device); @@ -449,7 +616,7 @@ EXPORT_SYMBOL_GPL(iommu_group_add_device); void iommu_group_remove_device(struct device *dev) { struct iommu_group *group = dev->iommu_group; - struct iommu_device *tmp_device, *device = NULL; + struct group_device *tmp_device, *device = NULL; pr_info("Removing device %s from group %d\n", dev_name(dev), group->id); @@ -484,7 +651,7 @@ EXPORT_SYMBOL_GPL(iommu_group_remove_device); static int iommu_group_device_count(struct iommu_group *group) { - struct iommu_device *entry; + struct group_device *entry; int ret = 0; list_for_each_entry(entry, &group->devices, list) @@ -507,7 +674,7 @@ static int iommu_group_device_count(struct iommu_group *group) static int __iommu_group_for_each_dev(struct iommu_group *group, void *data, int (*fn)(struct device *, void *)) { - struct iommu_device *device; + struct group_device *device; int ret = 0; list_for_each_entry(device, &group->devices, list) { @@ -1559,20 +1726,38 @@ int iommu_domain_set_attr(struct iommu_domain *domain, } EXPORT_SYMBOL_GPL(iommu_domain_set_attr); -void iommu_get_dm_regions(struct device *dev, struct list_head *list) +void iommu_get_resv_regions(struct device *dev, struct list_head *list) { const struct iommu_ops *ops = dev->bus->iommu_ops; - if (ops && ops->get_dm_regions) - ops->get_dm_regions(dev, list); + if (ops && ops->get_resv_regions) + ops->get_resv_regions(dev, list); } -void iommu_put_dm_regions(struct device *dev, struct list_head *list) +void iommu_put_resv_regions(struct device *dev, struct list_head *list) { const struct iommu_ops *ops = dev->bus->iommu_ops; - if (ops && ops->put_dm_regions) - ops->put_dm_regions(dev, list); + if (ops && ops->put_resv_regions) + ops->put_resv_regions(dev, list); +} + +struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, + size_t length, + int prot, int type) +{ + struct iommu_resv_region *region; + + region = kzalloc(sizeof(*region), GFP_KERNEL); + if (!region) + return NULL; + + INIT_LIST_HEAD(®ion->list); + region->start = start; + region->length = length; + region->prot = prot; + region->type = type; + return region; } /* Request that a device is direct mapped by the IOMMU */ @@ -1628,43 +1813,18 @@ out: return ret; } -struct iommu_instance { - struct list_head list; - struct fwnode_handle *fwnode; - const struct iommu_ops *ops; -}; -static LIST_HEAD(iommu_instance_list); -static DEFINE_SPINLOCK(iommu_instance_lock); - -void iommu_register_instance(struct fwnode_handle *fwnode, - const struct iommu_ops *ops) +const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) { - struct iommu_instance *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); - - if (WARN_ON(!iommu)) - return; - - of_node_get(to_of_node(fwnode)); - INIT_LIST_HEAD(&iommu->list); - iommu->fwnode = fwnode; - iommu->ops = ops; - spin_lock(&iommu_instance_lock); - list_add_tail(&iommu->list, &iommu_instance_list); - spin_unlock(&iommu_instance_lock); -} - -const struct 
iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode) -{ - struct iommu_instance *instance; const struct iommu_ops *ops = NULL; + struct iommu_device *iommu; - spin_lock(&iommu_instance_lock); - list_for_each_entry(instance, &iommu_instance_list, list) - if (instance->fwnode == fwnode) { - ops = instance->ops; + spin_lock(&iommu_device_lock); + list_for_each_entry(iommu, &iommu_device_list, list) + if (iommu->fwnode == fwnode) { + ops = iommu->ops; break; } - spin_unlock(&iommu_instance_lock); + spin_unlock(&iommu_device_lock); return ops; } @@ -1714,13 +1874,14 @@ int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL); if (!fwspec) return -ENOMEM; + + dev->iommu_fwspec = fwspec; } for (i = 0; i < num_ids; i++) fwspec->ids[fwspec->num_ids + i] = ids[i]; fwspec->num_ids += num_ids; - dev->iommu_fwspec = fwspec; return 0; } EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 080beca0197d..b7268a14184f 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -62,7 +62,7 @@ __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) else { struct rb_node *prev_node = rb_prev(iovad->cached32_node); struct iova *curr_iova = - container_of(iovad->cached32_node, struct iova, node); + rb_entry(iovad->cached32_node, struct iova, node); *limit_pfn = curr_iova->pfn_lo - 1; return prev_node; } @@ -86,11 +86,11 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) if (!iovad->cached32_node) return; curr = iovad->cached32_node; - cached_iova = container_of(curr, struct iova, node); + cached_iova = rb_entry(curr, struct iova, node); if (free->pfn_lo >= cached_iova->pfn_lo) { struct rb_node *node = rb_next(&free->node); - struct iova *iova = container_of(node, struct iova, node); + struct iova *iova = rb_entry(node, struct iova, node); /* only cache if it's below 32bit pfn */ if (node && iova->pfn_lo < iovad->dma_32bit_pfn) @@ -125,7 +125,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad, curr = __get_cached_rbnode(iovad, &limit_pfn); prev = curr; while (curr) { - struct iova *curr_iova = container_of(curr, struct iova, node); + struct iova *curr_iova = rb_entry(curr, struct iova, node); if (limit_pfn < curr_iova->pfn_lo) goto move_left; @@ -171,8 +171,7 @@ move_left: /* Figure out where to put new node */ while (*entry) { - struct iova *this = container_of(*entry, - struct iova, node); + struct iova *this = rb_entry(*entry, struct iova, node); parent = *entry; if (new->pfn_lo < this->pfn_lo) @@ -201,7 +200,7 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova) struct rb_node **new = &(root->rb_node), *parent = NULL; /* Figure out where to put new node */ while (*new) { - struct iova *this = container_of(*new, struct iova, node); + struct iova *this = rb_entry(*new, struct iova, node); parent = *new; @@ -311,7 +310,7 @@ private_find_iova(struct iova_domain *iovad, unsigned long pfn) assert_spin_locked(&iovad->iova_rbtree_lock); while (node) { - struct iova *iova = container_of(node, struct iova, node); + struct iova *iova = rb_entry(node, struct iova, node); /* If pfn falls within iova's range, return iova */ if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) { @@ -463,7 +462,7 @@ void put_iova_domain(struct iova_domain *iovad) spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); node = rb_first(&iovad->rbroot); while (node) { - struct iova *iova = container_of(node, struct iova, node); + struct iova *iova 
= rb_entry(node, struct iova, node); rb_erase(node, &iovad->rbroot); free_iova_mem(iova); @@ -477,7 +476,7 @@ static int __is_range_overlap(struct rb_node *node, unsigned long pfn_lo, unsigned long pfn_hi) { - struct iova *iova = container_of(node, struct iova, node); + struct iova *iova = rb_entry(node, struct iova, node); if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo)) return 1; @@ -541,7 +540,7 @@ reserve_iova(struct iova_domain *iovad, spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) { if (__is_range_overlap(node, pfn_lo, pfn_hi)) { - iova = container_of(node, struct iova, node); + iova = rb_entry(node, struct iova, node); __adjust_overlap_range(iova, &pfn_lo, &pfn_hi); if ((pfn_lo >= iova->pfn_lo) && (pfn_hi <= iova->pfn_hi)) @@ -578,7 +577,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to) spin_lock_irqsave(&from->iova_rbtree_lock, flags); for (node = rb_first(&from->rbroot); node; node = rb_next(node)) { - struct iova *iova = container_of(node, struct iova, node); + struct iova *iova = rb_entry(node, struct iova, node); struct iova *new_iova; new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi); diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index ace331da6459..b7e14ee863f9 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -313,6 +313,8 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) domain->cfg.ias = 32; domain->cfg.oas = 40; domain->cfg.tlb = &ipmmu_gather_ops; + domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32); + domain->io_domain.geometry.force_aperture = true; /* * TODO: Add support for coherent walk through CCI with DVM and remove * cache handling. For now, delegate it to the io-pgtable code. 
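
The msm_iommu and mtk_iommu hunks that follow convert both drivers to the per-instance registration interface added in iommu.c above: the driver embeds a struct iommu_device in its private data, exposes it via iommu_device_sysfs_add(), attaches its ops and firmware node, and finally calls iommu_device_register() so the core (and iommu_ops_from_fwnode()) can find the instance. A minimal probe-path sketch of that sequence follows; the foo_iommu structure, foo_iommu_ops and the "foo-iommu.%pa" sysfs name are hypothetical placeholders, not part of this patch.

#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* Hypothetical per-instance driver state embedding the core's iommu_device. */
struct foo_iommu {
	void __iomem		*base;
	struct iommu_device	iommu;	/* handle registered with the IOMMU core */
};

extern const struct iommu_ops foo_iommu_ops;	/* assumed to be defined elsewhere */

static int foo_iommu_probe(struct platform_device *pdev)
{
	struct foo_iommu *foo;
	struct resource *res;
	resource_size_t ioaddr;
	int ret;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	foo->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(foo->base))
		return PTR_ERR(foo->base);
	ioaddr = res->start;

	/* Create the /sys/class/iommu entry for this instance. */
	ret = iommu_device_sysfs_add(&foo->iommu, &pdev->dev, NULL,
				     "foo-iommu.%pa", &ioaddr);
	if (ret)
		return ret;

	/* Wire up ops and fwnode, then make the instance visible to the core. */
	iommu_device_set_ops(&foo->iommu, &foo_iommu_ops);
	iommu_device_set_fwnode(&foo->iommu, &pdev->dev.of_node->fwnode);

	ret = iommu_device_register(&foo->iommu);
	if (ret)
		iommu_device_sysfs_remove(&foo->iommu);

	return ret;
}

The same struct iommu_device is what iommu_device_link()/iommu_device_unlink() take when individual client devices are added or removed, as the add_device/remove_device hunks in these drivers do.
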
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index b09692bb5b0a..d0448353d501 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -371,6 +371,58 @@ static int msm_iommu_domain_config(struct msm_priv *priv) return 0; } +/* Must be called under msm_iommu_lock */ +static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev) +{ + struct msm_iommu_dev *iommu, *ret = NULL; + struct msm_iommu_ctx_dev *master; + + list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) { + master = list_first_entry(&iommu->ctx_list, + struct msm_iommu_ctx_dev, + list); + if (master->of_node == dev->of_node) { + ret = iommu; + break; + } + } + + return ret; +} + +static int msm_iommu_add_device(struct device *dev) +{ + struct msm_iommu_dev *iommu; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&msm_iommu_lock, flags); + + iommu = find_iommu_for_dev(dev); + if (iommu) + iommu_device_link(&iommu->iommu, dev); + else + ret = -ENODEV; + + spin_unlock_irqrestore(&msm_iommu_lock, flags); + + return ret; +} + +static void msm_iommu_remove_device(struct device *dev) +{ + struct msm_iommu_dev *iommu; + unsigned long flags; + + spin_lock_irqsave(&msm_iommu_lock, flags); + + iommu = find_iommu_for_dev(dev); + if (iommu) + iommu_device_unlink(&iommu->iommu, dev); + + spin_unlock_irqrestore(&msm_iommu_lock, flags); +} + static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) { int ret = 0; @@ -646,6 +698,8 @@ static struct iommu_ops msm_iommu_ops = { .unmap = msm_iommu_unmap, .map_sg = default_iommu_map_sg, .iova_to_phys = msm_iommu_iova_to_phys, + .add_device = msm_iommu_add_device, + .remove_device = msm_iommu_remove_device, .pgsize_bitmap = MSM_IOMMU_PGSIZES, .of_xlate = qcom_iommu_of_xlate, }; @@ -653,6 +707,7 @@ static struct iommu_ops msm_iommu_ops = { static int msm_iommu_probe(struct platform_device *pdev) { struct resource *r; + resource_size_t ioaddr; struct msm_iommu_dev *iommu; int ret, par, val; @@ -696,6 +751,7 @@ static int msm_iommu_probe(struct platform_device *pdev) ret = PTR_ERR(iommu->base); goto fail; } + ioaddr = r->start; iommu->irq = platform_get_irq(pdev, 0); if (iommu->irq < 0) { @@ -737,7 +793,22 @@ static int msm_iommu_probe(struct platform_device *pdev) } list_add(&iommu->dev_node, &qcom_iommu_devices); - of_iommu_set_ops(pdev->dev.of_node, &msm_iommu_ops); + + ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL, + "msm-smmu.%pa", &ioaddr); + if (ret) { + pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr); + goto fail; + } + + iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops); + iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode); + + ret = iommu_device_register(&iommu->iommu); + if (ret) { + pr_err("Could not register msm-smmu at %pa\n", &ioaddr); + goto fail; + } pr_info("device mapped at %p, irq %d with %d ctx banks\n", iommu->base, iommu->irq, iommu->ncb); diff --git a/drivers/iommu/msm_iommu.h b/drivers/iommu/msm_iommu.h index 4ca25d50d679..ae92d2779c42 100644 --- a/drivers/iommu/msm_iommu.h +++ b/drivers/iommu/msm_iommu.h @@ -19,6 +19,7 @@ #define MSM_IOMMU_H #include <linux/interrupt.h> +#include <linux/iommu.h> #include <linux/clk.h> /* Sharability attributes of MSM IOMMU mappings */ @@ -68,6 +69,8 @@ struct msm_iommu_dev { struct list_head dom_node; struct list_head ctx_list; DECLARE_BITMAP(context_map, IOMMU_MAX_CBS); + + struct iommu_device iommu; }; /** diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 1479c76ece9e..5d14cd15198d 100644 --- 
a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -360,11 +360,15 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, static int mtk_iommu_add_device(struct device *dev) { + struct mtk_iommu_data *data; struct iommu_group *group; if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops) return -ENODEV; /* Not a iommu client device */ + data = dev->iommu_fwspec->iommu_priv; + iommu_device_link(&data->iommu, dev); + group = iommu_group_get_for_dev(dev); if (IS_ERR(group)) return PTR_ERR(group); @@ -375,9 +379,14 @@ static int mtk_iommu_add_device(struct device *dev) static void mtk_iommu_remove_device(struct device *dev) { + struct mtk_iommu_data *data; + if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops) return; + data = dev->iommu_fwspec->iommu_priv; + iommu_device_unlink(&data->iommu, dev); + iommu_group_remove_device(dev); iommu_fwspec_free(dev); } @@ -497,6 +506,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) struct mtk_iommu_data *data; struct device *dev = &pdev->dev; struct resource *res; + resource_size_t ioaddr; struct component_match *match = NULL; void *protect; int i, larb_nr, ret; @@ -519,6 +529,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) data->base = devm_ioremap_resource(dev, res); if (IS_ERR(data->base)) return PTR_ERR(data->base); + ioaddr = res->start; data->irq = platform_get_irq(pdev, 0); if (data->irq < 0) @@ -567,6 +578,18 @@ static int mtk_iommu_probe(struct platform_device *pdev) if (ret) return ret; + ret = iommu_device_sysfs_add(&data->iommu, dev, NULL, + "mtk-iommu.%pa", &ioaddr); + if (ret) + return ret; + + iommu_device_set_ops(&data->iommu, &mtk_iommu_ops); + iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode); + + ret = iommu_device_register(&data->iommu); + if (ret) + return ret; + if (!iommu_present(&platform_bus_type)) bus_set_iommu(&platform_bus_type, &mtk_iommu_ops); @@ -577,6 +600,9 @@ static int mtk_iommu_remove(struct platform_device *pdev) { struct mtk_iommu_data *data = platform_get_drvdata(pdev); + iommu_device_sysfs_remove(&data->iommu); + iommu_device_unregister(&data->iommu); + if (iommu_present(&platform_bus_type)) bus_set_iommu(&platform_bus_type, NULL); @@ -655,7 +681,6 @@ static int mtk_iommu_init_fn(struct device_node *np) return ret; } - of_iommu_set_ops(np, &mtk_iommu_ops); return 0; } diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index 50177f738e4e..2a28eadeea0e 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -47,6 +47,8 @@ struct mtk_iommu_data { struct iommu_group *m4u_group; struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */ bool enable_4GB; + + struct iommu_device iommu; }; static inline int compare_of(struct device *dev, void *data) diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c index 0f57ddc4ecc2..2683e9fc0dcf 100644 --- a/drivers/iommu/of_iommu.c +++ b/drivers/iommu/of_iommu.c @@ -127,7 +127,7 @@ static const struct iommu_ops "iommu-map-mask", &iommu_spec.np, iommu_spec.args)) return NULL; - ops = of_iommu_get_ops(iommu_spec.np); + ops = iommu_ops_from_fwnode(&iommu_spec.np->fwnode); if (!ops || !ops->of_xlate || iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) || ops->of_xlate(&pdev->dev, &iommu_spec)) @@ -157,7 +157,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev, "#iommu-cells", idx, &iommu_spec)) { np = iommu_spec.np; - ops = of_iommu_get_ops(np); + ops = iommu_ops_from_fwnode(&np->fwnode); if (!ops || !ops->of_xlate || 
iommu_fwspec_init(dev, &np->fwnode, ops) || diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 4a895c6d6805..23201004fd7a 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1646,6 +1646,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) inner_domain->parent = its_parent; inner_domain->bus_token = DOMAIN_BUS_NEXUS; + inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP; info->ops = &its_msi_domain_ops; info->data = its; inner_domain->host_data = info; diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index b3cc33fa6d26..bd6f293c4ebd 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -38,6 +38,8 @@ #include <linux/workqueue.h> #include <linux/mdev.h> #include <linux/notifier.h> +#include <linux/dma-iommu.h> +#include <linux/irqdomain.h> #define DRIVER_VERSION "0.2" #define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>" @@ -1179,6 +1181,28 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain, return NULL; } +static bool vfio_iommu_has_resv_msi(struct iommu_group *group, + phys_addr_t *base) +{ + struct list_head group_resv_regions; + struct iommu_resv_region *region, *next; + bool ret = false; + + INIT_LIST_HEAD(&group_resv_regions); + iommu_get_group_resv_regions(group, &group_resv_regions); + list_for_each_entry(region, &group_resv_regions, list) { + if (region->type & IOMMU_RESV_MSI) { + *base = region->start; + ret = true; + goto out; + } + } +out: + list_for_each_entry_safe(region, next, &group_resv_regions, list) + kfree(region); + return ret; +} + static int vfio_iommu_type1_attach_group(void *iommu_data, struct iommu_group *iommu_group) { @@ -1187,6 +1211,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, struct vfio_domain *domain, *d; struct bus_type *bus = NULL, *mdev_bus; int ret; + bool resv_msi, msi_remap; + phys_addr_t resv_msi_base; mutex_lock(&iommu->lock); @@ -1256,11 +1282,15 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, if (ret) goto out_domain; + resv_msi = vfio_iommu_has_resv_msi(iommu_group, &resv_msi_base); + INIT_LIST_HEAD(&domain->group_list); list_add(&group->next, &domain->group_list); - if (!allow_unsafe_interrupts && - !iommu_capable(bus, IOMMU_CAP_INTR_REMAP)) { + msi_remap = resv_msi ? irq_domain_check_msi_remap() : + iommu_capable(bus, IOMMU_CAP_INTR_REMAP); + + if (!allow_unsafe_interrupts && !msi_remap) { pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n", __func__); ret = -EPERM; @@ -1302,6 +1332,12 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, if (ret) goto out_detach; + if (resv_msi) { + ret = iommu_get_msi_cookie(domain->domain, resv_msi_base); + if (ret) + goto out_detach; + } + list_add(&domain->next, &iommu->domain_list); mutex_unlock(&iommu->lock); |
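
The reserved-region information that vfio_iommu_has_resv_msi() consumes above originates in the individual IOMMU drivers: each one may implement the get_resv_regions/put_resv_regions callbacks and hand back iommu_resv_region entries built with iommu_alloc_resv_region(), which iommu_get_group_resv_regions() then sorts and merges per group. Below is a minimal sketch of such a callback pair reporting a software-managed MSI window; the foo_ names and the 0x08000000/1 MiB window are made-up illustration values, not taken from this series.

#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/sizes.h>
#include <linux/slab.h>

/* Illustrative MSI doorbell window (address and size are hypothetical). */
#define FOO_MSI_BASE	0x08000000UL
#define FOO_MSI_SIZE	SZ_1M

static void foo_iommu_get_resv_regions(struct device *dev,
				       struct list_head *head)
{
	struct iommu_resv_region *region;

	/* Report the MSI window so it is reserved in the device's IOVA space. */
	region = iommu_alloc_resv_region(FOO_MSI_BASE, FOO_MSI_SIZE,
					 IOMMU_WRITE, IOMMU_RESV_MSI);
	if (!region)
		return;

	list_add_tail(&region->list, head);
}

static void foo_iommu_put_resv_regions(struct device *dev,
				       struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	/* Entries were allocated in get_resv_regions(); free them all here. */
	list_for_each_entry_safe(entry, next, head, list)
		kfree(entry);
}

With such callbacks in place, the merged per-group view is what the new reserved_regions attribute prints, one region per line under /sys/kernel/iommu_groups/<id>/reserved_regions (for the hypothetical window above: "0x0000000008000000 0x00000000080fffff msi"), and it is also what VFIO walks when looking for an IOMMU_RESV_MSI entry before deciding whether MSI remapping can be relied upon.
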