author | Linus Torvalds <torvalds@linux-foundation.org> | 2022-10-10 13:20:53 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2022-10-10 13:20:53 -0700
commit | f23cdfcd04f7c044ee47dac04484b8d289088776 (patch)
tree | ab4c292b83cf4534b08cdd258cda38893f2cfa39 /drivers/iommu/intel/iommu.c
parent | 706eacadd5c5cc13510ba69eea2917c2ce5ffa99 (diff)
parent | 38713c6028a3172c4c256512c3fbcfc799fe2d43 (diff)
download | linux-f23cdfcd04f7c044ee47dac04484b8d289088776.tar.bz2
Merge tag 'iommu-updates-v6.1' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull iommu updates from Joerg Roedel:
- remove the bus_set_iommu() interface, which became unnecessary because
  of IOMMU per-device probing (see the registration sketch after this list)
- make the dma-iommu.h header private
- Intel VT-d changes from Lu Baolu:
    - Decouple PASID and PRI from SVA
    - Add ESRTPS & ESIRTPS capability check
    - Cleanups
- Apple DART support for the M1 Pro/Max SoCs
- support for AMD IOMMUv2 page-tables for the DMA-API layer.
The v2 page-tables are compatible with the x86 CPU page-tables. Using
them for DMA-API prepares support for hardware-assisted IOMMU
virtualization.
- support for MT6795 Helio X10 M4Us in the Mediatek IOMMU driver
- some smaller fixes and cleanups
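As context for the first item: before this cycle a driver published its ops bus-wide with bus_set_iommu(); with per-device probing the ops are handed to iommu_device_register() and the core then calls ->probe_device() for each device behind that IOMMU. The sketch below is illustrative only — the foo_* names are invented here and nothing in it is taken from this pull request:

/* Hypothetical skeleton, not from this merge: per-device registration
 * replacing the old bus_set_iommu() call. */
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_iommu {
        struct iommu_device iommu;      /* handle registered with the IOMMU core */
};

/* Called by the core for every device that may sit behind this IOMMU. */
static struct iommu_device *foo_probe_device(struct device *dev)
{
        /* A real driver would look up the foo_iommu instance translating
         * for @dev (e.g. from firmware data) and return &fi->iommu. */
        return ERR_PTR(-ENODEV);
}

static void foo_release_device(struct device *dev)
{
}

static const struct iommu_ops foo_iommu_ops = {
        .probe_device   = foo_probe_device,
        .release_device = foo_release_device,
};

static int foo_iommu_probe(struct platform_device *pdev)
{
        struct foo_iommu *fi;

        fi = devm_kzalloc(&pdev->dev, sizeof(*fi), GFP_KERNEL);
        if (!fi)
                return -ENOMEM;

        /* Replaces the removed bus_set_iommu(&platform_bus_type, &foo_iommu_ops);
         * the ops now travel with this IOMMU instance. */
        return iommu_device_register(&fi->iommu, &foo_iommu_ops, &pdev->dev);
}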
* tag 'iommu-updates-v6.1' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (59 commits)
iommu/vt-d: Avoid unnecessary global DMA cache invalidation
iommu/vt-d: Avoid unnecessary global IRTE cache invalidation
iommu/vt-d: Rename cap_5lp_support to cap_fl5lp_support
iommu/vt-d: Remove pasid_set_eafe()
iommu/vt-d: Decouple PASID & PRI enabling from SVA
iommu/vt-d: Remove unnecessary SVA data accesses in page fault path
dt-bindings: iommu: arm,smmu-v3: Relax order of interrupt names
iommu: dart: Support t6000 variant
iommu/io-pgtable-dart: Add DART PTE support for t6000
iommu/io-pgtable: Add DART subpage protection support
iommu/io-pgtable: Move Apple DART support to its own file
iommu/mediatek: Add support for MT6795 Helio X10 M4Us
iommu/mediatek: Introduce new flag TF_PORT_TO_ADDR_MT8173
dt-bindings: mediatek: Add bindings for MT6795 M4U
iommu/iova: Fix module config properly
iommu/amd: Fix sparse warning
iommu/amd: Remove outdated comment
iommu/amd: Free domain ID after domain_flush_pages
iommu/amd: Free domain id in error path
iommu/virtio: Fix compile error with viommu_capable()
...
Diffstat (limited to 'drivers/iommu/intel/iommu.c')
-rw-r--r-- | drivers/iommu/intel/iommu.c | 95
1 file changed, 27 insertions(+), 68 deletions(-)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 31bc50e538a3..a8b36c3fddf1 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -15,7 +15,6 @@
 
 #include <linux/crash_dump.h>
 #include <linux/dma-direct.h>
-#include <linux/dma-iommu.h>
 #include <linux/dmi.h>
 #include <linux/intel-svm.h>
 #include <linux/memory.h>
@@ -26,6 +25,7 @@
 #include <linux/tboot.h>
 
 #include "iommu.h"
+#include "../dma-iommu.h"
 #include "../irq_remapping.h"
 #include "../iommu-sva-lib.h"
 #include "pasid.h"
@@ -199,6 +199,11 @@ static inline void context_set_domain_id(struct context_entry *context,
         context->hi |= (value & ((1 << 16) - 1)) << 8;
 }
 
+static inline void context_set_pasid(struct context_entry *context)
+{
+        context->lo |= CONTEXT_PASIDE;
+}
+
 static inline int context_domain_id(struct context_entry *c)
 {
         return((c->hi >> 8) & 0xffff);
@@ -399,7 +404,7 @@ static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
 {
         unsigned long fl_sagaw, sl_sagaw;
 
-        fl_sagaw = BIT(2) | (cap_5lp_support(iommu->cap) ? BIT(3) : 0);
+        fl_sagaw = BIT(2) | (cap_fl5lp_support(iommu->cap) ? BIT(3) : 0);
         sl_sagaw = cap_sagaw(iommu->cap);
 
         /* Second level only. */
@@ -1234,6 +1239,13 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
 
         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
 
+        /*
+         * Hardware invalidates all DMA remapping hardware translation
+         * caches as part of SRTP flow.
+         */
+        if (cap_esrtps(iommu->cap))
+                return;
+
         iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
         if (sm_supported(iommu))
                 qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
@@ -1350,21 +1362,18 @@ static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
 }
 
 static struct device_domain_info *
-iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
-                        u8 bus, u8 devfn)
+domain_lookup_dev_info(struct dmar_domain *domain,
+                       struct intel_iommu *iommu, u8 bus, u8 devfn)
 {
         struct device_domain_info *info;
         unsigned long flags;
 
-        if (!iommu->qi)
-                return NULL;
-
         spin_lock_irqsave(&domain->lock, flags);
         list_for_each_entry(info, &domain->devices, link) {
                 if (info->iommu == iommu && info->bus == bus &&
                     info->devfn == devfn) {
                         spin_unlock_irqrestore(&domain->lock, flags);
-                        return info->ats_supported ? info : NULL;
+                        return info;
                 }
         }
         spin_unlock_irqrestore(&domain->lock, flags);
@@ -1389,7 +1398,7 @@ static void domain_update_iotlb(struct dmar_domain *domain)
         spin_unlock_irqrestore(&domain->lock, flags);
 }
 
-static void iommu_enable_dev_iotlb(struct device_domain_info *info)
+static void iommu_enable_pci_caps(struct device_domain_info *info)
 {
         struct pci_dev *pdev;
 
@@ -1412,7 +1421,6 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
                 info->pfsid = pci_dev_id(pf_pdev);
         }
 
-#ifdef CONFIG_INTEL_IOMMU_SVM
         /* The PCIe spec, in its wisdom, declares that the behaviour of
            the device if you enable PASID support after ATS support is
            undefined. So always enable PASID support on devices which
@@ -1425,7 +1433,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
             (info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1) &&
             !pci_reset_pri(pdev) && !pci_enable_pri(pdev, PRQ_DEPTH))
                 info->pri_enabled = 1;
-#endif
+
         if (info->ats_supported && pci_ats_page_aligned(pdev) &&
             !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
                 info->ats_enabled = 1;
@@ -1448,16 +1456,16 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
                 info->ats_enabled = 0;
                 domain_update_iotlb(info->domain);
         }
-#ifdef CONFIG_INTEL_IOMMU_SVM
+
         if (info->pri_enabled) {
                 pci_disable_pri(pdev);
                 info->pri_enabled = 0;
         }
+
         if (info->pasid_enabled) {
                 pci_disable_pasid(pdev);
                 info->pasid_enabled = 0;
         }
-#endif
 }
 
 static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
@@ -1907,7 +1915,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                                       u8 bus, u8 devfn)
 {
         struct device_domain_info *info =
-                        iommu_support_dev_iotlb(domain, iommu, bus, devfn);
+                        domain_lookup_dev_info(domain, iommu, bus, devfn);
         u16 did = domain_id_iommu(domain, iommu);
         int translation = CONTEXT_TT_MULTI_LEVEL;
         struct context_entry *context;
@@ -1980,6 +1988,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                         context_set_sm_dte(context);
                 if (info && info->pri_supported)
                         context_set_sm_pre(context);
+                if (info && info->pasid_supported)
+                        context_set_pasid(context);
         } else {
                 struct dma_pte *pgd = domain->pgd;
                 int agaw;
@@ -2037,7 +2047,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
         } else {
                 iommu_flush_write_buffer(iommu);
         }
-        iommu_enable_dev_iotlb(info);
+        iommu_enable_pci_caps(info);
 
         ret = 0;
 
@@ -3896,7 +3906,6 @@ static int __init probe_acpi_namespace_devices(void)
                                         continue;
                                 }
 
-                                pn->dev->bus->iommu_ops = &intel_iommu_ops;
                                 ret = iommu_probe_device(pn->dev);
                                 if (ret)
                                         break;
@@ -4029,7 +4038,6 @@ int __init intel_iommu_init(void)
         }
         up_read(&dmar_global_lock);
 
-        bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
         if (si_domain && !hw_pass_through)
                 register_memory_notifier(&intel_iommu_memory_nb);
 
@@ -4437,7 +4445,7 @@ static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
         return true;
 }
 
-static bool intel_iommu_capable(enum iommu_cap cap)
+static bool intel_iommu_capable(struct device *dev, enum iommu_cap cap)
 {
         if (cap == IOMMU_CAP_CACHE_COHERENCY)
                 return true;
@@ -4457,7 +4465,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
         u8 bus, devfn;
 
         iommu = device_to_iommu(dev, &bus, &devfn);
-        if (!iommu)
+        if (!iommu || !iommu->iommu.ops)
                 return ERR_PTR(-ENODEV);
 
         info = kzalloc(sizeof(*info), GFP_KERNEL);
@@ -4574,52 +4582,6 @@ static void intel_iommu_get_resv_regions(struct device *device,
         list_add_tail(&reg->list, head);
 }
 
-int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
-{
-        struct device_domain_info *info = dev_iommu_priv_get(dev);
-        struct context_entry *context;
-        struct dmar_domain *domain;
-        u64 ctx_lo;
-        int ret;
-
-        domain = info->domain;
-        if (!domain)
-                return -EINVAL;
-
-        spin_lock(&iommu->lock);
-        ret = -EINVAL;
-        if (!info->pasid_supported)
-                goto out;
-
-        context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
-        if (WARN_ON(!context))
-                goto out;
-
-        ctx_lo = context[0].lo;
-
-        if (!(ctx_lo & CONTEXT_PASIDE)) {
-                ctx_lo |= CONTEXT_PASIDE;
-                context[0].lo = ctx_lo;
-                wmb();
-                iommu->flush.flush_context(iommu,
-                                           domain_id_iommu(domain, iommu),
-                                           PCI_DEVID(info->bus, info->devfn),
-                                           DMA_CCMD_MASK_NOBIT,
-                                           DMA_CCMD_DEVICE_INVL);
-        }
-
-        /* Enable PASID support in the device, if it wasn't already */
-        if (!info->pasid_enabled)
-                iommu_enable_dev_iotlb(info);
-
-        ret = 0;
-
- out:
-        spin_unlock(&iommu->lock);
-
-        return ret;
-}
-
 static struct iommu_group *intel_iommu_device_group(struct device *dev)
 {
         if (dev_is_pci(dev))
@@ -4643,9 +4605,6 @@ static int intel_iommu_enable_sva(struct device *dev)
         if (!(iommu->flags & VTD_FLAG_SVM_CAPABLE))
                 return -ENODEV;
 
-        if (intel_iommu_enable_pasid(iommu, dev))
-                return -ENODEV;
-
         if (!info->pasid_enabled || !info->pri_enabled || !info->ats_enabled)
                 return -EINVAL;