author     Jani Nikula <jani.nikula@intel.com>  2022-04-11 16:01:56 +0300
committer  Jani Nikula <jani.nikula@intel.com>  2022-04-11 16:01:56 +0300
commit     83970cd63b9f864525761137b500113ab0b49c94 (patch)
tree       747b97113d60666cbb44f8a5633b961b7eda3c86 /drivers/iommu
parent     618f5df1f6a5a3f29fad824116da291a7d14ab5e (diff)
parent     3123109284176b1532874591f7c81f3837bbdc17 (diff)
download   linux-83970cd63b9f864525761137b500113ab0b49c94.tar.bz2
Merge drm/drm-next into drm-intel-next

Sync up with v5.18-rc1, in particular to get 5e3094cfd9fb ("drm/i915/xehpsdv: Add has_flat_ccs to device info").

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/Kconfig                           |    6
-rw-r--r--  drivers/iommu/Makefile                          |    2
-rw-r--r--  drivers/iommu/amd/amd_iommu.h                   |    5
-rw-r--r--  drivers/iommu/amd/amd_iommu_types.h             |    1
-rw-r--r--  drivers/iommu/amd/init.c                        |   28
-rw-r--r--  drivers/iommu/amd/io_pgtable.c                  |   12
-rw-r--r--  drivers/iommu/amd/iommu.c                       |   33
-rw-r--r--  drivers/iommu/amd/iommu_v2.c                    |   37
-rw-r--r--  drivers/iommu/apple-dart.c                      |   20
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c |    5
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c     |   45
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu.c           |  113
-rw-r--r--  drivers/iommu/arm/arm-smmu/arm-smmu.h           |    5
-rw-r--r--  drivers/iommu/arm/arm-smmu/qcom_iommu.c         |   28
-rw-r--r--  drivers/iommu/dma-iommu.c                       |   18
-rw-r--r--  drivers/iommu/exynos-iommu.c                    |   14
-rw-r--r--  drivers/iommu/fsl_pamu_domain.c                 |   10
-rw-r--r--  drivers/iommu/intel/Kconfig                     |    2
-rw-r--r--  drivers/iommu/intel/debugfs.c                   |    6
-rw-r--r--  drivers/iommu/intel/dmar.c                      |    5
-rw-r--r--  drivers/iommu/intel/iommu.c                     | 1001
-rw-r--r--  drivers/iommu/intel/pasid.c                     |  173
-rw-r--r--  drivers/iommu/intel/pasid.h                     |    4
-rw-r--r--  drivers/iommu/intel/svm.c                       |  229
-rw-r--r--  drivers/iommu/ioasid.c                          |   39
-rw-r--r--  drivers/iommu/iommu-sva-lib.c                   |   39
-rw-r--r--  drivers/iommu/iommu-sva-lib.h                   |    7
-rw-r--r--  drivers/iommu/iommu.c                           |  339
-rw-r--r--  drivers/iommu/iova.c                            |   78
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c                      |   32
-rw-r--r--  drivers/iommu/msm_iommu.c                       |   74
-rw-r--r--  drivers/iommu/mtk_iommu.c                       |  100
-rw-r--r--  drivers/iommu/mtk_iommu.h                       |   10
-rw-r--r--  drivers/iommu/mtk_iommu_v1.c                    |   60
-rw-r--r--  drivers/iommu/omap-iommu.c                      |   14
-rw-r--r--  drivers/iommu/rockchip-iommu.c                  |   21
-rw-r--r--  drivers/iommu/s390-iommu.c                      |   14
-rw-r--r--  drivers/iommu/sprd-iommu.c                      |   18
-rw-r--r--  drivers/iommu/sun50i-iommu.c                    |   18
-rw-r--r--  drivers/iommu/tegra-gart.c                      |   24
-rw-r--r--  drivers/iommu/tegra-smmu.c                      |   24
-rw-r--r--  drivers/iommu/virtio-iommu.c                    |   14
42 files changed, 712 insertions(+), 2015 deletions(-)
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 3eb68fa1b8cc..c79a0df090c0 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -144,8 +144,8 @@ config IOMMU_DMA
select IRQ_MSI_IOMMU
select NEED_SG_DMA_LENGTH
-# Shared Virtual Addressing library
-config IOMMU_SVA_LIB
+# Shared Virtual Addressing
+config IOMMU_SVA
bool
select IOASID
@@ -379,7 +379,7 @@ config ARM_SMMU_V3
config ARM_SMMU_V3_SVA
bool "Shared Virtual Addressing support for the ARM SMMUv3"
depends on ARM_SMMU_V3
- select IOMMU_SVA_LIB
+ select IOMMU_SVA
select MMU_NOTIFIER
help
Support for sharing process address spaces with devices using the
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index bc7f730edbb0..44475a9b3eea 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -27,6 +27,6 @@ obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
-obj-$(CONFIG_IOMMU_SVA_LIB) += iommu-sva-lib.o io-pgfault.o
+obj-$(CONFIG_IOMMU_SVA) += iommu-sva-lib.o io-pgfault.o
obj-$(CONFIG_SPRD_IOMMU) += sprd-iommu.o
obj-$(CONFIG_APPLE_DART) += apple-dart.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 416815a525d6..1ab31074f5b3 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -14,7 +14,7 @@
extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
extern void amd_iommu_apply_erratum_63(u16 devid);
-extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
+extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
extern int amd_iommu_init_devices(void);
extern void amd_iommu_uninit_devices(void);
extern void amd_iommu_init_notifier(void);
@@ -116,8 +116,7 @@ void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)
extern bool translation_pre_enabled(struct amd_iommu *iommu);
-extern bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
- struct device *dev);
+extern bool amd_iommu_is_attach_deferred(struct device *dev);
extern int __init add_special_device(u8 type, u8 id, u16 *devid,
bool cmd_line);
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index ffc89c4fb120..47108ed44fbb 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -110,6 +110,7 @@
#define PASID_MASK 0x0000ffff
/* MMIO status bits */
+#define MMIO_STATUS_EVT_OVERFLOW_INT_MASK (1 << 0)
#define MMIO_STATUS_EVT_INT_MASK (1 << 1)
#define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2)
#define MMIO_STATUS_PPR_INT_MASK (1 << 6)
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index b10fb52ea442..b4a798c7b347 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -658,10 +658,20 @@ static int __init alloc_command_buffer(struct amd_iommu *iommu)
}
/*
+ * This function restarts event logging in case the IOMMU experienced
+ * an event log buffer overflow.
+ */
+void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
+{
+ iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
+ iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
+}
+
+/*
* This function resets the command buffer if the IOMMU stopped fetching
* commands from it.
*/
-void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
+static void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
@@ -980,6 +990,7 @@ static bool copy_device_table(void)
get_order(dev_table_size));
if (old_dev_tbl_cpy == NULL) {
pr_err("Failed to allocate memory for copying old device table!\n");
+ memunmap(old_devtb);
return false;
}
@@ -1010,6 +1021,7 @@ static bool copy_device_table(void)
if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
(int_tab_len != DTE_INTTABLEN)) {
pr_err("Wrong old irq remapping flag: %#x\n", devid);
+ memunmap(old_devtb);
return false;
}
@@ -1943,9 +1955,11 @@ static int __init amd_iommu_init_pci(void)
for_each_iommu(iommu) {
ret = iommu_init_pci(iommu);
- if (ret)
- break;
-
+ if (ret) {
+ pr_err("IOMMU%d: Failed to initialize IOMMU Hardware (error=%d)!\n",
+ iommu->index, ret);
+ goto out;
+ }
/* Need to setup range after PCI init */
iommu_set_cwwb_range(iommu);
}
@@ -1961,6 +1975,11 @@ static int __init amd_iommu_init_pci(void)
* active.
*/
ret = amd_iommu_init_api();
+ if (ret) {
+ pr_err("IOMMU: Failed to initialize IOMMU-API interface (error=%d)!\n",
+ ret);
+ goto out;
+ }
init_device_table_dma();
@@ -1970,6 +1989,7 @@ static int __init amd_iommu_init_pci(void)
if (!ret)
print_iommu_info();
+out:
return ret;
}
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index b1bf4125b0f7..6608d1717574 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -492,18 +492,18 @@ static void v1_free_pgtable(struct io_pgtable *iop)
dom = container_of(pgtable, struct protection_domain, iop);
- /* Update data structure */
- amd_iommu_domain_clr_pt_root(dom);
-
- /* Make changes visible to IOMMUs */
- amd_iommu_domain_update(dom);
-
/* Page-table is not visible to IOMMU anymore, so free it */
BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
pgtable->mode > PAGE_MODE_6_LEVEL);
free_sub_pt(pgtable->root, pgtable->mode, &freelist);
+ /* Update data structure */
+ amd_iommu_domain_clr_pt_root(dom);
+
+ /* Make changes visible to IOMMUs */
+ amd_iommu_domain_update(dom);
+
put_pages_list(&freelist);
}
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 461f1844ed1f..a1ada7bff44e 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -764,7 +764,8 @@ amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
#endif /* !CONFIG_IRQ_REMAP */
#define AMD_IOMMU_INT_MASK \
- (MMIO_STATUS_EVT_INT_MASK | \
+ (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \
+ MMIO_STATUS_EVT_INT_MASK | \
MMIO_STATUS_PPR_INT_MASK | \
MMIO_STATUS_GALOG_INT_MASK)
@@ -774,7 +775,7 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
u32 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
while (status & AMD_IOMMU_INT_MASK) {
- /* Enable EVT and PPR and GA interrupts again */
+ /* Enable interrupt sources again */
writel(AMD_IOMMU_INT_MASK,
iommu->mmio_base + MMIO_STATUS_OFFSET);
@@ -795,6 +796,11 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
}
#endif
+ if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) {
+ pr_info_ratelimited("IOMMU event log overflow\n");
+ amd_iommu_restart_event_logging(iommu);
+ }
+
/*
* Hardware bug: ERBT1312
* When re-enabling interrupt (by writing 1
@@ -2215,8 +2221,7 @@ static void amd_iommu_get_resv_regions(struct device *dev,
list_add_tail(&region->list, head);
}
-bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
- struct device *dev)
+bool amd_iommu_is_attach_deferred(struct device *dev)
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
@@ -2269,13 +2274,6 @@ static int amd_iommu_def_domain_type(struct device *dev)
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.domain_alloc = amd_iommu_domain_alloc,
- .domain_free = amd_iommu_domain_free,
- .attach_dev = amd_iommu_attach_device,
- .detach_dev = amd_iommu_detach_device,
- .map = amd_iommu_map,
- .iotlb_sync_map = amd_iommu_iotlb_sync_map,
- .unmap = amd_iommu_unmap,
- .iova_to_phys = amd_iommu_iova_to_phys,
.probe_device = amd_iommu_probe_device,
.release_device = amd_iommu_release_device,
.probe_finalize = amd_iommu_probe_finalize,
@@ -2284,9 +2282,18 @@ const struct iommu_ops amd_iommu_ops = {
.put_resv_regions = generic_iommu_put_resv_regions,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
- .flush_iotlb_all = amd_iommu_flush_iotlb_all,
- .iotlb_sync = amd_iommu_iotlb_sync,
.def_domain_type = amd_iommu_def_domain_type,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = amd_iommu_attach_device,
+ .detach_dev = amd_iommu_detach_device,
+ .map = amd_iommu_map,
+ .unmap = amd_iommu_unmap,
+ .iotlb_sync_map = amd_iommu_iotlb_sync_map,
+ .iova_to_phys = amd_iommu_iova_to_phys,
+ .flush_iotlb_all = amd_iommu_flush_iotlb_all,
+ .iotlb_sync = amd_iommu_iotlb_sync,
+ .free = amd_iommu_domain_free,
+ }
};
/*****************************************************************************
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index 58da08cc3d01..e56b137ceabd 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -24,7 +24,6 @@
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");
-#define MAX_DEVICES 0x10000
#define PRI_QUEUE_SIZE 512
struct pri_queue {
@@ -71,7 +70,6 @@ struct fault {
struct pasid_state *state;
struct mm_struct *mm;
u64 address;
- u16 devid;
u32 pasid;
u16 tag;
u16 finish;
@@ -125,6 +123,15 @@ static void free_device_state(struct device_state *dev_state)
{
struct iommu_group *group;
+ /* Get rid of any remaining pasid states */
+ free_pasid_states(dev_state);
+
+ /*
+ * Wait until the last reference is dropped before freeing
+ * the device state.
+ */
+ wait_event(dev_state->wq, !atomic_read(&dev_state->count));
+
/*
* First detach device from domain - No more PRI requests will arrive
* from that device after it is unbound from the IOMMUv2 domain.
@@ -537,7 +544,7 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
ret = NOTIFY_DONE;
/* In kdump kernel pci dev is not initialized yet -> send INVALID */
- if (amd_iommu_is_attach_deferred(NULL, &pdev->dev)) {
+ if (amd_iommu_is_attach_deferred(&pdev->dev)) {
amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
PPR_INVALID, tag);
goto out;
@@ -850,15 +857,7 @@ void amd_iommu_free_device(struct pci_dev *pdev)
spin_unlock_irqrestore(&state_lock, flags);
- /* Get rid of any remaining pasid states */
- free_pasid_states(dev_state);
-
put_device_state(dev_state);
- /*
- * Wait until the last reference is dropped before freeing
- * the device state.
- */
- wait_event(dev_state->wq, !atomic_read(&dev_state->count));
free_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);
@@ -955,8 +954,8 @@ out:
static void __exit amd_iommu_v2_exit(void)
{
- struct device_state *dev_state;
- int i;
+ struct device_state *dev_state, *next;
+ unsigned long flags;
if (!amd_iommu_v2_supported())
return;
@@ -969,18 +968,18 @@ static void __exit amd_iommu_v2_exit(void)
* The loop below might call flush_workqueue(), so call
* destroy_workqueue() after it
*/
- for (i = 0; i < MAX_DEVICES; ++i) {
- dev_state = get_device_state(i);
-
- if (dev_state == NULL)
- continue;
+ spin_lock_irqsave(&state_lock, flags);
+ list_for_each_entry_safe(dev_state, next, &state_list, list) {
WARN_ON_ONCE(1);
put_device_state(dev_state);
- amd_iommu_free_device(dev_state->pdev);
+ list_del(&dev_state->list);
+ free_device_state(dev_state);
}
+ spin_unlock_irqrestore(&state_lock, flags);
+
destroy_workqueue(iommu_wq);
}
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 565ef5598811..decafb07ad08 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -765,15 +765,6 @@ static void apple_dart_get_resv_regions(struct device *dev,
static const struct iommu_ops apple_dart_iommu_ops = {
.domain_alloc = apple_dart_domain_alloc,
- .domain_free = apple_dart_domain_free,
- .attach_dev = apple_dart_attach_dev,
- .detach_dev = apple_dart_detach_dev,
- .map_pages = apple_dart_map_pages,
- .unmap_pages = apple_dart_unmap_pages,
- .flush_iotlb_all = apple_dart_flush_iotlb_all,
- .iotlb_sync = apple_dart_iotlb_sync,
- .iotlb_sync_map = apple_dart_iotlb_sync_map,
- .iova_to_phys = apple_dart_iova_to_phys,
.probe_device = apple_dart_probe_device,
.release_device = apple_dart_release_device,
.device_group = apple_dart_device_group,
@@ -782,6 +773,17 @@ static const struct iommu_ops apple_dart_iommu_ops = {
.get_resv_regions = apple_dart_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
.pgsize_bitmap = -1UL, /* Restricted during dart probe */
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = apple_dart_attach_dev,
+ .detach_dev = apple_dart_detach_dev,
+ .map_pages = apple_dart_map_pages,
+ .unmap_pages = apple_dart_unmap_pages,
+ .flush_iotlb_all = apple_dart_flush_iotlb_all,
+ .iotlb_sync = apple_dart_iotlb_sync,
+ .iotlb_sync_map = apple_dart_iotlb_sync_map,
+ .iova_to_phys = apple_dart_iova_to_phys,
+ .free = apple_dart_domain_free,
+ }
};
static irqreturn_t apple_dart_irq(int irq, void *dev)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index a737ba5f727e..22ddd05bbdcd 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -340,14 +340,12 @@ __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
if (IS_ERR(bond->smmu_mn)) {
ret = PTR_ERR(bond->smmu_mn);
- goto err_free_pasid;
+ goto err_free_bond;
}
list_add(&bond->list, &master->bonds);
return &bond->sva;
-err_free_pasid:
- iommu_sva_free_pasid(mm);
err_free_bond:
kfree(bond);
return ERR_PTR(ret);
@@ -377,7 +375,6 @@ void arm_smmu_sva_unbind(struct iommu_sva *handle)
if (refcount_dec_and_test(&bond->refs)) {
list_del(&bond->list);
arm_smmu_mmu_notifier_put(bond->smmu_mn);
- iommu_sva_free_pasid(bond->mm);
kfree(bond);
}
mutex_unlock(&sva_lock);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 6dc6d8b6b368..627a3ed5ee8f 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1558,6 +1558,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
dev_info(smmu->dev, "\t0x%016llx\n",
(unsigned long long)evt[i]);
+ cond_resched();
}
/*
@@ -2841,17 +2842,9 @@ static int arm_smmu_dev_disable_feature(struct device *dev,
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
- .domain_free = arm_smmu_domain_free,
- .attach_dev = arm_smmu_attach_dev,
- .map_pages = arm_smmu_map_pages,
- .unmap_pages = arm_smmu_unmap_pages,
- .flush_iotlb_all = arm_smmu_flush_iotlb_all,
- .iotlb_sync = arm_smmu_iotlb_sync,
- .iova_to_phys = arm_smmu_iova_to_phys,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.device_group = arm_smmu_device_group,
- .enable_nesting = arm_smmu_enable_nesting,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
@@ -2865,6 +2858,16 @@ static struct iommu_ops arm_smmu_ops = {
.page_response = arm_smmu_page_response,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
.owner = THIS_MODULE,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = arm_smmu_attach_dev,
+ .map_pages = arm_smmu_map_pages,
+ .unmap_pages = arm_smmu_unmap_pages,
+ .flush_iotlb_all = arm_smmu_flush_iotlb_all,
+ .iotlb_sync = arm_smmu_iotlb_sync,
+ .iova_to_phys = arm_smmu_iova_to_phys,
+ .enable_nesting = arm_smmu_enable_nesting,
+ .free = arm_smmu_domain_free,
+ }
};
/* Probing and initialisation functions */
@@ -2911,32 +2914,20 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
return 0;
}
-static void arm_smmu_cmdq_free_bitmap(void *data)
-{
- unsigned long *bitmap = data;
- bitmap_free(bitmap);
-}
-
static int arm_smmu_cmdq_init(struct arm_smmu_device *smmu)
{
- int ret = 0;
struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
unsigned int nents = 1 << cmdq->q.llq.max_n_shift;
- atomic_long_t *bitmap;
atomic_set(&cmdq->owner_prod, 0);
atomic_set(&cmdq->lock, 0);
- bitmap = (atomic_long_t *)bitmap_zalloc(nents, GFP_KERNEL);
- if (!bitmap) {
- dev_err(smmu->dev, "failed to allocate cmdq bitmap\n");
- ret = -ENOMEM;
- } else {
- cmdq->valid_map = bitmap;
- devm_add_action(smmu->dev, arm_smmu_cmdq_free_bitmap, bitmap);
- }
+ cmdq->valid_map = (atomic_long_t *)devm_bitmap_zalloc(smmu->dev, nents,
+ GFP_KERNEL);
+ if (!cmdq->valid_map)
+ return -ENOMEM;
- return ret;
+ return 0;
}
static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
@@ -2981,10 +2972,10 @@ static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
{
unsigned int i;
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
- size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
void *strtab = smmu->strtab_cfg.strtab;
- cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
+ cfg->l1_desc = devm_kcalloc(smmu->dev, cfg->num_l1_ents,
+ sizeof(*cfg->l1_desc), GFP_KERNEL);
if (!cfg->l1_desc)
return -ENOMEM;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 4bc75c4ce402..568cce590ccc 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -807,7 +807,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
* Request context fault interrupt. Do this last to avoid the
* handler seeing a half-initialised domain state.
*/
- irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
+ irq = smmu->irqs[cfg->irptndx];
if (smmu->impl && smmu->impl->context_fault)
context_fault = smmu->impl->context_fault;
@@ -858,7 +858,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
arm_smmu_write_context_bank(smmu, cfg->cbndx);
if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
- irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
+ irq = smmu->irqs[cfg->irptndx];
devm_free_irq(smmu->dev, irq, domain);
}
@@ -1583,25 +1583,27 @@ static int arm_smmu_def_domain_type(struct device *dev)
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
- .domain_free = arm_smmu_domain_free,
- .attach_dev = arm_smmu_attach_dev,
- .map_pages = arm_smmu_map_pages,
- .unmap_pages = arm_smmu_unmap_pages,
- .flush_iotlb_all = arm_smmu_flush_iotlb_all,
- .iotlb_sync = arm_smmu_iotlb_sync,
- .iova_to_phys = arm_smmu_iova_to_phys,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.probe_finalize = arm_smmu_probe_finalize,
.device_group = arm_smmu_device_group,
- .enable_nesting = arm_smmu_enable_nesting,
- .set_pgtable_quirks = arm_smmu_set_pgtable_quirks,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
.def_domain_type = arm_smmu_def_domain_type,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
.owner = THIS_MODULE,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = arm_smmu_attach_dev,
+ .map_pages = arm_smmu_map_pages,
+ .unmap_pages = arm_smmu_unmap_pages,
+ .flush_iotlb_all = arm_smmu_flush_iotlb_all,
+ .iotlb_sync = arm_smmu_iotlb_sync,
+ .iova_to_phys = arm_smmu_iova_to_phys,
+ .enable_nesting = arm_smmu_enable_nesting,
+ .set_pgtable_quirks = arm_smmu_set_pgtable_quirks,
+ .free = arm_smmu_domain_free,
+ }
};
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
@@ -1951,8 +1953,8 @@ static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
return ret;
}
-static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
- struct arm_smmu_device *smmu)
+static int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu,
+ u32 *global_irqs, u32 *pmu_irqs)
{
struct device *dev = smmu->dev;
struct acpi_iort_node *node =
@@ -1968,7 +1970,8 @@ static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
return ret;
/* Ignore the configuration access interrupt */
- smmu->num_global_irqs = 1;
+ *global_irqs = 1;
+ *pmu_irqs = 0;
if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
@@ -1976,25 +1979,24 @@ static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
return 0;
}
#else
-static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
- struct arm_smmu_device *smmu)
+static inline int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu,
+ u32 *global_irqs, u32 *pmu_irqs)
{
return -ENODEV;
}
#endif
-static int arm_smmu_device_dt_probe(struct platform_device *pdev,
- struct arm_smmu_device *smmu)
+static int arm_smmu_device_dt_probe(struct arm_smmu_device *smmu,
+ u32 *global_irqs, u32 *pmu_irqs)
{
const struct arm_smmu_match_data *data;
- struct device *dev = &pdev->dev;
+ struct device *dev = smmu->dev;
bool legacy_binding;
- if (of_property_read_u32(dev->of_node, "#global-interrupts",
- &smmu->num_global_irqs)) {
- dev_err(dev, "missing #global-interrupts property\n");
- return -ENODEV;
- }
+ if (of_property_read_u32(dev->of_node, "#global-interrupts", global_irqs))
+ return dev_err_probe(dev, -ENODEV,
+ "missing #global-interrupts property\n");
+ *pmu_irqs = 0;
data = of_device_get_match_data(dev);
smmu->version = data->version;
@@ -2073,6 +2075,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
int num_irqs, i, err;
+ u32 global_irqs, pmu_irqs;
irqreturn_t (*global_fault)(int irq, void *dev);
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
@@ -2083,10 +2086,9 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
smmu->dev = dev;
if (dev->of_node)
- err = arm_smmu_device_dt_probe(pdev, smmu);
+ err = arm_smmu_device_dt_probe(smmu, &global_irqs, &pmu_irqs);
else
- err = arm_smmu_device_acpi_probe(pdev, smmu);
-
+ err = arm_smmu_device_acpi_probe(smmu, &global_irqs, &pmu_irqs);
if (err)
return err;
@@ -2105,31 +2107,25 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
if (IS_ERR(smmu))
return PTR_ERR(smmu);
- num_irqs = 0;
- while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
- num_irqs++;
- if (num_irqs > smmu->num_global_irqs)
- smmu->num_context_irqs++;
- }
+ num_irqs = platform_irq_count(pdev);
- if (!smmu->num_context_irqs) {
- dev_err(dev, "found %d interrupts but expected at least %d\n",
- num_irqs, smmu->num_global_irqs + 1);
- return -ENODEV;
- }
+ smmu->num_context_irqs = num_irqs - global_irqs - pmu_irqs;
+ if (smmu->num_context_irqs <= 0)
+ return dev_err_probe(dev, -ENODEV,
+ "found %d interrupts but expected at least %d\n",
+ num_irqs, global_irqs + pmu_irqs + 1);
- smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
- GFP_KERNEL);
- if (!smmu->irqs) {
- dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
- return -ENOMEM;
- }
+ smmu->irqs = devm_kcalloc(dev, smmu->num_context_irqs,
+ sizeof(*smmu->irqs), GFP_KERNEL);
+ if (!smmu->irqs)
+ return dev_err_probe(dev, -ENOMEM, "failed to allocate %d irqs\n",
+ smmu->num_context_irqs);
- for (i = 0; i < num_irqs; ++i) {
- int irq = platform_get_irq(pdev, i);
+ for (i = 0; i < smmu->num_context_irqs; i++) {
+ int irq = platform_get_irq(pdev, global_irqs + pmu_irqs + i);
if (irq < 0)
- return -ENODEV;
+ return irq;
smmu->irqs[i] = irq;
}
@@ -2165,17 +2161,18 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
else
global_fault = arm_smmu_global_fault;
- for (i = 0; i < smmu->num_global_irqs; ++i) {
- err = devm_request_irq(smmu->dev, smmu->irqs[i],
- global_fault,
- IRQF_SHARED,
- "arm-smmu global fault",
- smmu);
- if (err) {
- dev_err(dev, "failed to request global IRQ %d (%u)\n",
- i, smmu->irqs[i]);
- return err;
- }
+ for (i = 0; i < global_irqs; i++) {
+ int irq = platform_get_irq(pdev, i);
+
+ if (irq < 0)
+ return irq;
+
+ err = devm_request_irq(dev, irq, global_fault, IRQF_SHARED,
+ "arm-smmu global fault", smmu);
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to request global IRQ %d (%u)\n",
+ i, irq);
}
err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index 432de2f742c3..2b9b42fb6f30 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -318,11 +318,10 @@ struct arm_smmu_device {
unsigned long pa_size;
unsigned long pgsize_bitmap;
- u32 num_global_irqs;
- u32 num_context_irqs;
+ int num_context_irqs;
+ int num_clks;
unsigned int *irqs;
struct clk_bulk_data *clks;
- int num_clks;
spinlock_t global_sync_lock;
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index b91874cb6cf3..4c077c38fbd6 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -590,19 +590,21 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
static const struct iommu_ops qcom_iommu_ops = {
.capable = qcom_iommu_capable,
.domain_alloc = qcom_iommu_domain_alloc,
- .domain_free = qcom_iommu_domain_free,
- .attach_dev = qcom_iommu_attach_dev,
- .detach_dev = qcom_iommu_detach_dev,
- .map = qcom_iommu_map,
- .unmap = qcom_iommu_unmap,
- .flush_iotlb_all = qcom_iommu_flush_iotlb_all,
- .iotlb_sync = qcom_iommu_iotlb_sync,
- .iova_to_phys = qcom_iommu_iova_to_phys,
.probe_device = qcom_iommu_probe_device,
.release_device = qcom_iommu_release_device,
.device_group = generic_device_group,
.of_xlate = qcom_iommu_of_xlate,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = qcom_iommu_attach_dev,
+ .detach_dev = qcom_iommu_detach_dev,
+ .map = qcom_iommu_map,
+ .unmap = qcom_iommu_unmap,
+ .flush_iotlb_all = qcom_iommu_flush_iotlb_all,
+ .iotlb_sync = qcom_iommu_iotlb_sync,
+ .iova_to_phys = qcom_iommu_iova_to_phys,
+ .free = qcom_iommu_domain_free,
+ }
};
static int qcom_iommu_sec_ptbl_init(struct device *dev)
@@ -827,20 +829,20 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
ret = devm_of_platform_populate(dev);
if (ret) {
dev_err(dev, "Failed to populate iommu contexts\n");
- return ret;
+ goto err_pm_disable;
}
ret = iommu_device_sysfs_add(&qcom_iommu->iommu, dev, NULL,
dev_name(dev));
if (ret) {
dev_err(dev, "Failed to register iommu in sysfs\n");
- return ret;
+ goto err_pm_disable;
}
ret = iommu_device_register(&qcom_iommu->iommu, &qcom_iommu_ops, dev);
if (ret) {
dev_err(dev, "Failed to register iommu\n");
- return ret;
+ goto err_pm_disable;
}
bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);
@@ -852,6 +854,10 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
}
return 0;
+
+err_pm_disable:
+ pm_runtime_disable(dev);
+ return ret;
}
static int qcom_iommu_device_remove(struct platform_device *pdev)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index d85d54f2b549..09f6e1c0f9c0 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -525,6 +525,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
struct iommu_dma_cookie *cookie = domain->iova_cookie;
unsigned long order, base_pfn;
struct iova_domain *iovad;
+ int ret;
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
return -EINVAL;
@@ -559,6 +560,9 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
}
init_iova_domain(iovad, 1UL << order, base_pfn);
+ ret = iova_domain_init_rcaches(iovad);
+ if (ret)
+ return ret;
/* If the FQ fails we can simply fall back to strict mode */
if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
@@ -852,7 +856,6 @@ out_unmap:
return NULL;
}
-#ifdef CONFIG_DMA_REMAP
static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
size_t size, enum dma_data_direction dir, gfp_t gfp,
unsigned long attrs)
@@ -882,7 +885,6 @@ static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
sg_free_table(&sh->sgt);
kfree(sh);
}
-#endif /* CONFIG_DMA_REMAP */
static void iommu_dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
@@ -1276,7 +1278,7 @@ static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
dma_free_from_pool(dev, cpu_addr, alloc_size))
return;
- if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+ if (is_vmalloc_addr(cpu_addr)) {
/*
* If it the address is remapped, then it's either non-coherent
* or highmem CMA, or an iommu_dma_alloc_remap() construction.
@@ -1318,7 +1320,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
if (!page)
return NULL;
- if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
+ if (!coherent || PageHighMem(page)) {
pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
cpu_addr = dma_common_contiguous_remap(page, alloc_size,
@@ -1350,7 +1352,7 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
gfp |= __GFP_ZERO;
- if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
+ if (gfpflags_allow_blocking(gfp) &&
!(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
return iommu_dma_alloc_remap(dev, size, handle, gfp,
dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
@@ -1391,7 +1393,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
return -ENXIO;
- if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+ if (is_vmalloc_addr(cpu_addr)) {
struct page **pages = dma_common_find_pages(cpu_addr);
if (pages)
@@ -1413,7 +1415,7 @@ static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
struct page *page;
int ret;
- if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+ if (is_vmalloc_addr(cpu_addr)) {
struct page **pages = dma_common_find_pages(cpu_addr);
if (pages) {
@@ -1445,10 +1447,8 @@ static const struct dma_map_ops iommu_dma_ops = {
.free = iommu_dma_free,
.alloc_pages = dma_common_alloc_pages,
.free_pages = dma_common_free_pages,
-#ifdef CONFIG_DMA_REMAP
.alloc_noncontiguous = iommu_dma_alloc_noncontiguous,
.free_noncontiguous = iommu_dma_free_noncontiguous,
-#endif
.mmap = iommu_dma_mmap,
.get_sgtable = iommu_dma_get_sgtable,
.map_page = iommu_dma_map_page,
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 939ffa768986..71f2018e23fe 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -1309,17 +1309,19 @@ static int exynos_iommu_of_xlate(struct device *dev,
static const struct iommu_ops exynos_iommu_ops = {
.domain_alloc = exynos_iommu_domain_alloc,
- .domain_free = exynos_iommu_domain_free,
- .attach_dev = exynos_iommu_attach_device,
- .detach_dev = exynos_iommu_detach_device,
- .map = exynos_iommu_map,
- .unmap = exynos_iommu_unmap,
- .iova_to_phys = exynos_iommu_iova_to_phys,
.device_group = generic_device_group,
.probe_device = exynos_iommu_probe_device,
.release_device = exynos_iommu_release_device,
.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
.of_xlate = exynos_iommu_of_xlate,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = exynos_iommu_attach_device,
+ .detach_dev = exynos_iommu_detach_device,
+ .map = exynos_iommu_map,
+ .unmap = exynos_iommu_unmap,
+ .iova_to_phys = exynos_iommu_iova_to_phys,
+ .free = exynos_iommu_domain_free,
+ }
};
static int __init exynos_iommu_init(void)
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index a47f47307109..69a4a62dc3b9 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -453,13 +453,15 @@ static void fsl_pamu_release_device(struct device *dev)
static const struct iommu_ops fsl_pamu_ops = {
.capable = fsl_pamu_capable,
.domain_alloc = fsl_pamu_domain_alloc,
- .domain_free = fsl_pamu_domain_free,
- .attach_dev = fsl_pamu_attach_device,
- .detach_dev = fsl_pamu_detach_device,
- .iova_to_phys = fsl_pamu_iova_to_phys,
.probe_device = fsl_pamu_probe_device,
.release_device = fsl_pamu_release_device,
.device_group = fsl_pamu_device_group,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = fsl_pamu_attach_device,
+ .detach_dev = fsl_pamu_detach_device,
+ .iova_to_phys = fsl_pamu_iova_to_phys,
+ .free = fsl_pamu_domain_free,
+ }
};
int __init pamu_domain_init(void)
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
index 247d0f2d5fdf..39a06d245f12 100644
--- a/drivers/iommu/intel/Kconfig
+++ b/drivers/iommu/intel/Kconfig
@@ -52,7 +52,7 @@ config INTEL_IOMMU_SVM
select PCI_PRI
select MMU_NOTIFIER
select IOASID
- select IOMMU_SVA_LIB
+ select IOMMU_SVA
help
Shared Virtual Memory (SVM) provides a facility for devices
to access DMA resources through process address space by
diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c
index 62e23ff3c987..ed796eea4581 100644
--- a/drivers/iommu/intel/debugfs.c
+++ b/drivers/iommu/intel/debugfs.c
@@ -344,15 +344,15 @@ static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde,
static int show_device_domain_translation(struct device *dev, void *data)
{
- struct dmar_domain *domain = find_domain(dev);
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ struct dmar_domain *domain = info->domain;
struct seq_file *m = data;
u64 path[6] = { 0 };
if (!domain)
return 0;
- seq_printf(m, "Device %s with pasid %d @0x%llx\n",
- dev_name(dev), domain->default_pasid,
+ seq_printf(m, "Device %s @0x%llx\n", dev_name(dev),
(u64)virt_to_phys(domain->pgd));
seq_puts(m, "IOVA_PFN\t\tPML5E\t\t\tPML4E\t\t\tPDPE\t\t\tPDE\t\t\tPTE\n");
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 915bff76fe96..4de960834a1b 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -66,8 +66,6 @@ static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);
-extern const struct iommu_ops intel_iommu_ops;
-
static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
/*
@@ -789,7 +787,8 @@ static int __init dmar_acpi_dev_scope_init(void)
andd->device_name);
continue;
}
- if (acpi_bus_get_device(h, &adev)) {
+ adev = acpi_fetch_acpi_dev(h);
+ if (!adev) {
pr_err("Failed to get device for ACPI object %s\n",
andd->device_name);
continue;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index be9487516617..2200e3c5d506 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -13,38 +13,18 @@
#define pr_fmt(fmt) "DMAR: " fmt
#define dev_fmt(fmt) pr_fmt(fmt)
-#include <linux/init.h>
-#include <linux/bitmap.h>
-#include <linux/debugfs.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-#include <linux/dmar.h>
-#include <linux/dma-map-ops.h>
-#include <linux/mempool.h>
-#include <linux/memory.h>
-#include <linux/cpu.h>
-#include <linux/timer.h>
-#include <linux/io.h>
-#include <linux/iova.h>
-#include <linux/iommu.h>
+#include <linux/crash_dump.h>
+#include <linux/dma-direct.h>
#include <linux/dma-iommu.h>
+#include <linux/dmi.h>
#include <linux/intel-iommu.h>
#include <linux/intel-svm.h>
+#include <linux/memory.h>
+#include <linux/pci.h>
+#include <linux/pci-ats.h>
+#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <linux/tboot.h>
-#include <linux/dmi.h>
-#include <linux/pci-ats.h>
-#include <linux/memblock.h>
-#include <linux/dma-direct.h>
-#include <linux/crash_dump.h>
-#include <linux/numa.h>
-#include <asm/irq_remapping.h>
-#include <asm/cacheflush.h>
-#include <asm/iommu.h>
#include "../irq_remapping.h"
#include "../iommu-sva-lib.h"
@@ -316,14 +296,9 @@ static LIST_HEAD(dmar_satc_units);
/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;
-static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct device *dev);
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
-static int intel_iommu_attach_device(struct iommu_domain *domain,
- struct device *dev);
-static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
- dma_addr_t iova);
int dmar_disabled = !IS_ENABLED(CONFIG_INTEL_IOMMU_DEFAULT_ON);
int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON);
@@ -342,21 +317,6 @@ static int iommu_skip_te_disable;
int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
-#define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
-struct device_domain_info *get_domain_info(struct device *dev)
-{
- struct device_domain_info *info;
-
- if (!dev)
- return NULL;
-
- info = dev_iommu_priv_get(dev);
- if (unlikely(info == DEFER_DEVICE_DOMAIN_INFO))
- return NULL;
-
- return info;
-}
-
DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);
@@ -452,39 +412,6 @@ static int __init intel_iommu_setup(char *str)
}
__setup("intel_iommu=", intel_iommu_setup);
-static struct kmem_cache *iommu_domain_cache;
-static struct kmem_cache *iommu_devinfo_cache;
-
-static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
-{
- struct dmar_domain **domains;
- int idx = did >> 8;
-
- domains = iommu->domains[idx];
- if (!domains)
- return NULL;
-
- return domains[did & 0xff];
-}
-
-static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
- struct dmar_domain *domain)
-{
- struct dmar_domain **domains;
- int idx = did >> 8;
-
- if (!iommu->domains[idx]) {
- size_t size = 256 * sizeof(struct dmar_domain *);
- iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
- }
-
- domains = iommu->domains[idx];
- if (WARN_ON(!domains))
- return;
- else
- domains[did & 0xff] = domain;
-}
-
void *alloc_pgtable_page(int node)
{
struct page *page;
@@ -501,26 +428,6 @@ void free_pgtable_page(void *vaddr)
free_page((unsigned long)vaddr);
}
-static inline void *alloc_domain_mem(void)
-{
- return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
-}
-
-static void free_domain_mem(void *vaddr)
-{
- kmem_cache_free(iommu_domain_cache, vaddr);
-}
-
-static inline void * alloc_devinfo_mem(void)
-{
- return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
-}
-
-static inline void free_devinfo_mem(void *vaddr)
-{
- kmem_cache_free(iommu_devinfo_cache, vaddr);
-}
-
static inline int domain_type_is_si(struct dmar_domain *domain)
{
return domain->domain.type == IOMMU_DOMAIN_IDENTITY;
@@ -794,11 +701,6 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
return &context[devfn];
}
-static bool attach_deferred(struct device *dev)
-{
- return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO;
-}
-
/**
* is_downstream_to_pci_bridge - test if a device belongs to the PCI
* sub-hierarchy of a candidate PCI-PCI bridge
@@ -925,7 +827,7 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
}
if (pdev && drhd->include_all) {
- got_pdev:
+got_pdev:
if (bus && devfn) {
*bus = pdev->bus->number;
*devfn = pdev->devfn;
@@ -934,7 +836,7 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
}
}
iommu = NULL;
- out:
+out:
if (iommu_is_dummy(iommu, dev))
iommu = NULL;
@@ -1573,18 +1475,6 @@ static void domain_update_iotlb(struct dmar_domain *domain)
break;
}
- if (!has_iotlb_device) {
- struct subdev_domain_info *sinfo;
-
- list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
- info = get_domain_info(sinfo->pdev);
- if (info && info->ats_enabled) {
- has_iotlb_device = true;
- break;
- }
- }
- }
-
domain->has_iotlb_device = has_iotlb_device;
}
@@ -1682,7 +1572,6 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
{
unsigned long flags;
struct device_domain_info *info;
- struct subdev_domain_info *sinfo;
if (!domain->has_iotlb_device)
return;
@@ -1691,27 +1580,9 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
list_for_each_entry(info, &domain->devices, link)
__iommu_flush_dev_iotlb(info, addr, mask);
- list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
- info = get_domain_info(sinfo->pdev);
- __iommu_flush_dev_iotlb(info, addr, mask);
- }
spin_unlock_irqrestore(&device_domain_lock, flags);
}
-static void domain_flush_piotlb(struct intel_iommu *iommu,
- struct dmar_domain *domain,
- u64 addr, unsigned long npages, bool ih)
-{
- u16 did = domain->iommu_did[iommu->seq_id];
-
- if (domain->default_pasid)
- qi_flush_piotlb(iommu, did, domain->default_pasid,
- addr, npages, ih);
-
- if (!list_empty(&domain->devices))
- qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, npages, ih);
-}
-
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
struct dmar_domain *domain,
unsigned long pfn, unsigned int pages,
@@ -1727,7 +1598,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
ih = 1 << 6;
if (domain_use_first_level(domain)) {
- domain_flush_piotlb(iommu, domain, addr, pages, ih);
+ qi_flush_piotlb(iommu, did, PASID_RID2PASID, addr, pages, ih);
} else {
/*
* Fallback to domain selective flush if no PSI support or
@@ -1776,14 +1647,13 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain)
u16 did = dmar_domain->iommu_did[iommu->seq_id];
if (domain_use_first_level(dmar_domain))
- domain_flush_piotlb(iommu, dmar_domain, 0, -1, 0);
+ qi_flush_piotlb(iommu, did, PASID_RID2PASID, 0, -1, 0);
else
iommu->flush.flush_iotlb(iommu, did, 0, 0,
DMA_TLB_DSI_FLUSH);
if (!cap_caching_mode(iommu->cap))
- iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
- 0, MAX_AGAW_PFN_WIDTH);
+ iommu_flush_dev_iotlb(dmar_domain, 0, MAX_AGAW_PFN_WIDTH);
}
}
@@ -1846,7 +1716,6 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
static int iommu_init_domains(struct intel_iommu *iommu)
{
u32 ndomains;
- size_t size;
ndomains = cap_ndoms(iommu->cap);
pr_debug("%s: Number of Domains supported <%d>\n",
@@ -1858,24 +1727,6 @@ static int iommu_init_domains(struct intel_iommu *iommu)
if (!iommu->domain_ids)
return -ENOMEM;
- size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
- iommu->domains = kzalloc(size, GFP_KERNEL);
-
- if (iommu->domains) {
- size = 256 * sizeof(struct dmar_domain *);
- iommu->domains[0] = kzalloc(size, GFP_KERNEL);
- }
-
- if (!iommu->domains || !iommu->domains[0]) {
- pr_err("%s: Allocating domain array failed\n",
- iommu->name);
- bitmap_free(iommu->domain_ids);
- kfree(iommu->domains);
- iommu->domain_ids = NULL;
- iommu->domains = NULL;
- return -ENOMEM;
- }
-
/*
* If Caching mode is set, then invalid translations are tagged
* with domain-id 0, hence we need to pre-allocate it. We also
@@ -1902,7 +1753,7 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
struct device_domain_info *info, *tmp;
unsigned long flags;
- if (!iommu->domains || !iommu->domain_ids)
+ if (!iommu->domain_ids)
return;
spin_lock_irqsave(&device_domain_lock, flags);
@@ -1923,15 +1774,8 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
static void free_dmar_iommu(struct intel_iommu *iommu)
{
- if ((iommu->domains) && (iommu->domain_ids)) {
- int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
- int i;
-
- for (i = 0; i < elems; i++)
- kfree(iommu->domains[i]);
- kfree(iommu->domains);
+ if (iommu->domain_ids) {
bitmap_free(iommu->domain_ids);
- iommu->domains = NULL;
iommu->domain_ids = NULL;
}
@@ -1973,17 +1817,15 @@ static struct dmar_domain *alloc_domain(unsigned int type)
{
struct dmar_domain *domain;
- domain = alloc_domain_mem();
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;
- memset(domain, 0, sizeof(*domain));
domain->nid = NUMA_NO_NODE;
if (first_level_by_default(type))
domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
domain->has_iotlb_device = false;
INIT_LIST_HEAD(&domain->devices);
- INIT_LIST_HEAD(&domain->subdevices);
return domain;
}
@@ -2010,11 +1852,8 @@ static int domain_attach_iommu(struct dmar_domain *domain,
}
set_bit(num, iommu->domain_ids);
- set_iommu_domain(iommu, num, domain);
-
domain->iommu_did[iommu->seq_id] = num;
domain->nid = iommu->node;
-
domain_update_iommu_cap(domain);
}
@@ -2033,8 +1872,6 @@ static void domain_detach_iommu(struct dmar_domain *domain,
if (domain->iommu_refcnt[iommu->seq_id] == 0) {
num = domain->iommu_did[iommu->seq_id];
clear_bit(num, iommu->domain_ids);
- set_iommu_domain(iommu, num, NULL);
-
domain_update_iommu_cap(domain);
domain->iommu_did[iommu->seq_id] = 0;
}
@@ -2067,7 +1904,7 @@ static void domain_exit(struct dmar_domain *domain)
put_pages_list(&freelist);
}
- free_domain_mem(domain);
+ kfree(domain);
}
/*
@@ -2550,15 +2387,6 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
__iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH);
}
-static inline void unlink_domain_info(struct device_domain_info *info)
-{
- assert_spin_locked(&device_domain_lock);
- list_del(&info->link);
- list_del(&info->global);
- if (info->dev)
- dev_iommu_priv_set(info->dev, NULL);
-}
-
static void domain_remove_dev_info(struct dmar_domain *domain)
{
struct device_domain_info *info, *tmp;
@@ -2570,24 +2398,6 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
spin_unlock_irqrestore(&device_domain_lock, flags);
}
-struct dmar_domain *find_domain(struct device *dev)
-{
- struct device_domain_info *info;
-
- if (unlikely(!dev || !dev->iommu))
- return NULL;
-
- if (unlikely(attach_deferred(dev)))
- return NULL;
-
- /* No lock here, assumes no domain exit in normal case */
- info = get_domain_info(dev);
- if (likely(info))
- return info->domain;
-
- return NULL;
-}
-
static inline struct device_domain_info *
dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
{
@@ -2648,97 +2458,24 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
struct device *dev,
struct dmar_domain *domain)
{
- struct dmar_domain *found = NULL;
- struct device_domain_info *info;
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
unsigned long flags;
int ret;
- info = alloc_devinfo_mem();
- if (!info)
- return NULL;
-
- if (!dev_is_real_dma_subdevice(dev)) {
- info->bus = bus;
- info->devfn = devfn;
- info->segment = iommu->segment;
- } else {
- struct pci_dev *pdev = to_pci_dev(dev);
-
- info->bus = pdev->bus->number;
- info->devfn = pdev->devfn;
- info->segment = pci_domain_nr(pdev->bus);
- }
-
- info->ats_supported = info->pasid_supported = info->pri_supported = 0;
- info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
- info->ats_qdep = 0;
- info->dev = dev;
- info->domain = domain;
- info->iommu = iommu;
- info->pasid_table = NULL;
- info->auxd_enabled = 0;
- INIT_LIST_HEAD(&info->subdevices);
-
- if (dev && dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(info->dev);
-
- if (ecap_dev_iotlb_support(iommu->ecap) &&
- pci_ats_supported(pdev) &&
- dmar_find_matched_atsr_unit(pdev))
- info->ats_supported = 1;
-
- if (sm_supported(iommu)) {
- if (pasid_supported(iommu)) {
- int features = pci_pasid_features(pdev);
- if (features >= 0)
- info->pasid_supported = features | 1;
- }
-
- if (info->ats_supported && ecap_prs(iommu->ecap) &&
- pci_pri_supported(pdev))
- info->pri_supported = 1;
- }
- }
-
spin_lock_irqsave(&device_domain_lock, flags);
- if (dev)
- found = find_domain(dev);
-
- if (!found) {
- struct device_domain_info *info2;
- info2 = dmar_search_domain_by_dev_info(info->segment, info->bus,
- info->devfn);
- if (info2) {
- found = info2->domain;
- info2->dev = dev;
- }
- }
-
- if (found) {
- spin_unlock_irqrestore(&device_domain_lock, flags);
- free_devinfo_mem(info);
- /* Caller must free the original domain */
- return found;
- }
-
+ info->domain = domain;
spin_lock(&iommu->lock);
ret = domain_attach_iommu(domain, iommu);
spin_unlock(&iommu->lock);
-
if (ret) {
spin_unlock_irqrestore(&device_domain_lock, flags);
- free_devinfo_mem(info);
return NULL;
}
-
list_add(&info->link, &domain->devices);
- list_add(&info->global, &device_domain_list);
- if (dev)
- dev_iommu_priv_set(dev, info);
spin_unlock_irqrestore(&device_domain_lock, flags);
/* PASID table is mandatory for a PCI device in scalable mode. */
- if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
+ if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
ret = intel_pasid_alloc_table(dev);
if (ret) {
dev_err(dev, "PASID table allocation failed\n");
@@ -3460,70 +3197,6 @@ error:
return ret;
}
-static inline int iommu_domain_cache_init(void)
-{
- int ret = 0;
-
- iommu_domain_cache = kmem_cache_create("iommu_domain",
- sizeof(struct dmar_domain),
- 0,
- SLAB_HWCACHE_ALIGN,
-
- NULL);
- if (!iommu_domain_cache) {
- pr_err("Couldn't create iommu_domain cache\n");
- ret = -ENOMEM;
- }
-
- return ret;
-}
-
-static inline int iommu_devinfo_cache_init(void)
-{
- int ret = 0;
-
- iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
- sizeof(struct device_domain_info),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!iommu_devinfo_cache) {
- pr_err("Couldn't create devinfo cache\n");
- ret = -ENOMEM;
- }
-
- return ret;
-}
-
-static int __init iommu_init_mempool(void)
-{
- int ret;
- ret = iova_cache_get();
- if (ret)
- return ret;
-
- ret = iommu_domain_cache_init();
- if (ret)
- goto domain_error;
-
- ret = iommu_devinfo_cache_init();
- if (!ret)
- return ret;
-
- kmem_cache_destroy(iommu_domain_cache);
-domain_error:
- iova_cache_put();
-
- return -ENOMEM;
-}
-
-static void __init iommu_exit_mempool(void)
-{
- kmem_cache_destroy(iommu_devinfo_cache);
- kmem_cache_destroy(iommu_domain_cache);
- iova_cache_put();
-}
-
static void __init init_no_remapping_devices(void)
{
struct dmar_drhd_unit *drhd;
@@ -3691,7 +3364,7 @@ static void __init init_iommu_pm_ops(void)
static inline void init_iommu_pm_ops(void) {}
#endif /* CONFIG_PM */
-static int rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
+static int __init rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
{
if (!IS_ALIGNED(rmrr->base_address, PAGE_SIZE) ||
!IS_ALIGNED(rmrr->end_address + 1, PAGE_SIZE) ||
@@ -4020,7 +3693,31 @@ static void intel_iommu_free_dmars(void)
}
}
-int dmar_find_matched_atsr_unit(struct pci_dev *dev)
+static struct dmar_satc_unit *dmar_find_matched_satc_unit(struct pci_dev *dev)
+{
+ struct dmar_satc_unit *satcu;
+ struct acpi_dmar_satc *satc;
+ struct device *tmp;
+ int i;
+
+ dev = pci_physfn(dev);
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(satcu, &dmar_satc_units, list) {
+ satc = container_of(satcu->hdr, struct acpi_dmar_satc, header);
+ if (satc->segment != pci_domain_nr(dev->bus))
+ continue;
+ for_each_dev_scope(satcu->devices, satcu->devices_cnt, i, tmp)
+ if (to_pci_dev(tmp) == dev)
+ goto out;
+ }
+ satcu = NULL;
+out:
+ rcu_read_unlock();
+ return satcu;
+}
+
+static int dmar_ats_supported(struct pci_dev *dev, struct intel_iommu *iommu)
{
int i, ret = 1;
struct pci_bus *bus;
@@ -4028,8 +3725,20 @@ int dmar_find_matched_atsr_unit(struct pci_dev *dev)
struct device *tmp;
struct acpi_dmar_atsr *atsr;
struct dmar_atsr_unit *atsru;
+ struct dmar_satc_unit *satcu;
dev = pci_physfn(dev);
+ satcu = dmar_find_matched_satc_unit(dev);
+ if (satcu)
+ /*
+ * This device supports ATS as it is in SATC table.
+ * When IOMMU is in legacy mode, enabling ATS is done
+ * automatically by HW for the device that requires
+ * ATS, hence OS should not enable this device ATS
+ * to avoid duplicated TLB invalidation.
+ */
+ return !(satcu->atc_required && !sm_supported(iommu));
+
for (bus = dev->bus; bus; bus = bus->parent) {
bridge = bus->self;
/* If it's an integrated device, allow ATS */
@@ -4375,12 +4084,6 @@ int __init intel_iommu_init(void)
force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) ||
platform_optin_force_iommu();
- if (iommu_init_mempool()) {
- if (force_on)
- panic("tboot: Failed to initialize iommu memory\n");
- return -ENOMEM;
- }
-
down_write(&dmar_global_lock);
if (dmar_table_init()) {
if (force_on)
@@ -4501,7 +4204,6 @@ int __init intel_iommu_init(void)
out_free_dmar:
intel_iommu_free_dmars();
up_write(&dmar_global_lock);
- iommu_exit_mempool();
return ret;
}
@@ -4552,13 +4254,11 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
intel_pasid_free_table(info->dev);
}
- unlink_domain_info(info);
+ list_del(&info->link);
spin_lock_irqsave(&iommu->lock, flags);
domain_detach_iommu(domain, iommu);
spin_unlock_irqrestore(&iommu->lock, flags);
-
- free_devinfo_mem(info);
}
static void dmar_remove_one_dev_info(struct device *dev)
@@ -4567,7 +4267,7 @@ static void dmar_remove_one_dev_info(struct device *dev)
unsigned long flags;
spin_lock_irqsave(&device_domain_lock, flags);
- info = get_domain_info(dev);
+ info = dev_iommu_priv_get(dev);
if (info)
__dmar_remove_one_dev_info(info);
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -4637,183 +4337,6 @@ static void intel_iommu_domain_free(struct iommu_domain *domain)
domain_exit(to_dmar_domain(domain));
}
-/*
- * Check whether a @domain could be attached to the @dev through the
- * aux-domain attach/detach APIs.
- */
-static inline bool
-is_aux_domain(struct device *dev, struct iommu_domain *domain)
-{
- struct device_domain_info *info = get_domain_info(dev);
-
- return info && info->auxd_enabled &&
- domain->type == IOMMU_DOMAIN_UNMANAGED;
-}
-
-static inline struct subdev_domain_info *
-lookup_subdev_info(struct dmar_domain *domain, struct device *dev)
-{
- struct subdev_domain_info *sinfo;
-
- if (!list_empty(&domain->subdevices)) {
- list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
- if (sinfo->pdev == dev)
- return sinfo;
- }
- }
-
- return NULL;
-}
-
-static int auxiliary_link_device(struct dmar_domain *domain,
- struct device *dev)
-{
- struct device_domain_info *info = get_domain_info(dev);
- struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
-
- assert_spin_locked(&device_domain_lock);
- if (WARN_ON(!info))
- return -EINVAL;
-
- if (!sinfo) {
- sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC);
- if (!sinfo)
- return -ENOMEM;
- sinfo->domain = domain;
- sinfo->pdev = dev;
- list_add(&sinfo->link_phys, &info->subdevices);
- list_add(&sinfo->link_domain, &domain->subdevices);
- }
-
- return ++sinfo->users;
-}
-
-static int auxiliary_unlink_device(struct dmar_domain *domain,
- struct device *dev)
-{
- struct device_domain_info *info = get_domain_info(dev);
- struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
- int ret;
-
- assert_spin_locked(&device_domain_lock);
- if (WARN_ON(!info || !sinfo || sinfo->users <= 0))
- return -EINVAL;
-
- ret = --sinfo->users;
- if (!ret) {
- list_del(&sinfo->link_phys);
- list_del(&sinfo->link_domain);
- kfree(sinfo);
- }
-
- return ret;
-}
-
-static int aux_domain_add_dev(struct dmar_domain *domain,
- struct device *dev)
-{
- int ret;
- unsigned long flags;
- struct intel_iommu *iommu;
-
- iommu = device_to_iommu(dev, NULL, NULL);
- if (!iommu)
- return -ENODEV;
-
- if (domain->default_pasid <= 0) {
- u32 pasid;
-
- /* No private data needed for the default pasid */
- pasid = ioasid_alloc(NULL, PASID_MIN,
- pci_max_pasids(to_pci_dev(dev)) - 1,
- NULL);
- if (pasid == INVALID_IOASID) {
- pr_err("Can't allocate default pasid\n");
- return -ENODEV;
- }
- domain->default_pasid = pasid;
- }
-
- spin_lock_irqsave(&device_domain_lock, flags);
- ret = auxiliary_link_device(domain, dev);
- if (ret <= 0)
- goto link_failed;
-
- /*
- * Subdevices from the same physical device can be attached to the
- * same domain. For such cases, only the first subdevice attachment
- * needs to go through the full steps in this function. So if ret >
- * 1, just goto out.
- */
- if (ret > 1)
- goto out;
-
- /*
- * iommu->lock must be held to attach domain to iommu and setup the
- * pasid entry for second level translation.
- */
- spin_lock(&iommu->lock);
- ret = domain_attach_iommu(domain, iommu);
- if (ret)
- goto attach_failed;
-
- /* Setup the PASID entry for mediated devices: */
- if (domain_use_first_level(domain))
- ret = domain_setup_first_level(iommu, domain, dev,
- domain->default_pasid);
- else
- ret = intel_pasid_setup_second_level(iommu, domain, dev,
- domain->default_pasid);
- if (ret)
- goto table_failed;
-
- spin_unlock(&iommu->lock);
-out:
- spin_unlock_irqrestore(&device_domain_lock, flags);
-
- return 0;
-
-table_failed:
- domain_detach_iommu(domain, iommu);
-attach_failed:
- spin_unlock(&iommu->lock);
- auxiliary_unlink_device(domain, dev);
-link_failed:
- spin_unlock_irqrestore(&device_domain_lock, flags);
- if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
- ioasid_put(domain->default_pasid);
-
- return ret;
-}
-
-static void aux_domain_remove_dev(struct dmar_domain *domain,
- struct device *dev)
-{
- struct device_domain_info *info;
- struct intel_iommu *iommu;
- unsigned long flags;
-
- if (!is_aux_domain(dev, &domain->domain))
- return;
-
- spin_lock_irqsave(&device_domain_lock, flags);
- info = get_domain_info(dev);
- iommu = info->iommu;
-
- if (!auxiliary_unlink_device(domain, dev)) {
- spin_lock(&iommu->lock);
- intel_pasid_tear_down_entry(iommu, dev,
- domain->default_pasid, false);
- domain_detach_iommu(domain, iommu);
- spin_unlock(&iommu->lock);
- }
-
- spin_unlock_irqrestore(&device_domain_lock, flags);
-
- if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
- ioasid_put(domain->default_pasid);
-}
-
static int prepare_domain_attach_device(struct iommu_domain *domain,
struct device *dev)
{
@@ -4825,13 +4348,6 @@ static int prepare_domain_attach_device(struct iommu_domain *domain,
if (!iommu)
return -ENODEV;
- if ((dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE) &&
- !ecap_nest(iommu->ecap)) {
- dev_err(dev, "%s: iommu not support nested translation\n",
- iommu->name);
- return -EINVAL;
- }
-
/* check if this iommu agaw is sufficient for max mapped address */
addr_width = agaw_to_width(iommu->agaw);
if (addr_width > cap_mgaw(iommu->cap))
@@ -4873,15 +4389,11 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
return -EPERM;
}
- if (is_aux_domain(dev, domain))
- return -EPERM;
-
/* normally dev is not mapped */
if (unlikely(domain_context_mapped(dev))) {
- struct dmar_domain *old_domain;
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
- old_domain = find_domain(dev);
- if (old_domain)
+ if (info->domain)
dmar_remove_one_dev_info(dev);
}
@@ -4892,212 +4404,12 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
return domain_add_dev_info(to_dmar_domain(domain), dev);
}
-static int intel_iommu_aux_attach_device(struct iommu_domain *domain,
- struct device *dev)
-{
- int ret;
-
- if (!is_aux_domain(dev, domain))
- return -EPERM;
-
- ret = prepare_domain_attach_device(domain, dev);
- if (ret)
- return ret;
-
- return aux_domain_add_dev(to_dmar_domain(domain), dev);
-}
-
static void intel_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
dmar_remove_one_dev_info(dev);
}
-static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
- struct device *dev)
-{
- aux_domain_remove_dev(to_dmar_domain(domain), dev);
-}
-
-#ifdef CONFIG_INTEL_IOMMU_SVM
-/*
- * 2D array for converting and sanitizing IOMMU generic TLB granularity to
- * VT-d granularity. Invalidation is typically included in the unmap operation
- * as a result of DMA or VFIO unmap. However, for assigned devices guest
- * owns the first level page tables. Invalidations of translation caches in the
- * guest are trapped and passed down to the host.
- *
- * vIOMMU in the guest will only expose first level page tables, therefore
- * we do not support IOTLB granularity for request without PASID (second level).
- *
- * For example, to find the VT-d granularity encoding for IOTLB
- * type and page selective granularity within PASID:
- * X: indexed by iommu cache type
- * Y: indexed by enum iommu_inv_granularity
- * [IOMMU_CACHE_INV_TYPE_IOTLB][IOMMU_INV_GRANU_ADDR]
- */
-
-static const int
-inv_type_granu_table[IOMMU_CACHE_INV_TYPE_NR][IOMMU_INV_GRANU_NR] = {
- /*
- * PASID based IOTLB invalidation: PASID selective (per PASID),
- * page selective (address granularity)
- */
- {-EINVAL, QI_GRAN_NONG_PASID, QI_GRAN_PSI_PASID},
- /* PASID based dev TLBs */
- {-EINVAL, -EINVAL, QI_DEV_IOTLB_GRAN_PASID_SEL},
- /* PASID cache */
- {-EINVAL, -EINVAL, -EINVAL}
-};
-
-static inline int to_vtd_granularity(int type, int granu)
-{
- return inv_type_granu_table[type][granu];
-}
-
-static inline u64 to_vtd_size(u64 granu_size, u64 nr_granules)
-{
- u64 nr_pages = (granu_size * nr_granules) >> VTD_PAGE_SHIFT;
-
- /* VT-d size is encoded as 2^size of 4K pages, 0 for 4k, 9 for 2MB, etc.
- * IOMMU cache invalidate API passes granu_size in bytes, and number of
- * granu size in contiguous memory.
- */
- return order_base_2(nr_pages);
-}
-
-static int
-intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev,
- struct iommu_cache_invalidate_info *inv_info)
-{
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- struct device_domain_info *info;
- struct intel_iommu *iommu;
- unsigned long flags;
- int cache_type;
- u8 bus, devfn;
- u16 did, sid;
- int ret = 0;
- u64 size = 0;
-
- if (!inv_info || !dmar_domain)
- return -EINVAL;
-
- if (!dev || !dev_is_pci(dev))
- return -ENODEV;
-
- iommu = device_to_iommu(dev, &bus, &devfn);
- if (!iommu)
- return -ENODEV;
-
- if (!(dmar_domain->flags & DOMAIN_FLAG_NESTING_MODE))
- return -EINVAL;
-
- spin_lock_irqsave(&device_domain_lock, flags);
- spin_lock(&iommu->lock);
- info = get_domain_info(dev);
- if (!info) {
- ret = -EINVAL;
- goto out_unlock;
- }
- did = dmar_domain->iommu_did[iommu->seq_id];
- sid = PCI_DEVID(bus, devfn);
-
- /* Size is only valid in address selective invalidation */
- if (inv_info->granularity == IOMMU_INV_GRANU_ADDR)
- size = to_vtd_size(inv_info->granu.addr_info.granule_size,
- inv_info->granu.addr_info.nb_granules);
-
- for_each_set_bit(cache_type,
- (unsigned long *)&inv_info->cache,
- IOMMU_CACHE_INV_TYPE_NR) {
- int granu = 0;
- u64 pasid = 0;
- u64 addr = 0;
-
- granu = to_vtd_granularity(cache_type, inv_info->granularity);
- if (granu == -EINVAL) {
- pr_err_ratelimited("Invalid cache type and granu combination %d/%d\n",
- cache_type, inv_info->granularity);
- break;
- }
-
- /*
- * PASID is stored in different locations based on the
- * granularity.
- */
- if (inv_info->granularity == IOMMU_INV_GRANU_PASID &&
- (inv_info->granu.pasid_info.flags & IOMMU_INV_PASID_FLAGS_PASID))
- pasid = inv_info->granu.pasid_info.pasid;
- else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
- (inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_PASID))
- pasid = inv_info->granu.addr_info.pasid;
-
- switch (BIT(cache_type)) {
- case IOMMU_CACHE_INV_TYPE_IOTLB:
- /* HW will ignore LSB bits based on address mask */
- if (inv_info->granularity == IOMMU_INV_GRANU_ADDR &&
- size &&
- (inv_info->granu.addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) {
- pr_err_ratelimited("User address not aligned, 0x%llx, size order %llu\n",
- inv_info->granu.addr_info.addr, size);
- }
-
- /*
- * If granu is PASID-selective, address is ignored.
- * We use npages = -1 to indicate that.
- */
- qi_flush_piotlb(iommu, did, pasid,
- mm_to_dma_pfn(inv_info->granu.addr_info.addr),
- (granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size,
- inv_info->granu.addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF);
-
- if (!info->ats_enabled)
- break;
- /*
- * Always flush device IOTLB if ATS is enabled. vIOMMU
- * in the guest may assume IOTLB flush is inclusive,
- * which is more efficient.
- */
- fallthrough;
- case IOMMU_CACHE_INV_TYPE_DEV_IOTLB:
- /*
- * PASID based device TLB invalidation does not support
- * IOMMU_INV_GRANU_PASID granularity but only supports
- * IOMMU_INV_GRANU_ADDR.
- * The equivalent of that is we set the size to be the
- * entire range of 64 bit. User only provides PASID info
- * without address info. So we set addr to 0.
- */
- if (inv_info->granularity == IOMMU_INV_GRANU_PASID) {
- size = 64 - VTD_PAGE_SHIFT;
- addr = 0;
- } else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) {
- addr = inv_info->granu.addr_info.addr;
- }
-
- if (info->ats_enabled)
- qi_flush_dev_iotlb_pasid(iommu, sid,
- info->pfsid, pasid,
- info->ats_qdep, addr,
- size);
- else
- pr_warn_ratelimited("Passdown device IOTLB flush w/o ATS!\n");
- break;
- default:
- dev_err_ratelimited(dev, "Unsupported IOMMU invalidation type %d\n",
- cache_type);
- ret = -EINVAL;
- }
- }
-out_unlock:
- spin_unlock(&iommu->lock);
- spin_unlock_irqrestore(&device_domain_lock, flags);
-
- return ret;
-}
-#endif
-
static int intel_iommu_map(struct iommu_domain *domain,
unsigned long iova, phys_addr_t hpa,
size_t size, int iommu_prot, gfp_t gfp)
@@ -5245,28 +4557,73 @@ static bool intel_iommu_capable(enum iommu_cap cap)
static struct iommu_device *intel_iommu_probe_device(struct device *dev)
{
+ struct pci_dev *pdev = dev_is_pci(dev) ? to_pci_dev(dev) : NULL;
+ struct device_domain_info *info;
struct intel_iommu *iommu;
+ unsigned long flags;
+ u8 bus, devfn;
- iommu = device_to_iommu(dev, NULL, NULL);
+ iommu = device_to_iommu(dev, &bus, &devfn);
if (!iommu)
return ERR_PTR(-ENODEV);
- if (translation_pre_enabled(iommu))
- dev_iommu_priv_set(dev, DEFER_DEVICE_DOMAIN_INFO);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ if (dev_is_real_dma_subdevice(dev)) {
+ info->bus = pdev->bus->number;
+ info->devfn = pdev->devfn;
+ info->segment = pci_domain_nr(pdev->bus);
+ } else {
+ info->bus = bus;
+ info->devfn = devfn;
+ info->segment = iommu->segment;
+ }
+
+ info->dev = dev;
+ info->iommu = iommu;
+ if (dev_is_pci(dev)) {
+ if (ecap_dev_iotlb_support(iommu->ecap) &&
+ pci_ats_supported(pdev) &&
+ dmar_ats_supported(pdev, iommu))
+ info->ats_supported = 1;
+
+ if (sm_supported(iommu)) {
+ if (pasid_supported(iommu)) {
+ int features = pci_pasid_features(pdev);
+
+ if (features >= 0)
+ info->pasid_supported = features | 1;
+ }
+
+ if (info->ats_supported && ecap_prs(iommu->ecap) &&
+ pci_pri_supported(pdev))
+ info->pri_supported = 1;
+ }
+ }
+
+ spin_lock_irqsave(&device_domain_lock, flags);
+ list_add(&info->global, &device_domain_list);
+ dev_iommu_priv_set(dev, info);
+ spin_unlock_irqrestore(&device_domain_lock, flags);
return &iommu->iommu;
}
static void intel_iommu_release_device(struct device *dev)
{
- struct intel_iommu *iommu;
-
- iommu = device_to_iommu(dev, NULL, NULL);
- if (!iommu)
- return;
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
+ unsigned long flags;
dmar_remove_one_dev_info(dev);
+ spin_lock_irqsave(&device_domain_lock, flags);
+ dev_iommu_priv_set(dev, NULL);
+ list_del(&info->global);
+ spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ kfree(info);
set_dma_ops(dev, NULL);
}
@@ -5335,14 +4692,14 @@ static void intel_iommu_get_resv_regions(struct device *device,
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
{
- struct device_domain_info *info;
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
struct context_entry *context;
struct dmar_domain *domain;
unsigned long flags;
u64 ctx_lo;
int ret;
- domain = find_domain(dev);
+ domain = info->domain;
if (!domain)
return -EINVAL;
@@ -5350,8 +4707,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev)
spin_lock(&iommu->lock);
ret = -EINVAL;
- info = get_domain_info(dev);
- if (!info || !info->pasid_supported)
+ if (!info->pasid_supported)
goto out;
context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
@@ -5391,49 +4747,9 @@ static struct iommu_group *intel_iommu_device_group(struct device *dev)
return generic_device_group(dev);
}
-static int intel_iommu_enable_auxd(struct device *dev)
-{
- struct device_domain_info *info;
- struct intel_iommu *iommu;
- unsigned long flags;
- int ret;
-
- iommu = device_to_iommu(dev, NULL, NULL);
- if (!iommu || dmar_disabled)
- return -EINVAL;
-
- if (!sm_supported(iommu) || !pasid_supported(iommu))
- return -EINVAL;
-
- ret = intel_iommu_enable_pasid(iommu, dev);
- if (ret)
- return -ENODEV;
-
- spin_lock_irqsave(&device_domain_lock, flags);
- info = get_domain_info(dev);
- info->auxd_enabled = 1;
- spin_unlock_irqrestore(&device_domain_lock, flags);
-
- return 0;
-}
-
-static int intel_iommu_disable_auxd(struct device *dev)
-{
- struct device_domain_info *info;
- unsigned long flags;
-
- spin_lock_irqsave(&device_domain_lock, flags);
- info = get_domain_info(dev);
- if (!WARN_ON(!info))
- info->auxd_enabled = 0;
- spin_unlock_irqrestore(&device_domain_lock, flags);
-
- return 0;
-}
-
static int intel_iommu_enable_sva(struct device *dev)
{
- struct device_domain_info *info = get_domain_info(dev);
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu;
int ret;
@@ -5462,7 +4778,7 @@ static int intel_iommu_enable_sva(struct device *dev)
static int intel_iommu_disable_sva(struct device *dev)
{
- struct device_domain_info *info = get_domain_info(dev);
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
int ret;
@@ -5475,7 +4791,7 @@ static int intel_iommu_disable_sva(struct device *dev)
static int intel_iommu_enable_iopf(struct device *dev)
{
- struct device_domain_info *info = get_domain_info(dev);
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
if (info && info->pri_supported)
return 0;
@@ -5487,9 +4803,6 @@ static int
intel_iommu_dev_enable_feat(struct device *dev, enum iommu_dev_features feat)
{
switch (feat) {
- case IOMMU_DEV_FEAT_AUX:
- return intel_iommu_enable_auxd(dev);
-
case IOMMU_DEV_FEAT_IOPF:
return intel_iommu_enable_iopf(dev);
@@ -5505,9 +4818,6 @@ static int
intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
{
switch (feat) {
- case IOMMU_DEV_FEAT_AUX:
- return intel_iommu_disable_auxd(dev);
-
case IOMMU_DEV_FEAT_IOPF:
return 0;
@@ -5519,48 +4829,11 @@ intel_iommu_dev_disable_feat(struct device *dev, enum iommu_dev_features feat)
}
}
-static bool
-intel_iommu_dev_feat_enabled(struct device *dev, enum iommu_dev_features feat)
-{
- struct device_domain_info *info = get_domain_info(dev);
-
- if (feat == IOMMU_DEV_FEAT_AUX)
- return scalable_mode_support() && info && info->auxd_enabled;
-
- return false;
-}
-
-static int
-intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
+static bool intel_iommu_is_attach_deferred(struct device *dev)
{
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
- return dmar_domain->default_pasid > 0 ?
- dmar_domain->default_pasid : -EINVAL;
-}
-
-static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
- struct device *dev)
-{
- return attach_deferred(dev);
-}
-
-static int
-intel_iommu_enable_nesting(struct iommu_domain *domain)
-{
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- unsigned long flags;
- int ret = -ENODEV;
-
- spin_lock_irqsave(&device_domain_lock, flags);
- if (list_empty(&dmar_domain->devices)) {
- dmar_domain->flags |= DOMAIN_FLAG_NESTING_MODE;
- dmar_domain->flags &= ~DOMAIN_FLAG_USE_FIRST_LEVEL;
- ret = 0;
- }
- spin_unlock_irqrestore(&device_domain_lock, flags);
-
- return ret;
+ return translation_pre_enabled(info->iommu) && !info->domain;
}
/*
@@ -5598,40 +4871,34 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
.domain_alloc = intel_iommu_domain_alloc,
- .domain_free = intel_iommu_domain_free,
- .enable_nesting = intel_iommu_enable_nesting,
- .attach_dev = intel_iommu_attach_device,
- .detach_dev = intel_iommu_detach_device,
- .aux_attach_dev = intel_iommu_aux_attach_device,
- .aux_detach_dev = intel_iommu_aux_detach_device,
- .aux_get_pasid = intel_iommu_aux_get_pasid,
- .map_pages = intel_iommu_map_pages,
- .unmap_pages = intel_iommu_unmap_pages,
- .iotlb_sync_map = intel_iommu_iotlb_sync_map,
- .flush_iotlb_all = intel_flush_iotlb_all,
- .iotlb_sync = intel_iommu_tlb_sync,
- .iova_to_phys = intel_iommu_iova_to_phys,
.probe_device = intel_iommu_probe_device,
.probe_finalize = intel_iommu_probe_finalize,
.release_device = intel_iommu_release_device,
.get_resv_regions = intel_iommu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
.device_group = intel_iommu_device_group,
- .dev_feat_enabled = intel_iommu_dev_feat_enabled,
.dev_enable_feat = intel_iommu_dev_enable_feat,
.dev_disable_feat = intel_iommu_dev_disable_feat,
.is_attach_deferred = intel_iommu_is_attach_deferred,
.def_domain_type = device_def_domain_type,
.pgsize_bitmap = SZ_4K,
#ifdef CONFIG_INTEL_IOMMU_SVM
- .cache_invalidate = intel_iommu_sva_invalidate,
- .sva_bind_gpasid = intel_svm_bind_gpasid,
- .sva_unbind_gpasid = intel_svm_unbind_gpasid,
.sva_bind = intel_svm_bind,
.sva_unbind = intel_svm_unbind,
.sva_get_pasid = intel_svm_get_pasid,
.page_response = intel_svm_page_response,
#endif
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = intel_iommu_attach_device,
+ .detach_dev = intel_iommu_detach_device,
+ .map_pages = intel_iommu_map_pages,
+ .unmap_pages = intel_iommu_unmap_pages,
+ .iotlb_sync_map = intel_iommu_iotlb_sync_map,
+ .flush_iotlb_all = intel_flush_iotlb_all,
+ .iotlb_sync = intel_iommu_tlb_sync,
+ .iova_to_phys = intel_iommu_iova_to_phys,
+ .free = intel_iommu_domain_free,
+ }
};
static void quirk_iommu_igfx(struct pci_dev *dev)
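The intel/iommu.c hunks above drop the driver-private get_domain_info() lookup in favour of the generic dev_iommu_priv_get()/dev_iommu_priv_set() pair, with struct device_domain_info now allocated in intel_iommu_probe_device() and freed again in intel_iommu_release_device(). A minimal sketch of that probe/release pattern, assuming invented example_* names (only the dev_iommu_priv_*() helpers and the includes are real kernel interfaces):

#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/slab.h>

struct example_dev_info {
        struct device *dev;
        u8 bus;
        u8 devfn;
};

static struct iommu_device example_iommu;       /* assumed driver-global instance */

static struct iommu_device *example_probe_device(struct device *dev)
{
        struct example_dev_info *info;

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return ERR_PTR(-ENOMEM);

        info->dev = dev;
        dev_iommu_priv_set(dev, info);  /* stash per-device state for later callbacks */
        return &example_iommu;
}

static void example_release_device(struct device *dev)
{
        struct example_dev_info *info = dev_iommu_priv_get(dev);

        dev_iommu_priv_set(dev, NULL);
        kfree(info);
}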
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 07c390aed1fe..f8d215d85695 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -150,7 +150,7 @@ int intel_pasid_alloc_table(struct device *dev)
int size;
might_sleep();
- info = get_domain_info(dev);
+ info = dev_iommu_priv_get(dev);
if (WARN_ON(!info || !dev_is_pci(dev) || info->pasid_table))
return -EINVAL;
@@ -197,7 +197,7 @@ void intel_pasid_free_table(struct device *dev)
struct pasid_entry *table;
int i, max_pde;
- info = get_domain_info(dev);
+ info = dev_iommu_priv_get(dev);
if (!info || !dev_is_pci(dev) || !info->pasid_table)
return;
@@ -223,7 +223,7 @@ struct pasid_table *intel_pasid_get_table(struct device *dev)
{
struct device_domain_info *info;
- info = get_domain_info(dev);
+ info = dev_iommu_priv_get(dev);
if (!info)
return NULL;
@@ -234,7 +234,7 @@ static int intel_pasid_get_dev_max_id(struct device *dev)
{
struct device_domain_info *info;
- info = get_domain_info(dev);
+ info = dev_iommu_priv_get(dev);
if (!info || !info->pasid_table)
return 0;
@@ -254,7 +254,7 @@ static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
return NULL;
dir = pasid_table->table;
- info = get_domain_info(dev);
+ info = dev_iommu_priv_get(dev);
dir_index = pasid >> PASID_PDE_SHIFT;
index = pasid & PASID_PTE_MASK;
@@ -487,7 +487,7 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
struct device_domain_info *info;
u16 sid, qdep, pfsid;
- info = get_domain_info(dev);
+ info = dev_iommu_priv_get(dev);
if (!info || !info->ats_enabled)
return;
@@ -762,164 +762,3 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
return 0;
}
-
-static int
-intel_pasid_setup_bind_data(struct intel_iommu *iommu, struct pasid_entry *pte,
- struct iommu_gpasid_bind_data_vtd *pasid_data)
-{
- /*
- * Not all guest PASID table entry fields are passed down during bind,
- * here we only set up the ones that are dependent on guest settings.
- * Execution related bits such as NXE, SMEP are not supported.
- * Other fields, such as snoop related, are set based on host needs
- * regardless of guest settings.
- */
- if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_SRE) {
- if (!ecap_srs(iommu->ecap)) {
- pr_err_ratelimited("No supervisor request support on %s\n",
- iommu->name);
- return -EINVAL;
- }
- pasid_set_sre(pte);
- /* Enable write protect WP if guest requested */
- if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_WPE)
- pasid_set_wpe(pte);
- }
-
- if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_EAFE) {
- if (!ecap_eafs(iommu->ecap)) {
- pr_err_ratelimited("No extended access flag support on %s\n",
- iommu->name);
- return -EINVAL;
- }
- pasid_set_eafe(pte);
- }
-
- /*
- * Memory type is only applicable to devices inside processor coherent
- * domain. Will add MTS support once coherent devices are available.
- */
- if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_MTS_MASK) {
- pr_warn_ratelimited("No memory type support %s\n",
- iommu->name);
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * intel_pasid_setup_nested() - Set up PASID entry for nested translation.
- * This could be used for guest shared virtual address. In this case, the
- * first level page tables are used for GVA-GPA translation in the guest,
- * second level page tables are used for GPA-HPA translation.
- *
- * @iommu: IOMMU which the device belong to
- * @dev: Device to be set up for translation
- * @gpgd: FLPTPTR: First Level Page translation pointer in GPA
- * @pasid: PASID to be programmed in the device PASID table
- * @pasid_data: Additional PASID info from the guest bind request
- * @domain: Domain info for setting up second level page tables
- * @addr_width: Address width of the first level (guest)
- */
-int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
- pgd_t *gpgd, u32 pasid,
- struct iommu_gpasid_bind_data_vtd *pasid_data,
- struct dmar_domain *domain, int addr_width)
-{
- struct pasid_entry *pte;
- struct dma_pte *pgd;
- int ret = 0;
- u64 pgd_val;
- int agaw;
- u16 did;
-
- if (!ecap_nest(iommu->ecap)) {
- pr_err_ratelimited("IOMMU: %s: No nested translation support\n",
- iommu->name);
- return -EINVAL;
- }
-
- if (!(domain->flags & DOMAIN_FLAG_NESTING_MODE)) {
- pr_err_ratelimited("Domain is not in nesting mode, %x\n",
- domain->flags);
- return -EINVAL;
- }
-
- pte = intel_pasid_get_entry(dev, pasid);
- if (WARN_ON(!pte))
- return -EINVAL;
-
- /*
- * Caller must ensure PASID entry is not in use, i.e. not bind the
- * same PASID to the same device twice.
- */
- if (pasid_pte_is_present(pte))
- return -EBUSY;
-
- pasid_clear_entry(pte);
-
- /* Sanity checking performed by caller to make sure address
- * width matching in two dimensions:
- * 1. CPU vs. IOMMU
- * 2. Guest vs. Host.
- */
- switch (addr_width) {
-#ifdef CONFIG_X86
- case ADDR_WIDTH_5LEVEL:
- if (!cpu_feature_enabled(X86_FEATURE_LA57) ||
- !cap_5lp_support(iommu->cap)) {
- dev_err_ratelimited(dev,
- "5-level paging not supported\n");
- return -EINVAL;
- }
-
- pasid_set_flpm(pte, 1);
- break;
-#endif
- case ADDR_WIDTH_4LEVEL:
- pasid_set_flpm(pte, 0);
- break;
- default:
- dev_err_ratelimited(dev, "Invalid guest address width %d\n",
- addr_width);
- return -EINVAL;
- }
-
- /* First level PGD is in GPA, must be supported by the second level */
- if ((uintptr_t)gpgd > domain->max_addr) {
- dev_err_ratelimited(dev,
- "Guest PGD %lx not supported, max %llx\n",
- (uintptr_t)gpgd, domain->max_addr);
- return -EINVAL;
- }
- pasid_set_flptr(pte, (uintptr_t)gpgd);
-
- ret = intel_pasid_setup_bind_data(iommu, pte, pasid_data);
- if (ret)
- return ret;
-
- /* Setup the second level based on the given domain */
- pgd = domain->pgd;
-
- agaw = iommu_skip_agaw(domain, iommu, &pgd);
- if (agaw < 0) {
- dev_err_ratelimited(dev, "Invalid domain page table\n");
- return -EINVAL;
- }
- pgd_val = virt_to_phys(pgd);
- pasid_set_slptr(pte, pgd_val);
- pasid_set_fault_enable(pte);
-
- did = domain->iommu_did[iommu->seq_id];
- pasid_set_domain_id(pte, did);
-
- pasid_set_address_width(pte, agaw);
- pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
-
- pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED);
- pasid_set_present(pte);
- pasid_flush_caches(iommu, pte, pasid, did);
-
- return ret;
-}
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index d5552e2c160d..ab4408c824a5 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -118,10 +118,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev, u32 pasid);
-int intel_pasid_setup_nested(struct intel_iommu *iommu,
- struct device *dev, pgd_t *pgd, u32 pasid,
- struct iommu_gpasid_bind_data_vtd *pasid_data,
- struct dmar_domain *domain, int addr_width);
void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
struct device *dev, u32 pasid,
bool fault_ignore);
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 5b5d69b04fcc..23a38763c1d1 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -168,11 +168,6 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
return 0;
}
-static inline bool intel_svm_capable(struct intel_iommu *iommu)
-{
- return iommu->flags & VTD_FLAG_SVM_CAPABLE;
-}
-
void intel_svm_check(struct intel_iommu *iommu)
{
if (!pasid_supported(iommu))
@@ -200,7 +195,7 @@ static void __flush_svm_range_dev(struct intel_svm *svm,
unsigned long address,
unsigned long pages, int ih)
{
- struct device_domain_info *info = get_domain_info(sdev->dev);
+ struct device_domain_info *info = dev_iommu_priv_get(sdev->dev);
if (WARN_ON(!pages))
return;
@@ -318,193 +313,6 @@ out:
return 0;
}
-int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
- struct iommu_gpasid_bind_data *data)
-{
- struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
- struct intel_svm_dev *sdev = NULL;
- struct dmar_domain *dmar_domain;
- struct device_domain_info *info;
- struct intel_svm *svm = NULL;
- unsigned long iflags;
- int ret = 0;
-
- if (WARN_ON(!iommu) || !data)
- return -EINVAL;
-
- if (data->format != IOMMU_PASID_FORMAT_INTEL_VTD)
- return -EINVAL;
-
- /* IOMMU core ensures argsz is more than the start of the union */
- if (data->argsz < offsetofend(struct iommu_gpasid_bind_data, vendor.vtd))
- return -EINVAL;
-
- /* Make sure no undefined flags are used in vendor data */
- if (data->vendor.vtd.flags & ~(IOMMU_SVA_VTD_GPASID_LAST - 1))
- return -EINVAL;
-
- if (!dev_is_pci(dev))
- return -ENOTSUPP;
-
- /* VT-d supports devices with full 20 bit PASIDs only */
- if (pci_max_pasids(to_pci_dev(dev)) != PASID_MAX)
- return -EINVAL;
-
- /*
- * We only check host PASID range, we have no knowledge to check
- * guest PASID range.
- */
- if (data->hpasid <= 0 || data->hpasid >= PASID_MAX)
- return -EINVAL;
-
- info = get_domain_info(dev);
- if (!info)
- return -EINVAL;
-
- dmar_domain = to_dmar_domain(domain);
-
- mutex_lock(&pasid_mutex);
- ret = pasid_to_svm_sdev(dev, data->hpasid, &svm, &sdev);
- if (ret)
- goto out;
-
- if (sdev) {
- /*
- * Do not allow multiple bindings of the same device-PASID since
- * there is only one SL page tables per PASID. We may revisit
- * once sharing PGD across domains are supported.
- */
- dev_warn_ratelimited(dev, "Already bound with PASID %u\n",
- svm->pasid);
- ret = -EBUSY;
- goto out;
- }
-
- if (!svm) {
- /* We come here when PASID has never been bond to a device. */
- svm = kzalloc(sizeof(*svm), GFP_KERNEL);
- if (!svm) {
- ret = -ENOMEM;
- goto out;
- }
- /* REVISIT: upper layer/VFIO can track host process that bind
- * the PASID. ioasid_set = mm might be sufficient for vfio to
- * check pasid VMM ownership. We can drop the following line
- * once VFIO and IOASID set check is in place.
- */
- svm->mm = get_task_mm(current);
- svm->pasid = data->hpasid;
- if (data->flags & IOMMU_SVA_GPASID_VAL) {
- svm->gpasid = data->gpasid;
- svm->flags |= SVM_FLAG_GUEST_PASID;
- }
- pasid_private_add(data->hpasid, svm);
- INIT_LIST_HEAD_RCU(&svm->devs);
- mmput(svm->mm);
- }
- sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
- if (!sdev) {
- ret = -ENOMEM;
- goto out;
- }
- sdev->dev = dev;
- sdev->sid = PCI_DEVID(info->bus, info->devfn);
- sdev->iommu = iommu;
-
- /* Only count users if device has aux domains */
- if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
- sdev->users = 1;
-
- /* Set up device context entry for PASID if not enabled already */
- ret = intel_iommu_enable_pasid(iommu, sdev->dev);
- if (ret) {
- dev_err_ratelimited(dev, "Failed to enable PASID capability\n");
- kfree(sdev);
- goto out;
- }
-
- /*
- * PASID table is per device for better security. Therefore, for
- * each bind of a new device even with an existing PASID, we need to
- * call the nested mode setup function here.
- */
- spin_lock_irqsave(&iommu->lock, iflags);
- ret = intel_pasid_setup_nested(iommu, dev,
- (pgd_t *)(uintptr_t)data->gpgd,
- data->hpasid, &data->vendor.vtd, dmar_domain,
- data->addr_width);
- spin_unlock_irqrestore(&iommu->lock, iflags);
- if (ret) {
- dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n",
- data->hpasid, ret);
- /*
- * PASID entry should be in cleared state if nested mode
- * set up failed. So we only need to clear IOASID tracking
- * data such that free call will succeed.
- */
- kfree(sdev);
- goto out;
- }
-
- svm->flags |= SVM_FLAG_GUEST_MODE;
-
- init_rcu_head(&sdev->rcu);
- list_add_rcu(&sdev->list, &svm->devs);
- out:
- if (!IS_ERR_OR_NULL(svm) && list_empty(&svm->devs)) {
- pasid_private_remove(data->hpasid);
- kfree(svm);
- }
-
- mutex_unlock(&pasid_mutex);
- return ret;
-}
-
-int intel_svm_unbind_gpasid(struct device *dev, u32 pasid)
-{
- struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
- struct intel_svm_dev *sdev;
- struct intel_svm *svm;
- int ret;
-
- if (WARN_ON(!iommu))
- return -EINVAL;
-
- mutex_lock(&pasid_mutex);
- ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
- if (ret)
- goto out;
-
- if (sdev) {
- if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
- sdev->users--;
- if (!sdev->users) {
- list_del_rcu(&sdev->list);
- intel_pasid_tear_down_entry(iommu, dev,
- svm->pasid, false);
- intel_svm_drain_prq(dev, svm->pasid);
- kfree_rcu(sdev, rcu);
-
- if (list_empty(&svm->devs)) {
- /*
- * We do not free the IOASID here in that
- * IOMMU driver did not allocate it.
- * Unlike native SVM, IOASID for guest use was
- * allocated prior to the bind call.
- * In any case, if the free call comes before
- * the unbind, IOMMU driver will get notified
- * and perform cleanup.
- */
- pasid_private_remove(pasid);
- kfree(svm);
- }
- }
- }
-out:
- mutex_unlock(&pasid_mutex);
- return ret;
-}
-
static int intel_svm_alloc_pasid(struct device *dev, struct mm_struct *mm,
unsigned int flags)
{
@@ -514,17 +322,12 @@ static int intel_svm_alloc_pasid(struct device *dev, struct mm_struct *mm,
return iommu_sva_alloc_pasid(mm, PASID_MIN, max_pasid - 1);
}
-static void intel_svm_free_pasid(struct mm_struct *mm)
-{
- iommu_sva_free_pasid(mm);
-}
-
static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
struct device *dev,
struct mm_struct *mm,
unsigned int flags)
{
- struct device_domain_info *info = get_domain_info(dev);
+ struct device_domain_info *info = dev_iommu_priv_get(dev);
unsigned long iflags, sflags;
struct intel_svm_dev *sdev;
struct intel_svm *svm;
@@ -662,8 +465,6 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
kfree(svm);
}
}
- /* Drop a PASID reference and free it if no reference. */
- intel_svm_free_pasid(mm);
}
out:
return ret;
@@ -732,7 +533,7 @@ static void intel_svm_drain_prq(struct device *dev, u32 pasid)
u16 sid, did;
int qdep;
- info = get_domain_info(dev);
+ info = dev_iommu_priv_get(dev);
if (WARN_ON(!info || !dev_is_pci(dev)))
return;
@@ -1047,8 +848,6 @@ struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm, void
}
sva = intel_svm_bind_mm(iommu, dev, mm, flags);
- if (IS_ERR_OR_NULL(sva))
- intel_svm_free_pasid(mm);
mutex_unlock(&pasid_mutex);
return sva;
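The simplified intel_svm_bind() above is still reached through the generic SVA entry points touched later in this diff (iommu_sva_bind_device() and friends). A hedged sketch of a client-driver caller, where everything except the iommu_sva_*() calls and IOMMU_PASID_INVALID is invented:

#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/sched.h>

static int example_enable_sva_for_current(struct device *dev)
{
        struct iommu_sva *handle;
        u32 pasid;

        handle = iommu_sva_bind_device(dev, current->mm, NULL);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        pasid = iommu_sva_get_pasid(handle);
        if (pasid == IOMMU_PASID_INVALID) {
                iommu_sva_unbind_device(handle);
                return -ENODEV;
        }

        /* Program @pasid into the device and submit work, then unbind. */
        iommu_sva_unbind_device(handle);
        return 0;
}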
@@ -1126,28 +925,6 @@ int intel_svm_page_response(struct device *dev,
}
/*
- * For responses from userspace, need to make sure that the
- * pasid has been bound to its mm.
- */
- if (svm->flags & SVM_FLAG_GUEST_MODE) {
- struct mm_struct *mm;
-
- mm = get_task_mm(current);
- if (!mm) {
- ret = -EINVAL;
- goto out;
- }
-
- if (mm != svm->mm) {
- ret = -ENODEV;
- mmput(mm);
- goto out;
- }
-
- mmput(mm);
- }
-
- /*
* Per VT-d spec. v3.0 ch7.7, system software must respond
* with page group response if private data is present (PDP)
* or last page in group (LPIG) bit is set. This is an
diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c
index 06fee7416816..a786c034907c 100644
--- a/drivers/iommu/ioasid.c
+++ b/drivers/iommu/ioasid.c
@@ -2,7 +2,7 @@
/*
* I/O Address Space ID allocator. There is one global IOASID space, split into
* subsets. Users create a subset with DECLARE_IOASID_SET, then allocate and
- * free IOASIDs with ioasid_alloc and ioasid_put.
+ * free IOASIDs with ioasid_alloc() and ioasid_free().
*/
#include <linux/ioasid.h>
#include <linux/module.h>
@@ -15,7 +15,6 @@ struct ioasid_data {
struct ioasid_set *set;
void *private;
struct rcu_head rcu;
- refcount_t refs;
};
/*
@@ -315,7 +314,6 @@ ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
data->set = set;
data->private = private;
- refcount_set(&data->refs, 1);
/*
* Custom allocator needs allocator data to perform platform specific
@@ -348,35 +346,11 @@ exit_free:
EXPORT_SYMBOL_GPL(ioasid_alloc);
/**
- * ioasid_get - obtain a reference to the IOASID
- * @ioasid: the ID to get
- */
-void ioasid_get(ioasid_t ioasid)
-{
- struct ioasid_data *ioasid_data;
-
- spin_lock(&ioasid_allocator_lock);
- ioasid_data = xa_load(&active_allocator->xa, ioasid);
- if (ioasid_data)
- refcount_inc(&ioasid_data->refs);
- else
- WARN_ON(1);
- spin_unlock(&ioasid_allocator_lock);
-}
-EXPORT_SYMBOL_GPL(ioasid_get);
-
-/**
- * ioasid_put - Release a reference to an ioasid
+ * ioasid_free - Free an ioasid
* @ioasid: the ID to remove
- *
- * Put a reference to the IOASID, free it when the number of references drops to
- * zero.
- *
- * Return: %true if the IOASID was freed, %false otherwise.
*/
-bool ioasid_put(ioasid_t ioasid)
+void ioasid_free(ioasid_t ioasid)
{
- bool free = false;
struct ioasid_data *ioasid_data;
spin_lock(&ioasid_allocator_lock);
@@ -386,10 +360,6 @@ bool ioasid_put(ioasid_t ioasid)
goto exit_unlock;
}
- free = refcount_dec_and_test(&ioasid_data->refs);
- if (!free)
- goto exit_unlock;
-
active_allocator->ops->free(ioasid, active_allocator->ops->pdata);
/* Custom allocator needs additional steps to free the xa element */
if (active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) {
@@ -399,9 +369,8 @@ bool ioasid_put(ioasid_t ioasid)
exit_unlock:
spin_unlock(&ioasid_allocator_lock);
- return free;
}
-EXPORT_SYMBOL_GPL(ioasid_put);
+EXPORT_SYMBOL_GPL(ioasid_free);
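With the reference count removed, an IOASID has a single owner and the alloc/free calls pair up directly. A short, hedged sketch of the resulting lifecycle (the set name and PASID bounds are illustrative only):

#include <linux/ioasid.h>

static DECLARE_IOASID_SET(example_pasid_set);

static int example_use_pasid(void)
{
        ioasid_t pasid;

        pasid = ioasid_alloc(&example_pasid_set, 1, (1U << 20) - 1, NULL);
        if (pasid == INVALID_IOASID)
                return -ENOMEM;

        /* Hand the PASID to hardware and do the work. */

        ioasid_free(pasid);     /* no ioasid_get()/ioasid_put() pairing any more */
        return 0;
}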
/**
* ioasid_find - Find IOASID data
diff --git a/drivers/iommu/iommu-sva-lib.c b/drivers/iommu/iommu-sva-lib.c
index bd41405d34e9..106506143896 100644
--- a/drivers/iommu/iommu-sva-lib.c
+++ b/drivers/iommu/iommu-sva-lib.c
@@ -18,8 +18,7 @@ static DECLARE_IOASID_SET(iommu_sva_pasid);
*
* Try to allocate a PASID for this mm, or take a reference to the existing one
* provided it fits within the [@min, @max] range. On success the PASID is
- * available in mm->pasid, and must be released with iommu_sva_free_pasid().
- * @min must be greater than 0, because 0 indicates an unused mm->pasid.
+ * available in mm->pasid and will be available for the lifetime of the mm.
*
* Returns 0 on success and < 0 on error.
*/
@@ -33,38 +32,24 @@ int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
return -EINVAL;
mutex_lock(&iommu_sva_lock);
- if (mm->pasid) {
- if (mm->pasid >= min && mm->pasid <= max)
- ioasid_get(mm->pasid);
- else
+ /* Is a PASID already associated with this mm? */
+ if (pasid_valid(mm->pasid)) {
+ if (mm->pasid < min || mm->pasid >= max)
ret = -EOVERFLOW;
- } else {
- pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm);
- if (pasid == INVALID_IOASID)
- ret = -ENOMEM;
- else
- mm->pasid = pasid;
+ goto out;
}
+
+ pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm);
+ if (!pasid_valid(pasid))
+ ret = -ENOMEM;
+ else
+ mm_pasid_set(mm, pasid);
+out:
mutex_unlock(&iommu_sva_lock);
return ret;
}
EXPORT_SYMBOL_GPL(iommu_sva_alloc_pasid);
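After this hunk a successful iommu_sva_alloc_pasid() leaves the PASID in mm->pasid for the remaining lifetime of the mm, and callers no longer balance it with iommu_sva_free_pasid() (removed below); the mm teardown path is assumed to drop it. A hedged caller-side sketch, with the function name and bounds invented:

#include "iommu-sva-lib.h"

static int example_bind_pasid(struct mm_struct *mm, ioasid_t max_pasid)
{
        int ret;

        ret = iommu_sva_alloc_pasid(mm, 1, max_pasid - 1);
        if (ret)
                return ret;

        /*
         * mm->pasid is now valid and stays valid until the mm goes away;
         * there is no explicit free on the unbind path any more.
         */
        return 0;
}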
-/**
- * iommu_sva_free_pasid - Release the mm's PASID
- * @mm: the mm
- *
- * Drop one reference to a PASID allocated with iommu_sva_alloc_pasid()
- */
-void iommu_sva_free_pasid(struct mm_struct *mm)
-{
- mutex_lock(&iommu_sva_lock);
- if (ioasid_put(mm->pasid))
- mm->pasid = 0;
- mutex_unlock(&iommu_sva_lock);
-}
-EXPORT_SYMBOL_GPL(iommu_sva_free_pasid);
-
/* ioasid_find getter() requires a void * argument */
static bool __mmget_not_zero(void *mm)
{
diff --git a/drivers/iommu/iommu-sva-lib.h b/drivers/iommu/iommu-sva-lib.h
index 031155010ca8..8909ea1094e3 100644
--- a/drivers/iommu/iommu-sva-lib.h
+++ b/drivers/iommu/iommu-sva-lib.h
@@ -9,7 +9,6 @@
#include <linux/mm_types.h>
int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max);
-void iommu_sva_free_pasid(struct mm_struct *mm);
struct mm_struct *iommu_sva_find(ioasid_t pasid);
/* I/O Page fault */
@@ -17,7 +16,7 @@ struct device;
struct iommu_fault;
struct iopf_queue;
-#ifdef CONFIG_IOMMU_SVA_LIB
+#ifdef CONFIG_IOMMU_SVA
int iommu_queue_iopf(struct iommu_fault *fault, void *cookie);
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
@@ -28,7 +27,7 @@ struct iopf_queue *iopf_queue_alloc(const char *name);
void iopf_queue_free(struct iopf_queue *queue);
int iopf_queue_discard_partial(struct iopf_queue *queue);
-#else /* CONFIG_IOMMU_SVA_LIB */
+#else /* CONFIG_IOMMU_SVA */
static inline int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
{
return -ENODEV;
@@ -64,5 +63,5 @@ static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
{
return -ENODEV;
}
-#endif /* CONFIG_IOMMU_SVA_LIB */
+#endif /* CONFIG_IOMMU_SVA */
#endif /* _IOMMU_SVA_LIB_H */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 107dcf5938d6..f2c45b85b9fc 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -323,13 +323,14 @@ err_out:
void iommu_release_device(struct device *dev)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ const struct iommu_ops *ops;
if (!dev->iommu)
return;
iommu_device_unlink(dev->iommu->iommu_dev, dev);
+ ops = dev_iommu_ops(dev);
ops->release_device(dev);
iommu_group_remove_device(dev);
@@ -790,9 +791,6 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group,
dma_addr_t start, end, addr;
size_t map_size = 0;
- if (domain->ops->apply_resv_region)
- domain->ops->apply_resv_region(dev, domain, entry);
-
start = ALIGN(entry->start, pg_size);
end = ALIGN(entry->start + entry->length, pg_size);
@@ -833,11 +831,12 @@ out:
return ret;
}
-static bool iommu_is_attach_deferred(struct iommu_domain *domain,
- struct device *dev)
+static bool iommu_is_attach_deferred(struct device *dev)
{
- if (domain->ops->is_attach_deferred)
- return domain->ops->is_attach_deferred(domain, dev);
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
+
+ if (ops->is_attach_deferred)
+ return ops->is_attach_deferred(dev);
return false;
}
@@ -894,7 +893,7 @@ rename:
mutex_lock(&group->mutex);
list_add_tail(&device->list, &group->devices);
- if (group->domain && !iommu_is_attach_deferred(group->domain, dev))
+ if (group->domain && !iommu_is_attach_deferred(dev))
ret = __iommu_attach_device(group->domain, dev);
mutex_unlock(&group->mutex);
if (ret)
@@ -1255,10 +1254,10 @@ int iommu_page_response(struct device *dev,
struct iommu_fault_event *evt;
struct iommu_fault_page_request *prm;
struct dev_iommu *param = dev->iommu;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- if (!domain || !domain->ops->page_response)
+ if (!ops->page_response)
return -ENODEV;
if (!param || !param->fault_param)
@@ -1299,7 +1298,7 @@ int iommu_page_response(struct device *dev,
msg->pasid = 0;
}
- ret = domain->ops->page_response(dev, evt, msg);
+ ret = ops->page_response(dev, evt, msg);
list_del(&evt->list);
kfree(evt);
break;
@@ -1524,7 +1523,7 @@ EXPORT_SYMBOL_GPL(fsl_mc_device_group);
static int iommu_get_def_domain_type(struct device *dev)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
return IOMMU_DOMAIN_DMA;
@@ -1583,7 +1582,7 @@ static int iommu_alloc_default_domain(struct iommu_group *group,
*/
static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
struct iommu_group *group;
int ret;
@@ -1591,9 +1590,6 @@ static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
if (group)
return group;
- if (!ops)
- return ERR_PTR(-EINVAL);
-
group = ops->device_group(dev);
if (WARN_ON_ONCE(group == NULL))
return ERR_PTR(-EINVAL);
@@ -1748,7 +1744,7 @@ static int iommu_group_do_dma_attach(struct device *dev, void *data)
struct iommu_domain *domain = data;
int ret = 0;
- if (!iommu_is_attach_deferred(domain, dev))
+ if (!iommu_is_attach_deferred(dev))
ret = __iommu_attach_device(domain, dev);
return ret;
@@ -1762,10 +1758,10 @@ static int __iommu_group_dma_attach(struct iommu_group *group)
static int iommu_group_do_probe_finalize(struct device *dev, void *data)
{
- struct iommu_domain *domain = data;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
- if (domain->ops->probe_finalize)
- domain->ops->probe_finalize(dev);
+ if (ops->probe_finalize)
+ ops->probe_finalize(dev);
return 0;
}
@@ -1954,10 +1950,11 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
if (!domain)
return NULL;
- domain->ops = bus->iommu_ops;
domain->type = type;
/* Assume all sizes by default; the driver may override this later */
- domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
+ domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
+ if (!domain->ops)
+ domain->ops = bus->iommu_ops->default_domain_ops;
if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) {
iommu_domain_free(domain);
@@ -1975,7 +1972,7 @@ EXPORT_SYMBOL_GPL(iommu_domain_alloc);
void iommu_domain_free(struct iommu_domain *domain)
{
iommu_put_dma_cookie(domain);
- domain->ops->domain_free(domain);
+ domain->ops->free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);
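These core hunks are the consumer side of the ops split: per-device callbacks stay in struct iommu_ops, per-domain callbacks live in struct iommu_domain_ops, a newly allocated domain falls back to bus->iommu_ops->default_domain_ops when the driver has not set domain->ops itself, and teardown now goes through domain->ops->free(). A hedged sketch of the registration shape a driver ends up with (every example_* callback is a placeholder; the intel_iommu_ops table earlier in this diff is a real instance):

static const struct iommu_domain_ops example_domain_ops = {
        .attach_dev     = example_attach_dev,
        .map_pages      = example_map_pages,
        .unmap_pages    = example_unmap_pages,
        .iova_to_phys   = example_iova_to_phys,
        .free           = example_domain_free,
};

static const struct iommu_ops example_iommu_ops = {
        .capable        = example_capable,
        .domain_alloc   = example_domain_alloc,
        .probe_device   = example_probe_device,
        .release_device = example_release_device,
        .device_group   = generic_device_group,
        .pgsize_bitmap  = SZ_4K,
        .default_domain_ops = &example_domain_ops,
};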
@@ -2023,228 +2020,16 @@ EXPORT_SYMBOL_GPL(iommu_attach_device);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
{
- const struct iommu_ops *ops = domain->ops;
-
- if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev))
+ if (iommu_is_attach_deferred(dev))
return __iommu_attach_device(domain, dev);
return 0;
}
-/*
- * Check flags and other user provided data for valid combinations. We also
- * make sure no reserved fields or unused flags are set. This is to ensure
- * not breaking userspace in the future when these fields or flags are used.
- */
-static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info)
-{
- u32 mask;
- int i;
-
- if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
- return -EINVAL;
-
- mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1;
- if (info->cache & ~mask)
- return -EINVAL;
-
- if (info->granularity >= IOMMU_INV_GRANU_NR)
- return -EINVAL;
-
- switch (info->granularity) {
- case IOMMU_INV_GRANU_ADDR:
- if (info->cache & IOMMU_CACHE_INV_TYPE_PASID)
- return -EINVAL;
-
- mask = IOMMU_INV_ADDR_FLAGS_PASID |
- IOMMU_INV_ADDR_FLAGS_ARCHID |
- IOMMU_INV_ADDR_FLAGS_LEAF;
-
- if (info->granu.addr_info.flags & ~mask)
- return -EINVAL;
- break;
- case IOMMU_INV_GRANU_PASID:
- mask = IOMMU_INV_PASID_FLAGS_PASID |
- IOMMU_INV_PASID_FLAGS_ARCHID;
- if (info->granu.pasid_info.flags & ~mask)
- return -EINVAL;
-
- break;
- case IOMMU_INV_GRANU_DOMAIN:
- if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB)
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
-
- /* Check reserved padding fields */
- for (i = 0; i < sizeof(info->padding); i++) {
- if (info->padding[i])
- return -EINVAL;
- }
-
- return 0;
-}
-
-int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev,
- void __user *uinfo)
-{
- struct iommu_cache_invalidate_info inv_info = { 0 };
- u32 minsz;
- int ret;
-
- if (unlikely(!domain->ops->cache_invalidate))
- return -ENODEV;
-
- /*
- * No new spaces can be added before the variable sized union, the
- * minimum size is the offset to the union.
- */
- minsz = offsetof(struct iommu_cache_invalidate_info, granu);
-
- /* Copy minsz from user to get flags and argsz */
- if (copy_from_user(&inv_info, uinfo, minsz))
- return -EFAULT;
-
- /* Fields before the variable size union are mandatory */
- if (inv_info.argsz < minsz)
- return -EINVAL;
-
- /* PASID and address granu require additional info beyond minsz */
- if (inv_info.granularity == IOMMU_INV_GRANU_PASID &&
- inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info))
- return -EINVAL;
-
- if (inv_info.granularity == IOMMU_INV_GRANU_ADDR &&
- inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info))
- return -EINVAL;
-
- /*
- * User might be using a newer UAPI header which has a larger data
- * size, we shall support the existing flags within the current
- * size. Copy the remaining user data _after_ minsz but not more
- * than the current kernel supported size.
- */
- if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz,
- min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz))
- return -EFAULT;
-
- /* Now the argsz is validated, check the content */
- ret = iommu_check_cache_invl_data(&inv_info);
- if (ret)
- return ret;
-
- return domain->ops->cache_invalidate(domain, dev, &inv_info);
-}
-EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate);
-
-static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data)
-{
- u64 mask;
- int i;
-
- if (data->version != IOMMU_GPASID_BIND_VERSION_1)
- return -EINVAL;
-
- /* Check the range of supported formats */
- if (data->format >= IOMMU_PASID_FORMAT_LAST)
- return -EINVAL;
-
- /* Check all flags */
- mask = IOMMU_SVA_GPASID_VAL;
- if (data->flags & ~mask)
- return -EINVAL;
-
- /* Check reserved padding fields */
- for (i = 0; i < sizeof(data->padding); i++) {
- if (data->padding[i])
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int iommu_sva_prepare_bind_data(void __user *udata,
- struct iommu_gpasid_bind_data *data)
-{
- u32 minsz;
-
- /*
- * No new spaces can be added before the variable sized union, the
- * minimum size is the offset to the union.
- */
- minsz = offsetof(struct iommu_gpasid_bind_data, vendor);
-
- /* Copy minsz from user to get flags and argsz */
- if (copy_from_user(data, udata, minsz))
- return -EFAULT;
-
- /* Fields before the variable size union are mandatory */
- if (data->argsz < minsz)
- return -EINVAL;
- /*
- * User might be using a newer UAPI header, we shall let IOMMU vendor
- * driver decide on what size it needs. Since the guest PASID bind data
- * can be vendor specific, larger argsz could be the result of extension
- * for one vendor but it should not affect another vendor.
- * Copy the remaining user data _after_ minsz
- */
- if (copy_from_user((void *)data + minsz, udata + minsz,
- min_t(u32, data->argsz, sizeof(*data)) - minsz))
- return -EFAULT;
-
- return iommu_check_bind_data(data);
-}
-
-int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev,
- void __user *udata)
-{
- struct iommu_gpasid_bind_data data = { 0 };
- int ret;
-
- if (unlikely(!domain->ops->sva_bind_gpasid))
- return -ENODEV;
-
- ret = iommu_sva_prepare_bind_data(udata, &data);
- if (ret)
- return ret;
-
- return domain->ops->sva_bind_gpasid(domain, dev, &data);
-}
-EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid);
-
-int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
- ioasid_t pasid)
-{
- if (unlikely(!domain->ops->sva_unbind_gpasid))
- return -ENODEV;
-
- return domain->ops->sva_unbind_gpasid(dev, pasid);
-}
-EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
-
-int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
- void __user *udata)
-{
- struct iommu_gpasid_bind_data data = { 0 };
- int ret;
-
- if (unlikely(!domain->ops->sva_bind_gpasid))
- return -ENODEV;
-
- ret = iommu_sva_prepare_bind_data(udata, &data);
- if (ret)
- return ret;
-
- return iommu_sva_unbind_gpasid(domain, dev, data.hpasid);
-}
-EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid);
-
static void __iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- if (iommu_is_attach_deferred(domain, dev))
+ if (iommu_is_attach_deferred(dev))
return;
if (unlikely(domain->ops->detach_dev == NULL))
@@ -2458,7 +2243,7 @@ static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot,
gfp_t gfp, size_t *mapped)
{
- const struct iommu_ops *ops = domain->ops;
+ const struct iommu_domain_ops *ops = domain->ops;
size_t pgsize, count;
int ret;
@@ -2481,7 +2266,7 @@ static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
- const struct iommu_ops *ops = domain->ops;
+ const struct iommu_domain_ops *ops = domain->ops;
unsigned long orig_iova = iova;
unsigned int min_pagesz;
size_t orig_size = size;
@@ -2541,7 +2326,7 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
- const struct iommu_ops *ops = domain->ops;
+ const struct iommu_domain_ops *ops = domain->ops;
int ret;
ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
@@ -2570,7 +2355,7 @@ static size_t __iommu_unmap_pages(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather)
{
- const struct iommu_ops *ops = domain->ops;
+ const struct iommu_domain_ops *ops = domain->ops;
size_t pgsize, count;
pgsize = iommu_pgsize(domain, iova, iova, size, &count);
@@ -2583,7 +2368,7 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather)
{
- const struct iommu_ops *ops = domain->ops;
+ const struct iommu_domain_ops *ops = domain->ops;
size_t unmapped_page, unmapped = 0;
unsigned long orig_iova = iova;
unsigned int min_pagesz;
@@ -2659,7 +2444,7 @@ static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot,
gfp_t gfp)
{
- const struct iommu_ops *ops = domain->ops;
+ const struct iommu_domain_ops *ops = domain->ops;
size_t len = 0, mapped = 0;
phys_addr_t start;
unsigned int i = 0;
@@ -2792,17 +2577,17 @@ EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);
void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
- if (ops && ops->get_resv_regions)
+ if (ops->get_resv_regions)
ops->get_resv_regions(dev, list);
}
void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
- if (ops && ops->put_resv_regions)
+ if (ops->put_resv_regions)
ops->put_resv_regions(dev, list);
}
@@ -2959,8 +2744,6 @@ EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
/*
* The device drivers should do the necessary cleanups before calling this.
- * For example, before disabling the aux-domain feature, the device driver
- * should detach all aux-domains. Otherwise, this will return -EBUSY.
*/
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
@@ -2988,50 +2771,6 @@ bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
}
EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
-/*
- * Aux-domain specific attach/detach.
- *
- * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
- * true. Also, as long as domains are attached to a device through this
- * interface, any tries to call iommu_attach_device() should fail
- * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
- * This should make us safe against a device being attached to a guest as a
- * whole while there are still pasid users on it (aux and sva).
- */
-int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
-{
- int ret = -ENODEV;
-
- if (domain->ops->aux_attach_dev)
- ret = domain->ops->aux_attach_dev(domain, dev);
-
- if (!ret)
- trace_attach_device_to_domain(dev);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
-
-void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
-{
- if (domain->ops->aux_detach_dev) {
- domain->ops->aux_detach_dev(domain, dev);
- trace_detach_device_from_domain(dev);
- }
-}
-EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
-
-int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
-{
- int ret = -ENODEV;
-
- if (domain->ops->aux_get_pasid)
- ret = domain->ops->aux_get_pasid(domain, dev);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
-
/**
* iommu_sva_bind_device() - Bind a process address space to a device
* @dev: the device
@@ -3053,9 +2792,9 @@ iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
struct iommu_group *group;
struct iommu_sva *handle = ERR_PTR(-EINVAL);
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
- if (!ops || !ops->sva_bind)
+ if (!ops->sva_bind)
return ERR_PTR(-ENODEV);
group = iommu_group_get(dev);
@@ -3096,9 +2835,9 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
{
struct iommu_group *group;
struct device *dev = handle->dev;
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
- if (!ops || !ops->sva_unbind)
+ if (!ops->sva_unbind)
return;
group = iommu_group_get(dev);
@@ -3115,9 +2854,9 @@ EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
- const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
+ const struct iommu_ops *ops = dev_iommu_ops(handle->dev);
- if (!ops || !ops->sva_get_pasid)
+ if (!ops->sva_get_pasid)
return IOMMU_PASID_INVALID;
return ops->sva_get_pasid(handle);
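Throughout this file the dev->bus->iommu_ops lookups become dev_iommu_ops(dev). The helper itself is introduced elsewhere in this series; as far as these hunks imply, it resolves the ops of the iommu instance that probed the device, roughly along the lines of the approximation below (not copied from the patch):

static inline const struct iommu_ops *dev_iommu_ops_sketch(struct device *dev)
{
        /* Only meaningful once iommu_probe_device() has populated dev->iommu. */
        return dev->iommu->iommu_dev->ops;
}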
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index b28c9435b898..db77aa675145 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -15,13 +15,14 @@
/* The anchor node sits above the top of the usable address space */
#define IOVA_ANCHOR ~0UL
+#define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */
+
static bool iova_rcache_insert(struct iova_domain *iovad,
unsigned long pfn,
unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
unsigned long size,
unsigned long limit_pfn);
-static void init_iova_rcaches(struct iova_domain *iovad);
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
@@ -64,8 +65,6 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
- cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, &iovad->cpuhp_dead);
- init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
@@ -95,10 +94,11 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
cached_iova = to_iova(iovad->cached32_node);
if (free == cached_iova ||
(free->pfn_hi < iovad->dma_32bit_pfn &&
- free->pfn_lo >= cached_iova->pfn_lo)) {
+ free->pfn_lo >= cached_iova->pfn_lo))
iovad->cached32_node = rb_next(&free->node);
+
+ if (free->pfn_lo < iovad->dma_32bit_pfn)
iovad->max32_alloc_size = iovad->dma_32bit_pfn;
- }
cached_iova = to_iova(iovad->cached_node);
if (free->pfn_lo >= cached_iova->pfn_lo)
@@ -488,6 +488,13 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
}
EXPORT_SYMBOL_GPL(free_iova_fast);
+static void iova_domain_free_rcaches(struct iova_domain *iovad)
+{
+ cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
+ &iovad->cpuhp_dead);
+ free_iova_rcaches(iovad);
+}
+
/**
* put_iova_domain - destroys the iova domain
* @iovad: - iova domain in question.
@@ -497,9 +504,9 @@ void put_iova_domain(struct iova_domain *iovad)
{
struct iova *iova, *tmp;
- cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
- &iovad->cpuhp_dead);
- free_iova_rcaches(iovad);
+ if (iovad->rcaches)
+ iova_domain_free_rcaches(iovad);
+
rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
free_iova_mem(iova);
}
@@ -608,6 +615,7 @@ EXPORT_SYMBOL_GPL(reserve_iova);
*/
#define IOVA_MAG_SIZE 128
+#define MAX_GLOBAL_MAGS 32 /* magazines per bin */
struct iova_magazine {
unsigned long size;
@@ -620,6 +628,13 @@ struct iova_cpu_rcache {
struct iova_magazine *prev;
};
+struct iova_rcache {
+ spinlock_t lock;
+ unsigned long depot_size;
+ struct iova_magazine *depot[MAX_GLOBAL_MAGS];
+ struct iova_cpu_rcache __percpu *cpu_rcaches;
+};
+
static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
return kzalloc(sizeof(struct iova_magazine), flags);
@@ -693,28 +708,54 @@ static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
mag->pfns[mag->size++] = pfn;
}
-static void init_iova_rcaches(struct iova_domain *iovad)
+int iova_domain_init_rcaches(struct iova_domain *iovad)
{
- struct iova_cpu_rcache *cpu_rcache;
- struct iova_rcache *rcache;
unsigned int cpu;
- int i;
+ int i, ret;
+
+ iovad->rcaches = kcalloc(IOVA_RANGE_CACHE_MAX_SIZE,
+ sizeof(struct iova_rcache),
+ GFP_KERNEL);
+ if (!iovad->rcaches)
+ return -ENOMEM;
for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+ struct iova_cpu_rcache *cpu_rcache;
+ struct iova_rcache *rcache;
+
rcache = &iovad->rcaches[i];
spin_lock_init(&rcache->lock);
rcache->depot_size = 0;
- rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
- if (WARN_ON(!rcache->cpu_rcaches))
- continue;
+ rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache),
+ cache_line_size());
+ if (!rcache->cpu_rcaches) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
for_each_possible_cpu(cpu) {
cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
+
spin_lock_init(&cpu_rcache->lock);
cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
+ if (!cpu_rcache->loaded || !cpu_rcache->prev) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
}
}
+
+ ret = cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
+ &iovad->cpuhp_dead);
+ if (ret)
+ goto out_err;
+ return 0;
+
+out_err:
+ free_iova_rcaches(iovad);
+ return ret;
}
+EXPORT_SYMBOL_GPL(iova_domain_init_rcaches);
/*
* Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
@@ -831,7 +872,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
{
unsigned int log_size = order_base_2(size);
- if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
+ if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE || !iovad->rcaches)
return 0;
return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
@@ -849,6 +890,8 @@ static void free_iova_rcaches(struct iova_domain *iovad)
for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
rcache = &iovad->rcaches[i];
+ if (!rcache->cpu_rcaches)
+ break;
for_each_possible_cpu(cpu) {
cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
iova_magazine_free(cpu_rcache->loaded);
@@ -858,6 +901,9 @@ static void free_iova_rcaches(struct iova_domain *iovad)
for (j = 0; j < rcache->depot_size; ++j)
iova_magazine_free(rcache->depot[j]);
}
+
+ kfree(iovad->rcaches);
+ iovad->rcaches = NULL;
}
/*
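
A minimal sketch of how a user of the iova allocator pairs the two-step setup introduced above: init_iova_domain() no longer touches the rcaches, and iova_domain_init_rcaches() is the new, fallible step. The granule and the function name are illustrative.

#include <linux/iova.h>
#include <linux/sizes.h>

static int example_iova_setup(struct iova_domain *iovad, unsigned long start_pfn)
{
	int ret;

	/* rbtree/granule setup only; no per-CPU cache allocation here. */
	init_iova_domain(iovad, SZ_4K, start_pfn);

	/* Opt in to the per-CPU rcaches; this can now fail cleanly. */
	ret = iova_domain_init_rcaches(iovad);
	if (ret)
		return ret;

	/*
	 * ... alloc_iova_fast()/free_iova_fast() as before; put_iova_domain()
	 * tears down the rcaches only if they were initialized.
	 */
	return 0;
}
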
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index ca752bdc710f..8fdb84b3642b 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -719,6 +719,7 @@ static int ipmmu_init_platform_device(struct device *dev,
static const struct soc_device_attribute soc_needs_opt_in[] = {
{ .family = "R-Car Gen3", },
+ { .family = "R-Car Gen4", },
{ .family = "RZ/G2", },
{ /* sentinel */ }
};
@@ -743,7 +744,7 @@ static bool ipmmu_device_is_allowed(struct device *dev)
unsigned int i;
/*
- * R-Car Gen3 and RZ/G2 use the allow list to opt-in devices.
+ * R-Car Gen3/4 and RZ/G2 use the allow list to opt-in devices.
* For Other SoCs, this returns true anyway.
*/
if (!soc_device_match(soc_needs_opt_in))
@@ -868,14 +869,6 @@ static struct iommu_group *ipmmu_find_group(struct device *dev)
static const struct iommu_ops ipmmu_ops = {
.domain_alloc = ipmmu_domain_alloc,
- .domain_free = ipmmu_domain_free,
- .attach_dev = ipmmu_attach_device,
- .detach_dev = ipmmu_detach_device,
- .map = ipmmu_map,
- .unmap = ipmmu_unmap,
- .flush_iotlb_all = ipmmu_flush_iotlb_all,
- .iotlb_sync = ipmmu_iotlb_sync,
- .iova_to_phys = ipmmu_iova_to_phys,
.probe_device = ipmmu_probe_device,
.release_device = ipmmu_release_device,
.probe_finalize = ipmmu_probe_finalize,
@@ -883,6 +876,16 @@ static const struct iommu_ops ipmmu_ops = {
? generic_device_group : ipmmu_find_group,
.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
.of_xlate = ipmmu_of_xlate,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = ipmmu_attach_device,
+ .detach_dev = ipmmu_detach_device,
+ .map = ipmmu_map,
+ .unmap = ipmmu_unmap,
+ .flush_iotlb_all = ipmmu_flush_iotlb_all,
+ .iotlb_sync = ipmmu_iotlb_sync,
+ .iova_to_phys = ipmmu_iova_to_phys,
+ .free = ipmmu_domain_free,
+ }
};
/* -----------------------------------------------------------------------------
@@ -926,7 +929,7 @@ static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
.utlb_offset_base = 0,
};
-static const struct ipmmu_features ipmmu_features_r8a779a0 = {
+static const struct ipmmu_features ipmmu_features_rcar_gen4 = {
.use_ns_alias_offset = false,
.has_cache_leaf_nodes = true,
.number_of_contexts = 16,
@@ -982,7 +985,10 @@ static const struct of_device_id ipmmu_of_ids[] = {
.data = &ipmmu_features_rcar_gen3,
}, {
.compatible = "renesas,ipmmu-r8a779a0",
- .data = &ipmmu_features_r8a779a0,
+ .data = &ipmmu_features_rcar_gen4,
+ }, {
+ .compatible = "renesas,rcar-gen4-ipmmu",
+ .data = &ipmmu_features_rcar_gen4,
}, {
/* Terminator */
},
@@ -1006,7 +1012,9 @@ static int ipmmu_probe(struct platform_device *pdev)
bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
mmu->features = of_device_get_match_data(&pdev->dev);
memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+ if (ret)
+ return ret;
/* Map I/O memory and request IRQ. */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
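
The same conversion is applied to every driver below, so the overall shape is worth spelling out once: device-level callbacks stay in struct iommu_ops, the per-domain callbacks move into a default_domain_ops table of struct iommu_domain_ops, and .domain_free becomes the domain op .free. A compile-oriented sketch with placeholder foo_* stubs, not any particular driver:

#include <linux/iommu.h>
#include <linux/sizes.h>

static struct iommu_domain *foo_domain_alloc(unsigned int type)
{
	return NULL;	/* allocation elided in this sketch */
}

static void foo_domain_free(struct iommu_domain *domain)
{
}

static int foo_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	return 0;
}

static int foo_map(struct iommu_domain *domain, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	return 0;
}

static size_t foo_unmap(struct iommu_domain *domain, unsigned long iova,
			size_t size, struct iommu_iotlb_gather *gather)
{
	return size;
}

static phys_addr_t foo_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}

static struct iommu_device *foo_probe_device(struct device *dev)
{
	return ERR_PTR(-ENODEV);
}

static void foo_release_device(struct device *dev)
{
}

static const struct iommu_ops foo_iommu_ops = {
	.domain_alloc	= foo_domain_alloc,	/* device/instance level */
	.probe_device	= foo_probe_device,
	.release_device	= foo_release_device,
	.device_group	= generic_device_group,
	.pgsize_bitmap	= SZ_4K,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= foo_attach_dev,	/* domain level */
		.map		= foo_map,
		.unmap		= foo_unmap,
		.iova_to_phys	= foo_iova_to_phys,
		.free		= foo_domain_free,	/* was .domain_free */
	}
};
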
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 3a38352b603f..50f57624610f 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -558,11 +558,6 @@ fail:
return ret;
}
-static bool msm_iommu_capable(enum iommu_cap cap)
-{
- return false;
-}
-
static void print_ctx_regs(void __iomem *base, int ctx)
{
unsigned int fsr = GET_FSR(base, ctx);
@@ -672,27 +667,28 @@ fail:
}
static struct iommu_ops msm_iommu_ops = {
- .capable = msm_iommu_capable,
.domain_alloc = msm_iommu_domain_alloc,
- .domain_free = msm_iommu_domain_free,
- .attach_dev = msm_iommu_attach_dev,
- .detach_dev = msm_iommu_detach_dev,
- .map = msm_iommu_map,
- .unmap = msm_iommu_unmap,
- /*
- * Nothing is needed here, the barrier to guarantee
- * completion of the tlb sync operation is implicitly
- * taken care when the iommu client does a writel before
- * kick starting the other master.
- */
- .iotlb_sync = NULL,
- .iotlb_sync_map = msm_iommu_sync_map,
- .iova_to_phys = msm_iommu_iova_to_phys,
.probe_device = msm_iommu_probe_device,
.release_device = msm_iommu_release_device,
.device_group = generic_device_group,
.pgsize_bitmap = MSM_IOMMU_PGSIZES,
.of_xlate = qcom_iommu_of_xlate,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = msm_iommu_attach_dev,
+ .detach_dev = msm_iommu_detach_dev,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+ /*
+ * Nothing is needed here, the barrier to guarantee
+ * completion of the tlb sync operation is implicitly
+ * taken care when the iommu client does a writel before
+ * kick starting the other master.
+ */
+ .iotlb_sync = NULL,
+ .iotlb_sync_map = msm_iommu_sync_map,
+ .iova_to_phys = msm_iommu_iova_to_phys,
+ .free = msm_iommu_domain_free,
+ }
};
static int msm_iommu_probe(struct platform_device *pdev)
@@ -710,36 +706,32 @@ static int msm_iommu_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&iommu->ctx_list);
iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
- if (IS_ERR(iommu->pclk)) {
- dev_err(iommu->dev, "could not get smmu_pclk\n");
- return PTR_ERR(iommu->pclk);
- }
+ if (IS_ERR(iommu->pclk))
+ return dev_err_probe(iommu->dev, PTR_ERR(iommu->pclk),
+ "could not get smmu_pclk\n");
ret = clk_prepare(iommu->pclk);
- if (ret) {
- dev_err(iommu->dev, "could not prepare smmu_pclk\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(iommu->dev, ret,
+ "could not prepare smmu_pclk\n");
iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
if (IS_ERR(iommu->clk)) {
- dev_err(iommu->dev, "could not get iommu_clk\n");
clk_unprepare(iommu->pclk);
- return PTR_ERR(iommu->clk);
+ return dev_err_probe(iommu->dev, PTR_ERR(iommu->clk),
+ "could not get iommu_clk\n");
}
ret = clk_prepare(iommu->clk);
if (ret) {
- dev_err(iommu->dev, "could not prepare iommu_clk\n");
clk_unprepare(iommu->pclk);
- return ret;
+ return dev_err_probe(iommu->dev, ret, "could not prepare iommu_clk\n");
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iommu->base = devm_ioremap_resource(iommu->dev, r);
if (IS_ERR(iommu->base)) {
- dev_err(iommu->dev, "could not get iommu base\n");
- ret = PTR_ERR(iommu->base);
+ ret = dev_err_probe(iommu->dev, PTR_ERR(iommu->base), "could not get iommu base\n");
goto fail;
}
ioaddr = r->start;
@@ -831,16 +823,4 @@ static struct platform_driver msm_iommu_driver = {
.probe = msm_iommu_probe,
.remove = msm_iommu_remove,
};
-
-static int __init msm_iommu_driver_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&msm_iommu_driver);
- if (ret != 0)
- pr_err("Failed to register IOMMU driver\n");
-
- return ret;
-}
-subsys_initcall(msm_iommu_driver_init);
-
+builtin_platform_driver(msm_iommu_driver);
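
The clock handling above is converted to dev_err_probe(), which folds the error message and the return value into one call; for -EPROBE_DEFER it logs at debug level and records the deferral reason instead of printing an error. A minimal sketch, with the clock name illustrative:

#include <linux/clk.h>
#include <linux/device.h>

static int example_get_clock(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, "iommu_clk");
	if (IS_ERR(*clk))
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "could not get iommu_clk\n");
	return 0;
}
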
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 25b834104790..6fd75a60abd6 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -210,33 +210,27 @@ static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
{
- for_each_m4u(data) {
- if (pm_runtime_get_if_in_use(data->dev) <= 0)
- continue;
-
- writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
- data->base + data->plat_data->inv_sel_reg);
- writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
- wmb(); /* Make sure the tlb flush all done */
+ unsigned long flags;
- pm_runtime_put(data->dev);
- }
+ spin_lock_irqsave(&data->tlb_lock, flags);
+ writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
+ data->base + data->plat_data->inv_sel_reg);
+ writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
+ wmb(); /* Make sure the tlb flush all done */
+ spin_unlock_irqrestore(&data->tlb_lock, flags);
}
static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
size_t granule,
struct mtk_iommu_data *data)
{
- bool has_pm = !!data->dev->pm_domain;
unsigned long flags;
int ret;
u32 tmp;
for_each_m4u(data) {
- if (has_pm) {
- if (pm_runtime_get_if_in_use(data->dev) <= 0)
- continue;
- }
+ if (pm_runtime_get_if_in_use(data->dev) <= 0)
+ continue;
spin_lock_irqsave(&data->tlb_lock, flags);
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
@@ -252,17 +246,18 @@ static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
/* tlb sync */
ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
tmp, tmp != 0, 10, 1000);
+
+ /* Clear the CPE status */
+ writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
+ spin_unlock_irqrestore(&data->tlb_lock, flags);
+
if (ret) {
dev_warn(data->dev,
"Partial TLB flush timed out, falling back to full flush\n");
mtk_iommu_tlb_flush_all(data);
}
- /* Clear the CPE status */
- writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
- spin_unlock_irqrestore(&data->tlb_lock, flags);
- if (has_pm)
- pm_runtime_put(data->dev);
+ pm_runtime_put(data->dev);
}
}
@@ -562,22 +557,52 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct mtk_iommu_data *data;
+ struct device_link *link;
+ struct device *larbdev;
+ unsigned int larbid, larbidx, i;
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
return ERR_PTR(-ENODEV); /* Not a iommu client device */
data = dev_iommu_priv_get(dev);
+	/*
+	 * Link the consumer device with the smi-larb device (supplier).
+	 * The device connected to each larb is an independent HW.
+	 * All the ports in one device should be in the same larb.
+	 */
+ larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
+ for (i = 1; i < fwspec->num_ids; i++) {
+ larbidx = MTK_M4U_TO_LARB(fwspec->ids[i]);
+ if (larbid != larbidx) {
+ dev_err(dev, "Can only use one larb. Fail@larb%d-%d.\n",
+ larbid, larbidx);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+ larbdev = data->larb_imu[larbid].dev;
+ link = device_link_add(dev, larbdev,
+ DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
+ if (!link)
+ dev_err(dev, "Unable to link %s\n", dev_name(larbdev));
return &data->iommu;
}
static void mtk_iommu_release_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct mtk_iommu_data *data;
+ struct device *larbdev;
+ unsigned int larbid;
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
return;
+ data = dev_iommu_priv_get(dev);
+ larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
+ larbdev = data->larb_imu[larbid].dev;
+ device_link_remove(dev, larbdev);
+
iommu_fwspec_free(dev);
}
@@ -658,15 +683,6 @@ static void mtk_iommu_get_resv_regions(struct device *dev,
static const struct iommu_ops mtk_iommu_ops = {
.domain_alloc = mtk_iommu_domain_alloc,
- .domain_free = mtk_iommu_domain_free,
- .attach_dev = mtk_iommu_attach_device,
- .detach_dev = mtk_iommu_detach_device,
- .map = mtk_iommu_map,
- .unmap = mtk_iommu_unmap,
- .flush_iotlb_all = mtk_iommu_flush_iotlb_all,
- .iotlb_sync = mtk_iommu_iotlb_sync,
- .iotlb_sync_map = mtk_iommu_sync_map,
- .iova_to_phys = mtk_iommu_iova_to_phys,
.probe_device = mtk_iommu_probe_device,
.release_device = mtk_iommu_release_device,
.device_group = mtk_iommu_device_group,
@@ -675,6 +691,17 @@ static const struct iommu_ops mtk_iommu_ops = {
.put_resv_regions = generic_iommu_put_resv_regions,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
.owner = THIS_MODULE,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = mtk_iommu_attach_device,
+ .detach_dev = mtk_iommu_detach_device,
+ .map = mtk_iommu_map,
+ .unmap = mtk_iommu_unmap,
+ .flush_iotlb_all = mtk_iommu_flush_iotlb_all,
+ .iotlb_sync = mtk_iommu_iotlb_sync,
+ .iotlb_sync_map = mtk_iommu_sync_map,
+ .iova_to_phys = mtk_iommu_iova_to_phys,
+ .free = mtk_iommu_domain_free,
+ }
};
static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
@@ -848,12 +875,16 @@ static int mtk_iommu_probe(struct platform_device *pdev)
plarbdev = of_find_device_by_node(larbnode);
if (!plarbdev) {
of_node_put(larbnode);
+ return -ENODEV;
+ }
+ if (!plarbdev->dev.driver) {
+ of_node_put(larbnode);
return -EPROBE_DEFER;
}
data->larb_imu[id].dev = &plarbdev->dev;
- component_match_add_release(dev, &match, release_of,
- compare_of, larbnode);
+ component_match_add_release(dev, &match, component_release_of,
+ component_compare_of, larbnode);
}
/* Get smi-common dev from the last larb. */
@@ -980,6 +1011,13 @@ static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK, base + REG_MMU_PT_BASE_ADDR);
+
+ /*
+ * Users may allocate dma buffer before they call pm_runtime_get,
+ * in which case it will lack the necessary tlb flush.
+ * Thus, make sure to update the tlb after each PM resume.
+ */
+ mtk_iommu_tlb_flush_all(data);
return 0;
}
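
probe_device/release_device above now tie the consumer to its smi-larb (the supplier that gates the IOMMU ports) via a device link: DL_FLAG_PM_RUNTIME propagates runtime PM from consumer to larb, and DL_FLAG_STATELESS leaves the link's lifetime to the driver, which is why release_device removes it explicitly. A minimal sketch, names illustrative:

#include <linux/device.h>
#include <linux/errno.h>

static int example_link_larb(struct device *consumer, struct device *larbdev)
{
	struct device_link *link;

	link = device_link_add(consumer, larbdev,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
	if (!link) {
		dev_err(consumer, "Unable to link %s\n", dev_name(larbdev));
		return -EINVAL;
	}

	/* On release: device_link_remove(consumer, larbdev); */
	return 0;
}
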
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index f81fa8862ed0..b742432220c5 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -84,16 +84,6 @@ struct mtk_iommu_data {
struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX];
};
-static inline int compare_of(struct device *dev, void *data)
-{
- return dev->of_node == data;
-}
-
-static inline void release_of(struct device *dev, void *data)
-{
- of_node_put(data);
-}
-
static inline int mtk_iommu_bind(struct device *dev)
{
struct mtk_iommu_data *data = dev_get_drvdata(dev);
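
The open-coded compare_of()/release_of() helpers deleted above have generic equivalents in the component framework, which both mtk_iommu probes now use. A minimal sketch of the match setup; the of_node variable is illustrative:

#include <linux/component.h>
#include <linux/of.h>

static void example_add_match(struct device *dev,
			      struct component_match **match,
			      struct device_node *larbnode)
{
	/*
	 * component_compare_of() matches on dev->of_node == larbnode;
	 * component_release_of() drops the of_node reference when the
	 * match is released.
	 */
	component_match_add_release(dev, match, component_release_of,
				    component_compare_of, larbnode);
}
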
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index be22fcf988ce..ecff800656e6 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -423,7 +423,18 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct of_phandle_args iommu_spec;
struct mtk_iommu_data *data;
- int err, idx = 0;
+ int err, idx = 0, larbid, larbidx;
+ struct device_link *link;
+ struct device *larbdev;
+
+ /*
+	 * In the deferred case, free the existing fwspec.
+ * Always initialize the fwspec internally.
+ */
+ if (fwspec) {
+ iommu_fwspec_free(dev);
+ fwspec = dev_iommu_fwspec_get(dev);
+ }
while (!of_parse_phandle_with_args(dev->of_node, "iommus",
"#iommu-cells",
@@ -444,6 +455,23 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
data = dev_iommu_priv_get(dev);
+	/* Link the consumer device with the smi-larb device (supplier) */
+ larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
+ for (idx = 1; idx < fwspec->num_ids; idx++) {
+ larbidx = mt2701_m4u_to_larb(fwspec->ids[idx]);
+ if (larbid != larbidx) {
+ dev_err(dev, "Can only use one larb. Fail@larb%d-%d.\n",
+ larbid, larbidx);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ larbdev = data->larb_imu[larbid].dev;
+ link = device_link_add(dev, larbdev,
+ DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
+ if (!link)
+ dev_err(dev, "Unable to link %s\n", dev_name(larbdev));
+
return &data->iommu;
}
@@ -464,10 +492,18 @@ static void mtk_iommu_probe_finalize(struct device *dev)
static void mtk_iommu_release_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct mtk_iommu_data *data;
+ struct device *larbdev;
+ unsigned int larbid;
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
return;
+ data = dev_iommu_priv_get(dev);
+ larbid = mt2701_m4u_to_larb(fwspec->ids[0]);
+ larbdev = data->larb_imu[larbid].dev;
+ device_link_remove(dev, larbdev);
+
iommu_fwspec_free(dev);
}
@@ -514,12 +550,6 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
static const struct iommu_ops mtk_iommu_ops = {
.domain_alloc = mtk_iommu_domain_alloc,
- .domain_free = mtk_iommu_domain_free,
- .attach_dev = mtk_iommu_attach_device,
- .detach_dev = mtk_iommu_detach_device,
- .map = mtk_iommu_map,
- .unmap = mtk_iommu_unmap,
- .iova_to_phys = mtk_iommu_iova_to_phys,
.probe_device = mtk_iommu_probe_device,
.probe_finalize = mtk_iommu_probe_finalize,
.release_device = mtk_iommu_release_device,
@@ -527,6 +557,14 @@ static const struct iommu_ops mtk_iommu_ops = {
.device_group = generic_device_group,
.pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT,
.owner = THIS_MODULE,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = mtk_iommu_attach_device,
+ .detach_dev = mtk_iommu_detach_device,
+ .map = mtk_iommu_map,
+ .unmap = mtk_iommu_unmap,
+ .iova_to_phys = mtk_iommu_iova_to_phys,
+ .free = mtk_iommu_domain_free,
+ }
};
static const struct of_device_id mtk_iommu_of_ids[] = {
@@ -595,12 +633,16 @@ static int mtk_iommu_probe(struct platform_device *pdev)
plarbdev = of_find_device_by_node(larbnode);
if (!plarbdev) {
of_node_put(larbnode);
+ return -ENODEV;
+ }
+ if (!plarbdev->dev.driver) {
+ of_node_put(larbnode);
return -EPROBE_DEFER;
}
data->larb_imu[i].dev = &plarbdev->dev;
- component_match_add_release(dev, &match, release_of,
- compare_of, larbnode);
+ component_match_add_release(dev, &match, component_release_of,
+ component_compare_of, larbnode);
}
platform_set_drvdata(pdev, data);
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 980e4af3f06b..4aab631ef517 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1734,16 +1734,18 @@ static struct iommu_group *omap_iommu_device_group(struct device *dev)
static const struct iommu_ops omap_iommu_ops = {
.domain_alloc = omap_iommu_domain_alloc,
- .domain_free = omap_iommu_domain_free,
- .attach_dev = omap_iommu_attach_dev,
- .detach_dev = omap_iommu_detach_dev,
- .map = omap_iommu_map,
- .unmap = omap_iommu_unmap,
- .iova_to_phys = omap_iommu_iova_to_phys,
.probe_device = omap_iommu_probe_device,
.release_device = omap_iommu_release_device,
.device_group = omap_iommu_device_group,
.pgsize_bitmap = OMAP_IOMMU_PGSIZES,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = omap_iommu_attach_dev,
+ .detach_dev = omap_iommu_detach_dev,
+ .map = omap_iommu_map,
+ .unmap = omap_iommu_unmap,
+ .iova_to_phys = omap_iommu_iova_to_phys,
+ .free = omap_iommu_domain_free,
+ }
};
static int __init omap_iommu_init(void)
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 7f23ad61c094..ab57c4b8fade 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1187,17 +1187,19 @@ static int rk_iommu_of_xlate(struct device *dev,
static const struct iommu_ops rk_iommu_ops = {
.domain_alloc = rk_iommu_domain_alloc,
- .domain_free = rk_iommu_domain_free,
- .attach_dev = rk_iommu_attach_device,
- .detach_dev = rk_iommu_detach_device,
- .map = rk_iommu_map,
- .unmap = rk_iommu_unmap,
.probe_device = rk_iommu_probe_device,
.release_device = rk_iommu_release_device,
- .iova_to_phys = rk_iommu_iova_to_phys,
.device_group = rk_iommu_device_group,
.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
.of_xlate = rk_iommu_of_xlate,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = rk_iommu_attach_device,
+ .detach_dev = rk_iommu_detach_device,
+ .map = rk_iommu_map,
+ .unmap = rk_iommu_unmap,
+ .iova_to_phys = rk_iommu_iova_to_phys,
+ .free = rk_iommu_domain_free,
+ }
};
static int rk_iommu_probe(struct platform_device *pdev)
@@ -1407,9 +1409,4 @@ static struct platform_driver rk_iommu_driver = {
.suppress_bind_attrs = true,
},
};
-
-static int __init rk_iommu_init(void)
-{
- return platform_driver_register(&rk_iommu_driver);
-}
-subsys_initcall(rk_iommu_init);
+builtin_platform_driver(rk_iommu_driver);
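
Both msm and rockchip above replace a hand-rolled initcall wrapper with builtin_platform_driver(), which registers the driver from a built-in initcall and drops the custom error message (note the registration moves from subsys_initcall to device_initcall level). A sketch with an illustrative driver:

#include <linux/init.h>
#include <linux/platform_device.h>

static int example_iommu_probe(struct platform_device *pdev)
{
	return 0;	/* hardware setup elided */
}

static struct platform_driver example_iommu_driver = {
	.probe	= example_iommu_probe,
	.driver	= {
		.name = "example-iommu",
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver(example_iommu_driver);
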
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 50860ebdd087..3833e86c6e7b 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -363,16 +363,18 @@ void zpci_destroy_iommu(struct zpci_dev *zdev)
static const struct iommu_ops s390_iommu_ops = {
.capable = s390_iommu_capable,
.domain_alloc = s390_domain_alloc,
- .domain_free = s390_domain_free,
- .attach_dev = s390_iommu_attach_device,
- .detach_dev = s390_iommu_detach_device,
- .map = s390_iommu_map,
- .unmap = s390_iommu_unmap,
- .iova_to_phys = s390_iommu_iova_to_phys,
.probe_device = s390_iommu_probe_device,
.release_device = s390_iommu_release_device,
.device_group = generic_device_group,
.pgsize_bitmap = S390_IOMMU_PGSIZES,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = s390_iommu_attach_device,
+ .detach_dev = s390_iommu_detach_device,
+ .map = s390_iommu_map,
+ .unmap = s390_iommu_unmap,
+ .iova_to_phys = s390_iommu_iova_to_phys,
+ .free = s390_domain_free,
+ }
};
static int __init s390_iommu_init(void)
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index 27ac818b0354..bd409bab6286 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -416,20 +416,22 @@ static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
static const struct iommu_ops sprd_iommu_ops = {
.domain_alloc = sprd_iommu_domain_alloc,
- .domain_free = sprd_iommu_domain_free,
- .attach_dev = sprd_iommu_attach_device,
- .detach_dev = sprd_iommu_detach_device,
- .map = sprd_iommu_map,
- .unmap = sprd_iommu_unmap,
- .iotlb_sync_map = sprd_iommu_sync_map,
- .iotlb_sync = sprd_iommu_sync,
- .iova_to_phys = sprd_iommu_iova_to_phys,
.probe_device = sprd_iommu_probe_device,
.release_device = sprd_iommu_release_device,
.device_group = sprd_iommu_device_group,
.of_xlate = sprd_iommu_of_xlate,
.pgsize_bitmap = ~0UL << SPRD_IOMMU_PAGE_SHIFT,
.owner = THIS_MODULE,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = sprd_iommu_attach_device,
+ .detach_dev = sprd_iommu_detach_device,
+ .map = sprd_iommu_map,
+ .unmap = sprd_iommu_unmap,
+ .iotlb_sync_map = sprd_iommu_sync_map,
+ .iotlb_sync = sprd_iommu_sync,
+ .iova_to_phys = sprd_iommu_iova_to_phys,
+ .free = sprd_iommu_domain_free,
+ }
};
static const struct of_device_id sprd_iommu_of_match[] = {
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index 92997021e188..c54ab477b8fd 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -760,19 +760,21 @@ static int sun50i_iommu_of_xlate(struct device *dev,
static const struct iommu_ops sun50i_iommu_ops = {
.pgsize_bitmap = SZ_4K,
- .attach_dev = sun50i_iommu_attach_device,
- .detach_dev = sun50i_iommu_detach_device,
.device_group = sun50i_iommu_device_group,
.domain_alloc = sun50i_iommu_domain_alloc,
- .domain_free = sun50i_iommu_domain_free,
- .flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
- .iotlb_sync = sun50i_iommu_iotlb_sync,
- .iova_to_phys = sun50i_iommu_iova_to_phys,
- .map = sun50i_iommu_map,
.of_xlate = sun50i_iommu_of_xlate,
.probe_device = sun50i_iommu_probe_device,
.release_device = sun50i_iommu_release_device,
- .unmap = sun50i_iommu_unmap,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = sun50i_iommu_attach_device,
+ .detach_dev = sun50i_iommu_detach_device,
+ .flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
+ .iotlb_sync = sun50i_iommu_iotlb_sync,
+ .iova_to_phys = sun50i_iommu_iova_to_phys,
+ .map = sun50i_iommu_map,
+ .unmap = sun50i_iommu_unmap,
+ .free = sun50i_iommu_domain_free,
+ }
};
static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 6a358f92c7e5..a6700a40a6f8 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -238,11 +238,6 @@ static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
return pte & GART_PAGE_MASK;
}
-static bool gart_iommu_capable(enum iommu_cap cap)
-{
- return false;
-}
-
static struct iommu_device *gart_iommu_probe_device(struct device *dev)
{
if (!dev_iommu_fwspec_get(dev))
@@ -276,21 +271,22 @@ static void gart_iommu_sync(struct iommu_domain *domain,
}
static const struct iommu_ops gart_iommu_ops = {
- .capable = gart_iommu_capable,
.domain_alloc = gart_iommu_domain_alloc,
- .domain_free = gart_iommu_domain_free,
- .attach_dev = gart_iommu_attach_dev,
- .detach_dev = gart_iommu_detach_dev,
.probe_device = gart_iommu_probe_device,
.release_device = gart_iommu_release_device,
.device_group = generic_device_group,
- .map = gart_iommu_map,
- .unmap = gart_iommu_unmap,
- .iova_to_phys = gart_iommu_iova_to_phys,
.pgsize_bitmap = GART_IOMMU_PGSIZES,
.of_xlate = gart_iommu_of_xlate,
- .iotlb_sync_map = gart_iommu_sync_map,
- .iotlb_sync = gart_iommu_sync,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = gart_iommu_attach_dev,
+ .detach_dev = gart_iommu_detach_dev,
+ .map = gart_iommu_map,
+ .unmap = gart_iommu_unmap,
+ .iova_to_phys = gart_iommu_iova_to_phys,
+ .iotlb_sync_map = gart_iommu_sync_map,
+ .iotlb_sync = gart_iommu_sync,
+ .free = gart_iommu_domain_free,
+ }
};
int tegra_gart_suspend(struct gart_device *gart)
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index e900e3c46903..1fea68e551f1 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -272,11 +272,6 @@ static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
clear_bit(id, smmu->asids);
}
-static bool tegra_smmu_capable(enum iommu_cap cap)
-{
- return false;
-}
-
static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
struct tegra_smmu_as *as;
@@ -808,8 +803,10 @@ static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
return NULL;
mc = platform_get_drvdata(pdev);
- if (!mc)
+ if (!mc) {
+ put_device(&pdev->dev);
return NULL;
+ }
return mc->smmu;
}
@@ -967,19 +964,20 @@ static int tegra_smmu_of_xlate(struct device *dev,
}
static const struct iommu_ops tegra_smmu_ops = {
- .capable = tegra_smmu_capable,
.domain_alloc = tegra_smmu_domain_alloc,
- .domain_free = tegra_smmu_domain_free,
- .attach_dev = tegra_smmu_attach_dev,
- .detach_dev = tegra_smmu_detach_dev,
.probe_device = tegra_smmu_probe_device,
.release_device = tegra_smmu_release_device,
.device_group = tegra_smmu_device_group,
- .map = tegra_smmu_map,
- .unmap = tegra_smmu_unmap,
- .iova_to_phys = tegra_smmu_iova_to_phys,
.of_xlate = tegra_smmu_of_xlate,
.pgsize_bitmap = SZ_4K,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = tegra_smmu_attach_dev,
+ .detach_dev = tegra_smmu_detach_dev,
+ .map = tegra_smmu_map,
+ .unmap = tegra_smmu_unmap,
+ .iova_to_phys = tegra_smmu_iova_to_phys,
+ .free = tegra_smmu_domain_free,
+ }
};
static void tegra_smmu_ahb_enable(void)
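
The tegra_smmu_find() hunk above fixes a reference leak: of_find_device_by_node() returns the platform device with an elevated refcount, so the early-exit path needs a matching put_device(). A generic sketch of the rule; here the reference is dropped on every path, whereas tegra deliberately keeps it when the lookup succeeds:

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

static void *example_node_to_drvdata(struct device_node *np)
{
	struct platform_device *pdev;
	void *data;

	pdev = of_find_device_by_node(np);	/* takes a device reference */
	if (!pdev)
		return NULL;

	data = platform_get_drvdata(pdev);
	put_device(&pdev->dev);			/* always balance the get */
	return data;
}
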
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index f2aa34f57454..25be4b822aa0 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -1008,12 +1008,6 @@ static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
static struct iommu_ops viommu_ops = {
.domain_alloc = viommu_domain_alloc,
- .domain_free = viommu_domain_free,
- .attach_dev = viommu_attach_dev,
- .map = viommu_map,
- .unmap = viommu_unmap,
- .iova_to_phys = viommu_iova_to_phys,
- .iotlb_sync = viommu_iotlb_sync,
.probe_device = viommu_probe_device,
.probe_finalize = viommu_probe_finalize,
.release_device = viommu_release_device,
@@ -1022,6 +1016,14 @@ static struct iommu_ops viommu_ops = {
.put_resv_regions = generic_iommu_put_resv_regions,
.of_xlate = viommu_of_xlate,
.owner = THIS_MODULE,
+ .default_domain_ops = &(const struct iommu_domain_ops) {
+ .attach_dev = viommu_attach_dev,
+ .map = viommu_map,
+ .unmap = viommu_unmap,
+ .iova_to_phys = viommu_iova_to_phys,
+ .iotlb_sync = viommu_iotlb_sync,
+ .free = viommu_domain_free,
+ }
};
static int viommu_init_vqs(struct viommu_dev *viommu)