author    Lu Baolu <baolu.lu@linux.intel.com>  2019-05-25 13:41:32 +0800
committer Joerg Roedel <jroedel@suse.de>       2019-05-28 10:19:11 +0200
commit    8af46c784ecfe8929f66b5eaae987f6874953226
tree      6f4bbaec5617602575cb02e97f9c8e32f432da6a
parent    fa212a97f3a366adcb2046c565e7c978ab067c73
iommu/vt-d: Implement is_attach_deferred iommu ops entry
As a domain is now attached to a device earlier, we should implement the
is_attach_deferred callback and use it to defer the domain attach from
IOMMU driver init to device driver init when the IOMMU is pre-enabled in
a kdump kernel.

Suggested-by: Tom Murphy <tmurphy@arista.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
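For context, .is_attach_deferred is consulted by the generic IOMMU core
before it performs an attach. A minimal sketch of that core-side check,
modeled on drivers/iommu/iommu.c of this era (a paraphrase, not the
exact upstream code):

static int __iommu_attach_device(struct iommu_domain *domain,
                                 struct device *dev)
{
        /*
         * Sketch: if the driver reports the attach as deferred,
         * return success without touching the hardware; the real
         * attach is left to the driver itself (see find_domain()
         * in the diff below).
         */
        if (domain->ops->is_attach_deferred &&
            domain->ops->is_attach_deferred(domain, dev))
                return 0;

        if (unlikely(domain->ops->attach_dev == NULL))
                return -ENODEV;

        return domain->ops->attach_dev(domain, dev);
}

With the callback returning true, the default-domain attach done at
IOMMU probe time becomes a no-op, and the VT-d driver performs the
attach lazily from its own code paths.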
Diffstat (limited to 'drivers/iommu')
 drivers/iommu/intel-iommu.c | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0bae0b73076c..c8b73802f0e0 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -353,6 +353,8 @@ static void domain_context_clear(struct intel_iommu *iommu,
 static int domain_detach_iommu(struct dmar_domain *domain,
                                struct intel_iommu *iommu);
 static bool device_is_rmrr_locked(struct device *dev);
+static int intel_iommu_attach_device(struct iommu_domain *domain,
+                                     struct device *dev);
 
 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
 int dmar_disabled = 0;
@@ -378,6 +380,7 @@ int intel_iommu_gfx_mapped;
 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
 
 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
+#define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
 
 static DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
@@ -2408,8 +2411,18 @@ static struct dmar_domain *find_domain(struct device *dev)
 {
         struct device_domain_info *info;
 
+        if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO)) {
+                struct iommu_domain *domain;
+
+                dev->archdata.iommu = NULL;
+                domain = iommu_get_domain_for_dev(dev);
+                if (domain)
+                        intel_iommu_attach_device(domain, dev);
+        }
+
         /* No lock here, assumes no domain exit in normal case */
         info = dev->archdata.iommu;
+
         if (likely(info))
                 return info->domain;
         return NULL;
@@ -5504,6 +5517,9 @@ static int intel_iommu_add_device(struct device *dev)
 
         iommu_device_link(&iommu->iommu, dev);
 
+        if (translation_pre_enabled(iommu))
+                dev->archdata.iommu = DEFER_DEVICE_DOMAIN_INFO;
+
         group = iommu_group_get_for_dev(dev);
 
         if (IS_ERR(group))
@@ -5828,6 +5844,12 @@ intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
                         dmar_domain->default_pasid : -EINVAL;
 }
 
+static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
+                                           struct device *dev)
+{
+        return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
+}
+
 const struct iommu_ops intel_iommu_ops = {
         .capable                = intel_iommu_capable,
         .domain_alloc           = intel_iommu_domain_alloc,
@@ -5850,6 +5872,7 @@ const struct iommu_ops intel_iommu_ops = {
         .dev_feat_enabled       = intel_iommu_dev_feat_enabled,
         .dev_enable_feat        = intel_iommu_dev_enable_feat,
         .dev_disable_feat       = intel_iommu_dev_disable_feat,
+        .is_attach_deferred     = intel_iommu_is_attach_deferred,
         .pgsize_bitmap          = INTEL_IOMMU_PGSIZES,
 };
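Putting the pieces together: when translation is pre-enabled (the kdump
case), intel_iommu_add_device() tags the device with
DEFER_DEVICE_DOMAIN_INFO instead of attaching it, and
intel_iommu_is_attach_deferred() turns the core's early default-domain
attach into a no-op. The deferred attach then happens on the device
driver's first DMA-API call: the mapping paths resolve the device's
domain through find_domain(), which spots the marker, clears it, looks
up the default domain via iommu_get_domain_for_dev() and performs the
real intel_iommu_attach_device(). Until that point the translation
tables inherited from the crashed kernel remain in use, so DMA still in
flight from the old kernel keeps hitting valid mappings.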