author		Jason Gunthorpe <jgg@nvidia.com>	2022-11-29 16:29:24 -0400
committer	Jason Gunthorpe <jgg@nvidia.com>	2022-11-29 16:34:15 -0400
commit		4989764d8ed3d3d1024e4e831ff2affc40ee01d6 (patch)
tree		660b3a81ff3bef19d26d368e8a7ad2fa828cbb94
parent		69e61edebea030f177de7a23b8d5d9b8c4a90bda (diff)
download	linux-4989764d8ed3d3d1024e4e831ff2affc40ee01d6.tar.bz2
iommu: Add IOMMU_CAP_ENFORCE_CACHE_COHERENCY
This queries if a domain linked to a device should expect to support
enforce_cache_coherency() so iommufd can negotiate the rules for when a
domain should be shared or not.

For iommufd a device that declares IOMMU_CAP_ENFORCE_CACHE_COHERENCY will
not be attached to a domain that does not support it.

Link: https://lore.kernel.org/r/1-v6-a196d26f289e+11787-iommufd_jgg@nvidia.com
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Yi Liu <yi.l.liu@intel.com>
Tested-by: Lixiao Yang <lixiao.yang@intel.com>
Tested-by: Matthew Rosato <mjrosato@linux.ibm.com>
Tested-by: Yu He <yu.he@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
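As an illustration of that negotiation, a consumer could probe the new
capability with device_iommu_capable() before deciding whether an existing
domain may be reused for a device. The following is only a minimal sketch
under that assumption; the helper can_share_enforcing_domain() is a
hypothetical name and is not part of this patch or of iommufd:

#include <linux/iommu.h>

/*
 * Hypothetical helper (illustration only, not in this patch): reuse @domain
 * for @dev only if the device's IOMMU reports
 * IOMMU_CAP_ENFORCE_CACHE_COHERENCY and the domain actually implements the
 * enforce_cache_coherency() op.
 */
static bool can_share_enforcing_domain(struct iommu_domain *domain,
				       struct device *dev)
{
	if (!device_iommu_capable(dev, IOMMU_CAP_ENFORCE_CACHE_COHERENCY))
		return false;
	return domain->ops->enforce_cache_coherency != NULL;
}

The actual iommufd attach logic lives in later patches of this series and
differs in detail.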
 drivers/iommu/amd/iommu.c   |  2 ++
 drivers/iommu/intel/iommu.c | 16 +++++++++++-----
 include/linux/iommu.h       |  5 +++++
 3 files changed, 18 insertions(+), 5 deletions(-)
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 45299eb7e8e3..240c535e317c 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2278,6 +2278,8 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
 		return false;
 	case IOMMU_CAP_PRE_BOOT_PROTECTION:
 		return amdr_ivrs_remap_support;
+	case IOMMU_CAP_ENFORCE_CACHE_COHERENCY:
+		return true;
 	default:
 		break;
 	}
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index f298e51d5aa6..157c97274110 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -4450,14 +4450,20 @@ static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
 
 static bool intel_iommu_capable(struct device *dev, enum iommu_cap cap)
 {
-	if (cap == IOMMU_CAP_CACHE_COHERENCY)
+	struct device_domain_info *info = dev_iommu_priv_get(dev);
+
+	switch (cap) {
+	case IOMMU_CAP_CACHE_COHERENCY:
 		return true;
-	if (cap == IOMMU_CAP_INTR_REMAP)
+	case IOMMU_CAP_INTR_REMAP:
 		return irq_remapping_enabled == 1;
-	if (cap == IOMMU_CAP_PRE_BOOT_PROTECTION)
+	case IOMMU_CAP_PRE_BOOT_PROTECTION:
 		return dmar_platform_optin();
-
-	return false;
+	case IOMMU_CAP_ENFORCE_CACHE_COHERENCY:
+		return ecap_sc_support(info->iommu->ecap);
+	default:
+		return false;
+	}
 }
 
 static struct iommu_device *intel_iommu_probe_device(struct device *dev)
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 68d7d304cdb7..a09fd32d8cc2 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -124,6 +124,11 @@ enum iommu_cap {
 	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
 	IOMMU_CAP_PRE_BOOT_PROTECTION,	/* Firmware says it used the IOMMU for
 					   DMA protection and we should too */
+	/*
+	 * Per-device flag indicating if enforce_cache_coherency() will work on
+	 * this device.
+	 */
+	IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
 };
 
 /* These are the possible reserved region types */
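Beyond the per-driver reporting in the hunks above, the capability is meant
to pair with the existing enforce_cache_coherency() domain op at attach
time. The sketch below is an assumption about how a consumer might combine
the two; attach_with_enforced_coherency() is a hypothetical name, not
iommufd's actual code:

#include <linux/iommu.h>

/*
 * Hypothetical consumer flow (illustration only): a device that declares
 * IOMMU_CAP_ENFORCE_CACHE_COHERENCY is only left attached to a domain where
 * enforce_cache_coherency() can be made to stick.
 */
static int attach_with_enforced_coherency(struct iommu_domain *domain,
					  struct device *dev)
{
	int rc;

	if (!device_iommu_capable(dev, IOMMU_CAP_ENFORCE_CACHE_COHERENCY))
		return -EINVAL;		/* caller should pick another domain */

	rc = iommu_attach_device(domain, dev);
	if (rc)
		return rc;

	if (!domain->ops->enforce_cache_coherency ||
	    !domain->ops->enforce_cache_coherency(domain)) {
		iommu_detach_device(domain, dev);
		return -EINVAL;
	}

	return 0;
}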