author	Linus Torvalds <torvalds@linux-foundation.org>	2022-05-31 09:56:54 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-05-31 09:56:54 -0700
commit	e1cbc3b96a9974746b2a80c3a6c8a0f7eff7b1b5 (patch)
tree	83c969a9dc2ba9c0dda2f9d04369b5f8c0563521 /drivers/vfio
parent	3335d5550256210c9b213f67240221633d8f7b53 (diff)
parent	b0dacee202efbf1a5d9f5cdfd82049e8b5b085d2 (diff)
Merge tag 'iommu-updates-v5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull iommu updates from Joerg Roedel:

 - Intel VT-d driver updates:
     - Domain force snooping improvement.
     - Cleanups, no intentional functional changes.

 - ARM SMMU driver updates:
     - Add new Qualcomm device-tree compatible strings
     - Add new Nvidia device-tree compatible string for Tegra234
     - Fix UAF in SMMUv3 shared virtual addressing code
     - Force identity-mapped domains for users of ye olde SMMU legacy
       binding
     - Minor cleanups

 - Fix a BUG_ON in the vfio_iommu_group_notifier:
     - Groundwork for upcoming iommufd framework
     - Introduction of DMA ownership so that an entire IOMMU group is
       either controlled by the kernel or by user-space

 - MT8195 and MT8186 support in the Mediatek IOMMU driver

 - Make forcing of cache-coherent DMA more coherent between IOMMU
   drivers

 - Fixes for thunderbolt device DMA protection

 - Various smaller fixes and cleanups

* tag 'iommu-updates-v5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (88 commits)
  iommu/amd: Increase timeout waiting for GA log enablement
  iommu/s390: Tolerate repeat attach_dev calls
  iommu/vt-d: Remove hard coding PGSNP bit in PASID entries
  iommu/vt-d: Remove domain_update_iommu_snooping()
  iommu/vt-d: Check domain force_snooping against attached devices
  iommu/vt-d: Block force-snoop domain attaching if no SC support
  iommu/vt-d: Size Page Request Queue to avoid overflow condition
  iommu/vt-d: Fold dmar_insert_one_dev_info() into its caller
  iommu/vt-d: Change return type of dmar_insert_one_dev_info()
  iommu/vt-d: Remove unneeded validity check on dev
  iommu/dma: Explicitly sort PCI DMA windows
  iommu/dma: Fix iova map result check bug
  iommu/mediatek: Fix NULL pointer dereference when printing dev_name
  iommu: iommu_group_claim_dma_owner() must always assign a domain
  iommu/arm-smmu: Force identity domains for legacy binding
  iommu/arm-smmu: Support Tegra234 SMMU
  dt-bindings: arm-smmu: Add compatible for Tegra234 SOC
  dt-bindings: arm-smmu: Document nvidia,memory-controller property
  iommu/arm-smmu-qcom: Add SC8280XP support
  dt-bindings: arm-smmu: Add compatible for Qualcomm SC8280XP
  ...
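The vfio side of the DMA-ownership work is the bulk of this diff: attaching a group to a container now claims exclusive DMA ownership of the whole IOMMU group, and detaching releases it. A minimal sketch of that claim/release pattern, distilled from the vfio_group_set_container() and __vfio_group_unset_container() hunks below (container_attach() is a hypothetical stand-in for the iommu-driver attach path; locking and error reporting trimmed):

    /* Sketch only: the ownership flow as it appears in the vfio.c hunks. */
    static int attach_group_to_container(struct vfio_group *group,
                                         struct vfio_container *container,
                                         struct file *owner_cookie)
    {
            int ret;

            /* Claim exclusive DMA ownership of every device in the group. */
            ret = iommu_group_claim_dma_owner(group->iommu_group, owner_cookie);
            if (ret)
                    return ret;

            ret = container_attach(container, group); /* hypothetical helper */
            if (ret)
                    iommu_group_release_dma_owner(group->iommu_group);
            return ret;
    }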
Diffstat (limited to 'drivers/vfio')
-rw-r--r--	drivers/vfio/fsl-mc/vfio_fsl_mc.c	1
-rw-r--r--	drivers/vfio/pci/vfio_pci.c	1
-rw-r--r--	drivers/vfio/platform/vfio_amba.c	1
-rw-r--r--	drivers/vfio/platform/vfio_platform.c	1
-rw-r--r--	drivers/vfio/vfio.c	252
-rw-r--r--	drivers/vfio/vfio_iommu_type1.c	30
6 files changed, 45 insertions, 241 deletions
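The first four hunks below are one-liners: each vfio bus driver sets the new driver_managed_dma flag, declaring that it manages device DMA itself rather than through the kernel's default IOMMU domain, so binding it does not conflict with a user-space DMA owner. As a sketch, the pattern on a hypothetical platform driver (names invented for illustration) looks like:

    /* Sketch: opting a driver out of kernel-managed DMA. */
    static struct platform_driver example_vfio_driver = {
            .probe  = example_probe,
            .remove = example_remove,
            .driver = {
                    .name = "example-vfio",
            },
            .driver_managed_dma = true, /* user-space will own the device's DMA */
    };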
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
index 6e2e62c6f47a..3feff729f3ce 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
@@ -588,6 +588,7 @@ static struct fsl_mc_driver vfio_fsl_mc_driver = {
.name = "vfio-fsl-mc",
.owner = THIS_MODULE,
},
+ .driver_managed_dma = true,
};
static int __init vfio_fsl_mc_driver_init(void)
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 2b047469e02f..58839206d1ca 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -194,6 +194,7 @@ static struct pci_driver vfio_pci_driver = {
.remove = vfio_pci_remove,
.sriov_configure = vfio_pci_sriov_configure,
.err_handler = &vfio_pci_core_err_handlers,
+ .driver_managed_dma = true,
};
static void __init vfio_pci_fill_ids(void)
diff --git a/drivers/vfio/platform/vfio_amba.c b/drivers/vfio/platform/vfio_amba.c
index badfffea14fb..1aaa4f721bd2 100644
--- a/drivers/vfio/platform/vfio_amba.c
+++ b/drivers/vfio/platform/vfio_amba.c
@@ -95,6 +95,7 @@ static struct amba_driver vfio_amba_driver = {
.name = "vfio-amba",
.owner = THIS_MODULE,
},
+ .driver_managed_dma = true,
};
module_amba_driver(vfio_amba_driver);
diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c
index 68a1c87066d7..04f40c5acfd6 100644
--- a/drivers/vfio/platform/vfio_platform.c
+++ b/drivers/vfio/platform/vfio_platform.c
@@ -76,6 +76,7 @@ static struct platform_driver vfio_platform_driver = {
.driver = {
.name = "vfio-platform",
},
+ .driver_managed_dma = true,
};
module_platform_driver(vfio_platform_driver);
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index a4555014bd1e..6a7fb8d91aaa 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -62,11 +62,6 @@ struct vfio_container {
bool noiommu;
};
-struct vfio_unbound_dev {
- struct device *dev;
- struct list_head unbound_next;
-};
-
struct vfio_group {
struct device dev;
struct cdev cdev;
@@ -76,11 +71,8 @@ struct vfio_group {
struct vfio_container *container;
struct list_head device_list;
struct mutex device_lock;
- struct notifier_block nb;
struct list_head vfio_next;
struct list_head container_next;
- struct list_head unbound_list;
- struct mutex unbound_lock;
atomic_t opened;
wait_queue_head_t container_q;
enum vfio_group_type type;
@@ -281,8 +273,6 @@ void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
}
EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
-static int vfio_iommu_group_notifier(struct notifier_block *nb,
- unsigned long action, void *data);
static void vfio_group_get(struct vfio_group *group);
/*
@@ -340,16 +330,8 @@ vfio_group_get_from_iommu(struct iommu_group *iommu_group)
static void vfio_group_release(struct device *dev)
{
struct vfio_group *group = container_of(dev, struct vfio_group, dev);
- struct vfio_unbound_dev *unbound, *tmp;
-
- list_for_each_entry_safe(unbound, tmp,
- &group->unbound_list, unbound_next) {
- list_del(&unbound->unbound_next);
- kfree(unbound);
- }
mutex_destroy(&group->device_lock);
- mutex_destroy(&group->unbound_lock);
iommu_group_put(group->iommu_group);
ida_free(&vfio.group_ida, MINOR(group->dev.devt));
kfree(group);
@@ -381,8 +363,6 @@ static struct vfio_group *vfio_group_alloc(struct iommu_group *iommu_group,
refcount_set(&group->users, 1);
INIT_LIST_HEAD(&group->device_list);
mutex_init(&group->device_lock);
- INIT_LIST_HEAD(&group->unbound_list);
- mutex_init(&group->unbound_lock);
init_waitqueue_head(&group->container_q);
group->iommu_group = iommu_group;
/* put in vfio_group_release() */
@@ -412,13 +392,6 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
goto err_put;
}
- group->nb.notifier_call = vfio_iommu_group_notifier;
- err = iommu_group_register_notifier(iommu_group, &group->nb);
- if (err) {
- ret = ERR_PTR(err);
- goto err_put;
- }
-
mutex_lock(&vfio.group_lock);
/* Did we race creating this group? */
@@ -439,7 +412,6 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
err_unlock:
mutex_unlock(&vfio.group_lock);
- iommu_group_unregister_notifier(group->iommu_group, &group->nb);
err_put:
put_device(&group->dev);
return ret;
@@ -464,7 +436,6 @@ static void vfio_group_put(struct vfio_group *group)
cdev_device_del(&group->cdev, &group->dev);
mutex_unlock(&vfio.group_lock);
- iommu_group_unregister_notifier(group->iommu_group, &group->nb);
put_device(&group->dev);
}
@@ -521,175 +492,6 @@ static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
}
/*
- * Some drivers, like pci-stub, are only used to prevent other drivers from
- * claiming a device and are therefore perfectly legitimate for a user owned
- * group. The pci-stub driver has no dependencies on DMA or the IOVA mapping
- * of the device, but it does prevent the user from having direct access to
- * the device, which is useful in some circumstances.
- *
- * We also assume that we can include PCI interconnect devices, ie. bridges.
- * IOMMU grouping on PCI necessitates that if we lack isolation on a bridge
- * then all of the downstream devices will be part of the same IOMMU group as
- * the bridge. Thus, if placing the bridge into the user owned IOVA space
- * breaks anything, it only does so for user owned devices downstream. Note
- * that error notification via MSI can be affected for platforms that handle
- * MSI within the same IOVA space as DMA.
- */
-static const char * const vfio_driver_allowed[] = { "pci-stub" };
-
-static bool vfio_dev_driver_allowed(struct device *dev,
- struct device_driver *drv)
-{
- if (dev_is_pci(dev)) {
- struct pci_dev *pdev = to_pci_dev(dev);
-
- if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
- return true;
- }
-
- return match_string(vfio_driver_allowed,
- ARRAY_SIZE(vfio_driver_allowed),
- drv->name) >= 0;
-}
-
-/*
- * A vfio group is viable for use by userspace if all devices are in
- * one of the following states:
- * - driver-less
- * - bound to a vfio driver
- * - bound to an otherwise allowed driver
- * - a PCI interconnect device
- *
- * We use two methods to determine whether a device is bound to a vfio
- * driver. The first is to test whether the device exists in the vfio
- * group. The second is to test if the device exists on the group
- * unbound_list, indicating it's in the middle of transitioning from
- * a vfio driver to driver-less.
- */
-static int vfio_dev_viable(struct device *dev, void *data)
-{
- struct vfio_group *group = data;
- struct vfio_device *device;
- struct device_driver *drv = READ_ONCE(dev->driver);
- struct vfio_unbound_dev *unbound;
- int ret = -EINVAL;
-
- mutex_lock(&group->unbound_lock);
- list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
- if (dev == unbound->dev) {
- ret = 0;
- break;
- }
- }
- mutex_unlock(&group->unbound_lock);
-
- if (!ret || !drv || vfio_dev_driver_allowed(dev, drv))
- return 0;
-
- device = vfio_group_get_device(group, dev);
- if (device) {
- vfio_device_put(device);
- return 0;
- }
-
- return ret;
-}
-
-/*
- * Async device support
- */
-static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
-{
- struct vfio_device *device;
-
- /* Do we already know about it? We shouldn't */
- device = vfio_group_get_device(group, dev);
- if (WARN_ON_ONCE(device)) {
- vfio_device_put(device);
- return 0;
- }
-
- /* Nothing to do for idle groups */
- if (!atomic_read(&group->container_users))
- return 0;
-
- /* TODO Prevent device auto probing */
- dev_WARN(dev, "Device added to live group %d!\n",
- iommu_group_id(group->iommu_group));
-
- return 0;
-}
-
-static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
-{
- /* We don't care what happens when the group isn't in use */
- if (!atomic_read(&group->container_users))
- return 0;
-
- return vfio_dev_viable(dev, group);
-}
-
-static int vfio_iommu_group_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct vfio_group *group = container_of(nb, struct vfio_group, nb);
- struct device *dev = data;
- struct vfio_unbound_dev *unbound;
-
- switch (action) {
- case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
- vfio_group_nb_add_dev(group, dev);
- break;
- case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
- /*
- * Nothing to do here. If the device is in use, then the
- * vfio sub-driver should block the remove callback until
- * it is unused. If the device is unused or attached to a
- * stub driver, then it should be released and we don't
- * care that it will be going away.
- */
- break;
- case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
- dev_dbg(dev, "%s: group %d binding to driver\n", __func__,
- iommu_group_id(group->iommu_group));
- break;
- case IOMMU_GROUP_NOTIFY_BOUND_DRIVER:
- dev_dbg(dev, "%s: group %d bound to driver %s\n", __func__,
- iommu_group_id(group->iommu_group), dev->driver->name);
- BUG_ON(vfio_group_nb_verify(group, dev));
- break;
- case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER:
- dev_dbg(dev, "%s: group %d unbinding from driver %s\n",
- __func__, iommu_group_id(group->iommu_group),
- dev->driver->name);
- break;
- case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER:
- dev_dbg(dev, "%s: group %d unbound from driver\n", __func__,
- iommu_group_id(group->iommu_group));
- /*
- * XXX An unbound device in a live group is ok, but we'd
- * really like to avoid the above BUG_ON by preventing other
- * drivers from binding to it. Once that occurs, we have to
- * stop the system to maintain isolation. At a minimum, we'd
- * want a toggle to disable driver auto probe for this device.
- */
-
- mutex_lock(&group->unbound_lock);
- list_for_each_entry(unbound,
- &group->unbound_list, unbound_next) {
- if (dev == unbound->dev) {
- list_del(&unbound->unbound_next);
- kfree(unbound);
- break;
- }
- }
- mutex_unlock(&group->unbound_lock);
- break;
- }
- return NOTIFY_OK;
-}
-
-/*
* VFIO driver API
*/
void vfio_init_group_dev(struct vfio_device *device, struct device *dev,
@@ -815,6 +617,13 @@ static int __vfio_register_dev(struct vfio_device *device,
int vfio_register_group_dev(struct vfio_device *device)
{
+ /*
+ * VFIO always sets IOMMU_CACHE because we offer no way for userspace to
+ * restore cache coherency.
+ */
+ if (!iommu_capable(device->dev->bus, IOMMU_CAP_CACHE_COHERENCY))
+ return -EINVAL;
+
return __vfio_register_dev(device,
vfio_group_find_or_alloc(device->dev));
}
@@ -889,29 +698,10 @@ static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
void vfio_unregister_group_dev(struct vfio_device *device)
{
struct vfio_group *group = device->group;
- struct vfio_unbound_dev *unbound;
unsigned int i = 0;
bool interrupted = false;
long rc;
- /*
- * When the device is removed from the group, the group suddenly
- * becomes non-viable; the device has a driver (until the unbind
- * completes), but it's not present in the group. This is bad news
- * for any external users that need to re-acquire a group reference
- * in order to match and release their existing reference. To
- * solve this, we track such devices on the unbound_list to bridge
- * the gap until they're fully unbound.
- */
- unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
- if (unbound) {
- unbound->dev = device->dev;
- mutex_lock(&group->unbound_lock);
- list_add(&unbound->unbound_next, &group->unbound_list);
- mutex_unlock(&group->unbound_lock);
- }
- WARN_ON(!unbound);
-
vfio_device_put(device);
rc = try_wait_for_completion(&device->comp);
while (rc <= 0) {
@@ -1198,6 +988,8 @@ static void __vfio_group_unset_container(struct vfio_group *group)
driver->ops->detach_group(container->iommu_data,
group->iommu_group);
+ iommu_group_release_dma_owner(group->iommu_group);
+
group->container = NULL;
wake_up(&group->container_q);
list_del(&group->container_next);
@@ -1282,13 +1074,19 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
goto unlock_out;
}
+ ret = iommu_group_claim_dma_owner(group->iommu_group, f.file);
+ if (ret)
+ goto unlock_out;
+
driver = container->iommu_driver;
if (driver) {
ret = driver->ops->attach_group(container->iommu_data,
group->iommu_group,
group->type);
- if (ret)
+ if (ret) {
+ iommu_group_release_dma_owner(group->iommu_group);
goto unlock_out;
+ }
}
group->container = container;
@@ -1305,12 +1103,6 @@ unlock_out:
return ret;
}
-static bool vfio_group_viable(struct vfio_group *group)
-{
- return (iommu_group_for_each_dev(group->iommu_group,
- group, vfio_dev_viable) == 0);
-}
-
static int vfio_group_add_container_user(struct vfio_group *group)
{
if (!atomic_inc_not_zero(&group->container_users))
@@ -1320,7 +1112,7 @@ static int vfio_group_add_container_user(struct vfio_group *group)
atomic_dec(&group->container_users);
return -EPERM;
}
- if (!group->container->iommu_driver || !vfio_group_viable(group)) {
+ if (!group->container->iommu_driver) {
atomic_dec(&group->container_users);
return -EINVAL;
}
@@ -1338,7 +1130,7 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
int ret = 0;
if (0 == atomic_read(&group->container_users) ||
- !group->container->iommu_driver || !vfio_group_viable(group))
+ !group->container->iommu_driver)
return -EINVAL;
if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO))
@@ -1430,11 +1222,11 @@ static long vfio_group_fops_unl_ioctl(struct file *filep,
status.flags = 0;
- if (vfio_group_viable(group))
- status.flags |= VFIO_GROUP_FLAGS_VIABLE;
-
if (group->container)
- status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET;
+ status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET |
+ VFIO_GROUP_FLAGS_VIABLE;
+ else if (!iommu_group_dma_owner_claimed(group->iommu_group))
+ status.flags |= VFIO_GROUP_FLAGS_VIABLE;
if (copy_to_user((void __user *)arg, &status, minsz))
return -EFAULT;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 9394aa9444c1..c13b9290e357 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -84,8 +84,8 @@ struct vfio_domain {
struct iommu_domain *domain;
struct list_head next;
struct list_head group_list;
- int prot; /* IOMMU_CACHE */
- bool fgsp; /* Fine-grained super pages */
+ bool fgsp : 1; /* Fine-grained super pages */
+ bool enforce_cache_coherency : 1;
};
struct vfio_dma {
@@ -1461,7 +1461,7 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
list_for_each_entry(d, &iommu->domain_list, next) {
ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
- npage << PAGE_SHIFT, prot | d->prot);
+ npage << PAGE_SHIFT, prot | IOMMU_CACHE);
if (ret)
goto unwind;
@@ -1771,7 +1771,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
}
ret = iommu_map(domain->domain, iova, phys,
- size, dma->prot | domain->prot);
+ size, dma->prot | IOMMU_CACHE);
if (ret) {
if (!dma->iommu_mapped) {
vfio_unpin_pages_remote(dma, iova,
@@ -1859,7 +1859,7 @@ static void vfio_test_domain_fgsp(struct vfio_domain *domain)
return;
ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
- IOMMU_READ | IOMMU_WRITE | domain->prot);
+ IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
if (!ret) {
size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
@@ -2267,8 +2267,15 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
goto out_detach;
}
- if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
- domain->prot |= IOMMU_CACHE;
+ /*
+ * If the IOMMU can block non-coherent operations (ie PCIe TLPs with
+ * no-snoop set) then VFIO always turns this feature on because on Intel
+ * platforms it optimizes KVM to disable wbinvd emulation.
+ */
+ if (domain->domain->ops->enforce_cache_coherency)
+ domain->enforce_cache_coherency =
+ domain->domain->ops->enforce_cache_coherency(
+ domain->domain);
/*
* Try to match an existing compatible domain. We don't want to
@@ -2279,7 +2286,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
*/
list_for_each_entry(d, &iommu->domain_list, next) {
if (d->domain->ops == domain->domain->ops &&
- d->prot == domain->prot) {
+ d->enforce_cache_coherency ==
+ domain->enforce_cache_coherency) {
iommu_detach_group(domain->domain, group->iommu_group);
if (!iommu_attach_group(d->domain,
group->iommu_group)) {
@@ -2611,14 +2619,14 @@ static void vfio_iommu_type1_release(void *iommu_data)
kfree(iommu);
}
-static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
+static int vfio_domains_have_enforce_cache_coherency(struct vfio_iommu *iommu)
{
struct vfio_domain *domain;
int ret = 1;
mutex_lock(&iommu->lock);
list_for_each_entry(domain, &iommu->domain_list, next) {
- if (!(domain->prot & IOMMU_CACHE)) {
+ if (!(domain->enforce_cache_coherency)) {
ret = 0;
break;
}
@@ -2641,7 +2649,7 @@ static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
case VFIO_DMA_CC_IOMMU:
if (!iommu)
return 0;
- return vfio_domains_have_iommu_cache(iommu);
+ return vfio_domains_have_enforce_cache_coherency(iommu);
default:
return 0;
}
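Finally, the coherency rework above changes what VFIO_DMA_CC_IOMMU reports: it is now true only if every domain in the container successfully enforced coherency through the IOMMU driver's enforce_cache_coherency() op, rather than testing the old IOMMU_CACHE protection flag. A hedged user-space sketch of the query (the container must already have a group attached and an IOMMU model set via VFIO_SET_IOMMU, or the check returns 0):

    #include <sys/ioctl.h>
    #include <linux/vfio.h>

    /* Sketch: ask the container whether DMA is always cache-coherent. */
    static int container_enforces_coherency(int container_fd)
    {
            /*
             * Returns 1 only once the type1 IOMMU is set up and every
             * attached domain enforced cache coherency.
             */
            return ioctl(container_fd, VFIO_CHECK_EXTENSION,
                         VFIO_DMA_CC_IOMMU) == 1;
    }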