Diffstat (limited to 'drivers/iommu/iommu.c')
 drivers/iommu/iommu.c | 339 +++----------------------------------------------
 1 file changed, 39 insertions(+), 300 deletions(-)
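
Note: the hunks below repeatedly replace dev->bus->iommu_ops lookups with dev_iommu_ops(dev). That helper is introduced elsewhere in the series and is not part of this file; as a rough sketch (an assumption based on how it is used here), it resolves the per-device ops along these lines:

    /* Sketch only, not from this diff: dev_iommu_ops() as assumed by the hunks below. */
    static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
    {
            /*
             * Valid ops are expected to be installed once iommu_probe_device()
             * has succeeded, so callers do not need a NULL check.
             */
            return dev->iommu->iommu_dev->ops;
    }

Because the result cannot be NULL for a probed device, several "if (!ops)" checks against dev->bus->iommu_ops disappear in the hunks below.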
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 107dcf5938d6..f2c45b85b9fc 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -323,13 +323,14 @@ err_out:
void iommu_release_device(struct device *dev)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ const struct iommu_ops *ops;
if (!dev->iommu)
return;
iommu_device_unlink(dev->iommu->iommu_dev, dev);
+ ops = dev_iommu_ops(dev);
ops->release_device(dev);
iommu_group_remove_device(dev);
@@ -790,9 +791,6 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group,
dma_addr_t start, end, addr;
size_t map_size = 0;
- if (domain->ops->apply_resv_region)
- domain->ops->apply_resv_region(dev, domain, entry);
-
start = ALIGN(entry->start, pg_size);
end = ALIGN(entry->start + entry->length, pg_size);
@@ -833,11 +831,12 @@ out:
return ret;
}
-static bool iommu_is_attach_deferred(struct iommu_domain *domain,
- struct device *dev)
+static bool iommu_is_attach_deferred(struct device *dev)
{
- if (domain->ops->is_attach_deferred)
- return domain->ops->is_attach_deferred(domain, dev);
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
+
+ if (ops->is_attach_deferred)
+ return ops->is_attach_deferred(dev);
return false;
}
@@ -894,7 +893,7 @@ rename:
mutex_lock(&group->mutex);
list_add_tail(&device->list, &group->devices);
- if (group->domain && !iommu_is_attach_deferred(group->domain, dev))
+ if (group->domain && !iommu_is_attach_deferred(dev))
ret = __iommu_attach_device(group->domain, dev);
mutex_unlock(&group->mutex);
if (ret)
@@ -1255,10 +1254,10 @@ int iommu_page_response(struct device *dev,
struct iommu_fault_event *evt;
struct iommu_fault_page_request *prm;
struct dev_iommu *param = dev->iommu;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- if (!domain || !domain->ops->page_response)
+ if (!ops->page_response)
return -ENODEV;
if (!param || !param->fault_param)
@@ -1299,7 +1298,7 @@ int iommu_page_response(struct device *dev,
msg->pasid = 0;
}
- ret = domain->ops->page_response(dev, evt, msg);
+ ret = ops->page_response(dev, evt, msg);
list_del(&evt->list);
kfree(evt);
break;
@@ -1524,7 +1523,7 @@ EXPORT_SYMBOL_GPL(fsl_mc_device_group);
static int iommu_get_def_domain_type(struct device *dev)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
return IOMMU_DOMAIN_DMA;
@@ -1583,7 +1582,7 @@ static int iommu_alloc_default_domain(struct iommu_group *group,
*/
static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
struct iommu_group *group;
int ret;
@@ -1591,9 +1590,6 @@ static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
if (group)
return group;
- if (!ops)
- return ERR_PTR(-EINVAL);
-
group = ops->device_group(dev);
if (WARN_ON_ONCE(group == NULL))
return ERR_PTR(-EINVAL);
@@ -1748,7 +1744,7 @@ static int iommu_group_do_dma_attach(struct device *dev, void *data)
struct iommu_domain *domain = data;
int ret = 0;
- if (!iommu_is_attach_deferred(domain, dev))
+ if (!iommu_is_attach_deferred(dev))
ret = __iommu_attach_device(domain, dev);
return ret;
@@ -1762,10 +1758,10 @@ static int __iommu_group_dma_attach(struct iommu_group *group)
static int iommu_group_do_probe_finalize(struct device *dev, void *data)
{
- struct iommu_domain *domain = data;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
- if (domain->ops->probe_finalize)
- domain->ops->probe_finalize(dev);
+ if (ops->probe_finalize)
+ ops->probe_finalize(dev);
return 0;
}
@@ -1954,10 +1950,11 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
if (!domain)
return NULL;
- domain->ops = bus->iommu_ops;
domain->type = type;
/* Assume all sizes by default; the driver may override this later */
- domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;
+ domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
+ if (!domain->ops)
+ domain->ops = bus->iommu_ops->default_domain_ops;
if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) {
iommu_domain_free(domain);
@@ -1975,7 +1972,7 @@ EXPORT_SYMBOL_GPL(iommu_domain_alloc);
void iommu_domain_free(struct iommu_domain *domain)
{
iommu_put_dma_cookie(domain);
- domain->ops->domain_free(domain);
+ domain->ops->free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);
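
The iommu_domain_free() hunk above (domain->ops->free instead of ->domain_free) and the later switches from "const struct iommu_ops *ops = domain->ops" to "const struct iommu_domain_ops *ops = domain->ops" assume the ops structure has been split into per-device and per-domain parts. A partial sketch of that split follows, showing only a few of the callbacks used in this file; the field list and exact signatures are assumptions inferred from the usage in this diff:

    /* Sketch: per-device callbacks, reachable via dev_iommu_ops(dev). */
    struct iommu_ops {
            void (*release_device)(struct device *dev);
            void (*probe_finalize)(struct device *dev);
            struct iommu_group *(*device_group)(struct device *dev);
            bool (*is_attach_deferred)(struct device *dev);
            int (*page_response)(struct device *dev, struct iommu_fault_event *evt,
                                 struct iommu_page_response *msg);
            void (*get_resv_regions)(struct device *dev, struct list_head *list);
            void (*put_resv_regions)(struct device *dev, struct list_head *list);
            unsigned long pgsize_bitmap;
            /* used by __iommu_domain_alloc() when the driver leaves domain->ops unset */
            const struct iommu_domain_ops *default_domain_ops;
    };

    /* Sketch: per-domain callbacks, reachable via domain->ops. */
    struct iommu_domain_ops {
            int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
            void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
            int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
                             phys_addr_t paddr, size_t pgsize, size_t pgcount,
                             int prot, gfp_t gfp, size_t *mapped);
            size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
                                  size_t pgsize, size_t pgcount,
                                  struct iommu_iotlb_gather *gather);
            void (*free)(struct iommu_domain *domain);
    };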
@@ -2023,228 +2020,16 @@ EXPORT_SYMBOL_GPL(iommu_attach_device);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
{
- const struct iommu_ops *ops = domain->ops;
-
- if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev))
+ if (iommu_is_attach_deferred(dev))
return __iommu_attach_device(domain, dev);
return 0;
}
-/*
- * Check flags and other user provided data for valid combinations. We also
- * make sure no reserved fields or unused flags are set. This is to ensure
- * not breaking userspace in the future when these fields or flags are used.
- */
-static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info)
-{
- u32 mask;
- int i;
-
- if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
- return -EINVAL;
-
- mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1;
- if (info->cache & ~mask)
- return -EINVAL;
-
- if (info->granularity >= IOMMU_INV_GRANU_NR)
- return -EINVAL;
-
- switch (info->granularity) {
- case IOMMU_INV_GRANU_ADDR:
- if (info->cache & IOMMU_CACHE_INV_TYPE_PASID)
- return -EINVAL;
-
- mask = IOMMU_INV_ADDR_FLAGS_PASID |
- IOMMU_INV_ADDR_FLAGS_ARCHID |
- IOMMU_INV_ADDR_FLAGS_LEAF;
-
- if (info->granu.addr_info.flags & ~mask)
- return -EINVAL;
- break;
- case IOMMU_INV_GRANU_PASID:
- mask = IOMMU_INV_PASID_FLAGS_PASID |
- IOMMU_INV_PASID_FLAGS_ARCHID;
- if (info->granu.pasid_info.flags & ~mask)
- return -EINVAL;
-
- break;
- case IOMMU_INV_GRANU_DOMAIN:
- if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB)
- return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
-
- /* Check reserved padding fields */
- for (i = 0; i < sizeof(info->padding); i++) {
- if (info->padding[i])
- return -EINVAL;
- }
-
- return 0;
-}
-
-int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev,
- void __user *uinfo)
-{
- struct iommu_cache_invalidate_info inv_info = { 0 };
- u32 minsz;
- int ret;
-
- if (unlikely(!domain->ops->cache_invalidate))
- return -ENODEV;
-
- /*
- * No new spaces can be added before the variable sized union, the
- * minimum size is the offset to the union.
- */
- minsz = offsetof(struct iommu_cache_invalidate_info, granu);
-
- /* Copy minsz from user to get flags and argsz */
- if (copy_from_user(&inv_info, uinfo, minsz))
- return -EFAULT;
-
- /* Fields before the variable size union are mandatory */
- if (inv_info.argsz < minsz)
- return -EINVAL;
-
- /* PASID and address granu require additional info beyond minsz */
- if (inv_info.granularity == IOMMU_INV_GRANU_PASID &&
- inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info))
- return -EINVAL;
-
- if (inv_info.granularity == IOMMU_INV_GRANU_ADDR &&
- inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info))
- return -EINVAL;
-
- /*
- * User might be using a newer UAPI header which has a larger data
- * size, we shall support the existing flags within the current
- * size. Copy the remaining user data _after_ minsz but not more
- * than the current kernel supported size.
- */
- if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz,
- min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz))
- return -EFAULT;
-
- /* Now the argsz is validated, check the content */
- ret = iommu_check_cache_invl_data(&inv_info);
- if (ret)
- return ret;
-
- return domain->ops->cache_invalidate(domain, dev, &inv_info);
-}
-EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate);
-
-static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data)
-{
- u64 mask;
- int i;
-
- if (data->version != IOMMU_GPASID_BIND_VERSION_1)
- return -EINVAL;
-
- /* Check the range of supported formats */
- if (data->format >= IOMMU_PASID_FORMAT_LAST)
- return -EINVAL;
-
- /* Check all flags */
- mask = IOMMU_SVA_GPASID_VAL;
- if (data->flags & ~mask)
- return -EINVAL;
-
- /* Check reserved padding fields */
- for (i = 0; i < sizeof(data->padding); i++) {
- if (data->padding[i])
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int iommu_sva_prepare_bind_data(void __user *udata,
- struct iommu_gpasid_bind_data *data)
-{
- u32 minsz;
-
- /*
- * No new spaces can be added before the variable sized union, the
- * minimum size is the offset to the union.
- */
- minsz = offsetof(struct iommu_gpasid_bind_data, vendor);
-
- /* Copy minsz from user to get flags and argsz */
- if (copy_from_user(data, udata, minsz))
- return -EFAULT;
-
- /* Fields before the variable size union are mandatory */
- if (data->argsz < minsz)
- return -EINVAL;
- /*
- * User might be using a newer UAPI header, we shall let IOMMU vendor
- * driver decide on what size it needs. Since the guest PASID bind data
- * can be vendor specific, larger argsz could be the result of extension
- * for one vendor but it should not affect another vendor.
- * Copy the remaining user data _after_ minsz
- */
- if (copy_from_user((void *)data + minsz, udata + minsz,
- min_t(u32, data->argsz, sizeof(*data)) - minsz))
- return -EFAULT;
-
- return iommu_check_bind_data(data);
-}
-
-int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev,
- void __user *udata)
-{
- struct iommu_gpasid_bind_data data = { 0 };
- int ret;
-
- if (unlikely(!domain->ops->sva_bind_gpasid))
- return -ENODEV;
-
- ret = iommu_sva_prepare_bind_data(udata, &data);
- if (ret)
- return ret;
-
- return domain->ops->sva_bind_gpasid(domain, dev, &data);
-}
-EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid);
-
-int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
- ioasid_t pasid)
-{
- if (unlikely(!domain->ops->sva_unbind_gpasid))
- return -ENODEV;
-
- return domain->ops->sva_unbind_gpasid(dev, pasid);
-}
-EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
-
-int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
- void __user *udata)
-{
- struct iommu_gpasid_bind_data data = { 0 };
- int ret;
-
- if (unlikely(!domain->ops->sva_bind_gpasid))
- return -ENODEV;
-
- ret = iommu_sva_prepare_bind_data(udata, &data);
- if (ret)
- return ret;
-
- return iommu_sva_unbind_gpasid(domain, dev, data.hpasid);
-}
-EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid);
-
static void __iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- if (iommu_is_attach_deferred(domain, dev))
+ if (iommu_is_attach_deferred(dev))
return;
if (unlikely(domain->ops->detach_dev == NULL))
@@ -2458,7 +2243,7 @@ static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot,
gfp_t gfp, size_t *mapped)
{
- const struct iommu_ops *ops = domain->ops;
+ const struct iommu_domain_ops *ops = domain->ops;
size_t pgsize, count;
int ret;
@@ -2481,7 +2266,7 @@ static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
- const struct iommu_ops *ops = domain->ops;
+ const struct iommu_domain_ops *ops = domain->ops;
unsigned long orig_iova = iova;
unsigned int min_pagesz;
size_t orig_size = size;
@@ -2541,7 +2326,7 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
- const struct iommu_ops *ops = domain->ops;
+ const struct iommu_domain_ops *ops = domain->ops;
int ret;
ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
@@ -2570,7 +2355,7 @@ static size_t __iommu_unmap_pages(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather)
{
- const struct iommu_ops *ops = domain->ops;
+ const struct iommu_domain_ops *ops = domain->ops;
size_t pgsize, count;
pgsize = iommu_pgsize(domain, iova, iova, size, &count);
@@ -2583,7 +2368,7 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather)
{
- const struct iommu_ops *ops = domain->ops;
+ const struct iommu_domain_ops *ops = domain->ops;
size_t unmapped_page, unmapped = 0;
unsigned long orig_iova = iova;
unsigned int min_pagesz;
@@ -2659,7 +2444,7 @@ static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot,
gfp_t gfp)
{
- const struct iommu_ops *ops = domain->ops;
+ const struct iommu_domain_ops *ops = domain->ops;
size_t len = 0, mapped = 0;
phys_addr_t start;
unsigned int i = 0;
@@ -2792,17 +2577,17 @@ EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);
void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
- if (ops && ops->get_resv_regions)
+ if (ops->get_resv_regions)
ops->get_resv_regions(dev, list);
}
void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
- if (ops && ops->put_resv_regions)
+ if (ops->put_resv_regions)
ops->put_resv_regions(dev, list);
}
@@ -2959,8 +2744,6 @@ EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
/*
* The device drivers should do the necessary cleanups before calling this.
- * For example, before disabling the aux-domain feature, the device driver
- * should detach all aux-domains. Otherwise, this will return -EBUSY.
*/
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
@@ -2988,50 +2771,6 @@ bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
}
EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
-/*
- * Aux-domain specific attach/detach.
- *
- * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
- * true. Also, as long as domains are attached to a device through this
- * interface, any tries to call iommu_attach_device() should fail
- * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
- * This should make us safe against a device being attached to a guest as a
- * whole while there are still pasid users on it (aux and sva).
- */
-int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
-{
- int ret = -ENODEV;
-
- if (domain->ops->aux_attach_dev)
- ret = domain->ops->aux_attach_dev(domain, dev);
-
- if (!ret)
- trace_attach_device_to_domain(dev);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
-
-void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
-{
- if (domain->ops->aux_detach_dev) {
- domain->ops->aux_detach_dev(domain, dev);
- trace_detach_device_from_domain(dev);
- }
-}
-EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
-
-int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
-{
- int ret = -ENODEV;
-
- if (domain->ops->aux_get_pasid)
- ret = domain->ops->aux_get_pasid(domain, dev);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
-
/**
* iommu_sva_bind_device() - Bind a process address space to a device
* @dev: the device
@@ -3053,9 +2792,9 @@ iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
struct iommu_group *group;
struct iommu_sva *handle = ERR_PTR(-EINVAL);
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
- if (!ops || !ops->sva_bind)
+ if (!ops->sva_bind)
return ERR_PTR(-ENODEV);
group = iommu_group_get(dev);
@@ -3096,9 +2835,9 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
{
struct iommu_group *group;
struct device *dev = handle->dev;
- const struct iommu_ops *ops = dev->bus->iommu_ops;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
- if (!ops || !ops->sva_unbind)
+ if (!ops->sva_unbind)
return;
group = iommu_group_get(dev);
@@ -3115,9 +2854,9 @@ EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
- const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
+ const struct iommu_ops *ops = dev_iommu_ops(handle->dev);
- if (!ops || !ops->sva_get_pasid)
+ if (!ops->sva_get_pasid)
return IOMMU_PASID_INVALID;
return ops->sva_get_pasid(handle);