author    Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>  2022-10-05 20:48:23 +0300
committer Juergen Gross <jgross@suse.com>                       2022-10-06 08:30:13 +0200
commit    77be00f194b6e1647cddb644b7023b352c2c6ee8 (patch)
tree      3237742d79ae30d18df367d2d3ce788c528f800c /drivers/xen
parent    e433715b116553892ecad8796018ae4b64304252 (diff)
download  linux-77be00f194b6e1647cddb644b7023b352c2c6ee8.tar.bz2
xen/virtio: Fix potential deadlock when accessing xen_grant_dma_devices
As find_xen_grant_dma_data() is called from both interrupt and process
contexts, access to the xen_grant_dma_devices XArray must be protected
by xa_lock_irqsave to avoid a deadlock scenario.

As the XArray API doesn't provide an xa_store_irqsave helper, call the
lockless __xa_store directly and guard it externally.

Also move the storage of the XArray's entry to a separate helper.

Fixes: d6aca3504c7d ("xen/grant-dma-ops: Add option to restrict memory access under Xen")
Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Reviewed-by: Juergen Gross <jgross@suse.com>
Link: https://lore.kernel.org/r/20221005174823.1800761-3-olekstysh@gmail.com
Signed-off-by: Juergen Gross <jgross@suse.com>
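
In isolation, the locking pattern the patch applies looks roughly like the
sketch below; example_map and example_store() are illustrative names only,
not code from the patch. The XArray is created with XA_FLAGS_LOCK_IRQ so its
lock is known to be taken from interrupt context, and because xa_store() only
wraps __xa_store() in the plain xa_lock()/xa_unlock() pair, the IRQ-safe
variant has to be open-coded around the lockless __xa_store():

#include <linux/xarray.h>

/* XArray whose internal lock is also taken from interrupt context */
static DEFINE_XARRAY_FLAGS(example_map, XA_FLAGS_LOCK_IRQ);

/*
 * Illustrative helper (not from the patch): there is no xa_store_irqsave(),
 * so disable interrupts around the lockless __xa_store() by hand.
 * GFP_ATOMIC is used because the store runs under a spinlock with
 * interrupts disabled.
 */
static int example_store(unsigned long index, void *entry)
{
	unsigned long flags;
	int ret;

	xa_lock_irqsave(&example_map, flags);
	ret = xa_err(__xa_store(&example_map, index, entry, GFP_ATOMIC));
	xa_unlock_irqrestore(&example_map, flags);

	return ret;
}

The diff below applies this same shape to xen_grant_dma_devices and hides it
behind store_xen_grant_dma_data(), so both the interrupt-context lookups and
the process-context store take the XArray lock with interrupts disabled.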
Diffstat (limited to 'drivers/xen')
-rw-r--r--  drivers/xen/grant-dma-ops.c | 24 +++++++++++++++++++-----
1 file changed, 19 insertions(+), 5 deletions(-)
diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c
index 1998d0e8ce82..c66f56d24013 100644
--- a/drivers/xen/grant-dma-ops.c
+++ b/drivers/xen/grant-dma-ops.c
@@ -25,7 +25,7 @@ struct xen_grant_dma_data {
 	bool broken;
 };
 
-static DEFINE_XARRAY(xen_grant_dma_devices);
+static DEFINE_XARRAY_FLAGS(xen_grant_dma_devices, XA_FLAGS_LOCK_IRQ);
 
 #define XEN_GRANT_DMA_ADDR_OFF	(1ULL << 63)
 
@@ -42,14 +42,29 @@ static inline grant_ref_t dma_to_grant(dma_addr_t dma)
 static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev)
 {
 	struct xen_grant_dma_data *data;
+	unsigned long flags;
 
-	xa_lock(&xen_grant_dma_devices);
+	xa_lock_irqsave(&xen_grant_dma_devices, flags);
 	data = xa_load(&xen_grant_dma_devices, (unsigned long)dev);
-	xa_unlock(&xen_grant_dma_devices);
+	xa_unlock_irqrestore(&xen_grant_dma_devices, flags);
 
 	return data;
 }
 
+static int store_xen_grant_dma_data(struct device *dev,
+				    struct xen_grant_dma_data *data)
+{
+	unsigned long flags;
+	int ret;
+
+	xa_lock_irqsave(&xen_grant_dma_devices, flags);
+	ret = xa_err(__xa_store(&xen_grant_dma_devices, (unsigned long)dev, data,
+			GFP_ATOMIC));
+	xa_unlock_irqrestore(&xen_grant_dma_devices, flags);
+
+	return ret;
+}
+
 /*
  * DMA ops for Xen frontends (e.g. virtio).
  *
@@ -338,8 +353,7 @@ void xen_grant_setup_dma_ops(struct device *dev)
 	 */
 	data->backend_domid = iommu_spec.args[0];
 
-	if (xa_err(xa_store(&xen_grant_dma_devices, (unsigned long)dev, data,
-			GFP_KERNEL))) {
+	if (store_xen_grant_dma_data(dev, data)) {
 		dev_err(dev, "Cannot store Xen grant DMA data\n");
 		goto err;
 	}