author     Dave Airlie <airlied@redhat.com>   2020-07-24 08:28:09 +1000
committer  Dave Airlie <airlied@redhat.com>   2020-07-24 08:48:05 +1000
commit     41206a073ceebc517245677a19f52ba6379b33a9 (patch)
tree       2fc35aac6abe32b99058ad55b0fc6e4d449d1056 /drivers/dma
parent     206739119508d5ab4b42ab480ff61a7e6cd72d7c (diff)
parent     ba47d845d715a010f7b51f6f89bae32845e6acb7 (diff)
download   linux-41206a073ceebc517245677a19f52ba6379b33a9.tar.bz2
Merge v5.8-rc6 into drm-next
I've got a silent conflict + two trees based on fixes to merge. Fixes a silent merge with amdgpu.

Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/dmatest.c               2
-rw-r--r--  drivers/dma/dw/core.c              12
-rw-r--r--  drivers/dma/fsl-edma-common.c      28
-rw-r--r--  drivers/dma/fsl-edma-common.h       2
-rw-r--r--  drivers/dma/fsl-edma.c              7
-rw-r--r--  drivers/dma/idxd/cdev.c            19
-rw-r--r--  drivers/dma/idxd/device.c          25
-rw-r--r--  drivers/dma/idxd/idxd.h             1
-rw-r--r--  drivers/dma/idxd/irq.c              3
-rw-r--r--  drivers/dma/idxd/sysfs.c            5
-rw-r--r--  drivers/dma/imx-sdma.c             11
-rw-r--r--  drivers/dma/ioat/dma.c             12
-rw-r--r--  drivers/dma/ioat/dma.h              2
-rw-r--r--  drivers/dma/mcf-edma.c              7
-rw-r--r--  drivers/dma/sh/usb-dmac.c           2
-rw-r--r--  drivers/dma/tegra210-adma.c         5
-rw-r--r--  drivers/dma/ti/k3-udma-private.c    1
-rw-r--r--  drivers/dma/ti/k3-udma.c           39
18 files changed, 124 insertions, 59 deletions
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index b175229a4b01..604f80357931 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -1176,6 +1176,8 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp)
} else if (dmatest_run) {
if (!is_threaded_test_pending(info)) {
pr_info("No channels configured, continue with any\n");
+ if (!is_threaded_test_run(info))
+ stop_threaded_test(info);
add_threaded_test(info);
}
start_threaded_tests(info);
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 21cb2a58dbd2..a1b56f52db2f 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -118,16 +118,11 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
- return;
-
dw->initialize_chan(dwc);
/* Enable interrupts */
channel_set_bit(dw, MASK.XFER, dwc->mask);
channel_set_bit(dw, MASK.ERROR, dwc->mask);
-
- set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
}
/*----------------------------------------------------------------------*/
@@ -954,8 +949,6 @@ static void dwc_issue_pending(struct dma_chan *chan)
void do_dw_dma_off(struct dw_dma *dw)
{
- unsigned int i;
-
dma_writel(dw, CFG, 0);
channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
@@ -966,9 +959,6 @@ void do_dw_dma_off(struct dw_dma *dw)
while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
cpu_relax();
-
- for (i = 0; i < dw->dma.chancnt; i++)
- clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
}
void do_dw_dma_on(struct dw_dma *dw)
@@ -1032,8 +1022,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
/* Clear custom channel configuration */
memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));
- clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
-
/* Disable interrupts */
channel_clear_bit(dw, MASK.XFER, dwc->mask);
channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 5697c3622699..930ae268c497 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -352,26 +352,28 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
/*
* TCD parameters are stored in struct fsl_edma_hw_tcd in little
* endian format. However, we need to load the TCD registers in
- * big- or little-endian obeying the eDMA engine model endian.
+ * big- or little-endian obeying the eDMA engine model endian,
+ * and this is performed from specific edma_write functions
*/
edma_writew(edma, 0, &regs->tcd[ch].csr);
- edma_writel(edma, le32_to_cpu(tcd->saddr), &regs->tcd[ch].saddr);
- edma_writel(edma, le32_to_cpu(tcd->daddr), &regs->tcd[ch].daddr);
- edma_writew(edma, le16_to_cpu(tcd->attr), &regs->tcd[ch].attr);
- edma_writew(edma, le16_to_cpu(tcd->soff), &regs->tcd[ch].soff);
+ edma_writel(edma, (s32)tcd->saddr, &regs->tcd[ch].saddr);
+ edma_writel(edma, (s32)tcd->daddr, &regs->tcd[ch].daddr);
- edma_writel(edma, le32_to_cpu(tcd->nbytes), &regs->tcd[ch].nbytes);
- edma_writel(edma, le32_to_cpu(tcd->slast), &regs->tcd[ch].slast);
+ edma_writew(edma, (s16)tcd->attr, &regs->tcd[ch].attr);
+ edma_writew(edma, tcd->soff, &regs->tcd[ch].soff);
- edma_writew(edma, le16_to_cpu(tcd->citer), &regs->tcd[ch].citer);
- edma_writew(edma, le16_to_cpu(tcd->biter), &regs->tcd[ch].biter);
- edma_writew(edma, le16_to_cpu(tcd->doff), &regs->tcd[ch].doff);
+ edma_writel(edma, (s32)tcd->nbytes, &regs->tcd[ch].nbytes);
+ edma_writel(edma, (s32)tcd->slast, &regs->tcd[ch].slast);
- edma_writel(edma, le32_to_cpu(tcd->dlast_sga),
+ edma_writew(edma, (s16)tcd->citer, &regs->tcd[ch].citer);
+ edma_writew(edma, (s16)tcd->biter, &regs->tcd[ch].biter);
+ edma_writew(edma, (s16)tcd->doff, &regs->tcd[ch].doff);
+
+ edma_writel(edma, (s32)tcd->dlast_sga,
&regs->tcd[ch].dlast_sga);
- edma_writew(edma, le16_to_cpu(tcd->csr), &regs->tcd[ch].csr);
+ edma_writew(edma, (s16)tcd->csr, &regs->tcd[ch].csr);
}
static inline
@@ -589,6 +591,8 @@ void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
struct virt_dma_desc *vdesc;
+ lockdep_assert_held(&fsl_chan->vchan.lock);
+
vdesc = vchan_next_desc(&fsl_chan->vchan);
if (!vdesc)
return;
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 67e422590c9a..ec1169741de1 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -33,7 +33,7 @@
#define EDMA_TCD_ATTR_DSIZE_16BIT BIT(0)
#define EDMA_TCD_ATTR_DSIZE_32BIT BIT(1)
#define EDMA_TCD_ATTR_DSIZE_64BIT (BIT(0) | BIT(1))
-#define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(3) | BIT(0))
+#define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(2) | BIT(0))
#define EDMA_TCD_ATTR_SSIZE_8BIT 0
#define EDMA_TCD_ATTR_SSIZE_16BIT (EDMA_TCD_ATTR_DSIZE_16BIT << 8)
#define EDMA_TCD_ATTR_SSIZE_32BIT (EDMA_TCD_ATTR_DSIZE_32BIT << 8)
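[Note] The ATTR SSIZE/DSIZE fields encode the access size as log2 of the byte width, as the surrounding defines show: 8-bit = 0, 16-bit = BIT(0) = 1, 32-bit = BIT(1) = 2, 64-bit = 3, so a 32-byte burst is 5 = 0b101 = BIT(2) | BIT(0). The old BIT(3) | BIT(0) value (9) does not fit the field, which is what the one-line fix above corrects. A minimal sketch of that relationship, using a hypothetical helper name that is not part of the driver:

#include <linux/bits.h>
#include <linux/log2.h>
#include <linux/types.h>

/*
 * Hypothetical helper: the eDMA TCD ATTR size fields encode the access
 * width as log2(bytes), so 32 bytes -> 5 -> 0b101 == (BIT(2) | BIT(0)).
 */
static inline u32 edma_tcd_attr_size_enc(unsigned int bytes)
{
	return ilog2(bytes);
}

With that encoding, edma_tcd_attr_size_enc(32) yields 5, matching the corrected EDMA_TCD_ATTR_DSIZE_32BYTE above.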
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index eff7ebd8cf35..90bb72af306c 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -45,6 +45,13 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
fsl_chan = &fsl_edma->chans[ch];
spin_lock(&fsl_chan->vchan.lock);
+
+ if (!fsl_chan->edesc) {
+ /* terminate_all called before */
+ spin_unlock(&fsl_chan->vchan.lock);
+ continue;
+ }
+
if (!fsl_chan->edesc->iscyclic) {
list_del(&fsl_chan->edesc->vdesc.node);
vchan_cookie_complete(&fsl_chan->edesc->vdesc);
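[Note] This check (and the identical one added to mcf-edma.c further down) closes a race with terminate_all: the interrupt can fire, terminate_all can free the in-flight descriptor, and only then does the handler take the vchan lock, so edesc must be re-validated under that lock. A minimal sketch of the shape, with invented stand-in types (my_chan, my_desc) rather than the driver's real structures:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct my_desc;				/* stand-in for the driver descriptor */

struct my_chan {
	spinlock_t lock;		/* plays the role of vchan.lock */
	struct my_desc *edesc;		/* NULL once terminate_all has run */
};

static irqreturn_t my_edma_tx_handler(int irq, void *dev_id)
{
	struct my_chan *chan = dev_id;

	spin_lock(&chan->lock);

	/* terminate_all may have freed the descriptor before we got here. */
	if (!chan->edesc) {
		spin_unlock(&chan->lock);
		return IRQ_HANDLED;
	}

	/* ... complete or advance chan->edesc as the real handlers do ... */

	spin_unlock(&chan->lock);
	return IRQ_HANDLED;
}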
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index ff49847e37a8..cb376cf6a2d2 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -74,6 +74,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
struct idxd_device *idxd;
struct idxd_wq *wq;
struct device *dev;
+ int rc = 0;
wq = inode_wq(inode);
idxd = wq->idxd;
@@ -81,17 +82,27 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));
- if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq))
- return -EBUSY;
-
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
+ mutex_lock(&wq->wq_lock);
+
+ if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) {
+ rc = -EBUSY;
+ goto failed;
+ }
+
ctx->wq = wq;
filp->private_data = ctx;
idxd_wq_get(wq);
+ mutex_unlock(&wq->wq_lock);
return 0;
+
+ failed:
+ mutex_unlock(&wq->wq_lock);
+ kfree(ctx);
+ return rc;
}
static int idxd_cdev_release(struct inode *node, struct file *filep)
@@ -105,7 +116,9 @@ static int idxd_cdev_release(struct inode *node, struct file *filep)
filep->private_data = NULL;
kfree(ctx);
+ mutex_lock(&wq->wq_lock);
idxd_wq_put(wq);
+ mutex_unlock(&wq->wq_lock);
return 0;
}
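[Note] The reworked open path allocates the context first, then takes wq_lock around both the dedicated-wq exclusivity check and the refcount bump, so two concurrent opens can no longer both pass the check; release takes the same lock around the put. A rough sketch of that open/close discipline with invented types (my_wq, my_ctx), not the idxd structures:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct my_wq {
	struct mutex lock;
	bool dedicated;
	int users;
};

struct my_ctx {
	struct my_wq *wq;
};

static int my_cdev_open(struct my_wq *wq, struct file *filp)
{
	struct my_ctx *ctx;
	int rc = 0;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&wq->lock);
	/* Exclusivity check and user-count bump must share one critical section. */
	if (wq->dedicated && wq->users > 0) {
		rc = -EBUSY;
		goto out_unlock;
	}
	wq->users++;
	ctx->wq = wq;
	filp->private_data = ctx;
	mutex_unlock(&wq->lock);
	return 0;

out_unlock:
	mutex_unlock(&wq->lock);
	kfree(ctx);
	return rc;
}

static int my_cdev_release(struct file *filp)
{
	struct my_ctx *ctx = filp->private_data;

	filp->private_data = NULL;
	mutex_lock(&ctx->wq->lock);
	ctx->wq->users--;		/* drop under the same lock as open */
	mutex_unlock(&ctx->wq->lock);
	kfree(ctx);
	return 0;
}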
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 8d79a8787104..8d2718c585dc 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -320,6 +320,31 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq)
devm_iounmap(dev, wq->dportal);
}
+void idxd_wq_disable_cleanup(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ int i, wq_offset;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ memset(&wq->wqcfg, 0, sizeof(wq->wqcfg));
+ wq->type = IDXD_WQT_NONE;
+ wq->size = 0;
+ wq->group = NULL;
+ wq->threshold = 0;
+ wq->priority = 0;
+ clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
+ memset(wq->name, 0, WQ_NAME_SIZE);
+
+ for (i = 0; i < 8; i++) {
+ wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
+ iowrite32(0, idxd->reg_base + wq_offset);
+ dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
+ wq->id, i, wq_offset,
+ ioread32(idxd->reg_base + wq_offset));
+ }
+}
+
/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index b8f8a363b4a7..908c8d0ef3ab 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -290,6 +290,7 @@ int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
+void idxd_wq_disable_cleanup(struct idxd_wq *wq);
/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 6510791b9921..8a35f58da689 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -141,7 +141,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
if (!err)
- return IRQ_HANDLED;
+ goto out;
gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
if (gensts.state == IDXD_DEVICE_STATE_HALT) {
@@ -162,6 +162,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
spin_unlock_bh(&idxd->dev_lock);
}
+ out:
idxd_unmask_msix_vector(idxd, irq_entry->id);
return IRQ_HANDLED;
}
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 052dae5d6ddd..2e2c5082f322 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -315,6 +315,11 @@ static int idxd_config_bus_remove(struct device *dev)
idxd_unregister_dma_device(idxd);
spin_lock_irqsave(&idxd->dev_lock, flags);
rc = idxd_device_disable(idxd);
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ idxd_wq_disable_cleanup(wq);
+ }
spin_unlock_irqrestore(&idxd->dev_lock, flags);
module_put(THIS_MODULE);
if (rc < 0)
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 91774039ae5d..270992c4fe47 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1331,8 +1331,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
sdma_channel_synchronize(chan);
- if (sdmac->event_id0 >= 0)
- sdma_event_disable(sdmac, sdmac->event_id0);
+ sdma_event_disable(sdmac, sdmac->event_id0);
if (sdmac->event_id1)
sdma_event_disable(sdmac, sdmac->event_id1);
@@ -1632,11 +1631,9 @@ static int sdma_config(struct dma_chan *chan,
memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
/* Set ENBLn earlier to make sure dma request triggered after that */
- if (sdmac->event_id0 >= 0) {
- if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
- return -EINVAL;
- sdma_event_enable(sdmac, sdmac->event_id0);
- }
+ if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
+ return -EINVAL;
+ sdma_event_enable(sdmac, sdmac->event_id0);
if (sdmac->event_id1) {
if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 8ad0ad861c86..fd782aee02d9 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -26,6 +26,18 @@
#include "../dmaengine.h"
+int completion_timeout = 200;
+module_param(completion_timeout, int, 0644);
+MODULE_PARM_DESC(completion_timeout,
+ "set ioat completion timeout [msec] (default 200 [msec])");
+int idle_timeout = 2000;
+module_param(idle_timeout, int, 0644);
+MODULE_PARM_DESC(idle_timeout,
+ "set ioat idel timeout [msec] (default 2000 [msec])");
+
+#define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout)
+#define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout)
+
static char *chanerr_str[] = {
"DMA Transfer Source Address Error",
"DMA Transfer Destination Address Error",
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index e6b622e1ba92..f7f31fdf14cf 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -104,8 +104,6 @@ struct ioatdma_chan {
#define IOAT_RUN 5
#define IOAT_CHAN_ACTIVE 6
struct timer_list timer;
- #define COMPLETION_TIMEOUT msecs_to_jiffies(100)
- #define IDLE_TIMEOUT msecs_to_jiffies(2000)
#define RESET_DELAY msecs_to_jiffies(100)
struct ioatdma_device *ioat_dma;
dma_addr_t completion_dma;
diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c
index e15bd15a9ef6..e12b754e6398 100644
--- a/drivers/dma/mcf-edma.c
+++ b/drivers/dma/mcf-edma.c
@@ -35,6 +35,13 @@ static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
mcf_chan = &mcf_edma->chans[ch];
spin_lock(&mcf_chan->vchan.lock);
+
+ if (!mcf_chan->edesc) {
+ /* terminate_all called before */
+ spin_unlock(&mcf_chan->vchan.lock);
+ continue;
+ }
+
if (!mcf_chan->edesc->iscyclic) {
list_del(&mcf_chan->edesc->vdesc.node);
vchan_cookie_complete(&mcf_chan->edesc->vdesc);
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index b218a013c260..8f7ceb698226 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -586,6 +586,8 @@ static void usb_dmac_isr_transfer_end(struct usb_dmac_chan *chan)
desc->residue = usb_dmac_get_current_residue(chan, desc,
desc->sg_index - 1);
desc->done_cookie = desc->vd.tx.cookie;
+ desc->vd.tx_result.result = DMA_TRANS_NOERROR;
+ desc->vd.tx_result.residue = desc->residue;
vchan_cookie_complete(&desc->vd);
/* Restart the next transfer if this driver has a next desc */
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index db58d7e4f9fe..c5fa2ef74abc 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -658,6 +658,7 @@ static int tegra_adma_alloc_chan_resources(struct dma_chan *dc)
ret = pm_runtime_get_sync(tdc2dev(tdc));
if (ret < 0) {
+ pm_runtime_put_noidle(tdc2dev(tdc));
free_irq(tdc->irq, tdc);
return ret;
}
@@ -869,8 +870,10 @@ static int tegra_adma_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
goto rpm_disable;
+ }
ret = tegra_adma_init(tdma);
if (ret)
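[Note] Both hunks apply the usual rule that pm_runtime_get_sync() raises the usage count even when it returns an error, so the failure path has to call pm_runtime_put_noidle() to rebalance it. A minimal probe-style sketch of that rule (generic names, not the Tegra driver's); newer kernels can use pm_runtime_resume_and_get(), which folds the put into the error case:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int my_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		/* get_sync bumped the usage count even though it failed. */
		pm_runtime_put_noidle(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* ... set the hardware up while it is runtime-resumed ... */

	pm_runtime_put(&pdev->dev);
	return 0;
}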
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
index 0b8f3dd6b146..77e8e67d995b 100644
--- a/drivers/dma/ti/k3-udma-private.c
+++ b/drivers/dma/ti/k3-udma-private.c
@@ -42,6 +42,7 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
ud = platform_get_drvdata(pdev);
if (!ud) {
pr_debug("UDMA has not been probed\n");
+ put_device(&pdev->dev);
return ERR_PTR(-EPROBE_DEFER);
}
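[Note] of_find_device_by_node() returns its platform device with an extra reference held, so every early exit after the lookup must drop it with put_device(); the one-liner above adds the drop that the -EPROBE_DEFER path was missing. A sketch of the lookup-and-defer pattern with generic names (not the UDMA helpers):

#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

static void *my_get_peer_drvdata(struct device_node *np)
{
	struct platform_device *pdev;
	void *data;

	pdev = of_find_device_by_node(np);	/* holds a device reference */
	if (!pdev)
		return ERR_PTR(-ENODEV);

	data = platform_get_drvdata(pdev);
	if (!data) {
		/* Peer not probed yet: release the reference before deferring. */
		put_device(&pdev->dev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	/* On success the reference is kept; the caller puts it when done. */
	return data;
}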
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index c91e2dc1bb72..6c879a734360 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -1753,7 +1753,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
dev_err(ud->ddev.dev,
"Descriptor pool allocation failed\n");
uc->use_dma_pool = false;
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_cleanup;
}
}
@@ -1773,16 +1774,18 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
ret = udma_get_chan_pair(uc);
if (ret)
- return ret;
+ goto err_cleanup;
ret = udma_alloc_tx_resources(uc);
- if (ret)
- return ret;
+ if (ret) {
+ udma_put_rchan(uc);
+ goto err_cleanup;
+ }
ret = udma_alloc_rx_resources(uc);
if (ret) {
udma_free_tx_resources(uc);
- return ret;
+ goto err_cleanup;
}
uc->config.src_thread = ud->psil_base + uc->tchan->id;
@@ -1800,10 +1803,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
uc->id);
ret = udma_alloc_tx_resources(uc);
- if (ret) {
- uc->config.remote_thread_id = -1;
- return ret;
- }
+ if (ret)
+ goto err_cleanup;
uc->config.src_thread = ud->psil_base + uc->tchan->id;
uc->config.dst_thread = uc->config.remote_thread_id;
@@ -1820,10 +1821,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
uc->id);
ret = udma_alloc_rx_resources(uc);
- if (ret) {
- uc->config.remote_thread_id = -1;
- return ret;
- }
+ if (ret)
+ goto err_cleanup;
uc->config.src_thread = uc->config.remote_thread_id;
uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
@@ -1838,7 +1837,9 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
/* Can not happen */
dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
__func__, uc->id, uc->config.dir);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_cleanup;
+
}
/* check if the channel configuration was successful */
@@ -1847,7 +1848,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
if (udma_is_chan_running(uc)) {
dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
- udma_stop(uc);
+ udma_reset_chan(uc, false);
if (udma_is_chan_running(uc)) {
dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
ret = -EBUSY;
@@ -1906,8 +1907,6 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
udma_reset_rings(uc);
- INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
- udma_check_tx_completion);
return 0;
err_irq_free:
@@ -1919,7 +1918,7 @@ err_psi_free:
err_res_free:
udma_free_tx_resources(uc);
udma_free_rx_resources(uc);
-
+err_cleanup:
udma_reset_uchan(uc);
if (uc->use_dma_pool) {
@@ -3019,7 +3018,6 @@ static void udma_free_chan_resources(struct dma_chan *chan)
}
cancel_delayed_work_sync(&uc->tx_drain.work);
- destroy_delayed_work_on_stack(&uc->tx_drain.work);
if (uc->irq_num_ring > 0) {
free_irq(uc->irq_num_ring, uc);
@@ -3593,7 +3591,7 @@ static int udma_probe(struct platform_device *pdev)
return ret;
}
- ret = of_property_read_u32(navss_node, "ti,udma-atype", &ud->atype);
+ ret = of_property_read_u32(dev->of_node, "ti,udma-atype", &ud->atype);
if (!ret && ud->atype > 2) {
dev_err(dev, "Invalid atype: %u\n", ud->atype);
return -EINVAL;
@@ -3711,6 +3709,7 @@ static int udma_probe(struct platform_device *pdev)
tasklet_init(&uc->vc.task, udma_vchan_complete,
(unsigned long)&uc->vc);
init_completion(&uc->teardown_completed);
+ INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
}
ret = dma_async_device_register(&ud->ddev);
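[Note] The udma_alloc_chan_resources() changes above funnel every failure through a single err_cleanup label (in addition to the existing unwind labels), so half-configured channel state is always reset, and the TX-drain delayed work moves from on-stack init in alloc to a plain INIT_DELAYED_WORK() at probe time, matching the removal of destroy_delayed_work_on_stack() in the free path. A compressed sketch of that goto-unwind shape, with placeholder step functions instead of the real resource calls:

#include <linux/errno.h>

/* Placeholder steps standing in for the real channel/resource setup. */
static int step1_acquire(void) { return 0; }
static int step2_acquire(void) { return 0; }
static void undo_step1(void) { }
static void undo_common_state(void) { }

static int my_alloc_chan_resources(void)
{
	int ret;

	ret = step1_acquire();
	if (ret)
		goto err_cleanup;

	ret = step2_acquire();
	if (ret)
		goto err_step1;

	return 0;

err_step1:
	undo_step1();
err_cleanup:
	/* Always reset the partially configured channel state. */
	undo_common_state();
	return ret;
}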