| author | Tudor Ambarus <tudor.ambarus@microchip.com> | 2020-01-23 14:03:17 +0000 |
|---|---|---|
| committer | Vinod Koul <vkoul@kernel.org> | 2020-02-25 11:27:27 +0530 |
| commit | 191bd1cad3535a30c2931dc1757e260afe03854b (patch) | |
| tree | 2c5237475f045f78ac744db8ada45c9be9f46f84 | /drivers/dma |
| parent | 8592f2c81ebc494017d574fd8b731be44087a2e9 (diff) | |
| download | linux-191bd1cad3535a30c2931dc1757e260afe03854b.tar.bz2 | |
dmaengine: at_xdmac: Fix locking in tasklet
Tasklets run with all interrupts enabled. This means that we should
replace the (already present) spin_lock_irqsave() uses in the tasklet
path with spin_lock_irq(), while still protecting against being
interrupted by an IRQ that tries to take the same lock (via calls to
device_prep_dma_*(), for example).
spin_lock() and spin_lock_bh() in tasklets are not enough to protect
against IRQs; update these to spin_lock_irq() as well.
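A minimal sketch of this rule, using hypothetical foo_* names rather than the driver's actual code: a tasklet runs in softirq context with hardirqs still enabled, so any lock it shares with IRQ-context code must be taken with the _irq variant.

```c
/*
 * Hypothetical sketch, not from this patch: why a tasklet needs
 * spin_lock_irq() when hardirq-context code takes the same lock.
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct foo_chan {
	spinlock_t lock;		/* also taken from IRQ context */
	/* ... descriptor lists, hardware state ... */
};

static void foo_tasklet(unsigned long data)
{
	struct foo_chan *chan = (struct foo_chan *)data;

	/*
	 * Tasklets run in softirq context with hardirqs enabled.
	 * spin_lock() or spin_lock_bh() here would let an interrupt
	 * arrive on this CPU while the lock is held; if its handler
	 * (or a device_prep_dma_*() call made from IRQ context) also
	 * takes chan->lock, the CPU deadlocks. spin_lock_irq() is
	 * sufficient (no irqsave needed) because interrupts are known
	 * to be enabled at this point.
	 */
	spin_lock_irq(&chan->lock);
	/* ... update state shared with the IRQ paths ... */
	spin_unlock_irq(&chan->lock);
}
```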
at_xdmac_advance_work() can be called with interrupts enabled (when
called from the tasklet) or with interrupts disabled (when called from
at_xdmac_issue_pending()). Move the locking into the callers so that
spin_lock_irq() and spin_lock_irqsave() can be used for the respective
cases.
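A sketch of that caller-side locking pattern (hypothetical foo_* names again, not the patch itself): the helper only asserts that the lock is held, and each caller picks the locking primitive matching its own interrupt state.

```c
/*
 * Hypothetical sketch, not the driver's code: locking moved out of
 * the helper and into its callers.
 */
#include <linux/dmaengine.h>
#include <linux/spinlock.h>

struct foo_chan {
	struct dma_chan chan;
	spinlock_t lock;
};

/* The helper no longer locks; the lockdep assertion (an addition for
 * this sketch, not in the patch) documents the callers' obligation. */
static void foo_advance_work(struct foo_chan *chan)
{
	lockdep_assert_held(&chan->lock);
	/* ... start the next queued transfer, if any ... */
}

/* Tasklet caller: interrupts are known to be enabled here. */
static void foo_tasklet(unsigned long data)
{
	struct foo_chan *chan = (struct foo_chan *)data;

	spin_lock_irq(&chan->lock);
	foo_advance_work(chan);
	spin_unlock_irq(&chan->lock);
}

/* issue_pending caller: interrupts may already be disabled, so the
 * irqsave variant preserves the caller's interrupt state. */
static void foo_issue_pending(struct dma_chan *c)
{
	struct foo_chan *chan = container_of(c, struct foo_chan, chan);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	foo_advance_work(chan);
	spin_unlock_irqrestore(&chan->lock, flags);
}
```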
Signed-off-by: Tudor Ambarus <tudor.ambarus@microchip.com>
Acked-by: Ludovic Desroches <ludovic.desroches@microchip.com>
Link: https://lore.kernel.org/r/20200123140237.125799-10-tudor.ambarus@microchip.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
Diffstat (limited to 'drivers/dma')
| -rw-r--r-- | drivers/dma/at_xdmac.c | 23 |
1 file changed, 12 insertions, 11 deletions
```diff
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 31321da69ae6..bb0eaf38b594 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1543,9 +1543,6 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
 static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 {
 	struct at_xdmac_desc	*desc;
-	unsigned long		flags;
-
-	spin_lock_irqsave(&atchan->lock, flags);
 
 	/*
 	 * If channel is enabled, do nothing, advance_work will be triggered
@@ -1559,8 +1556,6 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 		if (!desc->active_xfer)
 			at_xdmac_start_xfer(atchan, desc);
 	}
-
-	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
@@ -1596,7 +1591,7 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
 	if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
 		dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
 
-	spin_lock_bh(&atchan->lock);
+	spin_lock_irq(&atchan->lock);
 
 	/* Channel must be disabled first as it's not done automatically */
 	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
@@ -1607,7 +1602,7 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
 				 struct at_xdmac_desc,
 				 xfer_node);
 
-	spin_unlock_bh(&atchan->lock);
+	spin_unlock_irq(&atchan->lock);
 
 	/* Print bad descriptor's details if needed */
 	dev_dbg(chan2dev(&atchan->chan),
@@ -1640,21 +1635,21 @@ static void at_xdmac_tasklet(unsigned long data)
 		if (atchan->irq_status & error_mask)
 			at_xdmac_handle_error(atchan);
 
-		spin_lock(&atchan->lock);
+		spin_lock_irq(&atchan->lock);
 		desc = list_first_entry(&atchan->xfers_list,
 					struct at_xdmac_desc,
 					xfer_node);
 		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
 		if (!desc->active_xfer) {
 			dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
-			spin_unlock(&atchan->lock);
+			spin_unlock_irq(&atchan->lock);
 			return;
 		}
 
 		txd = &desc->tx_dma_desc;
 
 		at_xdmac_remove_xfer(atchan, desc);
-		spin_unlock(&atchan->lock);
+		spin_unlock_irq(&atchan->lock);
 
 		dma_cookie_complete(txd);
 		if (txd->flags & DMA_PREP_INTERRUPT)
@@ -1662,7 +1657,9 @@ static void at_xdmac_tasklet(unsigned long data)
 
 		dma_run_dependencies(txd);
 
+		spin_lock_irq(&atchan->lock);
 		at_xdmac_advance_work(atchan);
+		spin_unlock_irq(&atchan->lock);
 	}
 }
 
@@ -1723,11 +1720,15 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
 static void at_xdmac_issue_pending(struct dma_chan *chan)
 {
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
+	unsigned long		flags;
 
 	dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);
 
-	if (!at_xdmac_chan_is_cyclic(atchan))
+	if (!at_xdmac_chan_is_cyclic(atchan)) {
+		spin_lock_irqsave(&atchan->lock, flags);
 		at_xdmac_advance_work(atchan);
+		spin_unlock_irqrestore(&atchan->lock, flags);
+	}
 
 	return;
 }
```