author		Linus Torvalds <torvalds@linux-foundation.org>	2020-12-17 12:52:23 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-12-17 12:52:23 -0800
commit		6daa90439e91bb9a71864b02f7d0af8587ea889a (patch)
tree		cd747a40f9aac87089d3d73de9d42c5863fe7896 /drivers/dma/stm32-dma.c
parent		83005cd6bc76eef7bbf46b55bbb00ccc9534c38c (diff)
parent		115ff12aecfd55376d704fa2c0a2d117e5827f9f (diff)
Merge tag 'dmaengine-5.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine
Pull dmaengine updates from Vinod Koul:
 "The last dmaengine updates for this year :)

  This contains couple of new drivers, new device support and updates
  to bunch of drivers.

  New drivers/devices:
   - Qualcomm ADM driver
   - Qualcomm GPI driver
   - Allwinner A100 DMA support
   - Microchip Sama7g5 support
   - Mediatek MT8516 apdma

  Updates:
   - more updates to idxd driver and support for IAX config
   - runtime PM support for dw driver
   - TI drivers"

* tag 'dmaengine-5.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (75 commits)
  soc: ti: k3-ringacc: Use correct error casting in k3_ringacc_dmarings_init
  dmaengine: ti: k3-udma-glue: Add support for K3 PKTDMA
  dmaengine: ti: k3-udma: Initial support for K3 PKTDMA
  dmaengine: ti: k3-udma: Add support for BCDMA channel TPL handling
  dmaengine: ti: k3-udma: Initial support for K3 BCDMA
  soc: ti: k3-ringacc: add AM64 DMA rings support.
  dmaengine: ti: Add support for k3 event routers
  dmaengine: ti: k3-psil: Add initial map for AM64
  dmaengine: ti: k3-psil: Extend psil_endpoint_config for K3 PKTDMA
  dt-bindings: dma: ti: Add document for K3 PKTDMA
  dt-bindings: dma: ti: Add document for K3 BCDMA
  dmaengine: dmatest: Use dmaengine_get_dma_device
  dmaengine: doc: client: Update for dmaengine_get_dma_device() usage
  dmaengine: Add support for per channel coherency handling
  dmaengine: of-dma: Add support for optional router configuration callback
  dmaengine: ti: k3-udma-glue: Configure the dma_dev for rings
  dmaengine: ti: k3-udma-glue: Get the ringacc from udma_dev
  dmaengine: ti: k3-udma-glue: Add function to get device pointer for DMA API
  dmaengine: ti: k3-udma: Add support for second resource range from sysfw
  dmaengine: ti: k3-udma: Wait for peer teardown completion if supported
  ...
Diffstat (limited to 'drivers/dma/stm32-dma.c')
-rw-r--r--	drivers/dma/stm32-dma.c	47
1 file changed, 31 insertions(+), 16 deletions(-)
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index d0055d2f0b9a..f54ecb123a52 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -264,9 +264,11 @@ static int stm32_dma_get_width(struct stm32_dma_chan *chan,
}
static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
+ dma_addr_t buf_addr,
u32 threshold)
{
enum dma_slave_buswidth max_width;
+ u64 addr = buf_addr;
if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL)
max_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -277,6 +279,9 @@ static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
max_width > DMA_SLAVE_BUSWIDTH_1_BYTE)
max_width = max_width >> 1;
+ if (do_div(addr, max_width))
+ max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
return max_width;
}
@@ -648,21 +653,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
- if (status & STM32_DMA_TCI) {
- stm32_dma_irq_clear(chan, STM32_DMA_TCI);
- if (scr & STM32_DMA_SCR_TCIE)
- stm32_dma_handle_chan_done(chan);
- status &= ~STM32_DMA_TCI;
- }
- if (status & STM32_DMA_HTI) {
- stm32_dma_irq_clear(chan, STM32_DMA_HTI);
- status &= ~STM32_DMA_HTI;
- }
if (status & STM32_DMA_FEI) {
stm32_dma_irq_clear(chan, STM32_DMA_FEI);
status &= ~STM32_DMA_FEI;
if (sfcr & STM32_DMA_SFCR_FEIE) {
- if (!(scr & STM32_DMA_SCR_EN))
+ if (!(scr & STM32_DMA_SCR_EN) &&
+ !(status & STM32_DMA_TCI))
dev_err(chan2dev(chan), "FIFO Error\n");
else
dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
@@ -674,6 +670,19 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
if (sfcr & STM32_DMA_SCR_DMEIE)
dev_dbg(chan2dev(chan), "Direct mode overrun\n");
}
+
+ if (status & STM32_DMA_TCI) {
+ stm32_dma_irq_clear(chan, STM32_DMA_TCI);
+ if (scr & STM32_DMA_SCR_TCIE)
+ stm32_dma_handle_chan_done(chan);
+ status &= ~STM32_DMA_TCI;
+ }
+
+ if (status & STM32_DMA_HTI) {
+ stm32_dma_irq_clear(chan, STM32_DMA_HTI);
+ status &= ~STM32_DMA_HTI;
+ }
+
if (status) {
stm32_dma_irq_clear(chan, status);
dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
@@ -703,7 +712,7 @@ static void stm32_dma_issue_pending(struct dma_chan *c)
static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
enum dma_transfer_direction direction,
enum dma_slave_buswidth *buswidth,
- u32 buf_len)
+ u32 buf_len, dma_addr_t buf_addr)
{
enum dma_slave_buswidth src_addr_width, dst_addr_width;
int src_bus_width, dst_bus_width;
@@ -735,7 +744,8 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
return dst_burst_size;
/* Set memory data size */
- src_addr_width = stm32_dma_get_max_width(buf_len, fifoth);
+ src_addr_width = stm32_dma_get_max_width(buf_len, buf_addr,
+ fifoth);
chan->mem_width = src_addr_width;
src_bus_width = stm32_dma_get_width(chan, src_addr_width);
if (src_bus_width < 0)
@@ -784,7 +794,8 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
return src_burst_size;
/* Set memory data size */
- dst_addr_width = stm32_dma_get_max_width(buf_len, fifoth);
+ dst_addr_width = stm32_dma_get_max_width(buf_len, buf_addr,
+ fifoth);
chan->mem_width = dst_addr_width;
dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
if (dst_bus_width < 0)
@@ -872,7 +883,8 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
for_each_sg(sgl, sg, sg_len, i) {
ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
- sg_dma_len(sg));
+ sg_dma_len(sg),
+ sg_dma_address(sg));
if (ret < 0)
goto err;
@@ -940,7 +952,8 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
return NULL;
}
- ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len);
+ ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len,
+ buf_addr);
if (ret < 0)
return NULL;
@@ -1216,6 +1229,8 @@ static void stm32_dma_free_chan_resources(struct dma_chan *c)
pm_runtime_put(dmadev->ddev.dev);
vchan_free_chan_resources(to_virt_chan(c));
+ stm32_dma_clear_reg(&chan->chan_reg);
+ chan->threshold = 0;
}
static void stm32_dma_desc_free(struct virt_dma_desc *vdesc)