author		Peter Ujfalusi <peter.ujfalusi@ti.com>	2017-09-18 11:16:26 +0300
committer	Vinod Koul <vinod.koul@intel.com>	2017-09-21 22:51:07 +0530
commit		87a2f622cc6446c7d09ac655b7b9b04886f16a4c (patch)
tree		de4fa5bff5fac78e3de3465f70ae0e8584f18262 /drivers/dma
parent		2bd6bf03f4c1c59381d62c61d03f6cc3fe71f66e (diff)
download	linux-87a2f622cc6446c7d09ac655b7b9b04886f16a4c.tar.bz2
dmaengine: edma: Align the memcpy acnt array size with the transfer
Memory to memory transfers do not have any special alignment needs regarding the ACNT array size, but if one of the areas is in a memory mapped region (like PCIe memory), we need to make sure that the ACNT array size is aligned with the memcpy parameters.

Before the "dmaengine: edma: Optimize memcpy operation" change the memcpy was set up in a different way: ACNT == number of bytes in a word based on __ffs(src | dest | len), with BCNT and CCNT looping over the necessary number of words to complete the transfer.

Instead of reverting that commit we can fix it to make sure that the ACNT size is aligned to the transfer.

Fixes: df6694f80365a ("dmaengine: edma: Optimize memcpy operation")
Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Cc: stable@vger.kernel.org
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
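The rule the patch applies can be sketched outside the kernel. The snippet below is a minimal userspace illustration, not the driver code: pick_array_size() is a hypothetical helper and __builtin_ffsl() stands in for the kernel's __ffs(). It shows how the lowest set bit of (src | dest | len) gives the word size the whole transfer is aligned to, and how the array size is then chosen as the largest value below 32K that is a multiple of that word size.

/*
 * Userspace sketch of the alignment rule (illustrative helper, not the
 * driver code): the lowest set bit of (src | dest | len) tells how many
 * bytes all three values are aligned to; the array size is the largest
 * value below 32K that is a multiple of that word size.
 */
#include <stdio.h>

#define SZ_32K	(32 * 1024)

static unsigned int pick_array_size(unsigned long src, unsigned long dest,
				    unsigned long len)
{
	/* __builtin_ffsl() is 1-based; the kernel's __ffs() is 0-based */
	switch (__builtin_ffsl(src | dest | len) - 1) {
	case 0:			/* only byte alignment is guaranteed */
		return SZ_32K - 1;
	case 1:			/* 16-bit aligned addresses and length */
		return SZ_32K - 2;
	default:		/* 32-bit (or better) alignment */
		return SZ_32K - 4;
	}
}

int main(void)
{
	/* fully 32-bit aligned transfer: 32764-byte arrays */
	printf("%u\n", pick_array_size(0x10000000, 0x20000000, 0x10000));
	/* an odd length forces byte-wide arrays: 32767 */
	printf("%u\n", pick_array_size(0x10000000, 0x20000000, 0x10001));
	return 0;
}

With the values above, a 32-bit aligned transfer gets 32764-byte arrays, while an odd length falls back to 32767-byte arrays, which is the behaviour the switch statement added by the patch implements.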
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/edma.c	19
1 file changed, 16 insertions(+), 3 deletions(-)
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 3879f80a4815..a7ea20e7b8e9 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1143,11 +1143,24 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 	struct edma_desc *edesc;
 	struct device *dev = chan->device->dev;
 	struct edma_chan *echan = to_edma_chan(chan);
-	unsigned int width, pset_len;
+	unsigned int width, pset_len, array_size;
 
 	if (unlikely(!echan || !len))
 		return NULL;
 
+	/* Align the array size (acnt block) with the transfer properties */
+	switch (__ffs((src | dest | len))) {
+	case 0:
+		array_size = SZ_32K - 1;
+		break;
+	case 1:
+		array_size = SZ_32K - 2;
+		break;
+	default:
+		array_size = SZ_32K - 4;
+		break;
+	}
+
 	if (len < SZ_64K) {
 		/*
 		 * Transfer size less than 64K can be handled with one paRAM
@@ -1169,7 +1182,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 		 * When the full_length is multibple of 32767 one slot can be
 		 * used to complete the transfer.
 		 */
-		width = SZ_32K - 1;
+		width = array_size;
 		pset_len = rounddown(len, width);
 		/* One slot is enough for lengths multiple of (SZ_32K -1) */
 		if (unlikely(pset_len == len))
@@ -1217,7 +1230,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 		}
 		dest += pset_len;
 		src += pset_len;
-		pset_len = width = len % (SZ_32K - 1);
+		pset_len = width = len % array_size;
 
 		ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
 				       width, pset_len, DMA_MEM_TO_MEM);
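For completeness, the effect of the changed lines on a transfer of 64K or more can be illustrated with assumed numbers (the length and alignment below are made up, not taken from the patch): the first paRAM set carries rounddown(len, array_size) bytes in array_size-sized arrays, and the second set carries the remainder. Because array_size is now a multiple of the transfer's word size, both parts stay word aligned.

/*
 * Standalone illustration of the two-slot split with an assumed length
 * and the 32-bit aligned array size (32764 bytes).
 */
#include <stdio.h>

#define SZ_32K	(32 * 1024)

int main(void)
{
	unsigned long len = 100000;		/* hypothetical transfer length */
	unsigned int array_size = SZ_32K - 4;	/* 32-bit aligned case */
	unsigned long first = (len / array_size) * array_size;	/* rounddown(len, array_size) */
	unsigned long second = len % array_size;	/* remainder for the second slot */

	printf("slot 0: %lu bytes = %lu arrays of %u\n",
	       first, first / array_size, array_size);
	printf("slot 1: %lu bytes\n", second);
	return 0;
}

For len = 100000 with 4-byte alignment this prints a 98292-byte bulk part (3 arrays of 32764 bytes) and a 1708-byte remainder, both multiples of 4; with the previous fixed SZ_32K - 1 the remainder would have been 1699 bytes and no longer word aligned, which is the problem the patch fixes.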