author    Linus Torvalds <torvalds@linux-foundation.org>  2015-09-04 11:10:18 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-09-04 11:10:18 -0700
commit    352712274507645b6f82b8763977ad87321919a3 (patch)
tree      62ec7872a91e8e984f557f96496f2a369631a068 /drivers/dma/at_hdmac.c
parent    88a99886c26fec8bf662e7b6bc080431a8660326 (diff)
parent    ab98193dace971f4742eebb5103212e23bb392f5 (diff)
download  linux-352712274507645b6f82b8763977ad87321919a3.tar.bz2
Merge tag 'dmaengine-4.3-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
 "This time we have added a new capability for scatter-gathered memset
  using the dmaengine APIs. This is supported in the xdmac & hdmac
  drivers.

  We have added support for reusing descriptors, for examples like
  video buffers etc; drivers will follow.

  The behaviour of descriptor ack has been clarified and documented.

  New devices added are:
   - dma controller in sun[457]i SoCs
   - lpc18xx dmamux
   - ZTE ZX296702 dma controller
   - Analog Devices AXI-DMAC DMA controller
   - eDMA support for dma-crossbar
   - imx6sx support in imx-sdma driver
   - imx-sdma device to device support

  Other:
   - jz4780 fixes
   - ioatdma large refactor and cleanup for removal of ioat v1 and v2,
     which are deprecated, plus fixes
   - ACPI support in X-Gene DMA engine driver
   - ipu irq fixes
   - mvxor fixes
   - minor fixes spread through drivers"

[ The Kconfig and Makefile entries got re-sorted alphabetically, and I
  handled the conflict with the new Intel integrated IDMA driver by
  slightly mis-sorting it on purpose: "IDMA64" got sorted after "IMX"
  in order to keep the Intel entries together. I think it might be a
  good idea to just rename the IDMA64 config entry to INTEL_IDMA64 to
  make the sorting be a true sort, not this mishmash.

  Also, this merge disables COMPILE_TEST for the sun4i DMA controller,
  because it does not compile cleanly at all.  - Linus ]

* tag 'dmaengine-4.3-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (89 commits)
  dmaengine: ioatdma: add Broadwell EP ioatdma PCI dev IDs
  dmaengine :ipu: change ipu_irq_handler() to remove compile warning
  dmaengine: ioatdma: Fix variable array length
  dmaengine: ioatdma: fix sparse "error" with prep lock
  dmaengine: hdmac: Add memset capabilities
  dmaengine: sort the sh Makefile
  dmaengine: sort the sh Kconfig
  dmaengine: sort the dw Kconfig
  dmaengine: sort the Kconfig
  dmaengine: sort the makefile
  drivers/dma: make mv_xor.c driver explicitly non-modular
  dmaengine: Add support for the Analog Devices AXI-DMAC DMA controller
  devicetree: Add bindings documentation for Analog Devices AXI-DMAC
  dmaengine: xgene-dma: Fix the lock to allow client for further submission of requests
  dmaengine: ioatdma: fix coccinelle warning
  dmaengine: ioatdma: fix zero day warning on incompatible pointer type
  dmaengine: tegra-apb: Simplify locking for device using global pause
  dmaengine: tegra-apb: Remove unnecessary return statements and variables
  dmaengine: tegra-apb: Avoid unnecessary channel base address calculation
  dmaengine: tegra-apb: Remove unused variables
  ...
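The change to this file comes from the "dmaengine: hdmac: Add memset capabilities" commit in the list above. For orientation, here is a minimal, illustrative sketch of how a dmaengine consumer would exercise a DMA_MEMSET-capable channel; it is not part of the merge. The function name example_fill_buffer() and its parameters are hypothetical, the fill value 0x55 is arbitrary, and a DMA-mapped destination buffer is assumed to exist already. The helper calls (dma_request_channel(), dmaengine_prep_dma_memset(), dmaengine_submit(), dma_async_issue_pending(), dma_sync_wait()) are the generic dmaengine client API in mainline; older trees may need to invoke chan->device->device_prep_dma_memset() directly.

/*
 * Hypothetical dmaengine client: fill a DMA-able buffer with a 32-bit
 * pattern via the DMA_MEMSET capability.  Error handling is abbreviated
 * and completion is awaited synchronously for simplicity.
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/types.h>

static int example_fill_buffer(struct device *dev, dma_addr_t buf_phys,
			       size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;
	int ret = 0;

	/* Ask the dmaengine core for any channel advertising DMA_MEMSET. */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMSET, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/*
	 * buf_phys and len must respect the channel's fill_align
	 * (DMAENGINE_ALIGN_4_BYTES for at_hdmac, see the probe hunk below).
	 */
	tx = dmaengine_prep_dma_memset(chan, buf_phys, 0x55, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		ret = -ENOMEM;
		goto out;
	}

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie)) {
		ret = -EIO;
		goto out;
	}

	dma_async_issue_pending(chan);
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE) {
		dev_warn(dev, "memset transfer did not complete\n");
		ret = -EIO;
	}
out:
	dma_release_channel(chan);
	return ret;
}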
Diffstat (limited to 'drivers/dma/at_hdmac.c')
-rw-r--r--  drivers/dma/at_hdmac.c  127
1 file changed, 121 insertions(+), 6 deletions(-)
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index d3629b7482dd..58d406230d89 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -448,6 +448,7 @@ static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
struct dma_async_tx_descriptor *txd = &desc->txd;
+ struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
dev_vdbg(chan2dev(&atchan->chan_common),
"descriptor %u complete\n", txd->cookie);
@@ -456,6 +457,13 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
if (!atc_chan_is_cyclic(atchan))
dma_cookie_complete(txd);
+ /* If the transfer was a memset, free our temporary buffer */
+ if (desc->memset) {
+ dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
+ desc->memset_paddr);
+ desc->memset = false;
+ }
+
/* move children to free_list */
list_splice_init(&desc->tx_list, &atchan->free_list);
/* move myself to free_list */
@@ -717,14 +725,14 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
size_t len = 0;
int i;
+ if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
+ return NULL;
+
dev_info(chan2dev(chan),
"%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n",
__func__, xt->src_start, xt->dst_start, xt->numf,
xt->frame_size, flags);
- if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
- return NULL;
-
/*
* The controller can only "skip" X bytes every Y bytes, so we
* need to make sure we are given a template that fit that
@@ -873,6 +881,93 @@ err_desc_get:
return NULL;
}
+/**
+ * atc_prep_dma_memset - prepare a memset operation
+ * @chan: the channel to prepare operation on
+ * @dest: operation virtual destination address
+ * @value: value to set memory buffer to
+ * @len: operation length
+ * @flags: tx descriptor status flags
+ */
+static struct dma_async_tx_descriptor *
+atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
+ size_t len, unsigned long flags)
+{
+ struct at_dma_chan *atchan = to_at_dma_chan(chan);
+ struct at_dma *atdma = to_at_dma(chan->device);
+ struct at_desc *desc = NULL;
+ size_t xfer_count;
+ u32 ctrla;
+ u32 ctrlb;
+
+ dev_vdbg(chan2dev(chan), "%s: d0x%x v0x%x l0x%zx f0x%lx\n", __func__,
+ dest, value, len, flags);
+
+ if (unlikely(!len)) {
+ dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
+ return NULL;
+ }
+
+ if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
+ dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n",
+ __func__);
+ return NULL;
+ }
+
+ xfer_count = len >> 2;
+ if (xfer_count > ATC_BTSIZE_MAX) {
+ dev_err(chan2dev(chan), "%s: buffer is too big\n",
+ __func__);
+ return NULL;
+ }
+
+ ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
+ | ATC_SRC_ADDR_MODE_FIXED
+ | ATC_DST_ADDR_MODE_INCR
+ | ATC_FC_MEM2MEM;
+
+ ctrla = ATC_SRC_WIDTH(2) |
+ ATC_DST_WIDTH(2);
+
+ desc = atc_desc_get(atchan);
+ if (!desc) {
+ dev_err(chan2dev(chan), "%s: can't get a descriptor\n",
+ __func__);
+ return NULL;
+ }
+
+ desc->memset_vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC,
+ &desc->memset_paddr);
+ if (!desc->memset_vaddr) {
+ dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
+ __func__);
+ goto err_put_desc;
+ }
+
+ *desc->memset_vaddr = value;
+ desc->memset = true;
+
+ desc->lli.saddr = desc->memset_paddr;
+ desc->lli.daddr = dest;
+ desc->lli.ctrla = ctrla | xfer_count;
+ desc->lli.ctrlb = ctrlb;
+
+ desc->txd.cookie = -EBUSY;
+ desc->len = len;
+ desc->total_len = len;
+
+ /* set end-of-link on the descriptor */
+ set_desc_eol(desc);
+
+ desc->txd.flags = flags;
+
+ return &desc->txd;
+
+err_put_desc:
+ atc_desc_put(atchan, desc);
+ return NULL;
+}
+
/**
* atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
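For readers following only this file: the hunk above uses a few members that the same patch adds to the driver's private structures in at_hdmac_regs.h, which is outside this diffstat. Below is a sketch of those members, inferred purely from their use in atc_prep_dma_memset() and atc_chain_complete() above; the member names match the usage, but the surrounding declarations are approximate, not the actual header change.

/*
 * Inferred from the usage above; the real declarations live in
 * at_hdmac_regs.h and are added by the same patch.
 */
#include <linux/dmapool.h>	/* struct dma_pool */
#include <linux/types.h>	/* dma_addr_t, bool */

struct at_desc {
	/* ... existing members (lli, txd, tx_list, len, total_len, ...) ... */
	bool		memset;		/* descriptor owns a temporary fill buffer */
	dma_addr_t	memset_paddr;	/* bus address of that buffer */
	int		*memset_vaddr;	/* CPU pointer used to store the fill value */
};

struct at_dma {
	/* ... existing members (dma_common, clk, dma_desc_pool, ...) ... */
	struct dma_pool	*memset_pool;	/* pool of 4-byte coherent fill buffers */
};

The temporary buffer exists because the controller implements memset as a MEM2MEM transfer from a fixed source address (ATC_SRC_ADDR_MODE_FIXED | ATC_FC_MEM2MEM above), so the 32-bit fill value has to sit in coherent memory for the lifetime of the transfer; it is returned to memset_pool in atc_chain_complete(), shown in the first hunk.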
@@ -1755,6 +1850,8 @@ static int __init at_dma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
+ dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
+ dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);
@@ -1818,7 +1915,16 @@ static int __init at_dma_probe(struct platform_device *pdev)
if (!atdma->dma_desc_pool) {
dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
err = -ENOMEM;
- goto err_pool_create;
+ goto err_desc_pool_create;
+ }
+
+ /* create a pool of consistent memory blocks for memset blocks */
+ atdma->memset_pool = dma_pool_create("at_hdmac_memset_pool",
+ &pdev->dev, sizeof(int), 4, 0);
+ if (!atdma->memset_pool) {
+ dev_err(&pdev->dev, "No memory for memset dma pool\n");
+ err = -ENOMEM;
+ goto err_memset_pool_create;
}
/* clear any pending interrupt */
@@ -1864,6 +1970,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
+ if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) {
+ atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset;
+ atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES;
+ }
+
if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
/* controller can do slave DMA: can trigger cyclic transfers */
@@ -1884,8 +1995,9 @@ static int __init at_dma_probe(struct platform_device *pdev)
dma_writel(atdma, EN, AT_DMA_ENABLE);
- dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
+ dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n",
dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
+ dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
dma_has_cap(DMA_SG, atdma->dma_common.cap_mask) ? "sg-cpy " : "",
plat_dat->nr_channels);
@@ -1910,8 +2022,10 @@ static int __init at_dma_probe(struct platform_device *pdev)
err_of_dma_controller_register:
dma_async_device_unregister(&atdma->dma_common);
+ dma_pool_destroy(atdma->memset_pool);
+err_memset_pool_create:
dma_pool_destroy(atdma->dma_desc_pool);
-err_pool_create:
+err_desc_pool_create:
free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
clk_disable_unprepare(atdma->clk);
@@ -1936,6 +2050,7 @@ static int at_dma_remove(struct platform_device *pdev)
at_dma_off(atdma);
dma_async_device_unregister(&atdma->dma_common);
+ dma_pool_destroy(atdma->memset_pool);
dma_pool_destroy(atdma->dma_desc_pool);
free_irq(platform_get_irq(pdev, 0), atdma);