From d97758e048e5fe91c7d8ff9ce5f030ee88d92161 Mon Sep 17 00:00:00 2001
From: Serge Semin
Date: Thu, 23 Jul 2020 03:58:41 +0300
Subject: dmaengine: Introduce min burst length capability

Some hardware may have a minimum burst transaction length greater than
the default of 0/1. Introduce a DMA device and slave capability for it,
which, if required, can be initialized by the DMA engine driver with a
device-specific value.

Signed-off-by: Serge Semin
Reviewed-by: Andy Shevchenko
Link: https://lore.kernel.org/r/20200723005848.31907-4-Sergey.Semin@baikalelectronics.ru
Signed-off-by: Vinod Koul
---
 drivers/dma/dmaengine.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers/dma/dmaengine.c')

diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 2b06a7a8629d..2f1a7c0c5446 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -592,6 +592,7 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 	caps->src_addr_widths = device->src_addr_widths;
 	caps->dst_addr_widths = device->dst_addr_widths;
 	caps->directions = device->directions;
+	caps->min_burst = device->min_burst;
 	caps->max_burst = device->max_burst;
 	caps->residue_granularity = device->residue_granularity;
 	caps->descriptor_reuse = device->descriptor_reuse;
--
cgit v1.2.3
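For context, a minimal sketch (not part of the series) of how a DMA engine
driver could advertise the new min_burst capability and how a DMA consumer
could honour it through dma_get_slave_caps(); the foo_* names and the burst
values are hypothetical:

#include <linux/dmaengine.h>

/* Provider side: advertise device-wide burst limits (hypothetical values). */
static void foo_dma_init_caps(struct dma_device *dma)
{
	dma->min_burst = 4;	/* hardware cannot issue bursts shorter than 4 */
	dma->max_burst = 256;
}

/* Consumer side: clamp the requested burst length to the reported range. */
static u32 foo_pick_burst(struct dma_chan *chan, u32 wanted)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return wanted;			/* no capabilities reported */

	if (caps.min_burst)
		wanted = max(wanted, caps.min_burst);
	if (caps.max_burst)
		wanted = min(wanted, caps.max_burst);

	return wanted;
}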
From b1b40b8fe7e8fb26e33bad1766ce322d2c63a6c7 Mon Sep 17 00:00:00 2001
From: Serge Semin
Date: Thu, 23 Jul 2020 03:58:42 +0300
Subject: dmaengine: Introduce max SG burst capability

Some devices lack hardware support for automatically walking through
and executing the entries of an SG list. In that case the burden of SG
list traversal and DMA engine re-initialization lies on the DMA engine
driver (normally implemented by using the DMA transfer completion IRQ
to recharge the DMA device with the next SG list entry). Such a
solution may not be suitable for some DMA consumers, though. In
particular, SPI devices need the Tx and Rx DMA channels to work
synchronously in order to avoid an Rx FIFO overflow. If the Rx DMA
channel is paused for some time while the Tx DMA channel keeps working,
implicitly pulling data into the Rx FIFO, the latter will eventually
overflow and data will be lost. So if SG list entries aren't
automatically fetched by the DMA engine, but are manually selected for
execution one by one in ISRs, deferred work, etc., this problem will
eventually occur due to the non-deterministic latencies of that
servicing.

In order to let the DMA consumer know about the DMA device capabilities
regarding hardware accelerated SG list traversal, introduce the
max_sg_burst capability. It is supposed to be initialized by the DMA
engine driver with 0 if there is no limitation on the number of SG
entries executed atomically, and with a non-zero value if there is such
a constraint, in which case that value is the upper limit.

Suggested-by: Andy Shevchenko
Signed-off-by: Serge Semin
Reviewed-by: Andy Shevchenko
Link: https://lore.kernel.org/r/20200723005848.31907-5-Sergey.Semin@baikalelectronics.ru
Signed-off-by: Vinod Koul
---
 drivers/dma/dmaengine.c   | 1 +
 include/linux/dmaengine.h | 8 ++++++++
 2 files changed, 9 insertions(+)

(limited to 'drivers/dma/dmaengine.c')

diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 2f1a7c0c5446..8177f78faeda 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -594,6 +594,7 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 	caps->directions = device->directions;
 	caps->min_burst = device->min_burst;
 	caps->max_burst = device->max_burst;
+	caps->max_sg_burst = device->max_sg_burst;
 	caps->residue_granularity = device->residue_granularity;
 	caps->descriptor_reuse = device->descriptor_reuse;
 	caps->cmd_pause = !!device->device_pause;
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 7d6e2aa26980..4cbe09e66db2 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -469,6 +469,9 @@ enum dma_residue_granularity {
  *	should be checked by controller as well
  * @min_burst: min burst capability per-transfer
  * @max_burst: max burst capability per-transfer
+ * @max_sg_burst: max number of SG list entries executed in a single burst
+ *	DMA transaction with no software intervention for reinitialization.
+ *	Zero value means unlimited number of entries.
  * @cmd_pause: true, if pause is supported (i.e. for reading residue or
  *	for resume later)
  * @cmd_resume: true, if resume is supported
@@ -483,6 +486,7 @@ struct dma_slave_caps {
 	u32 directions;
 	u32 min_burst;
 	u32 max_burst;
+	u32 max_sg_burst;
 	bool cmd_pause;
 	bool cmd_resume;
 	bool cmd_terminate;
@@ -775,6 +779,9 @@ struct dma_filter {
  *	should be checked by controller as well
  * @min_burst: min burst capability per-transfer
  * @max_burst: max burst capability per-transfer
+ * @max_sg_burst: max number of SG list entries executed in a single burst
+ *	DMA transaction with no software intervention for reinitialization.
+ *	Zero value means unlimited number of entries.
  * @residue_granularity: granularity of the transfer residue reported
  *	by tx_status
  * @device_alloc_chan_resources: allocate resources and return the
@@ -846,6 +853,7 @@ struct dma_device {
 	u32 directions;
 	u32 min_burst;
 	u32 max_burst;
+	u32 max_sg_burst;
 	bool descriptor_reuse;
 	enum dma_residue_granularity residue_granularity;
--
cgit v1.2.3
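A sketch (again not part of the series) of how a consumer such as a SPI
controller driver might use the new max_sg_burst capability to cap the
number of SG entries it hands to the engine per transfer; the foo_* name
is hypothetical:

#include <linux/dmaengine.h>
#include <linux/limits.h>

/*
 * Return how many SG entries the channel can execute atomically, i.e.
 * with no software intervention in between. A max_sg_burst of 0 means
 * the engine walks the SG list fully in hardware, so no limit applies.
 */
static unsigned int foo_max_sg_entries(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps) || !caps.max_sg_burst)
		return UINT_MAX;

	return caps.max_sg_burst;
}

A driver worried about the Rx FIFO overflow described above could then
split its messages into chunks of at most that many entries, keeping the
Tx and Rx channels in lockstep.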
From 3b6d694eb3eebd86ec44a119e730943ac8e03a6b Mon Sep 17 00:00:00 2001
From: Serge Semin
Date: Thu, 23 Jul 2020 03:58:43 +0300
Subject: dmaengine: Introduce DMA-device device_caps callback

There are DMA devices (like our version of the Synopsys DW DMAC) whose
DMA capabilities are non-uniformly distributed between the device
channels. In order to provide a way of exposing the channel-specific
parameters to the DMA engine consumers, introduce a new DMA-device
callback. If provided, it gets called from the dma_get_slave_caps()
method and can override the generic DMA-device capabilities.

Signed-off-by: Serge Semin
Reviewed-by: Andy Shevchenko
Link: https://lore.kernel.org/r/20200723005848.31907-6-Sergey.Semin@baikalelectronics.ru
Signed-off-by: Vinod Koul
---
 drivers/dma/dmaengine.c   | 10 ++++++++++
 include/linux/dmaengine.h |  4 ++++
 2 files changed, 14 insertions(+)

(limited to 'drivers/dma/dmaengine.c')

diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 8177f78faeda..a53e71d2bbd4 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -601,6 +601,16 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 	caps->cmd_resume = !!device->device_resume;
 	caps->cmd_terminate = !!device->device_terminate_all;
 
+	/*
+	 * DMA engine device might be configured with non-uniformly
+	 * distributed slave capabilities per device channels. In this
+	 * case the corresponding driver may provide the device_caps
+	 * callback to override the generic capabilities with
+	 * channel-specific ones.
+	 */
+	if (device->device_caps)
+		device->device_caps(chan, caps);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(dma_get_slave_caps);
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 4cbe09e66db2..d718671bfd25 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -801,6 +801,8 @@ struct dma_filter {
  *	be called after period_len bytes have been transferred.
  * @device_prep_interleaved_dma: Transfer expression in a generic way.
  * @device_prep_dma_imm_data: DMA's 8 byte immediate data to the dst address
+ * @device_caps: May be used to override the generic DMA slave capabilities
+ *	with per-channel specific ones
  * @device_config: Pushes a new configuration to a channel, return 0 or an error
  *	code
  * @device_pause: Pauses any transfer happening on a channel. Returns
@@ -901,6 +903,8 @@ struct dma_device {
 		struct dma_chan *chan, dma_addr_t dst, u64 data,
 		unsigned long flags);
 
+	void (*device_caps)(struct dma_chan *chan,
+			    struct dma_slave_caps *caps);
 	int (*device_config)(struct dma_chan *chan,
 			     struct dma_slave_config *config);
 	int (*device_pause)(struct dma_chan *chan);
--
cgit v1.2.3
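To close, a sketch of how a DMA engine driver with non-uniform channel
capabilities might implement the new device_caps callback; the foo_* names
and the "channel 0 has a deeper FIFO" rule are made up for illustration:

#include <linux/dmaengine.h>

/*
 * Hypothetical controller: channel 0 has a deeper FIFO and therefore
 * supports larger bursts than the remaining channels. Override the
 * generic values that dma_get_slave_caps() pre-filled from the
 * struct dma_device fields.
 */
static void foo_dma_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	if (chan->chan_id == 0)
		caps->max_burst = 256;
	else
		caps->max_burst = 32;
}

static void foo_dma_setup(struct dma_device *dma)
{
	dma->max_burst = 32;			/* conservative, device-wide value */
	dma->device_caps = foo_dma_caps;	/* per-channel override */
}

The callback only refines what the core has already copied from struct
dma_device, so drivers with uniform channels can simply leave device_caps
unset.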