From 56b94b02cb079a049d8fe68ce4171f57bfb109b9 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Thu, 25 Oct 2018 15:06:06 +0100 Subject: dmaengine: mmp_pdma: remove dma_slave_config direction usage dma_slave_config direction was marked as deprecated quite some time back, remove the usage from this driver so that the field can be removed Signed-off-by: Vinod Koul --- drivers/dma/mmp_pdma.c | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index eb3a1f42ab06..334bab92d26d 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -96,6 +96,7 @@ struct mmp_pdma_chan { struct dma_async_tx_descriptor desc; struct mmp_pdma_phy *phy; enum dma_transfer_direction dir; + struct dma_slave_config slave_config; struct mmp_pdma_desc_sw *cyclic_first; /* first desc_sw if channel * is in cyclic mode */ @@ -140,6 +141,10 @@ struct mmp_pdma_device { #define to_mmp_pdma_dev(dmadev) \ container_of(dmadev, struct mmp_pdma_device, device) +static int mmp_pdma_config_write(struct dma_chan *dchan, + struct dma_slave_config *cfg, + enum dma_transfer_direction direction); + static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr) { u32 reg = (phy->idx << 4) + DDADR; @@ -537,6 +542,8 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, chan->byte_align = false; + mmp_pdma_config_write(dchan, &chan->slave_config, dir); + for_each_sg(sgl, sg, sg_len, i) { addr = sg_dma_address(sg); avail = sg_dma_len(sgl); @@ -619,6 +626,7 @@ mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan, return NULL; chan = to_mmp_pdma_chan(dchan); + mmp_pdma_config_write(dchan, &chan->slave_config, direction); switch (direction) { case DMA_MEM_TO_DEV: @@ -684,8 +692,9 @@ fail: return NULL; } -static int mmp_pdma_config(struct dma_chan *dchan, - struct dma_slave_config *cfg) +static int mmp_pdma_config_write(struct dma_chan *dchan, + struct dma_slave_config *cfg, + enum dma_transfer_direction direction) { struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); u32 maxburst = 0, addr = 0; @@ -694,12 +703,12 @@ static int mmp_pdma_config(struct dma_chan *dchan, if (!dchan) return -EINVAL; - if (cfg->direction == DMA_DEV_TO_MEM) { + if (direction == DMA_DEV_TO_MEM) { chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC; maxburst = cfg->src_maxburst; width = cfg->src_addr_width; addr = cfg->src_addr; - } else if (cfg->direction == DMA_MEM_TO_DEV) { + } else if (direction == DMA_MEM_TO_DEV) { chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG; maxburst = cfg->dst_maxburst; width = cfg->dst_addr_width; @@ -720,7 +729,7 @@ static int mmp_pdma_config(struct dma_chan *dchan, else if (maxburst == 32) chan->dcmd |= DCMD_BURST32; - chan->dir = cfg->direction; + chan->dir = direction; chan->dev_addr = addr; /* FIXME: drivers should be ported over to use the filter * function. 
Once that's done, the following two lines can @@ -732,6 +741,15 @@ static int mmp_pdma_config(struct dma_chan *dchan, return 0; } +static int mmp_pdma_config(struct dma_chan *dchan, + struct dma_slave_config *cfg) +{ + struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); + + memcpy(&chan->slave_config, cfg, sizeof(*cfg)); + return 0; +} + static int mmp_pdma_terminate_all(struct dma_chan *dchan) { struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); -- cgit v1.2.3 From ab39e1473acef33b4f0114088b51ac05e0b01ba3 Mon Sep 17 00:00:00 2001 From: Stefan Wahren Date: Tue, 23 Oct 2018 13:06:07 +0200 Subject: dmaengine: bcm2835: make license text and module license match The license text is specifying GPL v2 or later but the MODULE_LICENSE is set to GPL v2 which means GNU Public License v2 only. So choose the license text as the correct one. Signed-off-by: Stefan Wahren Acked-by: Florian Kauer Acked-by: Martin Sperl Signed-off-by: Vinod Koul --- drivers/dma/bcm2835-dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index cad55ab80d41..ed871eb8e3cb 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -1056,4 +1056,4 @@ module_platform_driver(bcm2835_dma_driver); MODULE_ALIAS("platform:bcm2835-dma"); MODULE_DESCRIPTION("BCM2835 DMA engine driver"); MODULE_AUTHOR("Florian Meier "); -MODULE_LICENSE("GPL v2"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 80c4445e56f4f237d8c89ec4cee58d9218f55c18 Mon Sep 17 00:00:00 2001 From: Stefan Wahren Date: Sat, 10 Nov 2018 16:34:40 +0100 Subject: dmaengine: bcm2835: Switch to SPDX identifier Adopt the SPDX license identifier headers to ease license compliance management. Signed-off-by: Stefan Wahren Reviewed-by: Eric Anholt Signed-off-by: Vinod Koul --- drivers/dma/bcm2835-dma.c | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index ed871eb8e3cb..1a44c8086d77 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0+ /* * BCM2835 DMA engine support * @@ -18,16 +19,6 @@ * * MARVELL MMP Peripheral DMA Driver * Copyright 2012 Marvell International Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. */ #include #include -- cgit v1.2.3 From 9be92baa4772a315ff258f59d87a8427d5015a7c Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 8 Nov 2018 06:32:44 +0000 Subject: dmaengine: sh: convert to SPDX identifiers This patch updates license to use SPDX-License-Identifier instead of verbose license text. 
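For reference, the identifier replaces the license boilerplate on the first line of each file, written in the comment style native to the file type; this matches the hunks in this series, and the exact rules live in Documentation/process/license-rules.rst:

    // SPDX-License-Identifier: GPL-2.0        (C sources)
    /* SPDX-License-Identifier: GPL-2.0 */     (C headers)
    # SPDX-License-Identifier: GPL-2.0         (Kconfig, Makefiles)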
Signed-off-by: Kuninori Morimoto Signed-off-by: Vinod Koul --- drivers/dma/sh/Kconfig | 1 + include/linux/shdma-base.h | 7 ++----- 2 files changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig index 6e0685f1a838..1c4675425a1e 100644 --- a/drivers/dma/sh/Kconfig +++ b/drivers/dma/sh/Kconfig @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 # # DMA engine configuration for sh # diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h index d927647e6350..6dfd05ef5c2d 100644 --- a/include/linux/shdma-base.h +++ b/include/linux/shdma-base.h @@ -1,4 +1,5 @@ -/* +/* SPDX-License-Identifier: GPL-2.0 + * * Dmaengine driver base library for DMA controllers, found on SH-based SoCs * * extracted from shdma.c and headers @@ -7,10 +8,6 @@ * Copyright (C) 2009 Nobuhiro Iwamatsu * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. - * - * This is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. */ #ifndef SHDMA_BASE_H -- cgit v1.2.3 From bc822e80170d672dd8ff0d07c521cf72f491cb6c Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 6 Nov 2018 13:45:10 +0000 Subject: dmaengine: sa11x0: unexport sa11x0_dma_filter_fn and clean up As we now have no users of sa11x0_dma_filter_fn() in the tree, we can unexport this function, and remove the now unused header file. Signed-off-by: Russell King Signed-off-by: Vinod Koul --- drivers/dma/sa11x0-dma.c | 21 ++++++++------------- include/linux/sa11x0-dma.h | 24 ------------------------ 2 files changed, 8 insertions(+), 37 deletions(-) delete mode 100644 include/linux/sa11x0-dma.h (limited to 'drivers/dma') diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index b31d07c7d93c..784d5f1a473b 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include @@ -830,6 +829,14 @@ static const struct dma_slave_map sa11x0_dma_map[] = { { "sa11x0-ssp", "rx", "Ser4SSPRc" }, }; +static bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param) +{ + struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); + const char *p = param; + + return !strcmp(c->name, p); +} + static int sa11x0_dma_init_dmadev(struct dma_device *dmadev, struct device *dev) { @@ -1087,18 +1094,6 @@ static struct platform_driver sa11x0_dma_driver = { .remove = sa11x0_dma_remove, }; -bool sa11x0_dma_filter_fn(struct dma_chan *chan, void *param) -{ - if (chan->device->dev->driver == &sa11x0_dma_driver.driver) { - struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); - const char *p = param; - - return !strcmp(c->name, p); - } - return false; -} -EXPORT_SYMBOL(sa11x0_dma_filter_fn); - static int __init sa11x0_dma_init(void) { return platform_driver_register(&sa11x0_dma_driver); diff --git a/include/linux/sa11x0-dma.h b/include/linux/sa11x0-dma.h deleted file mode 100644 index 65839a58b8e5..000000000000 --- a/include/linux/sa11x0-dma.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * SA11x0 DMA Engine support - * - * Copyright (C) 2012 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#ifndef __LINUX_SA11X0_DMA_H -#define __LINUX_SA11X0_DMA_H - -struct dma_chan; - -#if defined(CONFIG_DMA_SA11X0) || defined(CONFIG_DMA_SA11X0_MODULE) -bool sa11x0_dma_filter_fn(struct dma_chan *, void *); -#else -static inline bool sa11x0_dma_filter_fn(struct dma_chan *c, void *d) -{ - return false; -} -#endif - -#endif -- cgit v1.2.3 From 9b68cc012a73b9333863436ac876561674035ee0 Mon Sep 17 00:00:00 2001 From: Yangtao Li Date: Thu, 1 Nov 2018 11:35:43 -0400 Subject: dmaengine: ep93xx: fix some typo Signed-off-by: Yangtao Li Signed-off-by: Vinod Koul --- drivers/dma/ep93xx_dma.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index f674eb5fbbef..594a88f4f99c 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -997,7 +997,7 @@ ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, for (offset = 0; offset < len; offset += bytes) { desc = ep93xx_dma_desc_get(edmac); if (!desc) { - dev_warn(chan2dev(edmac), "couln't get descriptor\n"); + dev_warn(chan2dev(edmac), "couldn't get descriptor\n"); goto fail; } @@ -1069,7 +1069,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, desc = ep93xx_dma_desc_get(edmac); if (!desc) { - dev_warn(chan2dev(edmac), "couln't get descriptor\n"); + dev_warn(chan2dev(edmac), "couldn't get descriptor\n"); goto fail; } @@ -1149,7 +1149,7 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, for (offset = 0; offset < buf_len; offset += period_len) { desc = ep93xx_dma_desc_get(edmac); if (!desc) { - dev_warn(chan2dev(edmac), "couln't get descriptor\n"); + dev_warn(chan2dev(edmac), "couldn't get descriptor\n"); goto fail; } -- cgit v1.2.3 From b1f01e48df5a3454b88c5ff1eb4501f685351c67 Mon Sep 17 00:00:00 2001 From: Shun-Chih Yu Date: Thu, 18 Oct 2018 15:49:11 +0800 Subject: dmaengine: mediatek: Add MediaTek Command-Queue DMA controller for MT6765 SoC MediaTek Command-Queue DMA controller (CQDMA) on MT6765 SoC is dedicated to memory-to-memory transfer through queue based descriptor management. There are only 3 physical channels inside CQDMA, while the driver is extended to support 32 virtual channels for multiple dma users to issue dma requests onto the CQDMA simultaneously. Signed-off-by: Shun-Chih Yu Signed-off-by: Vinod Koul --- drivers/dma/mediatek/Kconfig | 13 + drivers/dma/mediatek/Makefile | 1 + drivers/dma/mediatek/mtk-cqdma.c | 951 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 965 insertions(+) create mode 100644 drivers/dma/mediatek/mtk-cqdma.c (limited to 'drivers/dma') diff --git a/drivers/dma/mediatek/Kconfig b/drivers/dma/mediatek/Kconfig index 27bac0bba09e..680fc0572d87 100644 --- a/drivers/dma/mediatek/Kconfig +++ b/drivers/dma/mediatek/Kconfig @@ -11,3 +11,16 @@ config MTK_HSDMA This controller provides the channels which is dedicated to memory-to-memory transfer to offload from CPU through ring- based descriptor management. + +config MTK_CQDMA + tristate "MediaTek Command-Queue DMA controller support" + depends on ARCH_MEDIATEK || COMPILE_TEST + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + select ASYNC_TX_ENABLE_CHANNEL_SWITCH + help + Enable support for Command-Queue DMA controller on MediaTek + SoCs. + + This controller provides the channels which is dedicated to + memory-to-memory transfer to offload from CPU. 
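As background for the API this controller plugs into: a client drives a memcpy-capable channel through the generic dmaengine calls. The sketch below is illustrative only and not part of this patch; the buffer addresses, length, and error handling are placeholders:

    #include <linux/dmaengine.h>

    /* Minimal sketch: offload one copy of 'len' bytes between two
     * already DMA-mapped buffers. 'dst', 'src', 'len' are placeholders.
     */
    static int cqdma_copy_example(dma_addr_t dst, dma_addr_t src, size_t len)
    {
    	struct dma_async_tx_descriptor *tx;
    	struct dma_chan *chan;
    	dma_cap_mask_t mask;
    	dma_cookie_t cookie;

    	dma_cap_zero(mask);
    	dma_cap_set(DMA_MEMCPY, mask);
    	chan = dma_request_chan_by_mask(&mask);
    	if (IS_ERR(chan))
    		return PTR_ERR(chan);

    	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
    				       DMA_PREP_INTERRUPT);
    	if (!tx) {
    		dma_release_channel(chan);
    		return -ENOMEM;
    	}

    	cookie = dmaengine_submit(tx);
    	dma_async_issue_pending(chan);
    	/* ... wait for completion, e.g. via tx->callback ... */
    	dma_release_channel(chan);
    	return dma_submit_error(cookie) ? -EIO : 0;
    }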
diff --git a/drivers/dma/mediatek/Makefile b/drivers/dma/mediatek/Makefile index 6e778f842f01..41bb3815f636 100644 --- a/drivers/dma/mediatek/Makefile +++ b/drivers/dma/mediatek/Makefile @@ -1 +1,2 @@ obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o +obj-$(CONFIG_MTK_CQDMA) += mtk-cqdma.o diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c new file mode 100644 index 000000000000..131f3974740d --- /dev/null +++ b/drivers/dma/mediatek/mtk-cqdma.c @@ -0,0 +1,951 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018-2019 MediaTek Inc. + +/* + * Driver for MediaTek Command-Queue DMA Controller + * + * Author: Shun-Chih Yu + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../virt-dma.h" + +#define MTK_CQDMA_USEC_POLL 10 +#define MTK_CQDMA_TIMEOUT_POLL 1000 +#define MTK_CQDMA_DMA_BUSWIDTHS BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) +#define MTK_CQDMA_ALIGN_SIZE 1 + +/* The default number of virtual channel */ +#define MTK_CQDMA_NR_VCHANS 32 + +/* The default number of physical channel */ +#define MTK_CQDMA_NR_PCHANS 3 + +/* Registers for underlying dma manipulation */ +#define MTK_CQDMA_INT_FLAG 0x0 +#define MTK_CQDMA_INT_EN 0x4 +#define MTK_CQDMA_EN 0x8 +#define MTK_CQDMA_RESET 0xc +#define MTK_CQDMA_FLUSH 0x14 +#define MTK_CQDMA_SRC 0x1c +#define MTK_CQDMA_DST 0x20 +#define MTK_CQDMA_LEN1 0x24 +#define MTK_CQDMA_LEN2 0x28 +#define MTK_CQDMA_SRC2 0x60 +#define MTK_CQDMA_DST2 0x64 + +/* Registers setting */ +#define MTK_CQDMA_EN_BIT BIT(0) +#define MTK_CQDMA_INT_FLAG_BIT BIT(0) +#define MTK_CQDMA_INT_EN_BIT BIT(0) +#define MTK_CQDMA_FLUSH_BIT BIT(0) + +#define MTK_CQDMA_WARM_RST_BIT BIT(0) +#define MTK_CQDMA_HARD_RST_BIT BIT(1) + +#define MTK_CQDMA_MAX_LEN GENMASK(27, 0) +#define MTK_CQDMA_ADDR_LIMIT GENMASK(31, 0) +#define MTK_CQDMA_ADDR2_SHFIT (32) + +/** + * struct mtk_cqdma_vdesc - The struct holding info describing virtual + * descriptor (CVD) + * @vd: An instance for struct virt_dma_desc + * @len: The total data size device wants to move + * @residue: The remaining data size device will move + * @dest: The destination address device wants to move to + * @src: The source address device wants to move from + * @ch: The pointer to the corresponding dma channel + * @node: The lise_head struct to build link-list for VDs + * @parent: The pointer to the parent CVD + */ +struct mtk_cqdma_vdesc { + struct virt_dma_desc vd; + size_t len; + size_t residue; + dma_addr_t dest; + dma_addr_t src; + struct dma_chan *ch; + + struct list_head node; + struct mtk_cqdma_vdesc *parent; +}; + +/** + * struct mtk_cqdma_pchan - The struct holding info describing physical + * channel (PC) + * @queue: Queue for the PDs issued to this PC + * @base: The mapped register I/O base of this PC + * @irq: The IRQ that this PC are using + * @refcnt: Track how many VCs are using this PC + * @tasklet: Tasklet for this PC + * @lock: Lock protect agaisting multiple VCs access PC + */ +struct mtk_cqdma_pchan { + struct list_head queue; + void __iomem *base; + u32 irq; + + refcount_t refcnt; + + struct tasklet_struct tasklet; + + /* lock to protect PC */ + spinlock_t lock; +}; + +/** + * struct mtk_cqdma_vchan - The struct holding info describing virtual + * channel (VC) + * @vc: An instance for struct virt_dma_chan + * @pc: The pointer to the underlying PC + * @issue_completion: The wait for all issued descriptors completited + * @issue_synchronize: Bool indicating channel 
synchronization starts + */ +struct mtk_cqdma_vchan { + struct virt_dma_chan vc; + struct mtk_cqdma_pchan *pc; + struct completion issue_completion; + bool issue_synchronize; +}; + +/** + * struct mtk_cqdma_device - The struct holding info describing CQDMA + * device + * @ddev: An instance for struct dma_device + * @clk: The clock that device internal is using + * @dma_requests: The number of VCs the device supports to + * @dma_channels: The number of PCs the device supports to + * @vc: The pointer to all available VCs + * @pc: The pointer to all the underlying PCs + */ +struct mtk_cqdma_device { + struct dma_device ddev; + struct clk *clk; + + u32 dma_requests; + u32 dma_channels; + struct mtk_cqdma_vchan *vc; + struct mtk_cqdma_pchan **pc; +}; + +static struct mtk_cqdma_device *to_cqdma_dev(struct dma_chan *chan) +{ + return container_of(chan->device, struct mtk_cqdma_device, ddev); +} + +static struct mtk_cqdma_vchan *to_cqdma_vchan(struct dma_chan *chan) +{ + return container_of(chan, struct mtk_cqdma_vchan, vc.chan); +} + +static struct mtk_cqdma_vdesc *to_cqdma_vdesc(struct virt_dma_desc *vd) +{ + return container_of(vd, struct mtk_cqdma_vdesc, vd); +} + +static struct device *cqdma2dev(struct mtk_cqdma_device *cqdma) +{ + return cqdma->ddev.dev; +} + +static u32 mtk_dma_read(struct mtk_cqdma_pchan *pc, u32 reg) +{ + return readl(pc->base + reg); +} + +static void mtk_dma_write(struct mtk_cqdma_pchan *pc, u32 reg, u32 val) +{ + writel_relaxed(val, pc->base + reg); +} + +static void mtk_dma_rmw(struct mtk_cqdma_pchan *pc, u32 reg, + u32 mask, u32 set) +{ + u32 val; + + val = mtk_dma_read(pc, reg); + val &= ~mask; + val |= set; + mtk_dma_write(pc, reg, val); +} + +static void mtk_dma_set(struct mtk_cqdma_pchan *pc, u32 reg, u32 val) +{ + mtk_dma_rmw(pc, reg, 0, val); +} + +static void mtk_dma_clr(struct mtk_cqdma_pchan *pc, u32 reg, u32 val) +{ + mtk_dma_rmw(pc, reg, val, 0); +} + +static void mtk_cqdma_vdesc_free(struct virt_dma_desc *vd) +{ + kfree(to_cqdma_vdesc(vd)); +} + +static int mtk_cqdma_poll_engine_done(struct mtk_cqdma_pchan *pc, bool atomic) +{ + u32 status = 0; + + if (!atomic) + return readl_poll_timeout(pc->base + MTK_CQDMA_EN, + status, + !(status & MTK_CQDMA_EN_BIT), + MTK_CQDMA_USEC_POLL, + MTK_CQDMA_TIMEOUT_POLL); + + return readl_poll_timeout_atomic(pc->base + MTK_CQDMA_EN, + status, + !(status & MTK_CQDMA_EN_BIT), + MTK_CQDMA_USEC_POLL, + MTK_CQDMA_TIMEOUT_POLL); +} + +static int mtk_cqdma_hard_reset(struct mtk_cqdma_pchan *pc) +{ + mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT); + mtk_dma_clr(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT); + + return mtk_cqdma_poll_engine_done(pc, false); +} + +static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc, + struct mtk_cqdma_vdesc *cvd) +{ + /* wait for the previous transaction done */ + if (mtk_cqdma_poll_engine_done(pc, true) < 0) + dev_err(cqdma2dev(to_cqdma_dev(cvd->ch)), "cqdma wait transaction timeout\n"); + + /* warm reset the dma engine for the new transaction */ + mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_WARM_RST_BIT); + if (mtk_cqdma_poll_engine_done(pc, true) < 0) + dev_err(cqdma2dev(to_cqdma_dev(cvd->ch)), "cqdma warm reset timeout\n"); + + /* setup the source */ + mtk_dma_set(pc, MTK_CQDMA_SRC, cvd->src & MTK_CQDMA_ADDR_LIMIT); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + mtk_dma_set(pc, MTK_CQDMA_SRC2, cvd->src >> MTK_CQDMA_ADDR2_SHFIT); +#else + mtk_dma_set(pc, MTK_CQDMA_SRC2, 0); +#endif + + /* setup the destination */ + mtk_dma_set(pc, MTK_CQDMA_DST, cvd->dest & MTK_CQDMA_ADDR_LIMIT); +#ifdef 
CONFIG_ARCH_DMA_ADDR_T_64BIT + mtk_dma_set(pc, MTK_CQDMA_DST2, cvd->dest >> MTK_CQDMA_ADDR2_SHFIT); +#else + mtk_dma_set(pc, MTK_CQDMA_SRC2, 0); +#endif + + /* setup the length */ + mtk_dma_set(pc, MTK_CQDMA_LEN1, cvd->len); + + /* start dma engine */ + mtk_dma_set(pc, MTK_CQDMA_EN, MTK_CQDMA_EN_BIT); +} + +static void mtk_cqdma_issue_vchan_pending(struct mtk_cqdma_vchan *cvc) +{ + struct virt_dma_desc *vd, *vd2; + struct mtk_cqdma_pchan *pc = cvc->pc; + struct mtk_cqdma_vdesc *cvd; + bool trigger_engine = false; + + lockdep_assert_held(&cvc->vc.lock); + lockdep_assert_held(&pc->lock); + + list_for_each_entry_safe(vd, vd2, &cvc->vc.desc_issued, node) { + /* need to trigger dma engine if PC's queue is empty */ + if (list_empty(&pc->queue)) + trigger_engine = true; + + cvd = to_cqdma_vdesc(vd); + + /* add VD into PC's queue */ + list_add_tail(&cvd->node, &pc->queue); + + /* start the dma engine */ + if (trigger_engine) + mtk_cqdma_start(pc, cvd); + + /* remove VD from list desc_issued */ + list_del(&vd->node); + } +} + +/* + * return true if this VC is active, + * meaning that there are VDs under processing by the PC + */ +static bool mtk_cqdma_is_vchan_active(struct mtk_cqdma_vchan *cvc) +{ + struct mtk_cqdma_vdesc *cvd; + + list_for_each_entry(cvd, &cvc->pc->queue, node) + if (cvc == to_cqdma_vchan(cvd->ch)) + return true; + + return false; +} + +/* + * return the pointer of the CVD that is just consumed by the PC + */ +static struct mtk_cqdma_vdesc +*mtk_cqdma_consume_work_queue(struct mtk_cqdma_pchan *pc) +{ + struct mtk_cqdma_vchan *cvc; + struct mtk_cqdma_vdesc *cvd, *ret = NULL; + + /* consume a CVD from PC's queue */ + cvd = list_first_entry_or_null(&pc->queue, + struct mtk_cqdma_vdesc, node); + if (unlikely(!cvd || !cvd->parent)) + return NULL; + + cvc = to_cqdma_vchan(cvd->ch); + ret = cvd; + + /* update residue of the parent CVD */ + cvd->parent->residue -= cvd->len; + + /* delete CVD from PC's queue */ + list_del(&cvd->node); + + spin_lock(&cvc->vc.lock); + + /* check whether all the child CVDs completed */ + if (!cvd->parent->residue) { + /* add the parent VD into list desc_completed */ + vchan_cookie_complete(&cvd->parent->vd); + + /* setup completion if this VC is under synchronization */ + if (cvc->issue_synchronize && !mtk_cqdma_is_vchan_active(cvc)) { + complete(&cvc->issue_completion); + cvc->issue_synchronize = false; + } + } + + spin_unlock(&cvc->vc.lock); + + /* start transaction for next CVD in the queue */ + cvd = list_first_entry_or_null(&pc->queue, + struct mtk_cqdma_vdesc, node); + if (cvd) + mtk_cqdma_start(pc, cvd); + + return ret; +} + +static void mtk_cqdma_tasklet_cb(unsigned long data) +{ + struct mtk_cqdma_pchan *pc = (struct mtk_cqdma_pchan *)data; + struct mtk_cqdma_vdesc *cvd = NULL; + unsigned long flags; + + spin_lock_irqsave(&pc->lock, flags); + /* consume the queue */ + cvd = mtk_cqdma_consume_work_queue(pc); + spin_unlock_irqrestore(&pc->lock, flags); + + /* submit the next CVD */ + if (cvd) { + dma_run_dependencies(&cvd->vd.tx); + + /* + * free child CVD after completion. + * the parent CVD would be freeed with desc_free by user. 
+ */ + if (cvd->parent != cvd) + kfree(cvd); + } + + /* re-enable interrupt before leaving tasklet */ + enable_irq(pc->irq); +} + +static irqreturn_t mtk_cqdma_irq(int irq, void *devid) +{ + struct mtk_cqdma_device *cqdma = devid; + irqreturn_t ret = IRQ_NONE; + bool schedule_tasklet = false; + u32 i; + + /* clear interrupt flags for each PC */ + for (i = 0; i < cqdma->dma_channels; ++i, schedule_tasklet = false) { + spin_lock(&cqdma->pc[i]->lock); + if (mtk_dma_read(cqdma->pc[i], + MTK_CQDMA_INT_FLAG) & MTK_CQDMA_INT_FLAG_BIT) { + /* clear interrupt */ + mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_FLAG, + MTK_CQDMA_INT_FLAG_BIT); + + schedule_tasklet = true; + ret = IRQ_HANDLED; + } + spin_unlock(&cqdma->pc[i]->lock); + + if (schedule_tasklet) { + /* disable interrupt */ + disable_irq_nosync(cqdma->pc[i]->irq); + + /* schedule the tasklet to handle the transactions */ + tasklet_schedule(&cqdma->pc[i]->tasklet); + } + } + + return ret; +} + +static struct virt_dma_desc *mtk_cqdma_find_active_desc(struct dma_chan *c, + dma_cookie_t cookie) +{ + struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); + struct virt_dma_desc *vd; + unsigned long flags; + + spin_lock_irqsave(&cvc->pc->lock, flags); + list_for_each_entry(vd, &cvc->pc->queue, node) + if (vd->tx.cookie == cookie) { + spin_unlock_irqrestore(&cvc->pc->lock, flags); + return vd; + } + spin_unlock_irqrestore(&cvc->pc->lock, flags); + + list_for_each_entry(vd, &cvc->vc.desc_issued, node) + if (vd->tx.cookie == cookie) + return vd; + + return NULL; +} + +static enum dma_status mtk_cqdma_tx_status(struct dma_chan *c, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); + struct mtk_cqdma_vdesc *cvd; + struct virt_dma_desc *vd; + enum dma_status ret; + unsigned long flags; + size_t bytes = 0; + + ret = dma_cookie_status(c, cookie, txstate); + if (ret == DMA_COMPLETE || !txstate) + return ret; + + spin_lock_irqsave(&cvc->vc.lock, flags); + vd = mtk_cqdma_find_active_desc(c, cookie); + spin_unlock_irqrestore(&cvc->vc.lock, flags); + + if (vd) { + cvd = to_cqdma_vdesc(vd); + bytes = cvd->residue; + } + + dma_set_residue(txstate, bytes); + + return ret; +} + +static void mtk_cqdma_issue_pending(struct dma_chan *c) +{ + struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); + unsigned long pc_flags; + unsigned long vc_flags; + + /* acquire PC's lock before VS's lock for lock dependency in tasklet */ + spin_lock_irqsave(&cvc->pc->lock, pc_flags); + spin_lock_irqsave(&cvc->vc.lock, vc_flags); + + if (vchan_issue_pending(&cvc->vc)) + mtk_cqdma_issue_vchan_pending(cvc); + + spin_unlock_irqrestore(&cvc->vc.lock, vc_flags); + spin_unlock_irqrestore(&cvc->pc->lock, pc_flags); +} + +static struct dma_async_tx_descriptor * +mtk_cqdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, + dma_addr_t src, size_t len, unsigned long flags) +{ + struct mtk_cqdma_vdesc **cvd; + struct dma_async_tx_descriptor *tx = NULL, *prev_tx = NULL; + size_t i, tlen, nr_vd; + + /* + * In the case that trsanction length is larger than the + * DMA engine supports, a single memcpy transaction needs + * to be separated into several DMA transactions. + * Each DMA transaction would be described by a CVD, + * and the first one is referred as the parent CVD, + * while the others are child CVDs. + * The parent CVD's tx descriptor is the only tx descriptor + * returned to the DMA user, and it should not be completed + * until all the child CVDs completed. 
+ */ + nr_vd = DIV_ROUND_UP(len, MTK_CQDMA_MAX_LEN); + cvd = kcalloc(nr_vd, sizeof(*cvd), GFP_NOWAIT); + if (!cvd) + return NULL; + + for (i = 0; i < nr_vd; ++i) { + cvd[i] = kzalloc(sizeof(*cvd[i]), GFP_NOWAIT); + if (!cvd[i]) { + for (; i > 0; --i) + kfree(cvd[i - 1]); + return NULL; + } + + /* setup dma channel */ + cvd[i]->ch = c; + + /* setup sourece, destination, and length */ + tlen = (len > MTK_CQDMA_MAX_LEN) ? MTK_CQDMA_MAX_LEN : len; + cvd[i]->len = tlen; + cvd[i]->src = src; + cvd[i]->dest = dest; + + /* setup tx descriptor */ + tx = vchan_tx_prep(to_virt_chan(c), &cvd[i]->vd, flags); + tx->next = NULL; + + if (!i) { + cvd[0]->residue = len; + } else { + prev_tx->next = tx; + cvd[i]->residue = tlen; + } + + cvd[i]->parent = cvd[0]; + + /* update the src, dest, len, prev_tx for the next CVD */ + src += tlen; + dest += tlen; + len -= tlen; + prev_tx = tx; + } + + return &cvd[0]->vd.tx; +} + +static void mtk_cqdma_free_inactive_desc(struct dma_chan *c) +{ + struct virt_dma_chan *vc = to_virt_chan(c); + unsigned long flags; + LIST_HEAD(head); + + /* + * set desc_allocated, desc_submitted, + * and desc_issued as the candicates to be freed + */ + spin_lock_irqsave(&vc->lock, flags); + list_splice_tail_init(&vc->desc_allocated, &head); + list_splice_tail_init(&vc->desc_submitted, &head); + list_splice_tail_init(&vc->desc_issued, &head); + spin_unlock_irqrestore(&vc->lock, flags); + + /* free descriptor lists */ + vchan_dma_desc_free_list(vc, &head); +} + +static void mtk_cqdma_free_active_desc(struct dma_chan *c) +{ + struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); + bool sync_needed = false; + unsigned long pc_flags; + unsigned long vc_flags; + + /* acquire PC's lock first due to lock dependency in dma ISR */ + spin_lock_irqsave(&cvc->pc->lock, pc_flags); + spin_lock_irqsave(&cvc->vc.lock, vc_flags); + + /* synchronization is required if this VC is active */ + if (mtk_cqdma_is_vchan_active(cvc)) { + cvc->issue_synchronize = true; + sync_needed = true; + } + + spin_unlock_irqrestore(&cvc->vc.lock, vc_flags); + spin_unlock_irqrestore(&cvc->pc->lock, pc_flags); + + /* waiting for the completion of this VC */ + if (sync_needed) + wait_for_completion(&cvc->issue_completion); + + /* free all descriptors in list desc_completed */ + vchan_synchronize(&cvc->vc); + + WARN_ONCE(!list_empty(&cvc->vc.desc_completed), + "Desc pending still in list desc_completed\n"); +} + +static int mtk_cqdma_terminate_all(struct dma_chan *c) +{ + /* free descriptors not processed yet by hardware */ + mtk_cqdma_free_inactive_desc(c); + + /* free descriptors being processed by hardware */ + mtk_cqdma_free_active_desc(c); + + return 0; +} + +static int mtk_cqdma_alloc_chan_resources(struct dma_chan *c) +{ + struct mtk_cqdma_device *cqdma = to_cqdma_dev(c); + struct mtk_cqdma_vchan *vc = to_cqdma_vchan(c); + struct mtk_cqdma_pchan *pc = NULL; + u32 i, min_refcnt = U32_MAX, refcnt; + unsigned long flags; + + /* allocate PC with the minimun refcount */ + for (i = 0; i < cqdma->dma_channels; ++i) { + refcnt = refcount_read(&cqdma->pc[i]->refcnt); + if (refcnt < min_refcnt) { + pc = cqdma->pc[i]; + min_refcnt = refcnt; + } + } + + if (!pc) + return -ENOSPC; + + spin_lock_irqsave(&pc->lock, flags); + + if (!refcount_read(&pc->refcnt)) { + /* allocate PC when the refcount is zero */ + mtk_cqdma_hard_reset(pc); + + /* enable interrupt for this PC */ + mtk_dma_set(pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT); + + /* + * refcount_inc would complain increment on 0; use-after-free. 
+ * Thus, we need to explicitly set it as 1 initially. + */ + refcount_set(&pc->refcnt, 1); + } else { + refcount_inc(&pc->refcnt); + } + + spin_unlock_irqrestore(&pc->lock, flags); + + vc->pc = pc; + + return 0; +} + +static void mtk_cqdma_free_chan_resources(struct dma_chan *c) +{ + struct mtk_cqdma_vchan *cvc = to_cqdma_vchan(c); + unsigned long flags; + + /* free all descriptors in all lists on the VC */ + mtk_cqdma_terminate_all(c); + + spin_lock_irqsave(&cvc->pc->lock, flags); + + /* PC is not freed until there is no VC mapped to it */ + if (refcount_dec_and_test(&cvc->pc->refcnt)) { + /* start the flush operation and stop the engine */ + mtk_dma_set(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT); + + /* wait for the completion of flush operation */ + if (mtk_cqdma_poll_engine_done(cvc->pc, false) < 0) + dev_err(cqdma2dev(to_cqdma_dev(c)), "cqdma flush timeout\n"); + + /* clear the flush bit and interrupt flag */ + mtk_dma_clr(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT); + mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_FLAG, + MTK_CQDMA_INT_FLAG_BIT); + + /* disable interrupt for this PC */ + mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT); + } + + spin_unlock_irqrestore(&cvc->pc->lock, flags); +} + +static int mtk_cqdma_hw_init(struct mtk_cqdma_device *cqdma) +{ + unsigned long flags; + int err; + u32 i; + + pm_runtime_enable(cqdma2dev(cqdma)); + pm_runtime_get_sync(cqdma2dev(cqdma)); + + err = clk_prepare_enable(cqdma->clk); + + if (err) { + pm_runtime_put_sync(cqdma2dev(cqdma)); + pm_runtime_disable(cqdma2dev(cqdma)); + return err; + } + + /* reset all PCs */ + for (i = 0; i < cqdma->dma_channels; ++i) { + spin_lock_irqsave(&cqdma->pc[i]->lock, flags); + if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0) { + dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n"); + spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); + + clk_disable_unprepare(cqdma->clk); + pm_runtime_put_sync(cqdma2dev(cqdma)); + pm_runtime_disable(cqdma2dev(cqdma)); + return -EINVAL; + } + spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); + } + + return 0; +} + +static void mtk_cqdma_hw_deinit(struct mtk_cqdma_device *cqdma) +{ + unsigned long flags; + u32 i; + + /* reset all PCs */ + for (i = 0; i < cqdma->dma_channels; ++i) { + spin_lock_irqsave(&cqdma->pc[i]->lock, flags); + if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0) + dev_err(cqdma2dev(cqdma), "cqdma hard reset timeout\n"); + spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); + } + + clk_disable_unprepare(cqdma->clk); + + pm_runtime_put_sync(cqdma2dev(cqdma)); + pm_runtime_disable(cqdma2dev(cqdma)); +} + +static const struct of_device_id mtk_cqdma_match[] = { + { .compatible = "mediatek,mt6765-cqdma" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mtk_cqdma_match); + +static int mtk_cqdma_probe(struct platform_device *pdev) +{ + struct mtk_cqdma_device *cqdma; + struct mtk_cqdma_vchan *vc; + struct dma_device *dd; + struct resource *res; + int err; + u32 i; + + cqdma = devm_kzalloc(&pdev->dev, sizeof(*cqdma), GFP_KERNEL); + if (!cqdma) + return -ENOMEM; + + dd = &cqdma->ddev; + + cqdma->clk = devm_clk_get(&pdev->dev, "cqdma"); + if (IS_ERR(cqdma->clk)) { + dev_err(&pdev->dev, "No clock for %s\n", + dev_name(&pdev->dev)); + return PTR_ERR(cqdma->clk); + } + + dma_cap_set(DMA_MEMCPY, dd->cap_mask); + + dd->copy_align = MTK_CQDMA_ALIGN_SIZE; + dd->device_alloc_chan_resources = mtk_cqdma_alloc_chan_resources; + dd->device_free_chan_resources = mtk_cqdma_free_chan_resources; + dd->device_tx_status = mtk_cqdma_tx_status; + dd->device_issue_pending = 
mtk_cqdma_issue_pending; + dd->device_prep_dma_memcpy = mtk_cqdma_prep_dma_memcpy; + dd->device_terminate_all = mtk_cqdma_terminate_all; + dd->src_addr_widths = MTK_CQDMA_DMA_BUSWIDTHS; + dd->dst_addr_widths = MTK_CQDMA_DMA_BUSWIDTHS; + dd->directions = BIT(DMA_MEM_TO_MEM); + dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; + dd->dev = &pdev->dev; + INIT_LIST_HEAD(&dd->channels); + + if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node, + "dma-requests", + &cqdma->dma_requests)) { + dev_info(&pdev->dev, + "Using %u as missing dma-requests property\n", + MTK_CQDMA_NR_VCHANS); + + cqdma->dma_requests = MTK_CQDMA_NR_VCHANS; + } + + if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node, + "dma-channels", + &cqdma->dma_channels)) { + dev_info(&pdev->dev, + "Using %u as missing dma-channels property\n", + MTK_CQDMA_NR_PCHANS); + + cqdma->dma_channels = MTK_CQDMA_NR_PCHANS; + } + + cqdma->pc = devm_kcalloc(&pdev->dev, cqdma->dma_channels, + sizeof(*cqdma->pc), GFP_KERNEL); + if (!cqdma->pc) + return -ENOMEM; + + /* initialization for PCs */ + for (i = 0; i < cqdma->dma_channels; ++i) { + cqdma->pc[i] = devm_kcalloc(&pdev->dev, 1, + sizeof(**cqdma->pc), GFP_KERNEL); + if (!cqdma->pc[i]) + return -ENOMEM; + + INIT_LIST_HEAD(&cqdma->pc[i]->queue); + spin_lock_init(&cqdma->pc[i]->lock); + refcount_set(&cqdma->pc[i]->refcnt, 0); + + res = platform_get_resource(pdev, IORESOURCE_MEM, i); + if (!res) { + dev_err(&pdev->dev, "No mem resource for %s\n", + dev_name(&pdev->dev)); + return -EINVAL; + } + + cqdma->pc[i]->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(cqdma->pc[i]->base)) + return PTR_ERR(cqdma->pc[i]->base); + + /* allocate IRQ resource */ + res = platform_get_resource(pdev, IORESOURCE_IRQ, i); + if (!res) { + dev_err(&pdev->dev, "No irq resource for %s\n", + dev_name(&pdev->dev)); + return -EINVAL; + } + cqdma->pc[i]->irq = res->start; + + err = devm_request_irq(&pdev->dev, cqdma->pc[i]->irq, + mtk_cqdma_irq, 0, dev_name(&pdev->dev), + cqdma); + if (err) { + dev_err(&pdev->dev, + "request_irq failed with err %d\n", err); + return -EINVAL; + } + } + + /* allocate resource for VCs */ + cqdma->vc = devm_kcalloc(&pdev->dev, cqdma->dma_requests, + sizeof(*cqdma->vc), GFP_KERNEL); + if (!cqdma->vc) + return -ENOMEM; + + for (i = 0; i < cqdma->dma_requests; i++) { + vc = &cqdma->vc[i]; + vc->vc.desc_free = mtk_cqdma_vdesc_free; + vchan_init(&vc->vc, dd); + init_completion(&vc->issue_completion); + } + + err = dma_async_device_register(dd); + if (err) + return err; + + err = of_dma_controller_register(pdev->dev.of_node, + of_dma_xlate_by_chan_id, cqdma); + if (err) { + dev_err(&pdev->dev, + "MediaTek CQDMA OF registration failed %d\n", err); + goto err_unregister; + } + + err = mtk_cqdma_hw_init(cqdma); + if (err) { + dev_err(&pdev->dev, + "MediaTek CQDMA HW initialization failed %d\n", err); + goto err_unregister; + } + + platform_set_drvdata(pdev, cqdma); + + /* initialize tasklet for each PC */ + for (i = 0; i < cqdma->dma_channels; ++i) + tasklet_init(&cqdma->pc[i]->tasklet, mtk_cqdma_tasklet_cb, + (unsigned long)cqdma->pc[i]); + + dev_info(&pdev->dev, "MediaTek CQDMA driver registered\n"); + + return 0; + +err_unregister: + dma_async_device_unregister(dd); + + return err; +} + +static int mtk_cqdma_remove(struct platform_device *pdev) +{ + struct mtk_cqdma_device *cqdma = platform_get_drvdata(pdev); + struct mtk_cqdma_vchan *vc; + unsigned long flags; + int i; + + /* kill VC task */ + for (i = 0; i < cqdma->dma_requests; i++) { + vc = &cqdma->vc[i]; + 
+ list_del(&vc->vc.chan.device_node); + tasklet_kill(&vc->vc.task); + } + + /* disable interrupt */ + for (i = 0; i < cqdma->dma_channels; i++) { + spin_lock_irqsave(&cqdma->pc[i]->lock, flags); + mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_EN, + MTK_CQDMA_INT_EN_BIT); + spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); + + /* Waits for any pending IRQ handlers to complete */ + synchronize_irq(cqdma->pc[i]->irq); + + tasklet_kill(&cqdma->pc[i]->tasklet); + } + + /* disable hardware */ + mtk_cqdma_hw_deinit(cqdma); + + dma_async_device_unregister(&cqdma->ddev); + of_dma_controller_free(pdev->dev.of_node); + + return 0; +} + +static struct platform_driver mtk_cqdma_driver = { + .probe = mtk_cqdma_probe, + .remove = mtk_cqdma_remove, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = mtk_cqdma_match, + }, +}; +module_platform_driver(mtk_cqdma_driver); + +MODULE_DESCRIPTION("MediaTek CQDMA Controller Driver"); +MODULE_AUTHOR("Shun-Chih Yu "); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 91b438286ef227b5a9148156896175c8e386c6b2 Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Sat, 29 Sep 2018 11:17:57 -0600 Subject: dmaengine: xilinx_dma: Refactor axidma channel allocation In axidma alloc_chan_resources merge BD and cyclic BD allocation. Signed-off-by: Radhey Shyam Pandey Signed-off-by: Michal Simek Signed-off-by: Vinod Koul --- drivers/dma/xilinx/xilinx_dma.c | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index c12442312595..06d1632ff1a2 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -887,6 +887,24 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) chan->id); return -ENOMEM; } + /* + * For cyclic DMA mode we need to program the tail Descriptor + * register with a value which is not a part of the BD chain + * so allocating a desc segment during channel allocation for + * programming tail descriptor. + */ + chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev, + sizeof(*chan->cyclic_seg_v), + &chan->cyclic_seg_p, GFP_KERNEL); + if (!chan->cyclic_seg_v) { + dev_err(chan->dev, + "unable to allocate desc segment for cyclic DMA\n"); + dma_free_coherent(chan->dev, sizeof(*chan->seg_v) * + XILINX_DMA_NUM_DESCS, chan->seg_v, + chan->seg_p); + return -ENOMEM; + } + chan->cyclic_seg_v->phys = chan->cyclic_seg_p; for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) { chan->seg_v[i].hw.next_desc = @@ -922,24 +940,6 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) return -ENOMEM; } - if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { - /* - * For cyclic DMA mode we need to program the tail Descriptor - * register with a value which is not a part of the BD chain - * so allocating a desc segment during channel allocation for - * programming tail descriptor. 
- */ - chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev, - sizeof(*chan->cyclic_seg_v), - &chan->cyclic_seg_p, GFP_KERNEL); - if (!chan->cyclic_seg_v) { - dev_err(chan->dev, - "unable to allocate desc segment for cyclic DMA\n"); - return -ENOMEM; - } - chan->cyclic_seg_v->phys = chan->cyclic_seg_p; - } - dma_cookie_init(dchan); if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { -- cgit v1.2.3 From 4e47d24a908c882b94c3f60cec56f02ac4e9bcea Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Sat, 29 Sep 2018 11:17:59 -0600 Subject: dmaengine: xilinx_dma: Introduce helper macro for preparing dma address This patch introduces the xilinx_prep_dma_addr_t macro which prepares dma_addr_t from hardware buffer descriptor LSB and MSB fields. It will be used in simple dma 64-bit programming sequence. Signed-off-by: Radhey Shyam Pandey Reviewed-by: Appana Durga Kedareswara Rao Signed-off-by: Vinod Koul --- drivers/dma/xilinx/xilinx_dma.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 06d1632ff1a2..153ca584eba5 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -190,6 +190,8 @@ /* AXI CDMA Specific Masks */ #define XILINX_CDMA_CR_SGMODE BIT(3) +#define xilinx_prep_dma_addr_t(addr) \ + ((dma_addr_t)((u64)addr##_##msb << 32 | (addr))) /** * struct xilinx_vdma_desc_hw - Hardware Descriptor * @next_desc: Next Descriptor Pointer @0x00 -- cgit v1.2.3 From 0e03aca2659ef7a85eaff1a1ca9b0b498002ede8 Mon Sep 17 00:00:00 2001 From: Radhey Shyam Pandey Date: Sat, 29 Sep 2018 11:18:00 -0600 Subject: dmaengine: xilinx_dma: Fix 64-bit simple CDMA transfer In AXI CDMA simple mode also pass MSB bits of source and destination address to xilinx_write function. This fixes simple CDMA operation mode using 64-bit addressing. Signed-off-by: Radhey Shyam Pandey Signed-off-by: Michal Simek Reviewed-by: Appana Durga Kedareswara Rao Signed-off-by: Vinod Koul --- drivers/dma/xilinx/xilinx_dma.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index 153ca584eba5..02880963092f 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -1247,8 +1247,10 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan) hw = &segment->hw; - xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr); - xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr); + xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, + xilinx_prep_dma_addr_t(hw->src_addr)); + xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, + xilinx_prep_dma_addr_t(hw->dest_addr)); /* Start the transfer */ dma_ctrl_write(chan, XILINX_DMA_REG_BTT, -- cgit v1.2.3 From af8bf89a8e0b8354576dcaa507c8f2fd4a205742 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Thu, 25 Oct 2018 14:52:36 -0300 Subject: dmaengine: imx-sdma: Use a single line for dma_alloc_coherent() Make the call to dma_alloc_coherent() to fit into a single line, which helps readability. 
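For context on the call being reflowed: dma_alloc_coherent() returns a CPU virtual address and fills in the matching bus address for the same buffer, and the pair is later released together. A minimal usage sketch, with the device pointer and size as placeholders:

    #include <linux/dma-mapping.h>
    #include <linux/sizes.h>

    	dma_addr_t phys;
    	void *virt;

    	virt = dma_alloc_coherent(dev, SZ_4K, &phys, GFP_KERNEL);
    	if (!virt)
    		return -ENOMEM;

    	/* CPU accesses go through 'virt'; the device is given 'phys' */

    	dma_free_coherent(dev, SZ_4K, virt, phys);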
Signed-off-by: Fabio Estevam Signed-off-by: Vinod Koul --- drivers/dma/imx-sdma.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index b4ec2d20e661..a2b488da2e07 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -671,9 +671,7 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, int ret; unsigned long flags; - buf_virt = dma_alloc_coherent(NULL, - size, - &buf_phys, GFP_KERNEL); + buf_virt = dma_alloc_coherent(NULL, size, &buf_phys, GFP_KERNEL); if (!buf_virt) { return -ENOMEM; } -- cgit v1.2.3 From aeaebcc17cdf37065d2693865eeb1ff1c7dc5bf3 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Thu, 25 Oct 2018 11:05:25 -0700 Subject: dmaengine: xilinx_dma: Remove __aligned attribute on zynqmp_dma_desc_ll Clang warns: drivers/dma/xilinx/zynqmp_dma.c:166:4: warning: attribute 'aligned' is ignored, place it after "struct" to apply attribute to type declaration [-Wignored-attributes] }; __aligned(64) ^ ./include/linux/compiler_types.h:200:38: note: expanded from macro '__aligned' ^ 1 warning generated. As Nick pointed out in the previous version of this patch, the author likely intended for this struct to be 8-byte (64-bit) aligned, not 64-byte, which is the default. Remove the hanging __aligned attribute. Fixes: b0cc417c1637 ("dmaengine: Add Xilinx zynqmp dma engine driver support") Reported-by: Nick Desaulniers Suggested-by: Nick Desaulniers Signed-off-by: Nathan Chancellor Reviewed-by: Nick Desaulniers Signed-off-by: Vinod Koul --- drivers/dma/xilinx/zynqmp_dma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index c74a88b65039..73de6a6179fc 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -163,7 +163,7 @@ struct zynqmp_dma_desc_ll { u32 ctrl; u64 nxtdscraddr; u64 rsvd; -}; __aligned(64) +}; /** * struct zynqmp_dma_desc_sw - Per Transaction structure -- cgit v1.2.3 From 0255200bd29afc320c6ea4c1adf8bdc13a9b3c15 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Mon, 29 Oct 2018 12:08:08 +0200 Subject: dmaengine: dmatest: unmap data on a single code-path when xfer done After the DMA transfer is done, we don't need to call the un-mapping code in 3 places. One is enough. Signed-off-by: Alexandru Ardelean Signed-off-by: Vinod Koul --- drivers/dma/dmatest.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index aa1712beb0cc..5d4b1e053fb7 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -721,14 +721,14 @@ static int dmatest_func(void *data) status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); + dmaengine_unmap_put(um); + if (!done->done) { - dmaengine_unmap_put(um); result("test timed out", total_tests, src_off, dst_off, len, 0); failed_tests++; continue; } else if (status != DMA_COMPLETE) { - dmaengine_unmap_put(um); result(status == DMA_ERROR ? 
"completion error status" : "completion busy status", total_tests, src_off, @@ -737,8 +737,6 @@ static int dmatest_func(void *data) continue; } - dmaengine_unmap_put(um); - if (params->noverify) { verbose_result("test passed", total_tests, src_off, dst_off, len, 0); -- cgit v1.2.3 From fbffb6b4d44f1263390130cb8b35cabc030af3f7 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Mon, 29 Oct 2018 11:23:36 +0200 Subject: dmaengine: dmatest: use dmaengine_terminate_sync() instead The `dmaengine_terminate_all()` is marked as deprecated, so update the test with `dmaengine_terminate_sync()` which is the recommended alternative. Signed-off-by: Alexandru Ardelean Signed-off-by: Vinod Koul --- drivers/dma/dmatest.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 5d4b1e053fb7..214391ba019a 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -810,7 +810,7 @@ err_thread_type: /* terminate all transfers on specified channels */ if (ret || failed_tests) - dmaengine_terminate_all(chan); + dmaengine_terminate_sync(chan); thread->done = true; wake_up(&thread_wait); @@ -834,7 +834,7 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc) } /* terminate all transfers on specified channels */ - dmaengine_terminate_all(dtc->chan); + dmaengine_terminate_sync(dtc->chan); kfree(dtc); } -- cgit v1.2.3 From 787d3083caf899b8c3abf5a0c7a04e79d77f2c32 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Thu, 1 Nov 2018 18:07:16 +0200 Subject: dmaengine: dmatest: move size checks earlier in function There's no need to allocate all that memory if these sizes are invalid anyway. Signed-off-by: Alexandru Ardelean Signed-off-by: Vinod Koul --- drivers/dma/dmatest.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 214391ba019a..e71aa1e3451c 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -507,6 +507,19 @@ static int dmatest_func(void *data) } else goto err_thread_type; + /* Check if buffer count fits into map count variable (u8) */ + if ((src_cnt + dst_cnt) >= 255) { + pr_err("too many buffers (%d of 255 supported)\n", + src_cnt + dst_cnt); + goto err_thread_type; + } + + if (1 << align > params->buf_size) { + pr_err("%u-byte buffer too small for %d-byte alignment\n", + params->buf_size, 1 << align); + goto err_thread_type; + } + thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL); if (!thread->srcs) goto err_srcs; @@ -576,19 +589,6 @@ static int dmatest_func(void *data) total_tests++; - /* Check if buffer count fits into map count variable (u8) */ - if ((src_cnt + dst_cnt) >= 255) { - pr_err("too many buffers (%d of 255 supported)\n", - src_cnt + dst_cnt); - break; - } - - if (1 << align > params->buf_size) { - pr_err("%u-byte buffer too small for %d-byte alignment\n", - params->buf_size, 1 << align); - break; - } - if (params->norandom) len = params->buf_size; else -- cgit v1.2.3 From 32e74aabebc8d045a11452d2de0ac9d2625fcd45 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Fri, 12 Oct 2018 01:41:03 +0900 Subject: dmaengine: uniphier-mdmac: add UniPhier MIO DMAC driver The MIO DMAC (Media IO DMA Controller) is used in UniPhier LD4, Pro4, and sLD8 SoCs. 
Signed-off-by: Masahiro Yamada Signed-off-by: Vinod Koul --- MAINTAINERS | 1 + drivers/dma/Kconfig | 11 + drivers/dma/Makefile | 1 + drivers/dma/uniphier-mdmac.c | 506 +++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 519 insertions(+) create mode 100644 drivers/dma/uniphier-mdmac.c (limited to 'drivers/dma') diff --git a/MAINTAINERS b/MAINTAINERS index f4855974f325..981e72946e7a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2256,6 +2256,7 @@ F: arch/arm/mm/cache-uniphier.c F: arch/arm64/boot/dts/socionext/uniphier* F: drivers/bus/uniphier-system-bus.c F: drivers/clk/uniphier/ +F: drivers/dmaengine/uniphier-mdmac.c F: drivers/gpio/gpio-uniphier.c F: drivers/i2c/busses/i2c-uniphier* F: drivers/irqchip/irq-uniphier-aidet.c diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index de511db021cc..d2286c7f7222 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -587,6 +587,17 @@ config TIMB_DMA help Enable support for the Timberdale FPGA DMA engine. +config UNIPHIER_MDMAC + tristate "UniPhier MIO DMAC" + depends on ARCH_UNIPHIER || COMPILE_TEST + depends on OF + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Enable support for the MIO DMAC (Media I/O DMA controller) on the + UniPhier platform. This DMA controller is used as the external + DMA engine of the SD/eMMC controllers of the LD4, Pro4, sLD8 SoCs. + config XGENE_DMA tristate "APM X-Gene DMA support" depends on ARCH_XGENE || COMPILE_TEST diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 7fcc4d8e336d..09571a81353d 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -70,6 +70,7 @@ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o obj-$(CONFIG_TEGRA210_ADMA) += tegra210-adma.o obj-$(CONFIG_TIMB_DMA) += timb_dma.o +obj-$(CONFIG_UNIPHIER_MDMAC) += uniphier-mdmac.o obj-$(CONFIG_XGENE_DMA) += xgene-dma.o obj-$(CONFIG_ZX_DMA) += zx_dma.o obj-$(CONFIG_ST_FDMA) += st_fdma.o diff --git a/drivers/dma/uniphier-mdmac.c b/drivers/dma/uniphier-mdmac.c new file mode 100644 index 000000000000..ec65a7430dc4 --- /dev/null +++ b/drivers/dma/uniphier-mdmac.c @@ -0,0 +1,506 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (C) 2018 Socionext Inc. 
+// Author: Masahiro Yamada + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "virt-dma.h" + +/* registers common for all channels */ +#define UNIPHIER_MDMAC_CMD 0x000 /* issue DMA start/abort */ +#define UNIPHIER_MDMAC_CMD_ABORT BIT(31) /* 1: abort, 0: start */ + +/* per-channel registers */ +#define UNIPHIER_MDMAC_CH_OFFSET 0x100 +#define UNIPHIER_MDMAC_CH_STRIDE 0x040 + +#define UNIPHIER_MDMAC_CH_IRQ_STAT 0x010 /* current hw status (RO) */ +#define UNIPHIER_MDMAC_CH_IRQ_REQ 0x014 /* latched STAT (WOC) */ +#define UNIPHIER_MDMAC_CH_IRQ_EN 0x018 /* IRQ enable mask */ +#define UNIPHIER_MDMAC_CH_IRQ_DET 0x01c /* REQ & EN (RO) */ +#define UNIPHIER_MDMAC_CH_IRQ__ABORT BIT(13) +#define UNIPHIER_MDMAC_CH_IRQ__DONE BIT(1) +#define UNIPHIER_MDMAC_CH_SRC_MODE 0x020 /* mode of source */ +#define UNIPHIER_MDMAC_CH_DEST_MODE 0x024 /* mode of destination */ +#define UNIPHIER_MDMAC_CH_MODE__ADDR_INC (0 << 4) +#define UNIPHIER_MDMAC_CH_MODE__ADDR_DEC (1 << 4) +#define UNIPHIER_MDMAC_CH_MODE__ADDR_FIXED (2 << 4) +#define UNIPHIER_MDMAC_CH_SRC_ADDR 0x028 /* source address */ +#define UNIPHIER_MDMAC_CH_DEST_ADDR 0x02c /* destination address */ +#define UNIPHIER_MDMAC_CH_SIZE 0x030 /* transfer bytes */ + +#define UNIPHIER_MDMAC_SLAVE_BUSWIDTHS \ + (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) + +struct uniphier_mdmac_desc { + struct virt_dma_desc vd; + struct scatterlist *sgl; + unsigned int sg_len; + unsigned int sg_cur; + enum dma_transfer_direction dir; +}; + +struct uniphier_mdmac_chan { + struct virt_dma_chan vc; + struct uniphier_mdmac_device *mdev; + struct uniphier_mdmac_desc *md; + void __iomem *reg_ch_base; + unsigned int chan_id; +}; + +struct uniphier_mdmac_device { + struct dma_device ddev; + struct clk *clk; + void __iomem *reg_base; + struct uniphier_mdmac_chan channels[0]; +}; + +static struct uniphier_mdmac_chan * +to_uniphier_mdmac_chan(struct virt_dma_chan *vc) +{ + return container_of(vc, struct uniphier_mdmac_chan, vc); +} + +static struct uniphier_mdmac_desc * +to_uniphier_mdmac_desc(struct virt_dma_desc *vd) +{ + return container_of(vd, struct uniphier_mdmac_desc, vd); +} + +/* mc->vc.lock must be held by caller */ +static struct uniphier_mdmac_desc * +uniphier_mdmac_next_desc(struct uniphier_mdmac_chan *mc) +{ + struct virt_dma_desc *vd; + + vd = vchan_next_desc(&mc->vc); + if (!vd) { + mc->md = NULL; + return NULL; + } + + list_del(&vd->node); + + mc->md = to_uniphier_mdmac_desc(vd); + + return mc->md; +} + +/* mc->vc.lock must be held by caller */ +static void uniphier_mdmac_handle(struct uniphier_mdmac_chan *mc, + struct uniphier_mdmac_desc *md) +{ + struct uniphier_mdmac_device *mdev = mc->mdev; + struct scatterlist *sg; + u32 irq_flag = UNIPHIER_MDMAC_CH_IRQ__DONE; + u32 src_mode, src_addr, dest_mode, dest_addr, chunk_size; + + sg = &md->sgl[md->sg_cur]; + + if (md->dir == DMA_MEM_TO_DEV) { + src_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_INC; + src_addr = sg_dma_address(sg); + dest_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_FIXED; + dest_addr = 0; + } else { + src_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_FIXED; + src_addr = 0; + dest_mode = UNIPHIER_MDMAC_CH_MODE__ADDR_INC; + dest_addr = sg_dma_address(sg); + } + + chunk_size = sg_dma_len(sg); + + writel(src_mode, mc->reg_ch_base + UNIPHIER_MDMAC_CH_SRC_MODE); + writel(dest_mode, mc->reg_ch_base + UNIPHIER_MDMAC_CH_DEST_MODE); + writel(src_addr, 
mc->reg_ch_base + UNIPHIER_MDMAC_CH_SRC_ADDR); + writel(dest_addr, mc->reg_ch_base + UNIPHIER_MDMAC_CH_DEST_ADDR); + writel(chunk_size, mc->reg_ch_base + UNIPHIER_MDMAC_CH_SIZE); + + /* write 1 to clear */ + writel(irq_flag, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ); + + writel(irq_flag, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_EN); + + writel(BIT(mc->chan_id), mdev->reg_base + UNIPHIER_MDMAC_CMD); +} + +/* mc->vc.lock must be held by caller */ +static void uniphier_mdmac_start(struct uniphier_mdmac_chan *mc) +{ + struct uniphier_mdmac_desc *md; + + md = uniphier_mdmac_next_desc(mc); + if (md) + uniphier_mdmac_handle(mc, md); +} + +/* mc->vc.lock must be held by caller */ +static int uniphier_mdmac_abort(struct uniphier_mdmac_chan *mc) +{ + struct uniphier_mdmac_device *mdev = mc->mdev; + u32 irq_flag = UNIPHIER_MDMAC_CH_IRQ__ABORT; + u32 val; + + /* write 1 to clear */ + writel(irq_flag, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ); + + writel(UNIPHIER_MDMAC_CMD_ABORT | BIT(mc->chan_id), + mdev->reg_base + UNIPHIER_MDMAC_CMD); + + /* + * Abort should be accepted soon. We poll the bit here instead of + * waiting for the interrupt. + */ + return readl_poll_timeout(mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ, + val, val & irq_flag, 0, 20); +} + +static irqreturn_t uniphier_mdmac_interrupt(int irq, void *dev_id) +{ + struct uniphier_mdmac_chan *mc = dev_id; + struct uniphier_mdmac_desc *md; + irqreturn_t ret = IRQ_HANDLED; + u32 irq_stat; + + spin_lock(&mc->vc.lock); + + irq_stat = readl(mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_DET); + + /* + * Some channels share a single interrupt line. If the IRQ status is 0, + * this is probably triggered by a different channel. + */ + if (!irq_stat) { + ret = IRQ_NONE; + goto out; + } + + /* write 1 to clear */ + writel(irq_stat, mc->reg_ch_base + UNIPHIER_MDMAC_CH_IRQ_REQ); + + /* + * UNIPHIER_MDMAC_CH_IRQ__DONE interrupt is asserted even when the DMA + * is aborted. To distinguish the normal completion and the abort, + * check mc->md. If it is NULL, we are aborting. 
+ */ + md = mc->md; + if (!md) + goto out; + + md->sg_cur++; + + if (md->sg_cur >= md->sg_len) { + vchan_cookie_complete(&md->vd); + md = uniphier_mdmac_next_desc(mc); + if (!md) + goto out; + } + + uniphier_mdmac_handle(mc, md); + +out: + spin_unlock(&mc->vc.lock); + + return ret; +} + +static void uniphier_mdmac_free_chan_resources(struct dma_chan *chan) +{ + vchan_free_chan_resources(to_virt_chan(chan)); +} + +static struct dma_async_tx_descriptor * +uniphier_mdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, + enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct virt_dma_chan *vc = to_virt_chan(chan); + struct uniphier_mdmac_desc *md; + + if (!is_slave_direction(direction)) + return NULL; + + md = kzalloc(sizeof(*md), GFP_NOWAIT); + if (!md) + return NULL; + + md->sgl = sgl; + md->sg_len = sg_len; + md->dir = direction; + + return vchan_tx_prep(vc, &md->vd, flags); +} + +static int uniphier_mdmac_terminate_all(struct dma_chan *chan) +{ + struct virt_dma_chan *vc = to_virt_chan(chan); + struct uniphier_mdmac_chan *mc = to_uniphier_mdmac_chan(vc); + unsigned long flags; + int ret = 0; + LIST_HEAD(head); + + spin_lock_irqsave(&vc->lock, flags); + + if (mc->md) { + vchan_terminate_vdesc(&mc->md->vd); + mc->md = NULL; + ret = uniphier_mdmac_abort(mc); + } + vchan_get_all_descriptors(vc, &head); + + spin_unlock_irqrestore(&vc->lock, flags); + + vchan_dma_desc_free_list(vc, &head); + + return ret; +} + +static void uniphier_mdmac_synchronize(struct dma_chan *chan) +{ + vchan_synchronize(to_virt_chan(chan)); +} + +static enum dma_status uniphier_mdmac_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct virt_dma_chan *vc; + struct virt_dma_desc *vd; + struct uniphier_mdmac_chan *mc; + struct uniphier_mdmac_desc *md = NULL; + enum dma_status stat; + unsigned long flags; + int i; + + stat = dma_cookie_status(chan, cookie, txstate); + /* Return immediately if we do not need to compute the residue. 
*/ + if (stat == DMA_COMPLETE || !txstate) + return stat; + + vc = to_virt_chan(chan); + + spin_lock_irqsave(&vc->lock, flags); + + mc = to_uniphier_mdmac_chan(vc); + + if (mc->md && mc->md->vd.tx.cookie == cookie) { + /* residue from the on-flight chunk */ + txstate->residue = readl(mc->reg_ch_base + + UNIPHIER_MDMAC_CH_SIZE); + md = mc->md; + } + + if (!md) { + vd = vchan_find_desc(vc, cookie); + if (vd) + md = to_uniphier_mdmac_desc(vd); + } + + if (md) { + /* residue from the queued chunks */ + for (i = md->sg_cur; i < md->sg_len; i++) + txstate->residue += sg_dma_len(&md->sgl[i]); + } + + spin_unlock_irqrestore(&vc->lock, flags); + + return stat; +} + +static void uniphier_mdmac_issue_pending(struct dma_chan *chan) +{ + struct virt_dma_chan *vc = to_virt_chan(chan); + struct uniphier_mdmac_chan *mc = to_uniphier_mdmac_chan(vc); + unsigned long flags; + + spin_lock_irqsave(&vc->lock, flags); + + if (vchan_issue_pending(vc) && !mc->md) + uniphier_mdmac_start(mc); + + spin_unlock_irqrestore(&vc->lock, flags); +} + +static void uniphier_mdmac_desc_free(struct virt_dma_desc *vd) +{ + kfree(to_uniphier_mdmac_desc(vd)); +} + +static int uniphier_mdmac_chan_init(struct platform_device *pdev, + struct uniphier_mdmac_device *mdev, + int chan_id) +{ + struct device *dev = &pdev->dev; + struct uniphier_mdmac_chan *mc = &mdev->channels[chan_id]; + char *irq_name; + int irq, ret; + + irq = platform_get_irq(pdev, chan_id); + if (irq < 0) { + dev_err(&pdev->dev, "failed to get IRQ number for ch%d\n", + chan_id); + return irq; + } + + irq_name = devm_kasprintf(dev, GFP_KERNEL, "uniphier-mio-dmac-ch%d", + chan_id); + if (!irq_name) + return -ENOMEM; + + ret = devm_request_irq(dev, irq, uniphier_mdmac_interrupt, + IRQF_SHARED, irq_name, mc); + if (ret) + return ret; + + mc->mdev = mdev; + mc->reg_ch_base = mdev->reg_base + UNIPHIER_MDMAC_CH_OFFSET + + UNIPHIER_MDMAC_CH_STRIDE * chan_id; + mc->chan_id = chan_id; + mc->vc.desc_free = uniphier_mdmac_desc_free; + vchan_init(&mc->vc, &mdev->ddev); + + return 0; +} + +static int uniphier_mdmac_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct uniphier_mdmac_device *mdev; + struct dma_device *ddev; + struct resource *res; + int nr_chans, ret, i; + + nr_chans = platform_irq_count(pdev); + if (nr_chans < 0) + return nr_chans; + + ret = dma_set_mask(dev, DMA_BIT_MASK(32)); + if (ret) + return ret; + + mdev = devm_kzalloc(dev, struct_size(mdev, channels, nr_chans), + GFP_KERNEL); + if (!mdev) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mdev->reg_base = devm_ioremap_resource(dev, res); + if (IS_ERR(mdev->reg_base)) + return PTR_ERR(mdev->reg_base); + + mdev->clk = devm_clk_get(dev, NULL); + if (IS_ERR(mdev->clk)) { + dev_err(dev, "failed to get clock\n"); + return PTR_ERR(mdev->clk); + } + + ret = clk_prepare_enable(mdev->clk); + if (ret) + return ret; + + ddev = &mdev->ddev; + ddev->dev = dev; + dma_cap_set(DMA_PRIVATE, ddev->cap_mask); + ddev->src_addr_widths = UNIPHIER_MDMAC_SLAVE_BUSWIDTHS; + ddev->dst_addr_widths = UNIPHIER_MDMAC_SLAVE_BUSWIDTHS; + ddev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); + ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; + ddev->device_free_chan_resources = uniphier_mdmac_free_chan_resources; + ddev->device_prep_slave_sg = uniphier_mdmac_prep_slave_sg; + ddev->device_terminate_all = uniphier_mdmac_terminate_all; + ddev->device_synchronize = uniphier_mdmac_synchronize; + ddev->device_tx_status = uniphier_mdmac_tx_status; + 
ddev->device_issue_pending = uniphier_mdmac_issue_pending; + INIT_LIST_HEAD(&ddev->channels); + + for (i = 0; i < nr_chans; i++) { + ret = uniphier_mdmac_chan_init(pdev, mdev, i); + if (ret) + goto disable_clk; + } + + ret = dma_async_device_register(ddev); + if (ret) + goto disable_clk; + + ret = of_dma_controller_register(dev->of_node, of_dma_xlate_by_chan_id, + ddev); + if (ret) + goto unregister_dmac; + + platform_set_drvdata(pdev, mdev); + + return 0; + +unregister_dmac: + dma_async_device_unregister(ddev); +disable_clk: + clk_disable_unprepare(mdev->clk); + + return ret; +} + +static int uniphier_mdmac_remove(struct platform_device *pdev) +{ + struct uniphier_mdmac_device *mdev = platform_get_drvdata(pdev); + struct dma_chan *chan; + int ret; + + /* + * Before reaching here, almost all descriptors have been freed by the + * ->device_free_chan_resources() hook. However, each channel might + * be still holding one descriptor that was on-flight at that moment. + * Terminate it to make sure this hardware is no longer running. Then, + * free the channel resources once again to avoid memory leak. + */ + list_for_each_entry(chan, &mdev->ddev.channels, device_node) { + ret = dmaengine_terminate_sync(chan); + if (ret) + return ret; + uniphier_mdmac_free_chan_resources(chan); + } + + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&mdev->ddev); + clk_disable_unprepare(mdev->clk); + + return 0; +} + +static const struct of_device_id uniphier_mdmac_match[] = { + { .compatible = "socionext,uniphier-mio-dmac" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, uniphier_mdmac_match); + +static struct platform_driver uniphier_mdmac_driver = { + .probe = uniphier_mdmac_probe, + .remove = uniphier_mdmac_remove, + .driver = { + .name = "uniphier-mio-dmac", + .of_match_table = uniphier_mdmac_match, + }, +}; +module_platform_driver(uniphier_mdmac_driver); + +MODULE_AUTHOR("Masahiro Yamada "); +MODULE_DESCRIPTION("UniPhier MIO DMAC driver"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From 627469e4445b9b12e0229b3bdf8564d5ce384dd7 Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Tue, 6 Nov 2018 11:33:48 +0800 Subject: dmaengine: coh901318: Fix a double-lock bug The function coh901318_alloc_chan_resources() calls spin_lock_irqsave() before calling coh901318_config(). But coh901318_config() calls spin_lock_irqsave() again in its definition, which may cause a double-lock bug. Because coh901318_config() is only called by coh901318_alloc_chan_resources(), the bug fix is to remove the calls to spin-lock and -unlock functions in coh901318_config(). 
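For illustration, the problematic pattern looked roughly like this (a simplified sketch of the coh901318_alloc_chan_resources() call path; the argument values are illustrative). Kernel spinlocks are not recursive, so re-taking cohc->lock on the same CPU spins forever:

	spin_lock_irqsave(&cohc->lock, flags);
	coh901318_config(cohc, NULL);	/* previously took cohc->lock again */
	spin_unlock_irqrestore(&cohc->lock, flags);

With the fix, coh901318_config() relies on its only caller already holding the lock.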
Signed-off-by: Jia-Ju Bai Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul --- drivers/dma/coh901318.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index eebaba3d9e78..fd862a478738 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -1807,8 +1807,6 @@ static int coh901318_config(struct coh901318_chan *cohc, int channel = cohc->id; void __iomem *virtbase = cohc->base->virtbase; - spin_lock_irqsave(&cohc->lock, flags); - if (param) p = param; else @@ -1828,8 +1826,6 @@ static int coh901318_config(struct coh901318_chan *cohc, coh901318_set_conf(cohc, p->config); coh901318_set_ctrl(cohc, p->ctrl_lli_last); - spin_unlock_irqrestore(&cohc->lock, flags); - return 0; } -- cgit v1.2.3
From 7b0c03ecc42fb223baf015877fee9d517c2c8af1 Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Sat, 17 Nov 2018 17:17:21 +0100 Subject: dmaengine: dw-dmac: implement dma protection control setting
This patch adds a new device-tree property that makes it possible to specify the DMA protection control bits for all of the DMA controller's channels uniformly. Setting the "correct" bits can have a huge impact on the PPC460EX and APM82181, which use this DMA engine in combination with a DesignWare SATA-II core (sata_dwc_460ex driver).
In the OpenWrt Forum, the user takimata reported that: |It seems your patch unleashed the full power of the SATA port. |Where I was previously hitting a really hard limit at around |82 MB/s for reading and 27 MB/s for writing, I am now getting this: | |root@OpenWrt:/mnt# time dd if=/dev/zero of=tempfile bs=1M count=1024 |1024+0 records in |1024+0 records out |real 0m 13.65s |user 0m 0.01s |sys 0m 11.89s | |root@OpenWrt:/mnt# time dd if=tempfile of=/dev/null bs=1M count=1024 |1024+0 records in |1024+0 records out |real 0m 8.41s |user 0m 0.01s |sys 0m 4.70s | |This means: 121 MB/s reading and 75 MB/s writing! | |The drive is a WD Green WD10EARX taken from an older MBL Single. |I repeated the test a few times with even larger files to rule out |any caching, I'm still seeing the same great performance. OpenWrt is |now completely on par with the original MBL firmware's performance.
Another user, And.short, reported: |I can report that your fix worked! Boots up fine with two |drives even with more partitions, and no more reboot on |concurrent disk access!
A closer look into the sata_dwc_460ex code revealed that the driver did initially set the correct protection control bits. However, this feature was lost when the sata_dwc_460ex driver was converted to the generic DMA driver framework.
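As a usage illustration (hypothetical platform data, not taken from any real board file), the new field is filled with the CHAN_PROTCTL_* bits defined below and reaches the channel's CFG_HI register through DWC_CFGH_PROTCTL():

	static struct dw_dma_platform_data pdata = {
		/* ... other fields elided ... */
		.protctl = CHAN_PROTCTL_PRIVILEGED | CHAN_PROTCTL_BUFFERABLE,
	};

On device-tree systems, the same value comes from the new "snps,dma-protection-control" property instead, validated against CHAN_PROTCTL_MASK as shown in the platform.c hunk below.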
BugLink: https://forum.openwrt.org/t/wd-mybook-live-duo-two-disks/16195/55 BugLink: https://forum.openwrt.org/t/wd-mybook-live-duo-two-disks/16195/50 Fixes: 8b3444852a2b ("sata_dwc_460ex: move to generic DMA driver") Reviewed-by: Andy Shevchenko Signed-off-by: Christian Lamparter Signed-off-by: Vinod Koul --- drivers/dma/dw/core.c | 2 ++ drivers/dma/dw/platform.c | 6 ++++++ drivers/dma/dw/regs.h | 4 ++++ include/linux/platform_data/dma-dw.h | 6 ++++++ 4 files changed, 18 insertions(+) (limited to 'drivers/dma') diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index d0c3e50b39fb..2c5ca1961256 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -160,12 +160,14 @@ static void dwc_initialize_chan_idma32(struct dw_dma_chan *dwc) static void dwc_initialize_chan_dw(struct dw_dma_chan *dwc) { + struct dw_dma *dw = to_dw_dma(dwc->chan.device); u32 cfghi = DWC_CFGH_FIFO_MODE; u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority); bool hs_polarity = dwc->dws.hs_polarity; cfghi |= DWC_CFGH_DST_PER(dwc->dws.dst_id); cfghi |= DWC_CFGH_SRC_PER(dwc->dws.src_id); + cfghi |= DWC_CFGH_PROTCTL(dw->pdata->protctl); /* Set polarity of handshake interface */ cfglo |= hs_polarity ? DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL : 0; diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index f01b2c173fa6..31ff8113c3de 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c @@ -162,6 +162,12 @@ dw_dma_parse_dt(struct platform_device *pdev) pdata->multi_block[tmp] = 1; } + if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) { + if (tmp > CHAN_PROTCTL_MASK) + return NULL; + pdata->protctl = tmp; + } + return pdata; } #else diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h index 09e7dfdbb790..646c9c960c07 100644 --- a/drivers/dma/dw/regs.h +++ b/drivers/dma/dw/regs.h @@ -200,6 +200,10 @@ enum dw_dma_msize { #define DWC_CFGH_FCMODE (1 << 0) #define DWC_CFGH_FIFO_MODE (1 << 1) #define DWC_CFGH_PROTCTL(x) ((x) << 2) +#define DWC_CFGH_PROTCTL_DATA (0 << 2) /* data access - always set */ +#define DWC_CFGH_PROTCTL_PRIV (1 << 2) /* privileged -> AHB HPROT[1] */ +#define DWC_CFGH_PROTCTL_BUFFER (2 << 2) /* bufferable -> AHB HPROT[2] */ +#define DWC_CFGH_PROTCTL_CACHE (4 << 2) /* cacheable -> AHB HPROT[3] */ #define DWC_CFGH_DS_UPD_EN (1 << 5) #define DWC_CFGH_SS_UPD_EN (1 << 6) #define DWC_CFGH_SRC_PER(x) ((x) << 7) diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 896cb71a382c..1a1d58ebffbf 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -49,6 +49,7 @@ struct dw_dma_slave { * @data_width: Maximum data width supported by hardware per AHB master * (in bytes, power of 2) * @multi_block: Multi block transfers supported by hardware per channel. + * @protctl: Protection control signals setting per channel. 
*/ struct dw_dma_platform_data { unsigned int nr_channels; @@ -65,6 +66,11 @@ struct dw_dma_platform_data { unsigned char nr_masters; unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS]; +#define CHAN_PROTCTL_PRIVILEGED BIT(0) +#define CHAN_PROTCTL_BUFFERABLE BIT(1) +#define CHAN_PROTCTL_CACHEABLE BIT(2) +#define CHAN_PROTCTL_MASK GENMASK(2, 0) + unsigned char protctl; }; #endif /* _PLATFORM_DATA_DMA_DW_H */ -- cgit v1.2.3
From 107d06441b709d31ce592535086992799ee51e17 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Thu, 25 Oct 2018 15:15:28 +0100 Subject: dmaengine: imx-sdma: remove dma_slave_config direction usage and leave sdma_event_enable()
dma_slave_config direction was marked as deprecated quite some time back; remove the usage from this driver so that the field can be removed.
The ENBLn bit should be set before any DMA request is triggered; please refer to the information below from the i.MX6SoloLite RM. Otherwise, the SPI/UART tests will fail, because there is always a DMA request from the TX FIFO before dmaengine_prep_slave_sg(), where ENBLn is set, which violates the rule below. https://www.nxp.com/docs/en/reference-manual/IMX6SLRM.pdf: 40.8.28 Channel Enable RAM (SDMAARM_CHNENBLn) "It is thus essential for the Arm platform to program them before any DMA request is triggered to the SDMA, otherwise an unpredictable combination of channels may be started".
Signed-off-by: Robin Gong [vkoul: squashed patch from Robin into direction change] Signed-off-by: Vinod Koul --- drivers/dma/imx-sdma.c | 56 +++++++++++++++++++++++++++++++++++--------------- 1 file changed, 39 insertions(+), 17 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index b4ec2d20e661..d6a5d6ddfdad 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -335,6 +335,7 @@ struct sdma_desc { * @sdma: pointer to the SDMA engine for this channel * @channel: the channel number, matches dmaengine chan_id + 1 * @direction: transfer type. Needed for setting SDMA script + * @slave_config: Slave configuration * @peripheral_type: Peripheral type.
Needed for setting SDMA script * @event_id0: aka dma request line * @event_id1: for channels that use 2 events @@ -362,6 +363,7 @@ struct sdma_channel { struct sdma_engine *sdma; unsigned int channel; enum dma_transfer_direction direction; + struct dma_slave_config slave_config; enum sdma_peripheral_type peripheral_type; unsigned int event_id0; unsigned int event_id1; @@ -440,6 +442,10 @@ struct sdma_engine { struct sdma_buffer_descriptor *bd0; }; +static int sdma_config_write(struct dma_chan *chan, + struct dma_slave_config *dmaengine_cfg, + enum dma_transfer_direction direction); + static struct sdma_driver_data sdma_imx31 = { .chnenbl0 = SDMA_CHNENBL0_IMX31, .num_events = 32, @@ -1104,18 +1110,6 @@ static int sdma_config_channel(struct dma_chan *chan) sdmac->shp_addr = 0; sdmac->per_addr = 0; - if (sdmac->event_id0) { - if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events) - return -EINVAL; - sdma_event_enable(sdmac, sdmac->event_id0); - } - - if (sdmac->event_id1) { - if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events) - return -EINVAL; - sdma_event_enable(sdmac, sdmac->event_id1); - } - switch (sdmac->peripheral_type) { case IMX_DMATYPE_DSP: sdma_config_ownership(sdmac, false, true, true); @@ -1415,6 +1409,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( struct scatterlist *sg; struct sdma_desc *desc; + sdma_config_write(chan, &sdmac->slave_config, direction); + desc = sdma_transfer_init(sdmac, direction, sg_len); if (!desc) goto err_out; @@ -1499,6 +1495,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel); + sdma_config_write(chan, &sdmac->slave_config, direction); + desc = sdma_transfer_init(sdmac, direction, num_periods); if (!desc) goto err_out; @@ -1554,17 +1552,18 @@ err_out: return NULL; } -static int sdma_config(struct dma_chan *chan, - struct dma_slave_config *dmaengine_cfg) +static int sdma_config_write(struct dma_chan *chan, + struct dma_slave_config *dmaengine_cfg, + enum dma_transfer_direction direction) { struct sdma_channel *sdmac = to_sdma_chan(chan); - if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { + if (direction == DMA_DEV_TO_MEM) { sdmac->per_address = dmaengine_cfg->src_addr; sdmac->watermark_level = dmaengine_cfg->src_maxburst * dmaengine_cfg->src_addr_width; sdmac->word_size = dmaengine_cfg->src_addr_width; - } else if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) { + } else if (direction == DMA_DEV_TO_DEV) { sdmac->per_address2 = dmaengine_cfg->src_addr; sdmac->per_address = dmaengine_cfg->dst_addr; sdmac->watermark_level = dmaengine_cfg->src_maxburst & @@ -1578,10 +1577,33 @@ static int sdma_config(struct dma_chan *chan, dmaengine_cfg->dst_addr_width; sdmac->word_size = dmaengine_cfg->dst_addr_width; } - sdmac->direction = dmaengine_cfg->direction; + sdmac->direction = direction; return sdma_config_channel(chan); } +static int sdma_config(struct dma_chan *chan, + struct dma_slave_config *dmaengine_cfg) +{ + struct sdma_channel *sdmac = to_sdma_chan(chan); + + memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg)); + + /* Set ENBLn earlier to make sure dma request triggered after that */ + if (sdmac->event_id0) { + if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events) + return -EINVAL; + sdma_event_enable(sdmac, sdmac->event_id0); + } + + if (sdmac->event_id1) { + if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events) + return -EINVAL; + sdma_event_enable(sdmac, sdmac->event_id1); + } + + return 0; +} + static enum dma_status 
sdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) -- cgit v1.2.3 From 445897cbc9d3e2bcae8a1b3bcf80f69b699eafda Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Thu, 25 Oct 2018 15:26:07 +0100 Subject: dmaengine: pl330: remove dma_slave_config direction usage dma_slave_config direction was marked as deprecated quite some time back, remove the usage from this driver so that the field can be removed Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 28 ++++++++++++++++++++++++---- 1 file changed, 24 insertions(+), 4 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 88750a34e859..cff1b143fff5 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -448,6 +448,7 @@ struct dma_pl330_chan { /* DMA-mapped view of the FIFO; may differ if an IOMMU is present */ dma_addr_t fifo_dma; enum dma_data_direction dir; + struct dma_slave_config slave_config; /* for cyclic capability */ bool cyclic; @@ -542,6 +543,10 @@ struct _xfer_spec { struct dma_pl330_desc *desc; }; +static int pl330_config_write(struct dma_chan *chan, + struct dma_slave_config *slave_config, + enum dma_transfer_direction direction); + static inline bool _queue_full(struct pl330_thread *thrd) { return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL; @@ -2220,20 +2225,21 @@ static int fixup_burst_len(int max_burst_len, int quirks) return max_burst_len; } -static int pl330_config(struct dma_chan *chan, - struct dma_slave_config *slave_config) +static int pl330_config_write(struct dma_chan *chan, + struct dma_slave_config *slave_config, + enum dma_transfer_direction direction) { struct dma_pl330_chan *pch = to_pchan(chan); pl330_unprep_slave_fifo(pch); - if (slave_config->direction == DMA_MEM_TO_DEV) { + if (direction == DMA_MEM_TO_DEV) { if (slave_config->dst_addr) pch->fifo_addr = slave_config->dst_addr; if (slave_config->dst_addr_width) pch->burst_sz = __ffs(slave_config->dst_addr_width); pch->burst_len = fixup_burst_len(slave_config->dst_maxburst, pch->dmac->quirks); - } else if (slave_config->direction == DMA_DEV_TO_MEM) { + } else if (direction == DMA_DEV_TO_MEM) { if (slave_config->src_addr) pch->fifo_addr = slave_config->src_addr; if (slave_config->src_addr_width) @@ -2245,6 +2251,16 @@ static int pl330_config(struct dma_chan *chan, return 0; } +static int pl330_config(struct dma_chan *chan, + struct dma_slave_config *slave_config) +{ + struct dma_pl330_chan *pch = to_pchan(chan); + + memcpy(&pch->slave_config, slave_config, sizeof(*slave_config)); + + return 0; +} + static int pl330_terminate_all(struct dma_chan *chan) { struct dma_pl330_chan *pch = to_pchan(chan); @@ -2661,6 +2677,8 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( return NULL; } + pl330_config_write(chan, &pch->slave_config, direction); + if (!pl330_prep_slave_fifo(pch, direction)) return NULL; @@ -2815,6 +2833,8 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, if (unlikely(!pch || !sgl || !sg_len)) return NULL; + pl330_config_write(chan, &pch->slave_config, direction); + if (!pl330_prep_slave_fifo(pch, direction)) return NULL; -- cgit v1.2.3 From 9e314ef35c3844b53a72c588028383e5e678f4d2 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Mon, 29 Oct 2018 10:09:47 +0530 Subject: dmaengine: ste_dma40: remove dma_slave_config direction usage dma_slave_config direction was marked as deprecated quite some time back, remove the usage from this driver so that the field can be removed Reviewed-by: Linus Walleij Signed-off-by: Vinod Koul 
--- drivers/dma/ste_dma40.c | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 5e328bd10c27..907ae97a3ef4 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -442,6 +442,7 @@ struct d40_base; * @queue: Queued jobs. * @prepare_queue: Prepared jobs. * @dma_cfg: The client configuration of this dma channel. + * @slave_config: DMA slave configuration. * @configured: whether the dma_cfg configuration is valid * @base: Pointer to the device instance struct. * @src_def_cfg: Default cfg register setting for src. @@ -468,6 +469,7 @@ struct d40_chan { struct list_head queue; struct list_head prepare_queue; struct stedma40_chan_cfg dma_cfg; + struct dma_slave_config slave_config; bool configured; struct d40_base *base; /* Default register configurations */ @@ -625,6 +627,10 @@ static void __iomem *chan_base(struct d40_chan *chan) #define chan_err(d40c, format, arg...) \ d40_err(chan2dev(d40c), format, ## arg) +static int d40_set_runtime_config_write(struct dma_chan *chan, + struct dma_slave_config *config, + enum dma_transfer_direction direction); + static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d, int lli_len) { @@ -2216,6 +2222,8 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, return NULL; } + d40_set_runtime_config_write(dchan, &chan->slave_config, direction); + spin_lock_irqsave(&chan->lock, flags); desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); @@ -2634,11 +2642,22 @@ dma40_config_to_halfchannel(struct d40_chan *d40c, return 0; } -/* Runtime reconfiguration extension */ static int d40_set_runtime_config(struct dma_chan *chan, struct dma_slave_config *config) { struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); + + memcpy(&d40c->slave_config, config, sizeof(*config)); + + return 0; +} + +/* Runtime reconfiguration extension */ +static int d40_set_runtime_config_write(struct dma_chan *chan, + struct dma_slave_config *config, + enum dma_transfer_direction direction) +{ + struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); struct stedma40_chan_cfg *cfg = &d40c->dma_cfg; enum dma_slave_buswidth src_addr_width, dst_addr_width; dma_addr_t config_addr; @@ -2655,7 +2674,7 @@ static int d40_set_runtime_config(struct dma_chan *chan, dst_addr_width = config->dst_addr_width; dst_maxburst = config->dst_maxburst; - if (config->direction == DMA_DEV_TO_MEM) { + if (direction == DMA_DEV_TO_MEM) { config_addr = config->src_addr; if (cfg->dir != DMA_DEV_TO_MEM) @@ -2671,7 +2690,7 @@ static int d40_set_runtime_config(struct dma_chan *chan, if (dst_maxburst == 0) dst_maxburst = src_maxburst; - } else if (config->direction == DMA_MEM_TO_DEV) { + } else if (direction == DMA_MEM_TO_DEV) { config_addr = config->dst_addr; if (cfg->dir != DMA_MEM_TO_DEV) @@ -2689,7 +2708,7 @@ static int d40_set_runtime_config(struct dma_chan *chan, } else { dev_err(d40c->base->dev, "unrecognized channel direction %d\n", - config->direction); + direction); return -EINVAL; } @@ -2746,12 +2765,12 @@ static int d40_set_runtime_config(struct dma_chan *chan, /* These settings will take precedence later */ d40c->runtime_addr = config_addr; - d40c->runtime_direction = config->direction; + d40c->runtime_direction = direction; dev_dbg(d40c->base->dev, "configured channel %s for %s, data width %d/%d, " "maxburst %d/%d elements, LE, no flow control\n", dma_chan_name(chan), - (config->direction == DMA_DEV_TO_MEM) ? 
"RX" : "TX", + (direction == DMA_DEV_TO_MEM) ? "RX" : "TX", src_addr_width, dst_addr_width, src_maxburst, dst_maxburst); -- cgit v1.2.3 From 35faaf0df42d285b40f8a6310afbe096720f7758 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Mon, 26 Nov 2018 13:34:15 +0530 Subject: dmaengine: coh901318: Remove unused variable Commit 627469e4445b ("dmaengine: coh901318: Fix a double-lock bug") left flags variable unused, so remove it to fix the warning. drivers/dma/coh901318.c: In function 'coh901318_config': drivers/dma/coh901318.c:1805:16: warning: unused variable 'flags' [-Wunused-variable] unsigned long flags; ^~~~~ Fixes: 627469e4445b ("dmaengine: coh901318: Fix a double-lock bug") Reported-By: Stephen Rothwell Signed-off-by: Vinod Koul --- drivers/dma/coh901318.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index fd862a478738..b69d66e44052 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -1802,7 +1802,6 @@ static struct dma_chan *coh901318_xlate(struct of_phandle_args *dma_spec, static int coh901318_config(struct coh901318_chan *cohc, struct coh901318_params *param) { - unsigned long flags; const struct coh901318_params *p; int channel = cohc->id; void __iomem *virtbase = cohc->base->virtbase; -- cgit v1.2.3 From a19788612f51b7874e0cedce4dfff535e502015a Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 3 Dec 2018 15:18:06 +0100 Subject: dmaengine: sh: Remove R-Mobile APE6 support Renesas R-Mobile APE6 support is currently unused: - DMA slaves were never enabled in r8a73a4.dtsi, - The driver relies on legacy filter matching and describing all slaves and MID/RIDs in a table, unlike modern DMA engine drivers for similar hardware like rcar-dmac, - The driver doesn't seem to work well. Remove the driver, it can be resurrected from git history when needed. As this was the last user of SH_DMAE_BASE on Renesas ARM SoCs, the sh-dma-engine driver core is now used on SuperH only. Note that the DT bindings are still present, as r8a73a4.dtsi uses them. Signed-off-by: Geert Uytterhoeven Reviewed-by: Ulrich Hecht Reviewed-by: Simon Horman Signed-off-by: Vinod Koul --- drivers/dma/sh/Kconfig | 11 +------ drivers/dma/sh/Makefile | 1 - drivers/dma/sh/shdma-r8a73a4.c | 74 ------------------------------------------ drivers/dma/sh/shdma.h | 7 ---- drivers/dma/sh/shdmac.c | 7 ---- 5 files changed, 1 insertion(+), 99 deletions(-) delete mode 100644 drivers/dma/sh/shdma-r8a73a4.c (limited to 'drivers/dma') diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig index 1c4675425a1e..4d6b02b3b1f1 100644 --- a/drivers/dma/sh/Kconfig +++ b/drivers/dma/sh/Kconfig @@ -13,7 +13,7 @@ config RENESAS_DMA config SH_DMAE_BASE bool "Renesas SuperH DMA Engine support" - depends on SUPERH || ARCH_RENESAS || COMPILE_TEST + depends on SUPERH || COMPILE_TEST depends on !SUPERH || SH_DMA depends on !SH_DMA_API default y @@ -31,15 +31,6 @@ config SH_DMAE help Enable support for the Renesas SuperH DMA controllers. 
-if SH_DMAE - -config SH_DMAE_R8A73A4 - def_bool y - depends on ARCH_R8A73A4 - depends on OF - -endif - config RCAR_DMAC tristate "Renesas R-Car Gen2 DMA Controller" depends on ARCH_RENESAS || COMPILE_TEST diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile index 7d7c9491ade1..42110dd57a56 100644 --- a/drivers/dma/sh/Makefile +++ b/drivers/dma/sh/Makefile @@ -10,7 +10,6 @@ obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o # shdma-y := shdmac.o -shdma-$(CONFIG_SH_DMAE_R8A73A4) += shdma-r8a73a4.o shdma-objs := $(shdma-y) obj-$(CONFIG_SH_DMAE) += shdma.o diff --git a/drivers/dma/sh/shdma-r8a73a4.c b/drivers/dma/sh/shdma-r8a73a4.c deleted file mode 100644 index ddc9a3578353..000000000000 --- a/drivers/dma/sh/shdma-r8a73a4.c +++ /dev/null @@ -1,74 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Renesas SuperH DMA Engine support for r8a73a4 (APE6) SoCs - * - * Copyright (C) 2013 Renesas Electronics, Inc. - */ -#include - -#include "shdma-arm.h" - -static const unsigned int dma_ts_shift[] = SH_DMAE_TS_SHIFT; - -static const struct sh_dmae_slave_config dma_slaves[] = { - { - .chcr = CHCR_TX(XMIT_SZ_32BIT), - .mid_rid = 0xd1, /* MMC0 Tx */ - }, { - .chcr = CHCR_RX(XMIT_SZ_32BIT), - .mid_rid = 0xd2, /* MMC0 Rx */ - }, { - .chcr = CHCR_TX(XMIT_SZ_32BIT), - .mid_rid = 0xe1, /* MMC1 Tx */ - }, { - .chcr = CHCR_RX(XMIT_SZ_32BIT), - .mid_rid = 0xe2, /* MMC1 Rx */ - }, -}; - -#define DMAE_CHANNEL(a, b) \ - { \ - .offset = (a) - 0x20, \ - .dmars = (a) - 0x20 + 0x40, \ - .chclr_bit = (b), \ - .chclr_offset = 0x80 - 0x20, \ - } - -static const struct sh_dmae_channel dma_channels[] = { - DMAE_CHANNEL(0x8000, 0), - DMAE_CHANNEL(0x8080, 1), - DMAE_CHANNEL(0x8100, 2), - DMAE_CHANNEL(0x8180, 3), - DMAE_CHANNEL(0x8200, 4), - DMAE_CHANNEL(0x8280, 5), - DMAE_CHANNEL(0x8300, 6), - DMAE_CHANNEL(0x8380, 7), - DMAE_CHANNEL(0x8400, 8), - DMAE_CHANNEL(0x8480, 9), - DMAE_CHANNEL(0x8500, 10), - DMAE_CHANNEL(0x8580, 11), - DMAE_CHANNEL(0x8600, 12), - DMAE_CHANNEL(0x8680, 13), - DMAE_CHANNEL(0x8700, 14), - DMAE_CHANNEL(0x8780, 15), - DMAE_CHANNEL(0x8800, 16), - DMAE_CHANNEL(0x8880, 17), - DMAE_CHANNEL(0x8900, 18), - DMAE_CHANNEL(0x8980, 19), -}; - -const struct sh_dmae_pdata r8a73a4_dma_pdata = { - .slave = dma_slaves, - .slave_num = ARRAY_SIZE(dma_slaves), - .channel = dma_channels, - .channel_num = ARRAY_SIZE(dma_channels), - .ts_low_shift = TS_LOW_SHIFT, - .ts_low_mask = TS_LOW_BIT << TS_LOW_SHIFT, - .ts_high_shift = TS_HI_SHIFT, - .ts_high_mask = TS_HI_BIT << TS_HI_SHIFT, - .ts_shift = dma_ts_shift, - .ts_shift_num = ARRAY_SIZE(dma_ts_shift), - .dmaor_init = DMAOR_DME, - .chclr_present = 1, - .chclr_bitwise = 1, -}; diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h index bfb69909bd19..9c121a4b33ad 100644 --- a/drivers/dma/sh/shdma.h +++ b/drivers/dma/sh/shdma.h @@ -58,11 +58,4 @@ struct sh_dmae_desc { #define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\ struct sh_dmae_device, shdma_dev.dma_dev) -#ifdef CONFIG_SH_DMAE_R8A73A4 -extern const struct sh_dmae_pdata r8a73a4_dma_pdata; -#define r8a73a4_shdma_devid (&r8a73a4_dma_pdata) -#else -#define r8a73a4_shdma_devid NULL -#endif - #endif /* __DMA_SHDMA_H */ diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index 7971ea275387..5aafe548ca5f 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c @@ -665,12 +665,6 @@ static const struct shdma_ops sh_dmae_shdma_ops = { .get_partial = sh_dmae_get_partial, }; -static const struct of_device_id sh_dmae_of_match[] = { - {.compatible = "renesas,shdma-r8a73a4", 
.data = r8a73a4_shdma_devid,}, - {} -}; -MODULE_DEVICE_TABLE(of, sh_dmae_of_match); - static int sh_dmae_probe(struct platform_device *pdev) { const enum dma_slave_buswidth widths = @@ -915,7 +909,6 @@ static struct platform_driver sh_dmae_driver = { .driver = { .pm = &sh_dmae_pm, .name = SH_DMAE_DRV_NAME, - .of_match_table = sh_dmae_of_match, }, .remove = sh_dmae_remove, }; -- cgit v1.2.3 From 3f3c75541ffe082194e48ea9aa5edf2341f77753 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 3 Dec 2018 17:49:33 +0300 Subject: dmaengine: dmatest: fix a small memory leak in dmatest_func() We recently moved the test size tests around but it means we need to adjust the error handling as well or we leak the "pq_coefs" memory. I updated the label name to reflect that we're freeing coefs. Fixes: 787d3083caf8 ("dmaengine: dmatest: move size checks earlier in function") Signed-off-by: Dan Carpenter Signed-off-by: Vinod Koul --- drivers/dma/dmatest.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index e71aa1e3451c..28deaa084257 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -511,18 +511,18 @@ static int dmatest_func(void *data) if ((src_cnt + dst_cnt) >= 255) { pr_err("too many buffers (%d of 255 supported)\n", src_cnt + dst_cnt); - goto err_thread_type; + goto err_free_coefs; } if (1 << align > params->buf_size) { pr_err("%u-byte buffer too small for %d-byte alignment\n", params->buf_size, 1 << align); - goto err_thread_type; + goto err_free_coefs; } thread->srcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL); if (!thread->srcs) - goto err_srcs; + goto err_free_coefs; thread->usrcs = kcalloc(src_cnt + 1, sizeof(u8 *), GFP_KERNEL); if (!thread->usrcs) @@ -800,7 +800,7 @@ err_srcbuf: kfree(thread->usrcs); err_usrcs: kfree(thread->srcs); -err_srcs: +err_free_coefs: kfree(pq_coefs); err_thread_type: pr_info("%s: summary %u tests, %u failures %llu iops %llu KB/s (%d)\n", -- cgit v1.2.3 From a0ecabf503413446f08deb8f7226b6135cf922b0 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Tue, 6 Nov 2018 13:01:31 +0800 Subject: dmaengine: sprd: Remove direction usage from struct dma_slave_config The direction field of struct dma_slave_config was marked deprecated, thus remove the usage. Signed-off-by: Baolin Wang Signed-off-by: Vinod Koul --- drivers/dma/sprd-dma.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index 38d4e4f07c66..c226dc93e401 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -847,9 +847,6 @@ static int sprd_dma_slave_config(struct dma_chan *chan, struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); struct dma_slave_config *slave_cfg = &schan->slave_cfg; - if (!is_slave_direction(config->direction)) - return -EINVAL; - memcpy(slave_cfg, config, sizeof(*config)); return 0; } -- cgit v1.2.3 From d762ab33ccd03e8c1ad50b814d2deccec15b8c28 Mon Sep 17 00:00:00 2001 From: Eric Long Date: Tue, 6 Nov 2018 13:01:32 +0800 Subject: dmaengine: sprd: Get transfer residue depending on the transfer direction Add one field to save the transfer direction for struct sprd_dma_desc, which is used to get correct transfer residue depending on the transfer direction. 
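From the client side (an illustrative sketch, not tied to any particular consumer), the residue reported for an in-flight cookie is now meaningful for both transfer directions:

	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_IN_PROGRESS)
		pr_debug("bytes not yet transferred: %u\n", state.residue);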
[Baolin Wang adds one field to record the transfer direction] Signed-off-by: Eric Long Signed-off-by: Baolin Wang Signed-off-by: Vinod Koul --- drivers/dma/sprd-dma.c | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index c226dc93e401..4f3587b826da 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -159,6 +159,7 @@ struct sprd_dma_chn_hw { struct sprd_dma_desc { struct virt_dma_desc vd; struct sprd_dma_chn_hw chn_hw; + enum dma_transfer_direction dir; }; /* dma channel description */ @@ -331,6 +332,17 @@ static void sprd_dma_stop_and_disable(struct sprd_dma_chn *schan) sprd_dma_disable_chn(schan); } +static unsigned long sprd_dma_get_src_addr(struct sprd_dma_chn *schan) +{ + unsigned long addr, addr_high; + + addr = readl(schan->chn_base + SPRD_DMA_CHN_SRC_ADDR); + addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_PTR) & + SPRD_DMA_HIGH_ADDR_MASK; + + return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET); +} + static unsigned long sprd_dma_get_dst_addr(struct sprd_dma_chn *schan) { unsigned long addr, addr_high; @@ -534,7 +546,12 @@ static enum dma_status sprd_dma_tx_status(struct dma_chan *chan, else pos = 0; } else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) { - pos = sprd_dma_get_dst_addr(schan); + struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd); + + if (sdesc->dir == DMA_DEV_TO_MEM) + pos = sprd_dma_get_dst_addr(schan); + else + pos = sprd_dma_get_src_addr(schan); } else { pos = 0; } @@ -804,6 +821,8 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, if (!sdesc) return NULL; + sdesc->dir = dir; + for_each_sg(sgl, sg, sglen, i) { len = sg_dma_len(sg); -- cgit v1.2.3
From 13e8997924a0df7f0136d742aa829b791889d3ce Mon Sep 17 00:00:00 2001 From: Eric Long Date: Tue, 6 Nov 2018 13:01:33 +0800 Subject: dmaengine: sprd: Fix the last link-list configuration
We pass sglen as 0 when configuring the last link-list entry while filling the descriptor, which leads to an incorrect link-list configuration. Thus we should check whether sglen is 0 so that the last entry is configured correctly.
Signed-off-by: Eric Long Signed-off-by: Baolin Wang Signed-off-by: Vinod Koul --- drivers/dma/sprd-dma.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/dma') diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index 4f3587b826da..e6a74dc7da95 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -697,7 +697,8 @@ static int sprd_dma_fill_desc(struct dma_chan *chan, hw->cfg |= SPRD_DMA_LINKLIST_EN; /* link-list index */ - temp = (sg_index + 1) % sglen; + temp = sglen ? (sg_index + 1) % sglen : 0; + /* Next link-list configuration's physical address offset */ temp = temp * sizeof(*hw) + SPRD_DMA_CHN_SRC_ADDR; /* -- cgit v1.2.3
From 0e5d7b1eb6fc06d2f9287519e238fa13efb69cee Mon Sep 17 00:00:00 2001 From: Eric Long Date: Tue, 6 Nov 2018 13:01:34 +0800 Subject: dmaengine: sprd: Set cur_desc as NULL when free or terminate one dma channel
Starting a new transfer will fail if the channel previously ran a transfer that did not use interrupts, since schan->cur_desc is currently only reset to NULL from the transfer interrupt. Thus we should set schan->cur_desc to NULL when freeing or terminating a DMA channel to avoid this issue.
Signed-off-by: Eric Long Signed-off-by: Baolin Wang Signed-off-by: Vinod Koul --- drivers/dma/sprd-dma.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/dma') diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index e6a74dc7da95..1b39661cd17b 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -439,6 +439,7 @@ static void sprd_dma_stop(struct sprd_dma_chn *schan) sprd_dma_stop_and_disable(schan); sprd_dma_unset_uid(schan); sprd_dma_clear_int(schan); + schan->cur_desc = NULL; } static bool sprd_dma_check_trans_done(struct sprd_dma_desc *sdesc, -- cgit v1.2.3
From 97dbd6ea02beb3a7027c158e0a110b5095268d59 Mon Sep 17 00:00:00 2001 From: Eric Long Date: Tue, 6 Nov 2018 13:01:35 +0800 Subject: dmaengine: sprd: Support DMA link-list cyclic callback
The Spreadtrum DMA link-list mode is always one cyclic transfer, so we should clear the SPRD_DMA_LLIST_END flag for the link-list configuration. Moreover, add cyclic callback support for the cyclic transfer.
Signed-off-by: Eric Long Signed-off-by: Baolin Wang Signed-off-by: Vinod Koul --- drivers/dma/sprd-dma.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index 1b39661cd17b..cefe42fb7100 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -463,7 +463,7 @@ static irqreturn_t dma_irq_handle(int irq, void *dev_id) struct sprd_dma_desc *sdesc; enum sprd_dma_req_mode req_type; enum sprd_dma_int_type int_type; - bool trans_done = false; + bool trans_done = false, cyclic = false; u32 i; while (irq_status) { @@ -478,13 +478,19 @@ static irqreturn_t dma_irq_handle(int irq, void *dev_id) sdesc = schan->cur_desc; - /* Check if the dma request descriptor is done. */ - trans_done = sprd_dma_check_trans_done(sdesc, int_type, - req_type); - if (trans_done == true) { - vchan_cookie_complete(&sdesc->vd); - schan->cur_desc = NULL; - sprd_dma_start(schan); + /* cyclic mode schedule callback */ + cyclic = schan->linklist.phy_addr ? true : false; + if (cyclic == true) { + vchan_cyclic_callback(&sdesc->vd); + } else { + /* Check if the dma request descriptor is done. */ + trans_done = sprd_dma_check_trans_done(sdesc, int_type, + req_type); + if (trans_done == true) { + vchan_cookie_complete(&sdesc->vd); + schan->cur_desc = NULL; + sprd_dma_start(schan); + } } spin_unlock(&schan->vc.lock); } @@ -692,9 +698,6 @@ static int sprd_dma_fill_desc(struct dma_chan *chan, /* link-list configuration */ if (schan->linklist.phy_addr) { - if (sg_index == sglen - 1) - hw->frg_len |= SPRD_DMA_LLIST_END; - hw->cfg |= SPRD_DMA_LINKLIST_EN; /* link-list index */ -- cgit v1.2.3
From 770399df90b6e43bd086653f0a35888dca056576 Mon Sep 17 00:00:00 2001 From: Eric Long Date: Tue, 6 Nov 2018 13:01:36 +0800 Subject: dmaengine: sprd: Support DMA 2-stage transfer mode
The Spreadtrum DMA controller supports a channel 2-stage transfer mode, which means we can request two DMA channels: one as the source channel and another as the destination channel. Once the source channel's transaction is done, it will trigger the destination channel's transaction automatically by hardware signal.
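A hypothetical client setup for one 2-stage group, using the reworked SPRD_DMA_FLAGS() macro added below (the request and interrupt types chosen here are illustrative):

	unsigned long src_flags, dst_flags;

	src_flags = SPRD_DMA_FLAGS(SPRD_DMA_SRC_CHN0, SPRD_DMA_TRANS_DONE_TRG,
				   SPRD_DMA_FRAG_REQ, SPRD_DMA_TRANS_INT);
	dst_flags = SPRD_DMA_FLAGS(SPRD_DMA_DST_CHN0, SPRD_DMA_NO_TRG,
				   SPRD_DMA_FRAG_REQ, SPRD_DMA_TRANS_INT);

The prep call on the source channel carries src_flags and the one on the destination channel carries dst_flags; once the source transaction completes, the hardware kicks the destination channel without CPU involvement.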
Signed-off-by: Eric Long Signed-off-by: Baolin Wang Signed-off-by: Vinod Koul --- drivers/dma/sprd-dma.c | 98 +++++++++++++++++++++++++++++++++++++++++++- include/linux/dma/sprd-dma.h | 62 ++++++++++++++++++++++++++-- 2 files changed, 156 insertions(+), 4 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index cefe42fb7100..50d6569585b4 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -36,6 +36,8 @@ #define SPRD_DMA_GLB_CHN_EN_STS 0x1c #define SPRD_DMA_GLB_DEBUG_STS 0x20 #define SPRD_DMA_GLB_ARB_SEL_STS 0x24 +#define SPRD_DMA_GLB_2STAGE_GRP1 0x28 +#define SPRD_DMA_GLB_2STAGE_GRP2 0x2c #define SPRD_DMA_GLB_REQ_UID(uid) (0x4 * ((uid) - 1)) #define SPRD_DMA_GLB_REQ_UID_OFFSET 0x2000 @@ -57,6 +59,18 @@ #define SPRD_DMA_CHN_SRC_BLK_STEP 0x38 #define SPRD_DMA_CHN_DES_BLK_STEP 0x3c +/* SPRD_DMA_GLB_2STAGE_GRP register definition */ +#define SPRD_DMA_GLB_2STAGE_EN BIT(24) +#define SPRD_DMA_GLB_CHN_INT_MASK GENMASK(23, 20) +#define SPRD_DMA_GLB_LIST_DONE_TRG BIT(19) +#define SPRD_DMA_GLB_TRANS_DONE_TRG BIT(18) +#define SPRD_DMA_GLB_BLOCK_DONE_TRG BIT(17) +#define SPRD_DMA_GLB_FRAG_DONE_TRG BIT(16) +#define SPRD_DMA_GLB_TRG_OFFSET 16 +#define SPRD_DMA_GLB_DEST_CHN_MASK GENMASK(13, 8) +#define SPRD_DMA_GLB_DEST_CHN_OFFSET 8 +#define SPRD_DMA_GLB_SRC_CHN_MASK GENMASK(5, 0) + /* SPRD_DMA_CHN_INTC register definition */ #define SPRD_DMA_INT_MASK GENMASK(4, 0) #define SPRD_DMA_INT_CLR_OFFSET 24 @@ -118,6 +132,10 @@ #define SPRD_DMA_SRC_TRSF_STEP_OFFSET 0 #define SPRD_DMA_TRSF_STEP_MASK GENMASK(15, 0) +/* define DMA channel mode & trigger mode mask */ +#define SPRD_DMA_CHN_MODE_MASK GENMASK(7, 0) +#define SPRD_DMA_TRG_MODE_MASK GENMASK(7, 0) + /* define the DMA transfer step type */ #define SPRD_DMA_NONE_STEP 0 #define SPRD_DMA_BYTE_STEP 1 @@ -170,6 +188,8 @@ struct sprd_dma_chn { struct dma_slave_config slave_cfg; u32 chn_num; u32 dev_id; + enum sprd_dma_chn_mode chn_mode; + enum sprd_dma_trg_mode trg_mode; struct sprd_dma_desc *cur_desc; }; @@ -206,6 +226,16 @@ static inline struct sprd_dma_desc *to_sprd_dma_desc(struct virt_dma_desc *vd) return container_of(vd, struct sprd_dma_desc, vd); } +static void sprd_dma_glb_update(struct sprd_dma_dev *sdev, u32 reg, + u32 mask, u32 val) +{ + u32 orig = readl(sdev->glb_base + reg); + u32 tmp; + + tmp = (orig & ~mask) | val; + writel(tmp, sdev->glb_base + reg); +} + static void sprd_dma_chn_update(struct sprd_dma_chn *schan, u32 reg, u32 mask, u32 val) { @@ -389,6 +419,49 @@ static enum sprd_dma_req_mode sprd_dma_get_req_type(struct sprd_dma_chn *schan) return (frag_reg >> SPRD_DMA_REQ_MODE_OFFSET) & SPRD_DMA_REQ_MODE_MASK; } +static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan) +{ + struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan); + u32 val, chn = schan->chn_num + 1; + + switch (schan->chn_mode) { + case SPRD_DMA_SRC_CHN0: + val = chn & SPRD_DMA_GLB_SRC_CHN_MASK; + val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET; + val |= SPRD_DMA_GLB_2STAGE_EN; + sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val); + break; + + case SPRD_DMA_SRC_CHN1: + val = chn & SPRD_DMA_GLB_SRC_CHN_MASK; + val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET; + val |= SPRD_DMA_GLB_2STAGE_EN; + sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val); + break; + + case SPRD_DMA_DST_CHN0: + val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) & + SPRD_DMA_GLB_DEST_CHN_MASK; + val |= SPRD_DMA_GLB_2STAGE_EN; + sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val); + 
break; + + case SPRD_DMA_DST_CHN1: + val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) & + SPRD_DMA_GLB_DEST_CHN_MASK; + val |= SPRD_DMA_GLB_2STAGE_EN; + sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val); + break; + + default: + dev_err(sdev->dma_dev.dev, "invalid channel mode setting %d\n", + schan->chn_mode); + return -EINVAL; + } + + return 0; +} + static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan, struct sprd_dma_desc *sdesc) { @@ -422,6 +495,13 @@ static void sprd_dma_start(struct sprd_dma_chn *schan) list_del(&vd->node); schan->cur_desc = to_sprd_dma_desc(vd); + /* + * Set 2-stage configuration if the channel starts one 2-stage + * transfer. + */ + if (schan->chn_mode && sprd_dma_set_2stage_config(schan)) + return; + /* * Copy the DMA configuration from DMA descriptor to this hardware * channel. @@ -617,6 +697,7 @@ static int sprd_dma_fill_desc(struct dma_chan *chan, { struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan); struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); + enum sprd_dma_chn_mode chn_mode = schan->chn_mode; u32 req_mode = (flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK; u32 int_mode = flags & SPRD_DMA_INT_MASK; int src_datawidth, dst_datawidth, src_step, dst_step; @@ -628,7 +709,16 @@ static int sprd_dma_fill_desc(struct dma_chan *chan, dev_err(sdev->dma_dev.dev, "invalid source step\n"); return src_step; } - dst_step = SPRD_DMA_NONE_STEP; + + /* + * For 2-stage transfer, destination channel step can not be 0, + * since destination device is AON IRAM. + */ + if (chn_mode == SPRD_DMA_DST_CHN0 || + chn_mode == SPRD_DMA_DST_CHN1) + dst_step = src_step; + else + dst_step = SPRD_DMA_NONE_STEP; } else { dst_step = sprd_dma_get_step(slave_cfg->dst_addr_width); if (dst_step < 0) { @@ -855,6 +945,12 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, } } + /* Set channel mode and trigger mode for 2-stage transfer */ + schan->chn_mode = + (flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK; + schan->trg_mode = + (flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK; + ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len, dir, flags, slave_cfg); if (ret) { diff --git a/include/linux/dma/sprd-dma.h b/include/linux/dma/sprd-dma.h index b42b80e52cc2..ab82df64682a 100644 --- a/include/linux/dma/sprd-dma.h +++ b/include/linux/dma/sprd-dma.h @@ -3,9 +3,65 @@ #ifndef _SPRD_DMA_H_ #define _SPRD_DMA_H_ -#define SPRD_DMA_REQ_SHIFT 16 -#define SPRD_DMA_FLAGS(req_mode, int_type) \ - ((req_mode) << SPRD_DMA_REQ_SHIFT | (int_type)) +#define SPRD_DMA_REQ_SHIFT 8 +#define SPRD_DMA_TRG_MODE_SHIFT 16 +#define SPRD_DMA_CHN_MODE_SHIFT 24 +#define SPRD_DMA_FLAGS(chn_mode, trg_mode, req_mode, int_type) \ + ((chn_mode) << SPRD_DMA_CHN_MODE_SHIFT | \ + (trg_mode) << SPRD_DMA_TRG_MODE_SHIFT | \ + (req_mode) << SPRD_DMA_REQ_SHIFT | (int_type)) + +/* + * The Spreadtrum DMA controller supports channel 2-stage transfer, that means + * we can request 2 dma channels, one for source channel, and another one for + * destination channel. Each channel is independent, and has its own + * configurations. Once the source channel's transaction is done, it will + * trigger the destination channel's transaction automatically by hardware + * signal. + * + * To support 2-stage transfer, we must configure the channel mode and trigger + * mode as below definition.
+ */ + +/* + * enum sprd_dma_chn_mode: define the DMA channel mode for 2-stage transfer + * @SPRD_DMA_CHN_MODE_NONE: No channel mode setting which means channel doesn't + * support the 2-stage transfer. + * @SPRD_DMA_SRC_CHN0: Channel used as source channel 0. + * @SPRD_DMA_SRC_CHN1: Channel used as source channel 1. + * @SPRD_DMA_DST_CHN0: Channel used as destination channel 0. + * @SPRD_DMA_DST_CHN1: Channel used as destination channel 1. + * + * Now the DMA controller can support 2 groups of 2-stage transfers. + */ +enum sprd_dma_chn_mode { + SPRD_DMA_CHN_MODE_NONE, + SPRD_DMA_SRC_CHN0, + SPRD_DMA_SRC_CHN1, + SPRD_DMA_DST_CHN0, + SPRD_DMA_DST_CHN1, +}; + +/* + * enum sprd_dma_trg_mode: define the DMA channel trigger mode for 2-stage + * transfer + * @SPRD_DMA_NO_TRG: No trigger setting. + * @SPRD_DMA_FRAG_DONE_TRG: Trigger the transaction of destination channel + * automatically once the source channel's fragment request is done. + * @SPRD_DMA_BLOCK_DONE_TRG: Trigger the transaction of destination channel + * automatically once the source channel's block request is done. + * @SPRD_DMA_TRANS_DONE_TRG: Trigger the transaction of destination channel + * automatically once the source channel's transfer request is done. + * @SPRD_DMA_LIST_DONE_TRG: Trigger the transaction of destination channel + * automatically once the source channel's link-list request is done. + */ +enum sprd_dma_trg_mode { + SPRD_DMA_NO_TRG, + SPRD_DMA_FRAG_DONE_TRG, + SPRD_DMA_BLOCK_DONE_TRG, + SPRD_DMA_TRANS_DONE_TRG, + SPRD_DMA_LIST_DONE_TRG, +}; /* * enum sprd_dma_req_mode: define the DMA request mode -- cgit v1.2.3
From 531971231dac0edf17af32b06f09681f6506c0a1 Mon Sep 17 00:00:00 2001 From: Eric Long Date: Tue, 6 Nov 2018 13:01:37 +0800 Subject: dmaengine: sprd: Add me as one of the module authors
Add me as one of the module authors.
Signed-off-by: Eric Long Signed-off-by: Baolin Wang Signed-off-by: Vinod Koul --- drivers/dma/sprd-dma.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/dma') diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c index 50d6569585b4..e2f016700fcc 100644 --- a/drivers/dma/sprd-dma.c +++ b/drivers/dma/sprd-dma.c @@ -1226,4 +1226,5 @@ module_platform_driver(sprd_dma_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("DMA driver for Spreadtrum"); MODULE_AUTHOR("Baolin Wang "); +MODULE_AUTHOR("Eric Long "); MODULE_ALIAS("platform:sprd-dma"); -- cgit v1.2.3
From a5b21a8ba2a0d6ab114b8ca9c8423e84f47844b1 Mon Sep 17 00:00:00 2001 From: Michael Tretter Date: Mon, 26 Nov 2018 16:14:25 +0100 Subject: dmaengine: zynqmp_dma: replace spin_lock_bh with spin_lock_irqsave
All device_prep_dma_* functions and device_issue_pending can be called from an interrupt context. As this includes hard IRQs, we must use spin_lock_irqsave() instead of spin_lock_bh() to access chan->lock.
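A sketch of the hazard (simplified; the exact call chain is hypothetical): spin_lock_bh() only disables softirqs, so a hard IRQ arriving on the same CPU while chan->lock is held can re-enter the driver through a prep callback and spin on the lock forever. Taking the lock as below closes that window:

	unsigned long irqflags;

	spin_lock_irqsave(&chan->lock, irqflags);	/* also masks local IRQs */
	/* ... queue or start descriptors ... */
	spin_unlock_irqrestore(&chan->lock, irqflags);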
Signed-off-by: Michael Tretter Signed-off-by: Vinod Koul --- drivers/dma/xilinx/zynqmp_dma.c | 37 ++++++++++++++++++++++--------------- 1 file changed, 22 insertions(+), 15 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index c74a88b65039..6f26b59a7216 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -375,9 +375,10 @@ static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx) struct zynqmp_dma_chan *chan = to_chan(tx->chan); struct zynqmp_dma_desc_sw *desc, *new; dma_cookie_t cookie; + unsigned long irqflags; new = tx_to_desc(tx); - spin_lock_bh(&chan->lock); + spin_lock_irqsave(&chan->lock, irqflags); cookie = dma_cookie_assign(tx); if (!list_empty(&chan->pending_list)) { @@ -393,7 +394,7 @@ static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx) } list_add_tail(&new->node, &chan->pending_list); - spin_unlock_bh(&chan->lock); + spin_unlock_irqrestore(&chan->lock, irqflags); return cookie; } @@ -408,12 +409,13 @@ static struct zynqmp_dma_desc_sw * zynqmp_dma_get_descriptor(struct zynqmp_dma_chan *chan) { struct zynqmp_dma_desc_sw *desc; + unsigned long irqflags; - spin_lock_bh(&chan->lock); + spin_lock_irqsave(&chan->lock, irqflags); desc = list_first_entry(&chan->free_list, struct zynqmp_dma_desc_sw, node); list_del(&desc->node); - spin_unlock_bh(&chan->lock); + spin_unlock_irqrestore(&chan->lock, irqflags); INIT_LIST_HEAD(&desc->tx_list); /* Clear the src and dst descriptor memory */ @@ -643,10 +645,11 @@ static void zynqmp_dma_complete_descriptor(struct zynqmp_dma_chan *chan) static void zynqmp_dma_issue_pending(struct dma_chan *dchan) { struct zynqmp_dma_chan *chan = to_chan(dchan); + unsigned long irqflags; - spin_lock_bh(&chan->lock); + spin_lock_irqsave(&chan->lock, irqflags); zynqmp_dma_start_transfer(chan); - spin_unlock_bh(&chan->lock); + spin_unlock_irqrestore(&chan->lock, irqflags); } /** @@ -667,10 +670,11 @@ static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan) static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan) { struct zynqmp_dma_chan *chan = to_chan(dchan); + unsigned long irqflags; - spin_lock_bh(&chan->lock); + spin_lock_irqsave(&chan->lock, irqflags); zynqmp_dma_free_descriptors(chan); - spin_unlock_bh(&chan->lock); + spin_unlock_irqrestore(&chan->lock, irqflags); dma_free_coherent(chan->dev, (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS), chan->desc_pool_v, chan->desc_pool_p); @@ -743,8 +747,9 @@ static void zynqmp_dma_do_tasklet(unsigned long data) { struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data; u32 count; + unsigned long irqflags; - spin_lock(&chan->lock); + spin_lock_irqsave(&chan->lock, irqflags); if (chan->err) { zynqmp_dma_reset(chan); @@ -764,7 +769,7 @@ static void zynqmp_dma_do_tasklet(unsigned long data) zynqmp_dma_start_transfer(chan); unlock: - spin_unlock(&chan->lock); + spin_unlock_irqrestore(&chan->lock, irqflags); } /** @@ -776,11 +781,12 @@ unlock: static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan) { struct zynqmp_dma_chan *chan = to_chan(dchan); + unsigned long irqflags; - spin_lock_bh(&chan->lock); + spin_lock_irqsave(&chan->lock, irqflags); writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS); zynqmp_dma_free_descriptors(chan); - spin_unlock_bh(&chan->lock); + spin_unlock_irqrestore(&chan->lock, irqflags); return 0; } @@ -804,19 +810,20 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy( void *desc = NULL, 
*prev = NULL; size_t copy; u32 desc_cnt; + unsigned long irqflags; chan = to_chan(dchan); desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN); - spin_lock_bh(&chan->lock); + spin_lock_irqsave(&chan->lock, irqflags); if (desc_cnt > chan->desc_free_cnt) { - spin_unlock_bh(&chan->lock); + spin_unlock_irqrestore(&chan->lock, irqflags); dev_dbg(chan->dev, "chan %p descs are not available\n", chan); return NULL; } chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt; - spin_unlock_bh(&chan->lock); + spin_unlock_irqrestore(&chan->lock, irqflags); do { /* Allocate and populate the descriptor */ -- cgit v1.2.3 From d53513d5dc285d9a95a534fc41c5c08af6b60eac Mon Sep 17 00:00:00 2001 From: Seraj Alijan Date: Mon, 10 Dec 2018 08:52:31 +0000 Subject: dmaengine: dmatest: Add support for multi channel testing Add support for running tests on multiple channels simultaneously as the driver currently limits to 1 channel per test run. This will add support for stress testing DMA controllers with multi channel capabilities. This is done by adding a callback function to the "channel" parameter that registers the requested channel prior to the "run" parameter being set to 1. Each time the "channel" parameter is populated with a new dma channel, a new test is appended to the thread queue. Once the "run" parameter is set to 1, the test will kick start all pending threads. Signed-off-by: Seraj Alijan Signed-off-by: Vinod Koul --- drivers/dma/dmatest.c | 196 +++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 177 insertions(+), 19 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 28deaa084257..d19277234c2d 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -27,11 +27,6 @@ static unsigned int test_buf_size = 16384; module_param(test_buf_size, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer"); -static char test_channel[20]; -module_param_string(channel, test_channel, sizeof(test_channel), - S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)"); - static char test_device[32]; module_param_string(device, test_device, sizeof(test_device), S_IRUGO | S_IWUSR); @@ -139,6 +134,28 @@ static bool dmatest_run; module_param_cb(run, &run_ops, &dmatest_run, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(run, "Run the test (default: false)"); +static int dmatest_chan_set(const char *val, const struct kernel_param *kp); +static int dmatest_chan_get(char *val, const struct kernel_param *kp); +static const struct kernel_param_ops multi_chan_ops = { + .set = dmatest_chan_set, + .get = dmatest_chan_get, +}; + +static char test_channel[20]; +static struct kparam_string newchan_kps = { + .string = test_channel, + .maxlen = 20, +}; +module_param_cb(channel, &multi_chan_ops, &newchan_kps, 0644); +MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)"); + +static int dmatest_test_list_get(char *val, const struct kernel_param *kp); +static const struct kernel_param_ops test_list_ops = { + .get = dmatest_test_list_get, +}; +module_param_cb(test_list, &test_list_ops, NULL, 0444); +MODULE_PARM_DESC(test_list, "Print current test list"); + /* Maximum amount of mismatched bytes in buffer to print */ #define MAX_ERROR_COUNT 32 @@ -179,6 +196,7 @@ struct dmatest_thread { wait_queue_head_t done_wait; struct dmatest_done test_done; bool done; + bool pending; }; struct dmatest_chan { @@ -206,6 +224,22 @@ static bool is_threaded_test_run(struct dmatest_info *info) return false; 
} +static bool is_threaded_test_pending(struct dmatest_info *info) +{ + struct dmatest_chan *dtc; + + list_for_each_entry(dtc, &info->channels, node) { + struct dmatest_thread *thread; + + list_for_each_entry(thread, &dtc->threads, node) { + if (thread->pending) + return true; + } + } + + return false; +} + static int dmatest_wait_get(char *val, const struct kernel_param *kp) { struct dmatest_info *info = &test_info; @@ -476,6 +510,7 @@ static int dmatest_func(void *data) ret = -ENOMEM; smp_rmb(); + thread->pending = false; info = thread->info; params = &info->params; chan = thread->chan; @@ -884,7 +919,7 @@ static int dmatest_add_threads(struct dmatest_info *info, /* srcbuf and dstbuf are allocated by the thread itself */ get_task_struct(thread->task); list_add_tail(&thread->node, &dtc->threads); - wake_up_process(thread->task); + thread->pending = true; } return i; @@ -930,7 +965,7 @@ static int dmatest_add_channel(struct dmatest_info *info, thread_count += cnt > 0 ? cnt : 0; } - pr_info("Started %u threads using %s\n", + pr_info("Added %u threads using %s\n", thread_count, dma_chan_name(chan)); list_add_tail(&dtc->node, &info->channels); @@ -975,7 +1010,7 @@ static void request_channels(struct dmatest_info *info, } } -static void run_threaded_test(struct dmatest_info *info) +static void add_threaded_test(struct dmatest_info *info) { struct dmatest_params *params = &info->params; @@ -998,6 +1033,24 @@ static void run_threaded_test(struct dmatest_info *info) request_channels(info, DMA_PQ); } +static void run_pending_tests(struct dmatest_info *info) +{ + struct dmatest_chan *dtc; + unsigned int thread_count = 0; + + list_for_each_entry(dtc, &info->channels, node) { + struct dmatest_thread *thread; + + thread_count = 0; + list_for_each_entry(thread, &dtc->threads, node) { + wake_up_process(thread->task); + thread_count++; + } + pr_info("Started %u threads using %s\n", + thread_count, dma_chan_name(dtc->chan)); + } +} + static void stop_threaded_test(struct dmatest_info *info) { struct dmatest_chan *dtc, *_dtc; @@ -1014,7 +1067,7 @@ static void stop_threaded_test(struct dmatest_info *info) info->nr_channels = 0; } -static void restart_threaded_test(struct dmatest_info *info, bool run) +static void start_threaded_tests(struct dmatest_info *info) { /* we might be called early to set run=, defer running until all * parameters have been evaluated @@ -1022,11 +1075,7 @@ static void restart_threaded_test(struct dmatest_info *info, bool run) if (!info->did_init) return; - /* Stop any running test first */ - stop_threaded_test(info); - - /* Run test with new parameters */ - run_threaded_test(info); + run_pending_tests(info); } static int dmatest_run_get(char *val, const struct kernel_param *kp) @@ -1037,7 +1086,8 @@ static int dmatest_run_get(char *val, const struct kernel_param *kp) if (is_threaded_test_run(info)) { dmatest_run = true; } else { - stop_threaded_test(info); + if (!is_threaded_test_pending(info)) + stop_threaded_test(info); dmatest_run = false; } mutex_unlock(&info->lock); @@ -1055,18 +1105,125 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp) if (ret) { mutex_unlock(&info->lock); return ret; + } else if (dmatest_run) { + if (is_threaded_test_pending(info)) + start_threaded_tests(info); + else + pr_info("Could not start test, no channels configured\n"); + } else { + stop_threaded_test(info); + } + + mutex_unlock(&info->lock); + + return ret; +} + +static int dmatest_chan_set(const char *val, const struct kernel_param *kp) +{ + struct dmatest_info *info = 
&test_info; + struct dmatest_chan *dtc; + char chan_reset_val[20]; + int ret = 0; + + mutex_lock(&info->lock); + ret = param_set_copystring(val, kp); + if (ret) { + mutex_unlock(&info->lock); + return ret; + } + /*Clear any previously run threads */ + if (!is_threaded_test_run(info) && !is_threaded_test_pending(info)) + stop_threaded_test(info); + /* Reject channels that are already registered */ + if (is_threaded_test_pending(info)) { + list_for_each_entry(dtc, &info->channels, node) { + if (strcmp(dma_chan_name(dtc->chan), + strim(test_channel)) == 0) { + dtc = list_last_entry(&info->channels, + struct dmatest_chan, + node); + strlcpy(chan_reset_val, + dma_chan_name(dtc->chan), + sizeof(chan_reset_val)); + ret = -EBUSY; + goto add_chan_err; + } + } } - if (is_threaded_test_run(info)) + add_threaded_test(info); + + /* Check if channel was added successfully */ + dtc = list_last_entry(&info->channels, struct dmatest_chan, node); + + if (dtc->chan) { + /* + * if new channel was not successfully added, revert the + * "test_channel" string to the name of the last successfully + * added channel. exception for when users issues empty string + * to channel parameter. + */ + if ((strcmp(dma_chan_name(dtc->chan), strim(test_channel)) != 0) + && (strcmp("", strim(test_channel)) != 0)) { + ret = -EINVAL; + strlcpy(chan_reset_val, dma_chan_name(dtc->chan), + sizeof(chan_reset_val)); + goto add_chan_err; + } + + } else { + /* Clear test_channel if no channels were added successfully */ + strlcpy(chan_reset_val, "", sizeof(chan_reset_val)); ret = -EBUSY; - else if (dmatest_run) - restart_threaded_test(info, dmatest_run); + goto add_chan_err; + } + + mutex_unlock(&info->lock); + + return ret; +add_chan_err: + param_set_copystring(chan_reset_val, kp); mutex_unlock(&info->lock); return ret; } +static int dmatest_chan_get(char *val, const struct kernel_param *kp) +{ + struct dmatest_info *info = &test_info; + + mutex_lock(&info->lock); + if (!is_threaded_test_run(info) && !is_threaded_test_pending(info)) { + stop_threaded_test(info); + strlcpy(test_channel, "", sizeof(test_channel)); + } + mutex_unlock(&info->lock); + + return param_get_string(val, kp); +} + +static int dmatest_test_list_get(char *val, const struct kernel_param *kp) +{ + struct dmatest_info *info = &test_info; + struct dmatest_chan *dtc; + unsigned int thread_count = 0; + + list_for_each_entry(dtc, &info->channels, node) { + struct dmatest_thread *thread; + + thread_count = 0; + list_for_each_entry(thread, &dtc->threads, node) { + thread_count++; + } + pr_info("%u threads using %s\n", + thread_count, dma_chan_name(dtc->chan)); + } + + return 0; +} + static int __init dmatest_init(void) { struct dmatest_info *info = &test_info; @@ -1074,7 +1231,8 @@ static int __init dmatest_init(void) if (dmatest_run) { mutex_lock(&info->lock); - run_threaded_test(info); + add_threaded_test(info); + run_pending_tests(info); mutex_unlock(&info->lock); } -- cgit v1.2.3 From 6138f967bccc7a84a167769c2e045c346ad37191 Mon Sep 17 00:00:00 2001 From: Seraj Alijan Date: Mon, 10 Dec 2018 08:52:34 +0000 Subject: dmaengine: dmatest: Use fixed point div to calculate iops Use fixed point division to calculate iops to prevent reporting 0 iops when operations last for longer than a second. 
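To make the effect concrete: with plain integer division, 3 transfers over a 2.5 s runtime would be reported as 1 iops, and a single long transfer as 0 iops. Below is a standalone sketch of the same 8-bit fixed-point scheme (ordinary userspace C, mirroring the fixed-point macros this patch adds; not driver code):

#include <stdio.h>

#define FIXPT_SHIFT	8
#define FIXPT_MASK	0xFF
#define INT_TO_FIXPT(a)	((a) << FIXPT_SHIFT)
#define FIXPT_TO_INT(a)	((a) >> FIXPT_SHIFT)
#define FIXPT_GET_FRAC(a) ((((a) & FIXPT_MASK) * 100) >> FIXPT_SHIFT)

int main(void)
{
	unsigned long long tests = 3;		/* completed transfers */
	unsigned long long runtime = 2500000;	/* runtime in microseconds */

	/*
	 * ops/sec = tests * 1e6 / runtime; scale by 2^8 *before* the
	 * integer division so eight fractional bits survive it.
	 */
	unsigned long long iops = INT_TO_FIXPT(tests * 1000000ULL) / runtime;

	/* prints "1.19 iops" (exact value 1.2) instead of plain "1" */
	printf("%llu.%02llu iops\n", FIXPT_TO_INT(iops), FIXPT_GET_FRAC(iops));
	return 0;
}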
Signed-off-by: Seraj Alijan Signed-off-by: Vinod Koul --- drivers/dma/dmatest.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index d19277234c2d..998344b0fb7a 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -177,6 +177,13 @@ MODULE_PARM_DESC(test_list, "Print current test list"); #define PATTERN_COUNT_MASK 0x1f #define PATTERN_MEMSET_IDX 0x01 +/* Fixed point arithmetic ops */ +#define FIXPT_SHIFT 8 +#define FIXPNT_MASK 0xFF +#define FIXPT_TO_INT(a) ((a) >> FIXPT_SHIFT) +#define INT_TO_FIXPT(a) ((a) << FIXPT_SHIFT) +#define FIXPT_GET_FRAC(a) ((((a) & FIXPNT_MASK) * 100) >> FIXPT_SHIFT) + /* poor man's completion - we want to use wait_event_freezable() on it */ struct dmatest_done { bool done; @@ -453,13 +460,15 @@ static unsigned long long dmatest_persec(s64 runtime, unsigned int val) } per_sec *= val; + per_sec = INT_TO_FIXPT(per_sec); do_div(per_sec, runtime); + return per_sec; } static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len) { - return dmatest_persec(runtime, len >> 10); + return FIXPT_TO_INT(dmatest_persec(runtime, len >> 10)); } /* @@ -500,6 +509,7 @@ static int dmatest_func(void *data) ktime_t comparetime = 0; s64 runtime = 0; unsigned long long total_len = 0; + unsigned long long iops = 0; u8 align = 0; bool is_memset = false; dma_addr_t *srcs; @@ -838,9 +848,10 @@ err_usrcs: err_free_coefs: kfree(pq_coefs); err_thread_type: - pr_info("%s: summary %u tests, %u failures %llu iops %llu KB/s (%d)\n", + iops = dmatest_persec(runtime, total_tests); + pr_info("%s: summary %u tests, %u failures %llu.%02llu iops %llu KB/s (%d)\n", current->comm, total_tests, failed_tests, - dmatest_persec(runtime, total_tests), + FIXPT_TO_INT(iops), FIXPT_GET_FRAC(iops), dmatest_KBs(runtime, total_len), ret); /* terminate all transfers on specified channels */ -- cgit v1.2.3 From a875abfadf265cb1970036898068b34fc63759b7 Mon Sep 17 00:00:00 2001 From: Seraj Alijan Date: Mon, 10 Dec 2018 08:52:37 +0000 Subject: dmaengine: dmatest: Add alignment parameter Add parameter "alignment" to allow setting the address alignment manually. Having the ability to configure address alignment from user space adds new testing capabilities where different alignments can be configured for testing without having to modify the dma device alignment properties. If configured, the alignment value will override the device alignment property of the target device. Signed-off-by: Seraj Alijan Signed-off-by: Vinod Koul --- drivers/dma/dmatest.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 998344b0fb7a..6302ebef2938 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -79,6 +79,10 @@ static bool verbose; module_param(verbose, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)"); +static int alignment = -1; +module_param(alignment, int, 0644); +MODULE_PARM_DESC(alignment, "Custom data address alignment taken as 2^(alignment) (default: not used (-1))"); + /** * struct dmatest_params - test parameters. 
* @buf_size: size of the memcpy test buffer @@ -103,6 +107,7 @@ struct dmatest_params { int timeout; bool noverify; bool norandom; + int alignment; }; /** @@ -526,22 +531,26 @@ static int dmatest_func(void *data) chan = thread->chan; dev = chan->device; if (thread->type == DMA_MEMCPY) { - align = dev->copy_align; + align = params->alignment < 0 ? dev->copy_align : + params->alignment; src_cnt = dst_cnt = 1; } else if (thread->type == DMA_MEMSET) { - align = dev->fill_align; + align = params->alignment < 0 ? dev->fill_align : + params->alignment; src_cnt = dst_cnt = 1; is_memset = true; } else if (thread->type == DMA_XOR) { /* force odd to ensure dst = src */ src_cnt = min_odd(params->xor_sources | 1, dev->max_xor); dst_cnt = 1; - align = dev->xor_align; + align = params->alignment < 0 ? dev->xor_align : + params->alignment; } else if (thread->type == DMA_PQ) { /* force odd to ensure dst = src */ src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0)); dst_cnt = 2; - align = dev->pq_align; + align = params->alignment < 0 ? dev->pq_align : + params->alignment; pq_coefs = kmalloc(params->pq_sources + 1, GFP_KERNEL); if (!pq_coefs) @@ -1037,6 +1046,7 @@ static void add_threaded_test(struct dmatest_info *info) params->timeout = timeout; params->noverify = noverify; params->norandom = norandom; + params->alignment = alignment; request_channels(info, DMA_MEMCPY); request_channels(info, DMA_MEMSET); -- cgit v1.2.3 From 13396a130ffec45a736bcc08ad92d35e45f67dd8 Mon Sep 17 00:00:00 2001 From: Seraj Alijan Date: Mon, 10 Dec 2018 08:52:39 +0000 Subject: dmaengine: dmatest: Add transfer_size parameter The existing transfer size "len" is either generated randomly or set to the size of test_buf_size. In some cases we need to specify a transfer size that differs from the buffer size and is unaligned, to test the target device's ability to handle unaligned transfers. This patch adds the optional parameter "transfer_size" to allow setting an explicit transfer size for DMA transfers. Signed-off-by: Seraj Alijan Signed-off-by: Vinod Koul --- drivers/dma/dmatest.c | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 6302ebef2938..2eea4ef72915 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -83,6 +83,10 @@ static int alignment = -1; module_param(alignment, int, 0644); MODULE_PARM_DESC(alignment, "Custom data address alignment taken as 2^(alignment) (default: not used (-1))"); +static unsigned int transfer_size; +module_param(transfer_size, uint, 0644); +MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default: not used (0))"); /** * struct dmatest_params - test parameters.
* @buf_size: size of the memcpy test buffer @@ -108,6 +112,7 @@ struct dmatest_params { bool noverify; bool norandom; int alignment; + unsigned int transfer_size; }; /** @@ -643,15 +648,25 @@ static int dmatest_func(void *data) total_tests++; - if (params->norandom) + if (params->transfer_size) { + if (params->transfer_size >= params->buf_size) { + pr_err("%u-byte transfer size must be lower than %u-buffer size\n", + params->transfer_size, params->buf_size); + break; + } + len = params->transfer_size; + } else if (params->norandom) { len = params->buf_size; - else + } else { len = dmatest_random() % params->buf_size + 1; + } - len = (len >> align) << align; - if (!len) - len = 1 << align; - + /* Do not alter transfer size explicitly defined by user */ + if (!params->transfer_size) { + len = (len >> align) << align; + if (!len) + len = 1 << align; + } total_len += len; if (params->norandom) { @@ -1047,6 +1062,7 @@ static void add_threaded_test(struct dmatest_info *info) params->noverify = noverify; params->norandom = norandom; params->alignment = alignment; + params->transfer_size = transfer_size; request_channels(info, DMA_MEMCPY); request_channels(info, DMA_MEMSET); -- cgit v1.2.3 From 8e1897bc8eaf1383d6b33d5232136221ffd9c818 Mon Sep 17 00:00:00 2001 From: Yangtao Li Date: Wed, 5 Dec 2018 11:18:57 -0500 Subject: dmaengine: amba-pl08x: convert to DEFINE_SHOW_ATTRIBUTE Use DEFINE_SHOW_ATTRIBUTE macro to simplify the code. Signed-off-by: Yangtao Li Signed-off-by: Vinod Koul --- drivers/dma/amba-pl08x.c | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 97483df1f82e..fc8c2bab563c 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -2505,24 +2505,14 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) return 0; } -static int pl08x_debugfs_open(struct inode *inode, struct file *file) -{ - return single_open(file, pl08x_debugfs_show, inode->i_private); -} - -static const struct file_operations pl08x_debugfs_operations = { - .open = pl08x_debugfs_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(pl08x_debugfs); static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) { /* Expose a simple debugfs interface to view all clocks */ (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO, NULL, pl08x, - &pl08x_debugfs_operations); + &pl08x_debugfs_fops); } #else -- cgit v1.2.3 From 0f7ab39a6b401a4fef3f3b7004a2e58ef67079af Mon Sep 17 00:00:00 2001 From: Yangtao Li Date: Wed, 5 Dec 2018 11:18:58 -0500 Subject: dmaengine: mic_x100_dma: convert to DEFINE_SHOW_ATTRIBUTE Use DEFINE_SHOW_ATTRIBUTE macro to simplify the code. 
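For reference, DEFINE_SHOW_ATTRIBUTE(mic_dma_reg) expands to roughly the following (going by include/linux/seq_file.h of this vintage), which is why the hand-rolled open/release helpers and file_operations table below can simply be deleted once the show function is renamed to mic_dma_reg_show:

/* Approximate expansion of DEFINE_SHOW_ATTRIBUTE(mic_dma_reg): */
static int mic_dma_reg_open(struct inode *inode, struct file *file)
{
	return single_open(file, mic_dma_reg_show, inode->i_private);
}

static const struct file_operations mic_dma_reg_fops = {
	.owner		= THIS_MODULE,
	.open		= mic_dma_reg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};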
Signed-off-by: Yangtao Li Signed-off-by: Vinod Koul --- drivers/dma/mic_x100_dma.c | 22 +++------------------- 1 file changed, 3 insertions(+), 19 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c index adfd316db1a8..6a91e28d537d 100644 --- a/drivers/dma/mic_x100_dma.c +++ b/drivers/dma/mic_x100_dma.c @@ -676,7 +676,7 @@ static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev) } /* DEBUGFS CODE */ -static int mic_dma_reg_seq_show(struct seq_file *s, void *pos) +static int mic_dma_reg_show(struct seq_file *s, void *pos) { struct mic_dma_device *mic_dma_dev = s->private; int i, chan_num, first_chan = mic_dma_dev->start_ch; @@ -707,23 +707,7 @@ static int mic_dma_reg_seq_show(struct seq_file *s, void *pos) return 0; } -static int mic_dma_reg_debug_open(struct inode *inode, struct file *file) -{ - return single_open(file, mic_dma_reg_seq_show, inode->i_private); -} - -static int mic_dma_reg_debug_release(struct inode *inode, struct file *file) -{ - return single_release(inode, file); -} - -static const struct file_operations mic_dma_reg_ops = { - .owner = THIS_MODULE, - .open = mic_dma_reg_debug_open, - .read = seq_read, - .llseek = seq_lseek, - .release = mic_dma_reg_debug_release -}; +DEFINE_SHOW_ATTRIBUTE(mic_dma_reg); /* Debugfs parent dir */ static struct dentry *mic_dma_dbg; @@ -747,7 +731,7 @@ static int mic_dma_driver_probe(struct mbus_device *mbdev) if (mic_dma_dev->dbg_dir) debugfs_create_file("mic_dma_reg", 0444, mic_dma_dev->dbg_dir, mic_dma_dev, - &mic_dma_reg_ops); + &mic_dma_reg_fops); } return 0; } -- cgit v1.2.3 From e00f50a79e61396ca531b71e04857120332948d5 Mon Sep 17 00:00:00 2001 From: Yangtao Li Date: Wed, 5 Dec 2018 11:18:59 -0500 Subject: dmaengine: pxa: remove DBGFS_FUNC_DECL() We already have DEFINE_SHOW_ATTRIBUTE, so there is no need to define such a macro; remove DBGFS_FUNC_DECL. Signed-off-by: Yangtao Li Acked-by: Robert Jarzmik Signed-off-by: Vinod Koul --- drivers/dma/pxa_dma.c | 36 ++++++++++++------------------------ 1 file changed, 12 insertions(+), 24 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index 825725057e00..e78fe98b5cf1 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -189,7 +189,7 @@ bool pxad_filter_fn(struct dma_chan *chan, void *param); #include #include -static int dbg_show_requester_chan(struct seq_file *s, void *p) +static int requester_chan_show(struct seq_file *s, void *p) { struct pxad_phy *phy = s->private; int i; @@ -220,7 +220,7 @@ static int is_phys_valid(unsigned long addr) #define PXA_DCSR_STR(flag) (dcsr & PXA_DCSR_##flag ? #flag" " : "") #define PXA_DCMD_STR(flag) (dcmd & PXA_DCMD_##flag ?
#flag" " : "") -static int dbg_show_descriptors(struct seq_file *s, void *p) +static int descriptors_show(struct seq_file *s, void *p) { struct pxad_phy *phy = s->private; int i, max_show = 20, burst, width; @@ -263,7 +263,7 @@ static int dbg_show_descriptors(struct seq_file *s, void *p) return 0; } -static int dbg_show_chan_state(struct seq_file *s, void *p) +static int chan_state_show(struct seq_file *s, void *p) { struct pxad_phy *phy = s->private; u32 dcsr, dcmd; @@ -306,7 +306,7 @@ static int dbg_show_chan_state(struct seq_file *s, void *p) return 0; } -static int dbg_show_state(struct seq_file *s, void *p) +static int state_show(struct seq_file *s, void *p) { struct pxad_device *pdev = s->private; @@ -317,22 +317,10 @@ static int dbg_show_state(struct seq_file *s, void *p) return 0; } -#define DBGFS_FUNC_DECL(name) \ -static int dbg_open_##name(struct inode *inode, struct file *file) \ -{ \ - return single_open(file, dbg_show_##name, inode->i_private); \ -} \ -static const struct file_operations dbg_fops_##name = { \ - .open = dbg_open_##name, \ - .llseek = seq_lseek, \ - .read = seq_read, \ - .release = single_release, \ -} - -DBGFS_FUNC_DECL(state); -DBGFS_FUNC_DECL(chan_state); -DBGFS_FUNC_DECL(descriptors); -DBGFS_FUNC_DECL(requester_chan); +DEFINE_SHOW_ATTRIBUTE(state); +DEFINE_SHOW_ATTRIBUTE(chan_state); +DEFINE_SHOW_ATTRIBUTE(descriptors); +DEFINE_SHOW_ATTRIBUTE(requester_chan); static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev, int ch, struct dentry *chandir) @@ -348,13 +336,13 @@ static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev, if (chan) chan_state = debugfs_create_file("state", 0400, chan, dt, - &dbg_fops_chan_state); + &chan_state_fops); if (chan_state) chan_descr = debugfs_create_file("descriptors", 0400, chan, dt, - &dbg_fops_descriptors); + &descriptors_fops); if (chan_descr) chan_reqs = debugfs_create_file("requesters", 0400, chan, dt, - &dbg_fops_requester_chan); + &requester_chan_fops); if (!chan_reqs) goto err_state; @@ -375,7 +363,7 @@ static void pxad_init_debugfs(struct pxad_device *pdev) goto err_root; pdev->dbgfs_state = debugfs_create_file("state", 0400, pdev->dbgfs_root, - pdev, &dbg_fops_state); + pdev, &state_fops); if (!pdev->dbgfs_state) goto err_state; -- cgit v1.2.3 From f7f41722efacc121cbfd36a8480cd2e66223479d Mon Sep 17 00:00:00 2001 From: Yangtao Li Date: Wed, 5 Dec 2018 11:19:00 -0500 Subject: dmaengine: qcom_hidma: convert to DEFINE_SHOW_ATTRIBUTE Use DEFINE_SHOW_ATTRIBUTE macro to simplify the code. Signed-off-by: Yangtao Li Acked-by: Sinan Kaya Signed-off-by: Vinod Koul --- drivers/dma/qcom/hidma_dbg.c | 33 ++++++--------------------------- 1 file changed, 6 insertions(+), 27 deletions(-) (limited to 'drivers/dma') diff --git a/drivers/dma/qcom/hidma_dbg.c b/drivers/dma/qcom/hidma_dbg.c index 3bdcb8056a36..9523faf7acdc 100644 --- a/drivers/dma/qcom/hidma_dbg.c +++ b/drivers/dma/qcom/hidma_dbg.c @@ -85,11 +85,11 @@ static void hidma_ll_devstats(struct seq_file *s, void *llhndl) } /* - * hidma_chan_stats: display HIDMA channel statistics + * hidma_chan_show: display HIDMA channel statistics * * Display the statistics for the current HIDMA virtual channel device. 
*/ -static int hidma_chan_stats(struct seq_file *s, void *unused) +static int hidma_chan_show(struct seq_file *s, void *unused) { struct hidma_chan *mchan = s->private; struct hidma_desc *mdesc; @@ -117,11 +117,11 @@ static int hidma_chan_stats(struct seq_file *s, void *unused) } /* - * hidma_dma_info: display HIDMA device info + * hidma_dma_show: display HIDMA device info * * Display the info for the current HIDMA device. */ -static int hidma_dma_info(struct seq_file *s, void *unused) +static int hidma_dma_show(struct seq_file *s, void *unused) { struct hidma_dev *dmadev = s->private; resource_size_t sz; @@ -138,29 +138,8 @@ static int hidma_dma_info(struct seq_file *s, void *unused) return 0; } -static int hidma_chan_stats_open(struct inode *inode, struct file *file) -{ - return single_open(file, hidma_chan_stats, inode->i_private); -} - -static int hidma_dma_info_open(struct inode *inode, struct file *file) -{ - return single_open(file, hidma_dma_info, inode->i_private); -} - -static const struct file_operations hidma_chan_fops = { - .open = hidma_chan_stats_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static const struct file_operations hidma_dma_fops = { - .open = hidma_dma_info_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(hidma_chan); +DEFINE_SHOW_ATTRIBUTE(hidma_dma); void hidma_debug_uninit(struct hidma_dev *dmadev) { -- cgit v1.2.3
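All four conversions above converge on the same idiom. A minimal, self-contained sketch of the resulting pattern (hypothetical "foo" driver and foo_dev struct, not taken from any patch in this series):

#include <linux/debugfs.h>
#include <linux/seq_file.h>

struct foo_dev {			/* hypothetical device state */
	unsigned int nr_transfers;
	struct dentry *dbg_dir;
};

/* The only hand-written piece: a seq_file show callback named *_show. */
static int foo_stats_show(struct seq_file *s, void *unused)
{
	struct foo_dev *fdev = s->private;	/* set via debugfs_create_file() */

	seq_printf(s, "transfers: %u\n", fdev->nr_transfers);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo_stats);	/* emits foo_stats_open and foo_stats_fops */

static void foo_debugfs_init(struct foo_dev *fdev)
{
	fdev->dbg_dir = debugfs_create_dir("foo", NULL);
	debugfs_create_file("stats", 0444, fdev->dbg_dir, fdev,
			    &foo_stats_fops);
}

The data pointer handed to debugfs_create_file() comes back as s->private in the show callback (via single_open() in the generated *_open helper), which is what lets each converted driver keep its per-device context without any extra glue.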