author		Olof Johansson <olof@lixom.net>	2013-05-27 20:09:53 -0700
committer	Olof Johansson <olof@lixom.net>	2013-05-27 20:10:04 -0700
commit		6f39ef575df368ed77fcaa2f1d0f28191c7414fc
tree		e64a261770acf65a32e498f5b8f1f8323e14fbb7 /drivers/dma
parent		e4aa937ec75df0eea0bee03bffa3303ad36c986b
parent		95e4bf98520c9a92cd1b87d12c89e8c60c5fe2ca
download	linux-6f39ef575df368ed77fcaa2f1d0f28191c7414fc.tar.bz2
Merge tag 'ux500-dma40-for-arm-soc' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson into next/drivers
From Linus Walleij:
This is a set of patches from Lee Jones to start converting
the ux500 to fetch DMA channels from the device tree:
- Full DT support and channel mapping in the DMA40 driver
- Dropping of platform data for migrated devices on the DT
boot path.
* tag 'ux500-dma40-for-arm-soc' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson: (36 commits)
ARM: ux500: Register Cryp and Hash platform drivers on Snowball
crypto: ux500/[cryp|hash] - Show successful start-up in the bootlog
ARM: ux500: Stop passing Cryp DMA channel config information though pdata
crypto: ux500/cryp - Set DMA configuration though dma_slave_config()
crypto: ux500/cryp - Prepare clock before enabling it
ARM: ux500: Stop passing Hash DMA channel config information though pdata
crypto: ux500/hash - Set DMA configuration though dma_slave_config()
crypto: ux500/hash - Prepare clock before enabling it
ARM: ux500: Remove unnecessary attributes from DMA channel request pdata
dmaengine: ste_dma40: Correct copy/paste error
ARM: ux500: Remove DMA address look-up table
dmaengine: ste_dma40: Remove redundant address fetching function
dmaengine: ste_dma40: Only use addresses passed as configuration information
ARM: ux500: Stop passing UART's platform data for Device Tree boots
dmaengine: ste_dma40: Don't configure runtime configurable setup during allocate
dmaengine: ste_dma40: Remove unnecessary call to d40_phy_cfg()
dmaengine: ste_dma40: Separate Logical Global Interrupt Mask (GIM) unmasking
ARM: ux500: Pass remnant platform data though to DMA40 driver
dmaengine: ste_dma40: Supply full Device Tree parsing support
dmaengine: ste_dma40: Allow driver to be probe()able when DT is enabled
...
Signed-off-by: Olof Johansson <olof@lixom.net>
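
[Editor's illustration, not part of the merge: with the DT support added below, a client node requests a DMA40 channel with three cells, which d40_xlate() decodes as <event line/device type, fixed physical channel, flags>; flags bit 0 selects logical/physical mode, bit 1 the direction (0 = mem-to-periph, 1 = periph-to-mem), bit 2 big-endian, bit 3 use of a fixed physical channel. The node name, address and event-line number in this sketch are illustrative assumptions only.]

    uart0: serial@80120000 {
            /* <event line, fixed phy channel (unused here), flags> */
            dmas = <&dma 13 0 0x2>,   /* logical, device-to-memory (RX) */
                   <&dma 13 0 0x0>;   /* logical, memory-to-device (TX) */
            dma-names = "rx", "tx";
    };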
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/ste_dma40.c		| 366
-rw-r--r--	drivers/dma/ste_dma40_ll.c	| 106
-rw-r--r--	drivers/dma/ste_dma40_ll.h	|   3
3 files changed, 260 insertions(+), 215 deletions(-)
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 1734feec47b1..6ed775736853 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -17,6 +17,8 @@
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
 #include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
 #include <linux/amba/bus.h>
 #include <linux/regulator/consumer.h>
 #include <linux/platform_data/dma-ste-dma40.h>
@@ -45,6 +47,9 @@
 #define D40_LCLA_LINK_PER_EVENT_GRP 128
 #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
 
+/* Max number of logical channels per physical channel */
+#define D40_MAX_LOG_CHAN_PER_PHY 32
+
 /* Attempts before giving up to trying to get pages that are aligned */
 #define MAX_LCLA_ALLOC_ATTEMPTS 256
 
@@ -53,7 +58,50 @@
 #define D40_ALLOC_PHY (1 << 30)
 #define D40_ALLOC_LOG_FREE 0
 
-#define MAX(a, b) (((a) < (b)) ? (b) : (a))
+/* Reserved event lines for memcpy only. */
+#define DB8500_DMA_MEMCPY_EV_0	51
+#define DB8500_DMA_MEMCPY_EV_1	56
+#define DB8500_DMA_MEMCPY_EV_2	57
+#define DB8500_DMA_MEMCPY_EV_3	58
+#define DB8500_DMA_MEMCPY_EV_4	59
+#define DB8500_DMA_MEMCPY_EV_5	60
+
+static int dma40_memcpy_channels[] = {
+	DB8500_DMA_MEMCPY_EV_0,
+	DB8500_DMA_MEMCPY_EV_1,
+	DB8500_DMA_MEMCPY_EV_2,
+	DB8500_DMA_MEMCPY_EV_3,
+	DB8500_DMA_MEMCPY_EV_4,
+	DB8500_DMA_MEMCPY_EV_5,
+};
+
+/* Default configuration for physcial memcpy */
+struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
+	.mode = STEDMA40_MODE_PHYSICAL,
+	.dir = STEDMA40_MEM_TO_MEM,
+
+	.src_info.data_width = STEDMA40_BYTE_WIDTH,
+	.src_info.psize = STEDMA40_PSIZE_PHY_1,
+	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+
+	.dst_info.data_width = STEDMA40_BYTE_WIDTH,
+	.dst_info.psize = STEDMA40_PSIZE_PHY_1,
+	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+};
+
+/* Default configuration for logical memcpy */
+struct stedma40_chan_cfg dma40_memcpy_conf_log = {
+	.mode = STEDMA40_MODE_LOGICAL,
+	.dir = STEDMA40_MEM_TO_MEM,
+
+	.src_info.data_width = STEDMA40_BYTE_WIDTH,
+	.src_info.psize = STEDMA40_PSIZE_LOG_1,
+	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+
+	.dst_info.data_width = STEDMA40_BYTE_WIDTH,
+	.dst_info.psize = STEDMA40_PSIZE_LOG_1,
+	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
+};
 
 /**
  * enum 40_command - The different commands and/or statuses.
@@ -171,6 +219,9 @@ static u32 d40_backup_regs_chan[] = {
 	D40_CHAN_REG_SDLNK,
 };
 
+#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
+				 BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)
+
 /**
  * struct d40_interrupt_lookup - lookup table for interrupt handler
  *
@@ -534,7 +585,7 @@ struct d40_base {
 	resource_size_t lcpa_size;
 	struct kmem_cache *desc_slab;
 	u32 reg_val_backup[BACKUP_REGS_SZ];
-	u32 reg_val_backup_v4[MAX(BACKUP_REGS_SZ_V4A, BACKUP_REGS_SZ_V4B)];
+	u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
 	u32 *reg_val_backup_chan;
 	u16 gcc_pwr_off_mask;
 	bool initialized;
@@ -1257,21 +1308,17 @@ static void __d40_config_set_event(struct d40_chan *d40c,
 static void d40_config_set_event(struct d40_chan *d40c,
 				 enum d40_events event_type)
 {
+	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
+
 	/* Enable event line connected to device (or memcpy) */
 	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
-	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
-		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
-
+	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
 		__d40_config_set_event(d40c, event_type, event,
 				       D40_CHAN_REG_SSLNK);
-	}
-
-	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
-		u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
 
+	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
 		__d40_config_set_event(d40c, event_type, event,
 				       D40_CHAN_REG_SDLNK);
-	}
 }
 
 static u32 d40_chan_has_events(struct d40_chan *d40c)
@@ -1713,8 +1760,6 @@ static int d40_validate_conf(struct d40_chan *d40c,
 			     struct stedma40_chan_cfg *conf)
 {
 	int res = 0;
-	u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
-	u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
 	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;
 
 	if (!conf->dir) {
@@ -1722,44 +1767,10 @@ static int d40_validate_conf(struct d40_chan *d40c,
 		res = -EINVAL;
 	}
 
-	if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY &&
-	    d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 &&
-	    d40c->runtime_addr == 0) {
-
-		chan_err(d40c, "Invalid TX channel address (%d)\n",
-			 conf->dst_dev_type);
-		res = -EINVAL;
-	}
-
-	if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY &&
-	    d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 &&
-	    d40c->runtime_addr == 0) {
-		chan_err(d40c, "Invalid RX channel address (%d)\n",
-			 conf->src_dev_type);
-		res = -EINVAL;
-	}
-
-	if (conf->dir == STEDMA40_MEM_TO_PERIPH &&
-	    dst_event_group == STEDMA40_DEV_DST_MEMORY) {
-		chan_err(d40c, "Invalid dst\n");
-		res = -EINVAL;
-	}
-
-	if (conf->dir == STEDMA40_PERIPH_TO_MEM &&
-	    src_event_group == STEDMA40_DEV_SRC_MEMORY) {
-		chan_err(d40c, "Invalid src\n");
-		res = -EINVAL;
-	}
-
-	if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
-	    dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
-		chan_err(d40c, "No event line\n");
-		res = -EINVAL;
-	}
-
-	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
-	    (src_event_group != dst_event_group)) {
-		chan_err(d40c, "Invalid event group\n");
+	if ((is_log && conf->dev_type > d40c->base->num_log_chans) ||
+	    (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
+	    (conf->dev_type < 0)) {
+		chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
 		res = -EINVAL;
 	}
 
@@ -1880,7 +1891,7 @@ out:
 
 static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
 {
-	int dev_type;
+	int dev_type = d40c->dma_cfg.dev_type;
 	int event_group;
 	int event_line;
 	struct d40_phy_res *phys;
@@ -1895,13 +1906,11 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
 	num_phy_chans = d40c->base->num_phy_chans;
 
 	if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
-		dev_type = d40c->dma_cfg.src_dev_type;
 		log_num = 2 * dev_type;
 		is_src = true;
 	} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
 		   d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
 		/* dst event lines are used for logical memcpy */
-		dev_type = d40c->dma_cfg.dst_dev_type;
 		log_num = 2 * dev_type + 1;
 		is_src = false;
 	} else
@@ -2012,14 +2021,23 @@ static int d40_config_memcpy(struct d40_chan *d40c)
 	dma_cap_mask_t cap = d40c->chan.device->cap_mask;
 
 	if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
-		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
-		d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
-		d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
-			memcpy[d40c->chan.chan_id];
+		d40c->dma_cfg = dma40_memcpy_conf_log;
+		d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];
+
+		d40_log_cfg(&d40c->dma_cfg,
+			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
 
 	} else if (dma_has_cap(DMA_MEMCPY, cap) &&
 		   dma_has_cap(DMA_SLAVE, cap)) {
-		d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
+		d40c->dma_cfg = dma40_memcpy_conf_phy;
+
+		/* Generate interrrupt at end of transfer or relink. */
+		d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
+
+		/* Generate interrupt on error. */
+		d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
+		d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
+
 	} else {
 		chan_err(d40c, "No memcpy\n");
 		return -EINVAL;
@@ -2032,7 +2050,7 @@
 
 static int d40_free_dma(struct d40_chan *d40c)
 {
 	int res = 0;
-	u32 event;
+	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
 	struct d40_phy_res *phy = d40c->phy_chan;
 	bool is_src;
@@ -2051,13 +2069,11 @@ static int d40_free_dma(struct d40_chan *d40c)
 	}
 
 	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
-	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
-		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
+	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
 		is_src = false;
-	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
-		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
+	else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
 		is_src = true;
-	} else {
+	else {
 		chan_err(d40c, "Unknown direction\n");
 		return -EINVAL;
 	}
@@ -2098,7 +2114,7 @@ static bool d40_is_paused(struct d40_chan *d40c)
 	unsigned long flags;
 	void __iomem *active_reg;
 	u32 status;
-	u32 event;
+	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
 
 	spin_lock_irqsave(&d40c->lock, flags);
 
@@ -2119,10 +2135,8 @@ static bool d40_is_paused(struct d40_chan *d40c)
 
 	if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
 	    d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
-		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
 		status = readl(chanbase + D40_CHAN_REG_SDLNK);
 	} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
-		event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
 		status = readl(chanbase + D40_CHAN_REG_SSLNK);
 	} else {
 		chan_err(d40c, "Unknown direction\n");
@@ -2253,24 +2267,6 @@ err:
 	return NULL;
 }
 
-static dma_addr_t
-d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
-{
-	struct stedma40_platform_data *plat = chan->base->plat_data;
-	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
-	dma_addr_t addr = 0;
-
-	if (chan->runtime_addr)
-		return chan->runtime_addr;
-
-	if (direction == DMA_DEV_TO_MEM)
-		addr = plat->dev_rx[cfg->src_dev_type];
-	else if (direction == DMA_MEM_TO_DEV)
-		addr = plat->dev_tx[cfg->dst_dev_type];
-
-	return addr;
-}
-
 static struct dma_async_tx_descriptor *
 d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
 	    struct scatterlist *sg_dst, unsigned int sg_len,
@@ -2297,14 +2293,10 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
 	if (sg_next(&sg_src[sg_len - 1]) == sg_src)
 		desc->cyclic = true;
 
-	if (direction != DMA_TRANS_NONE) {
-		dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
-
-		if (direction == DMA_DEV_TO_MEM)
-			src_dev_addr = dev_addr;
-		else if (direction == DMA_MEM_TO_DEV)
-			dst_dev_addr = dev_addr;
-	}
+	if (direction == DMA_DEV_TO_MEM)
+		src_dev_addr = chan->runtime_addr;
+	else if (direction == DMA_MEM_TO_DEV)
+		dst_dev_addr = chan->runtime_addr;
 
 	if (chan_is_logical(chan))
 		ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
@@ -2397,11 +2389,55 @@ static void d40_set_prio_realtime(struct d40_chan *d40c)
 
 	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
 	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
-		__d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true);
+		__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
 
 	if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
 	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
-		__d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false);
+		__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
+}
+
+#define D40_DT_FLAGS_MODE(flags)       ((flags >> 0) & 0x1)
+#define D40_DT_FLAGS_DIR(flags)        ((flags >> 1) & 0x1)
+#define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
+#define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)
+
+static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
+				  struct of_dma *ofdma)
+{
+	struct stedma40_chan_cfg cfg;
+	dma_cap_mask_t cap;
+	u32 flags;
+
+	memset(&cfg, 0, sizeof(struct stedma40_chan_cfg));
+
+	dma_cap_zero(cap);
+	dma_cap_set(DMA_SLAVE, cap);
+
+	cfg.dev_type = dma_spec->args[0];
+	flags = dma_spec->args[2];
+
+	switch (D40_DT_FLAGS_MODE(flags)) {
+	case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break;
+	case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break;
+	}
+
+	switch (D40_DT_FLAGS_DIR(flags)) {
+	case 0:
+		cfg.dir = STEDMA40_MEM_TO_PERIPH;
+		cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
+		break;
+	case 1:
+		cfg.dir = STEDMA40_PERIPH_TO_MEM;
+		cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
+		break;
+	}
+
+	if (D40_DT_FLAGS_FIXED_CHAN(flags)) {
+		cfg.phy_channel = dma_spec->args[1];
+		cfg.use_fixed_channel = true;
+	}
+
+	return dma_request_channel(cap, stedma40_filter, &cfg);
 }
 
 /* DMA ENGINE functions */
@@ -2433,23 +2469,21 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 	}
 
 	pm_runtime_get_sync(d40c->base->dev);
-	/* Fill in basic CFG register values */
-	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
-		    &d40c->dst_def_cfg, chan_is_logical(d40c));
 
 	d40_set_prio_realtime(d40c);
 
 	if (chan_is_logical(d40c)) {
-		d40_log_cfg(&d40c->dma_cfg,
-			    &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
-
 		if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
 			d40c->lcpa = d40c->base->lcpa_base +
-				d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
+				d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
 		else
 			d40c->lcpa = d40c->base->lcpa_base +
-				d40c->dma_cfg.dst_dev_type *
+				d40c->dma_cfg.dev_type *
 				D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
+
+		/* Unmask the Global Interrupt Mask. */
+		d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
+		d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
 	}
 
 	dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
@@ -2710,15 +2744,8 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 	dst_maxburst = config->dst_maxburst;
 
 	if (config->direction == DMA_DEV_TO_MEM) {
-		dma_addr_t dev_addr_rx =
-			d40c->base->plat_data->dev_rx[cfg->src_dev_type];
-
 		config_addr = config->src_addr;
-		if (dev_addr_rx)
-			dev_dbg(d40c->base->dev,
-				"channel has a pre-wired RX address %08x "
-				"overriding with %08x\n",
-				dev_addr_rx, config_addr);
+
 		if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
 			dev_dbg(d40c->base->dev,
 				"channel was not configured for peripheral "
@@ -2733,15 +2760,8 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 		dst_maxburst = src_maxburst;
 
 	} else if (config->direction == DMA_MEM_TO_DEV) {
-		dma_addr_t dev_addr_tx =
-			d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
-
 		config_addr = config->dst_addr;
-		if (dev_addr_tx)
-			dev_dbg(d40c->base->dev,
-				"channel has a pre-wired TX address %08x "
-				"overriding with %08x\n",
-				dev_addr_tx, config_addr);
+
 		if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
 			dev_dbg(d40c->base->dev,
 				"channel was not configured for memory "
@@ -2761,6 +2781,11 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 		return -EINVAL;
 	}
 
+	if (config_addr <= 0) {
+		dev_err(d40c->base->dev, "no address supplied\n");
+		return -EINVAL;
+	}
+
 	if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
 		dev_err(d40c->base->dev,
 			"src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
@@ -2795,8 +2820,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 	if (chan_is_logical(d40c))
 		d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
 	else
-		d40_phy_cfg(cfg, &d40c->src_def_cfg,
-			    &d40c->dst_def_cfg, false);
+		d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);
 
 	/* These settings will take precedence later */
 	d40c->runtime_addr = config_addr;
@@ -2927,7 +2951,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
 	}
 
 	d40_chan_init(base, &base->dma_memcpy, base->log_chans,
-		      base->num_log_chans, base->plat_data->memcpy_len);
+		      base->num_log_chans, ARRAY_SIZE(dma40_memcpy_channels));
 
 	dma_cap_zero(base->dma_memcpy.cap_mask);
 	dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
@@ -3121,7 +3145,7 @@ static int __init d40_phy_res_init(struct d40_base *base)
 
 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 {
-	struct stedma40_platform_data *plat_data;
+	struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
 	struct clk *clk = NULL;
 	void __iomem *virtbase = NULL;
 	struct resource *res = NULL;
@@ -3187,8 +3211,10 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 	 * DB8540v1 has revision 4
 	 */
 	rev = AMBA_REV_BITS(pid);
-
-	plat_data = pdev->dev.platform_data;
+	if (rev < 2) {
+		d40_err(&pdev->dev, "hardware revision: %d is not supported", rev);
+		goto failure;
+	}
 
 	/* The number of physical channels on this HW */
 	if (plat_data->num_of_phy_chans)
@@ -3196,26 +3222,14 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 	else
 		num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
 
-	dev_info(&pdev->dev, "hardware revision: %d @ 0x%x with %d physical channels\n",
-		 rev, res->start, num_phy_chans);
-
-	if (rev < 2) {
-		d40_err(&pdev->dev, "hardware revision: %d is not supported",
-			rev);
-		goto failure;
-	}
+	num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
 
-	/* Count the number of logical channels in use */
-	for (i = 0; i < plat_data->dev_len; i++)
-		if (plat_data->dev_rx[i] != 0)
-			num_log_chans++;
-
-	for (i = 0; i < plat_data->dev_len; i++)
-		if (plat_data->dev_tx[i] != 0)
-			num_log_chans++;
+	dev_info(&pdev->dev,
+		 "hardware rev: %d @ 0x%x with %d physical and %d logical channels\n",
+		 rev, res->start, num_phy_chans, num_log_chans);
 
 	base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
-		       (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
+		       (num_phy_chans + num_log_chans + ARRAY_SIZE(dma40_memcpy_channels)) *
 		       sizeof(struct d40_chan), GFP_KERNEL);
 
 	if (base == NULL) {
@@ -3276,17 +3290,11 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 	if (!base->lookup_phy_chans)
 		goto failure;
 
-	if (num_log_chans + plat_data->memcpy_len) {
-		/*
-		 * The max number of logical channels are event lines for all
-		 * src devices and dst devices
-		 */
-		base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
-						 sizeof(struct d40_chan *),
-						 GFP_KERNEL);
-		if (!base->lookup_log_chans)
-			goto failure;
-	}
+	base->lookup_log_chans = kzalloc(num_log_chans *
+					 sizeof(struct d40_chan *),
+					 GFP_KERNEL);
+	if (!base->lookup_log_chans)
+		goto failure;
 
 	base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
 					    sizeof(d40_backup_regs_chan),
@@ -3470,17 +3478,52 @@ failure:
 	return ret;
 }
 
+static int __init d40_of_probe(struct platform_device *pdev,
+			       struct device_node *np)
+{
+	struct stedma40_platform_data *pdata;
+
+	/*
+	 * FIXME: Fill in this routine as more support is added.
+	 * First platform enabled (u8500) doens't need any extra
+	 * properties to run, so this is fairly sparce currently.
+	 */
+
+	pdata = devm_kzalloc(&pdev->dev,
+			     sizeof(struct stedma40_platform_data),
+			     GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	pdev->dev.platform_data = pdata;
+
+	return 0;
+}
+
 static int __init d40_probe(struct platform_device *pdev)
 {
+	struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
+	struct device_node *np = pdev->dev.of_node;
 	int err;
 	int ret = -ENOENT;
-	struct d40_base *base;
+	struct d40_base *base = NULL;
 	struct resource *res = NULL;
 	int num_reserved_chans;
 	u32 val;
 
-	base = d40_hw_detect_init(pdev);
+	if (!plat_data) {
+		if (np) {
+			if(d40_of_probe(pdev, np)) {
+				ret = -ENOMEM;
+				goto failure;
+			}
+		} else {
+			d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
+			goto failure;
+		}
+	}
+
+	base = d40_hw_detect_init(pdev);
 	if (!base)
 		goto failure;
@@ -3601,6 +3644,13 @@ static int __init d40_probe(struct platform_device *pdev)
 
 	d40_hw_init(base);
 
+	if (np) {
+		err = of_dma_controller_register(np, d40_xlate, NULL);
+		if (err && err != -ENODEV)
+			dev_err(&pdev->dev,
+				"could not register of_dma_controller\n");
+	}
+
 	dev_info(base->dev, "initialized\n");
 
 	return 0;
@@ -3654,11 +3704,17 @@ failure:
 	return ret;
 }
 
+static const struct of_device_id d40_match[] = {
+	{ .compatible = "stericsson,dma40", },
+	{}
+};
+
 static struct platform_driver d40_driver = {
 	.driver = {
 		.owner = THIS_MODULE,
 		.name  = D40_NAME,
 		.pm = DMA40_PM_OPS,
+		.of_match_table = d40_match,
 	},
 };
diff --git a/drivers/dma/ste_dma40_ll.c b/drivers/dma/ste_dma40_ll.c
index 7180e0d41722..ab5a2a706f7a 100644
--- a/drivers/dma/ste_dma40_ll.c
+++ b/drivers/dma/ste_dma40_ll.c
@@ -50,68 +50,58 @@ void d40_log_cfg(struct stedma40_chan_cfg *cfg,
 
 }
 
-/* Sets up SRC and DST CFG register for both logical and physical channels */
-void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
-		 u32 *src_cfg, u32 *dst_cfg, bool is_log)
+void d40_phy_cfg(struct stedma40_chan_cfg *cfg, u32 *src_cfg, u32 *dst_cfg)
 {
 	u32 src = 0;
 	u32 dst = 0;
 
-	if (!is_log) {
-		/* Physical channel */
-		if ((cfg->dir == STEDMA40_PERIPH_TO_MEM) ||
-		    (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
-			/* Set master port to 1 */
-			src |= 1 << D40_SREG_CFG_MST_POS;
-			src |= D40_TYPE_TO_EVENT(cfg->src_dev_type);
-
-			if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
-				src |= 1 << D40_SREG_CFG_PHY_TM_POS;
-			else
-				src |= 3 << D40_SREG_CFG_PHY_TM_POS;
-		}
-		if ((cfg->dir == STEDMA40_MEM_TO_PERIPH) ||
-		    (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
-			/* Set master port to 1 */
-			dst |= 1 << D40_SREG_CFG_MST_POS;
-			dst |= D40_TYPE_TO_EVENT(cfg->dst_dev_type);
-
-			if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
-				dst |= 1 << D40_SREG_CFG_PHY_TM_POS;
-			else
-				dst |= 3 << D40_SREG_CFG_PHY_TM_POS;
-		}
-		/* Interrupt on end of transfer for destination */
-		dst |= 1 << D40_SREG_CFG_TIM_POS;
-
-		/* Generate interrupt on error */
-		src |= 1 << D40_SREG_CFG_EIM_POS;
-		dst |= 1 << D40_SREG_CFG_EIM_POS;
-
-		/* PSIZE */
-		if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) {
-			src |= 1 << D40_SREG_CFG_PHY_PEN_POS;
-			src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS;
-		}
-		if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) {
-			dst |= 1 << D40_SREG_CFG_PHY_PEN_POS;
-			dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS;
-		}
-
-		/* Element size */
-		src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS;
-		dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS;
-
-		/* Set the priority bit to high for the physical channel */
-		if (cfg->high_priority) {
-			src |= 1 << D40_SREG_CFG_PRI_POS;
-			dst |= 1 << D40_SREG_CFG_PRI_POS;
-		}
-
-	} else {
-		/* Logical channel */
-		dst |= 1 << D40_SREG_CFG_LOG_GIM_POS;
-		src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
+	if ((cfg->dir == STEDMA40_PERIPH_TO_MEM) ||
+	    (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
+		/* Set master port to 1 */
+		src |= 1 << D40_SREG_CFG_MST_POS;
+		src |= D40_TYPE_TO_EVENT(cfg->dev_type);
+
+		if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
+			src |= 1 << D40_SREG_CFG_PHY_TM_POS;
+		else
+			src |= 3 << D40_SREG_CFG_PHY_TM_POS;
+	}
+	if ((cfg->dir == STEDMA40_MEM_TO_PERIPH) ||
+	    (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
+		/* Set master port to 1 */
+		dst |= 1 << D40_SREG_CFG_MST_POS;
+		dst |= D40_TYPE_TO_EVENT(cfg->dev_type);
+
+		if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
+			dst |= 1 << D40_SREG_CFG_PHY_TM_POS;
+		else
+			dst |= 3 << D40_SREG_CFG_PHY_TM_POS;
+	}
+	/* Interrupt on end of transfer for destination */
+	dst |= 1 << D40_SREG_CFG_TIM_POS;
+
+	/* Generate interrupt on error */
+	src |= 1 << D40_SREG_CFG_EIM_POS;
+	dst |= 1 << D40_SREG_CFG_EIM_POS;
+
+	/* PSIZE */
+	if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) {
+		src |= 1 << D40_SREG_CFG_PHY_PEN_POS;
+		src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS;
+	}
+	if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) {
+		dst |= 1 << D40_SREG_CFG_PHY_PEN_POS;
+		dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS;
+	}
+
+	/* Element size */
+	src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS;
+	dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS;
+
+	/* Set the priority bit to high for the physical channel */
+	if (cfg->high_priority) {
+		src |= 1 << D40_SREG_CFG_PRI_POS;
+		dst |= 1 << D40_SREG_CFG_PRI_POS;
 	}
 
 	if (cfg->src_info.big_endian)
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index fdde8ef77542..1b47312bc574 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -432,8 +432,7 @@ enum d40_lli_flags {
 
 void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
 		 u32 *src_cfg,
-		 u32 *dst_cfg,
-		 bool is_log);
+		 u32 *dst_cfg);
 
 void d40_log_cfg(struct stedma40_chan_cfg *cfg,
 		 u32 *lcsp1,