From 92f53a85db3730ae088aaeb7900f85909fd1eda6 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 25 Jul 2012 13:10:11 +0900
Subject: sh: pfc: Build fix for pinctrl_remove_gpio_range() changes.

pinctrl_remove_gpio_range() is now handled by the pinctrl core in the
unregistration path, so rely on that instead.

Signed-off-by: Paul Mundt
---
 drivers/sh/pfc/pinctrl.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'drivers')

diff --git a/drivers/sh/pfc/pinctrl.c b/drivers/sh/pfc/pinctrl.c
index 0802b6c0d653..814b29255aef 100644
--- a/drivers/sh/pfc/pinctrl.c
+++ b/drivers/sh/pfc/pinctrl.c
@@ -276,7 +276,6 @@ static int sh_pfc_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin,
 			      unsigned long config)
 {
 	struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
-	struct sh_pfc *pfc = pmx->pfc;
 
 	/* Validate the new type */
 	if (config >= PINMUX_FLAG_TYPE)
@@ -481,7 +480,6 @@ static int __devexit sh_pfc_pinctrl_remove(struct platform_device *pdev)
 {
 	struct sh_pfc_pinctrl *pmx = platform_get_drvdata(pdev);
 
-	pinctrl_remove_gpio_range(pmx->pctl, &sh_pfc_gpio_range);
 	pinctrl_unregister(pmx->pctl);
 	platform_set_drvdata(pdev, NULL);
--
cgit v1.2.3
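For drivers following the same pattern, the change boils down to the remove path below: a GPIO range added with pinctrl_add_gpio_range() is now torn down by pinctrl_unregister() itself, so no explicit pinctrl_remove_gpio_range() call is needed. A minimal sketch under that assumption (the foo_* names are illustrative, not part of this series):

#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>

struct foo_pinctrl {			/* hypothetical driver state */
	struct pinctrl_dev *pctl;
};

static int __devexit foo_pinctrl_remove(struct platform_device *pdev)
{
	struct foo_pinctrl *pmx = platform_get_drvdata(pdev);

	/* unregistering the pin controller also removes its GPIO ranges */
	pinctrl_unregister(pmx->pctl);
	platform_set_drvdata(pdev, NULL);

	return 0;
}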
From 4f46f8ac80416b0e8fd3aba6a0d842205fb29140 Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Mon, 30 Jul 2012 21:28:27 +0200
Subject: dmaengine: shdma: restore partial transfer calculation

The recent shdma driver split has mistakenly removed support for partial
DMA transfer size calculation on forced termination. This patch restores
it.

Signed-off-by: Guennadi Liakhovetski
Acked-by: Vinod Koul
Signed-off-by: Paul Mundt
---
 drivers/dma/sh/shdma-base.c |  9 +++++++++
 drivers/dma/sh/shdma.c      | 12 ++++++++++++
 include/linux/shdma-base.h  |  2 ++
 3 files changed, 23 insertions(+)

(limited to 'drivers')

diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 27f5c781fd73..f4cd946d259d 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -483,6 +483,7 @@ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
 	new->mark = DESC_PREPARED;
 	new->async_tx.flags = flags;
 	new->direction = direction;
+	new->partial = 0;
 	*len -= copy_size;
 	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
@@ -644,6 +645,14 @@ static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	case DMA_TERMINATE_ALL:
 		spin_lock_irqsave(&schan->chan_lock, flags);
 		ops->halt_channel(schan);
+
+		if (ops->get_partial && !list_empty(&schan->ld_queue)) {
+			/* Record partial transfer */
+			struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
+						struct shdma_desc, node);
+			desc->partial = ops->get_partial(schan, desc);
+		}
+
 		spin_unlock_irqrestore(&schan->chan_lock, flags);
 
 		shdma_chan_ld_cleanup(schan, true);
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
index 027c9be97654..f41bcc5267fd 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdma.c
@@ -381,6 +381,17 @@ static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
 	return true;
 }
 
+static size_t sh_dmae_get_partial(struct shdma_chan *schan,
+				  struct shdma_desc *sdesc)
+{
+	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+						    shdma_chan);
+	struct sh_dmae_desc *sh_desc = container_of(sdesc,
+					struct sh_dmae_desc, shdma_desc);
+	return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
+		sh_chan->xmit_shift;
+}
+
 /* Called from error IRQ or NMI */
 static bool sh_dmae_reset(struct sh_dmae_device *shdev)
 {
@@ -632,6 +643,7 @@ static const struct shdma_ops sh_dmae_shdma_ops = {
 	.start_xfer = sh_dmae_start_xfer,
 	.embedded_desc = sh_dmae_embedded_desc,
 	.chan_irq = sh_dmae_chan_irq,
+	.get_partial = sh_dmae_get_partial,
 };
 
 static int __devinit sh_dmae_probe(struct platform_device *pdev)
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index 93f9821554b6..a3728bf66f0e 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -50,6 +50,7 @@ struct shdma_desc {
 	struct list_head node;
 	struct dma_async_tx_descriptor async_tx;
 	enum dma_transfer_direction direction;
+	size_t partial;
 	dma_cookie_t cookie;
 	int chunks;
 	int mark;
@@ -98,6 +99,7 @@ struct shdma_ops {
 	void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
 	struct shdma_desc *(*embedded_desc)(void *, int);
 	bool (*chan_irq)(struct shdma_chan *, int);
+	size_t (*get_partial)(struct shdma_chan *, struct shdma_desc *);
 };
 
 struct shdma_dev {
--
cgit v1.2.3
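The restored bookkeeping matters to clients that force-terminate a transfer and still need to know how many bytes actually completed: DMA_TERMINATE_ALL now records ops->get_partial() in the head descriptor before cleanup, and the sh_dmae implementation derives the count from the remaining transfer count register. A rough consumer-side sketch, assuming the caller still holds the dma_async_tx_descriptor it prepared and reads the count before the descriptor is recycled; the helper name is made up, and the sh-sci patch that follows does essentially the same thing:

#include <linux/dmaengine.h>
#include <linux/shdma-base.h>

static size_t foo_stop_and_get_count(struct dma_chan *chan,
				     struct dma_async_tx_descriptor *tx)
{
	struct shdma_desc *sdesc = container_of(tx, struct shdma_desc,
						async_tx);

	/* halts the channel and records sdesc->partial under the channel lock */
	dmaengine_terminate_all(chan);

	return sdesc->partial;	/* bytes transferred before the halt */
}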
From 4dc4c51675c137c30838425ecc8d471ff5eb138b Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Mon, 30 Jul 2012 21:28:47 +0200
Subject: serial: sh-sci: fix compilation breakage, when DMA is enabled

A recent commit:

commit d6fa5a4e7ab605370fd6c982782f84ef2e6660e7
Author: Guennadi Liakhovetski

    serial: sh-sci: prepare for conversion to the shdma base library

is not sufficient to update the sh-sci driver to the new shdma driver
layout. This caused compilation breakage, when CONFIG_SERIAL_SH_SCI_DMA
is enabled. This patch trivially fixes the problem by updating the DMA
descriptor manipulation code.

Signed-off-by: Guennadi Liakhovetski
Signed-off-by: Paul Mundt
---
 drivers/tty/serial/sh-sci.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'drivers')

diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index d4d8c9453cd8..9be296cf7295 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -25,6 +25,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -1410,8 +1411,8 @@ static void work_fn_rx(struct work_struct *work)
 		/* Handle incomplete DMA receive */
 		struct tty_struct *tty = port->state->port.tty;
 		struct dma_chan *chan = s->chan_rx;
-		struct sh_desc *sh_desc = container_of(desc, struct sh_desc,
-						       async_tx);
+		struct shdma_desc *sh_desc = container_of(desc,
+					struct shdma_desc, async_tx);
 		unsigned long flags;
 		int count;
--
cgit v1.2.3
From 1e32dfe323d156d5d7b25b9feffe015d19713db2 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 1 Aug 2012 16:27:38 +0900
Subject: sh: pfc: Fix up init ordering mess.

Commit ca5481c68e9fbcea62bb3c78ae6cccf99ca8fb73 ("sh: pfc: Rudimentary
pinctrl-backed GPIO support.") introduced a regression for platforms that
were doing early GPIO API calls (from arch_initcall() or earlier), leading
to a situation where our two-stage registration logic would trip itself up
and we'd -ENODEV out of the pinctrl registration path, resulting in endless
-EPROBE_DEFER errors.

Further lack of checking any sort of errors from gpio_request() resulted in
boot time warnings, tripping on the FLAG_REQUESTED test-and-set in
gpio_ensure_requested().

As it turns out there's no particular need to bother with the two-stage
registration, as the platform bus is already available at the point that we
have to start caring. As such, it's easiest to simply fold these together
into a single init path, the ordering of which is ensured through the
platform's mux registration, as usual.

Reported-by: Rafael J. Wysocki
Reported-by: Kuninori Morimoto
Signed-off-by: Paul Mundt
---
 drivers/sh/pfc/pinctrl.c | 32 +++++++++++++++-----------------
 1 file changed, 15 insertions(+), 17 deletions(-)

(limited to 'drivers')

diff --git a/drivers/sh/pfc/pinctrl.c b/drivers/sh/pfc/pinctrl.c
index 814b29255aef..2804eaae804e 100644
--- a/drivers/sh/pfc/pinctrl.c
+++ b/drivers/sh/pfc/pinctrl.c
@@ -325,20 +325,6 @@ static struct pinctrl_desc sh_pfc_pinctrl_desc = {
 	.confops = &sh_pfc_pinconf_ops,
 };
 
-int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
-{
-	sh_pfc_pmx = kzalloc(sizeof(struct sh_pfc_pinctrl), GFP_KERNEL);
-	if (unlikely(!sh_pfc_pmx))
-		return -ENOMEM;
-
-	spin_lock_init(&sh_pfc_pmx->lock);
-
-	sh_pfc_pmx->pfc = pfc;
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(sh_pfc_register_pinctrl);
-
 static inline void __devinit sh_pfc_map_one_gpio(struct sh_pfc *pfc,
 						 struct sh_pfc_pinctrl *pmx,
 						 struct pinmux_gpio *gpio,
@@ -505,7 +491,7 @@ static struct platform_device sh_pfc_pinctrl_device = {
 	.id		= -1,
 };
 
-static int __init sh_pfc_pinctrl_init(void)
+static int sh_pfc_pinctrl_init(void)
 {
 	int rc;
 
@@ -519,10 +505,22 @@ static int __init sh_pfc_pinctrl_init(void)
 	return rc;
 }
 
+int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
+{
+	sh_pfc_pmx = kzalloc(sizeof(struct sh_pfc_pinctrl), GFP_KERNEL);
+	if (unlikely(!sh_pfc_pmx))
+		return -ENOMEM;
+
+	spin_lock_init(&sh_pfc_pmx->lock);
+
+	sh_pfc_pmx->pfc = pfc;
+
+	return sh_pfc_pinctrl_init();
+}
+EXPORT_SYMBOL_GPL(sh_pfc_register_pinctrl);
+
 static void __exit sh_pfc_pinctrl_exit(void)
 {
 	platform_driver_unregister(&sh_pfc_pinctrl_driver);
 }
-
-subsys_initcall(sh_pfc_pinctrl_init);
 module_exit(sh_pfc_pinctrl_exit);
--
cgit v1.2.3
From 1d6a21b0a672fb29b01ccf397d478e0541e17716 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 1 Aug 2012 17:13:46 +0900
Subject: sh: intc: initial irqdomain support.

Trivial support for irq domains, using either a linear map or radix tree
depending on the vector layout.

Signed-off-by: Paul Mundt
---
 drivers/sh/intc/Kconfig     |  4 +++
 drivers/sh/intc/Makefile    |  2 +-
 drivers/sh/intc/core.c      | 11 +++++---
 drivers/sh/intc/internals.h |  5 ++++
 drivers/sh/intc/irqdomain.c | 68 +++++++++++++++++++++++++++++++++++++++++++++
 5 files changed, 85 insertions(+), 5 deletions(-)
 create mode 100644 drivers/sh/intc/irqdomain.c

(limited to 'drivers')

diff --git a/drivers/sh/intc/Kconfig b/drivers/sh/intc/Kconfig
index c88cbccc62b0..a305731742a9 100644
--- a/drivers/sh/intc/Kconfig
+++ b/drivers/sh/intc/Kconfig
@@ -1,3 +1,7 @@
+config SH_INTC
+	def_bool y
+	select IRQ_DOMAIN
+
 comment "Interrupt controller options"
 
 config INTC_USERIMASK
diff --git a/drivers/sh/intc/Makefile b/drivers/sh/intc/Makefile
index 44f006d09471..54ec2a0643df 100644
--- a/drivers/sh/intc/Makefile
+++ b/drivers/sh/intc/Makefile
@@ -1,4 +1,4 @@
-obj-y	:= access.o chip.o core.o handle.o virq.o
+obj-y	:= access.o chip.o core.o handle.o irqdomain.o virq.o
 
 obj-$(CONFIG_INTC_BALANCING)	+= balancing.o
 obj-$(CONFIG_INTC_USERIMASK)	+= userimask.o
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 7e562ccb6997..2374468615ed 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -310,6 +311,8 @@ int __init register_intc_controller(struct intc_desc *desc)
 
 	BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */
 
+	intc_irq_domain_init(d, hw);
+
 	/* register the vectors one by one */
 	for (i = 0; i < hw->nr_vectors; i++) {
 		struct intc_vect *vect = hw->vectors + i;
@@ -319,8 +322,8 @@ int __init register_intc_controller(struct intc_desc *desc)
 		if (!vect->enum_id)
 			continue;
 
-		res = irq_alloc_desc_at(irq, numa_node_id());
-		if (res != irq && res != -EEXIST) {
+		res = irq_create_identity_mapping(d->domain, irq);
+		if (unlikely(res)) {
 			pr_err("can't get irq_desc for %d\n", irq);
 			continue;
 		}
@@ -340,8 +343,8 @@ int __init register_intc_controller(struct intc_desc *desc)
 			 * IRQ support, each vector still needs to have
 			 * its own backing irq_desc.
 			 */
-			res = irq_alloc_desc_at(irq2, numa_node_id());
-			if (res != irq2 && res != -EEXIST) {
+			res = irq_create_identity_mapping(d->domain, irq2);
+			if (unlikely(res)) {
 				pr_err("can't get irq_desc for %d\n", irq2);
 				continue;
 			}
diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h
index f034a979a16f..7dff08e2a071 100644
--- a/drivers/sh/intc/internals.h
+++ b/drivers/sh/intc/internals.h
@@ -1,5 +1,6 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -66,6 +67,7 @@ struct intc_desc_int {
 	unsigned int nr_sense;
 	struct intc_window *window;
 	unsigned int nr_windows;
+	struct irq_domain *domain;
 	struct irq_chip chip;
 	bool skip_suspend;
 };
@@ -187,6 +189,9 @@ unsigned long intc_get_ack_handle(unsigned int irq);
 void intc_enable_disable_enum(struct intc_desc *desc, struct intc_desc_int *d,
 			      intc_enum enum_id, int enable);
 
+/* irqdomain.c */
+void intc_irq_domain_init(struct intc_desc_int *d, struct intc_hw_desc *hw);
+
 /* virq.c */
 void intc_subgroup_init(struct intc_desc *desc, struct intc_desc_int *d);
 void intc_irq_xlate_set(unsigned int irq, intc_enum id, struct intc_desc_int *d);
diff --git a/drivers/sh/intc/irqdomain.c b/drivers/sh/intc/irqdomain.c
new file mode 100644
index 000000000000..3968f1c3c5c3
--- /dev/null
+++ b/drivers/sh/intc/irqdomain.c
@@ -0,0 +1,68 @@
+/*
+ * IRQ domain support for SH INTC subsystem
+ *
+ * Copyright (C) 2012 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#define pr_fmt(fmt) "intc: " fmt
+
+#include
+#include
+#include
+#include "internals.h"
+
+/**
+ * intc_irq_domain_evt_xlate() - Generic xlate for vectored IRQs.
+ *
+ * This takes care of exception vector to hwirq translation through
+ * by way of evt2irq() translation.
+ *
+ * Note: For platforms that use a flat vector space without INTEVT this
+ * basically just mimics irq_domain_xlate_onecell() by way of a nopped
+ * out evt2irq() implementation.
+ */
+static int intc_evt_xlate(struct irq_domain *d, struct device_node *ctrlr,
+			  const u32 *intspec, unsigned int intsize,
+			  unsigned long *out_hwirq, unsigned int *out_type)
+{
+	if (WARN_ON(intsize < 1))
+		return -EINVAL;
+
+	*out_hwirq = evt2irq(intspec[0]);
+	*out_type = IRQ_TYPE_NONE;
+
+	return 0;
+}
+
+static const struct irq_domain_ops intc_evt_ops = {
+	.xlate		= intc_evt_xlate,
+};
+
+void __init intc_irq_domain_init(struct intc_desc_int *d,
+				 struct intc_hw_desc *hw)
+{
+	unsigned int irq_base, irq_end;
+
+	/*
+	 * Quick linear revmap check
+	 */
+	irq_base = evt2irq(hw->vectors[0].vect);
+	irq_end = evt2irq(hw->vectors[hw->nr_vectors - 1].vect);
+
+	/*
+	 * Linear domains have a hard-wired assertion that IRQs start at
+	 * 0 in order to make some performance optimizations. Lamely
+	 * restrict the linear case to these conditions here, taking the
+	 * tree penalty for linear cases with non-zero hwirq bases.
+	 */
+	if (irq_base == 0 && irq_end == (irq_base + hw->nr_vectors - 1))
+		d->domain = irq_domain_add_linear(NULL, hw->nr_vectors,
+						  &intc_evt_ops, NULL);
+	else
+		d->domain = irq_domain_add_tree(NULL, &intc_evt_ops, NULL);
+
+	BUG_ON(!d->domain);
+}
--
cgit v1.2.3
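With the domain registered, the rest of the intc code can resolve a vector's Linux IRQ through the generic irqdomain API rather than assuming the identity mapping directly; the registration path above maps every vector at hwirq == evt2irq(vect). A small sketch of what such a lookup could look like if it lived next to the code above in irqdomain.c (the helper name is made up, not part of the patch):

/* Hypothetical helper, written as if added to drivers/sh/intc/irqdomain.c */
static unsigned int intc_vect_to_virq(struct intc_desc_int *d,
				      struct intc_vect *vect)
{
	/* irq_find_mapping() returns 0 if the vector was never mapped */
	return irq_find_mapping(d->domain, evt2irq(vect->vect));
}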